applied-ai-018 committed
Commit ea4d17f · verified · 1 Parent(s): 1761927

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/scipy/__pycache__/__init__.cpython-310.pyc +0 -0
  2. env-llmeval/lib/python3.10/site-packages/scipy/__pycache__/_distributor_init.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/scipy/__pycache__/conftest.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/scipy/fft/tests/test_fftlog.py +169 -0
  5. env-llmeval/lib/python3.10/site-packages/scipy/fft/tests/test_helper.py +445 -0
  6. env-llmeval/lib/python3.10/site-packages/scipy/fft/tests/test_multithreading.py +83 -0
  7. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_basinhopping.py +753 -0
  8. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_bglu_dense.cpython-310-x86_64-linux-gnu.so +0 -0
  9. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_bracket.py +663 -0
  10. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_chandrupatla.py +524 -0
  11. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_cobyla.cpython-310-x86_64-linux-gnu.so +0 -0
  12. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_cobyla_py.py +316 -0
  13. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_constraints.py +590 -0
  14. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_dcsrch.py +728 -0
  15. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_differentiable_functions.py +646 -0
  16. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_differentiate.py +669 -0
  17. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_direct.cpython-310-x86_64-linux-gnu.so +0 -0
  18. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_direct_py.py +278 -0
  19. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_dual_annealing.py +715 -0
  20. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_group_columns.cpython-310-x86_64-linux-gnu.so +0 -0
  21. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_hessian_update_strategy.py +430 -0
  22. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_highs/_highs_constants.cpython-310-x86_64-linux-gnu.so +0 -0
  23. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsLp.pxd +46 -0
  24. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lbfgsb.cpython-310-x86_64-linux-gnu.so +0 -0
  25. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lbfgsb_py.py +543 -0
  26. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_linesearch.py +897 -0
  27. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_linprog.py +714 -0
  28. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_linprog_ip.py +1126 -0
  29. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_linprog_util.py +1522 -0
  30. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsap.cpython-310-x86_64-linux-gnu.so +0 -0
  31. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/__init__.py +5 -0
  32. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/__init__.cpython-310.pyc +0 -0
  33. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/bvls.cpython-310.pyc +0 -0
  34. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/common.cpython-310.pyc +0 -0
  35. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/dogbox.cpython-310.pyc +0 -0
  36. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/least_squares.cpython-310.pyc +0 -0
  37. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/lsq_linear.cpython-310.pyc +0 -0
  38. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/trf.cpython-310.pyc +0 -0
  39. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/trf_linear.cpython-310.pyc +0 -0
  40. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/trf.py +560 -0
  41. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_milp.py +392 -0
  42. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_minpack2.cpython-310-x86_64-linux-gnu.so +0 -0
  43. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_minpack_py.py +1157 -0
  44. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_moduleTNC.cpython-310-x86_64-linux-gnu.so +0 -0
  45. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_nnls.py +164 -0
  46. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_nonlin.py +1584 -0
  47. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_numdiff.py +775 -0
  48. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_optimize.py +0 -0
  49. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_pava_pybind.cpython-310-x86_64-linux-gnu.so +0 -0
  50. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_remove_redundancy.py +522 -0
env-llmeval/lib/python3.10/site-packages/scipy/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (3.65 kB)
 
env-llmeval/lib/python3.10/site-packages/scipy/__pycache__/_distributor_init.cpython-310.pyc ADDED
Binary file (805 Bytes)
 
env-llmeval/lib/python3.10/site-packages/scipy/__pycache__/conftest.cpython-310.pyc ADDED
Binary file (6.39 kB)
 
env-llmeval/lib/python3.10/site-packages/scipy/fft/tests/test_fftlog.py ADDED
@@ -0,0 +1,169 @@
import warnings
import numpy as np
import pytest

from scipy.fft._fftlog import fht, ifht, fhtoffset
from scipy.special import poch

from scipy.conftest import array_api_compatible
from scipy._lib._array_api import xp_assert_close

pytestmark = array_api_compatible


def test_fht_agrees_with_fftlog(xp):
    # check that fht numerically agrees with the output from Fortran FFTLog,
    # the results were generated with the provided `fftlogtest` program,
    # after fixing how the k array is generated (divide range by n-1, not n)

    # test function, analytical Hankel transform is of the same form
    def f(r, mu):
        return r**(mu+1)*np.exp(-r**2/2)

    r = np.logspace(-4, 4, 16)

    dln = np.log(r[1]/r[0])
    mu = 0.3
    offset = 0.0
    bias = 0.0

    a = xp.asarray(f(r, mu))

    # test 1: compute as given
    ours = fht(a, dln, mu, offset=offset, bias=bias)
    theirs = [-0.1159922613593045E-02, +0.1625822618458832E-02,
              -0.1949518286432330E-02, +0.3789220182554077E-02,
              +0.5093959119952945E-03, +0.2785387803618774E-01,
              +0.9944952700848897E-01, +0.4599202164586588E+00,
              +0.3157462160881342E+00, -0.8201236844404755E-03,
              -0.7834031308271878E-03, +0.3931444945110708E-03,
              -0.2697710625194777E-03, +0.3568398050238820E-03,
              -0.5554454827797206E-03, +0.8286331026468585E-03]
    theirs = xp.asarray(theirs, dtype=xp.float64)
    xp_assert_close(ours, theirs)

    # test 2: change to optimal offset
    offset = fhtoffset(dln, mu, bias=bias)
    ours = fht(a, dln, mu, offset=offset, bias=bias)
    theirs = [+0.4353768523152057E-04, -0.9197045663594285E-05,
              +0.3150140927838524E-03, +0.9149121960963704E-03,
              +0.5808089753959363E-02, +0.2548065256377240E-01,
              +0.1339477692089897E+00, +0.4821530509479356E+00,
              +0.2659899781579785E+00, -0.1116475278448113E-01,
              +0.1791441617592385E-02, -0.4181810476548056E-03,
              +0.1314963536765343E-03, -0.5422057743066297E-04,
              +0.3208681804170443E-04, -0.2696849476008234E-04]
    theirs = xp.asarray(theirs, dtype=xp.float64)
    xp_assert_close(ours, theirs)

    # test 3: positive bias
    bias = 0.8
    offset = fhtoffset(dln, mu, bias=bias)
    ours = fht(a, dln, mu, offset=offset, bias=bias)
    theirs = [-7.3436673558316850E+00, +0.1710271207817100E+00,
              +0.1065374386206564E+00, -0.5121739602708132E-01,
              +0.2636649319269470E-01, +0.1697209218849693E-01,
              +0.1250215614723183E+00, +0.4739583261486729E+00,
              +0.2841149874912028E+00, -0.8312764741645729E-02,
              +0.1024233505508988E-02, -0.1644902767389120E-03,
              +0.3305775476926270E-04, -0.7786993194882709E-05,
              +0.1962258449520547E-05, -0.8977895734909250E-06]
    theirs = xp.asarray(theirs, dtype=xp.float64)
    xp_assert_close(ours, theirs)

    # test 4: negative bias
    bias = -0.8
    offset = fhtoffset(dln, mu, bias=bias)
    ours = fht(a, dln, mu, offset=offset, bias=bias)
    theirs = [+0.8985777068568745E-05, +0.4074898209936099E-04,
              +0.2123969254700955E-03, +0.1009558244834628E-02,
              +0.5131386375222176E-02, +0.2461678673516286E-01,
              +0.1235812845384476E+00, +0.4719570096404403E+00,
              +0.2893487490631317E+00, -0.1686570611318716E-01,
              +0.2231398155172505E-01, -0.1480742256379873E-01,
              +0.1692387813500801E+00, +0.3097490354365797E+00,
              +2.7593607182401860E+00, 10.5251075070045800E+00]
    theirs = xp.asarray(theirs, dtype=xp.float64)
    xp_assert_close(ours, theirs)


@pytest.mark.parametrize('optimal', [True, False])
@pytest.mark.parametrize('offset', [0.0, 1.0, -1.0])
@pytest.mark.parametrize('bias', [0, 0.1, -0.1])
@pytest.mark.parametrize('n', [64, 63])
def test_fht_identity(n, bias, offset, optimal, xp):
    rng = np.random.RandomState(3491349965)

    a = xp.asarray(rng.standard_normal(n))
    dln = rng.uniform(-1, 1)
    mu = rng.uniform(-2, 2)

    if optimal:
        offset = fhtoffset(dln, mu, initial=offset, bias=bias)

    A = fht(a, dln, mu, offset=offset, bias=bias)
    a_ = ifht(A, dln, mu, offset=offset, bias=bias)

    xp_assert_close(a_, a)


def test_fht_special_cases(xp):
    rng = np.random.RandomState(3491349965)

    a = xp.asarray(rng.standard_normal(64))
    dln = rng.uniform(-1, 1)

    # let x = (mu+1+q)/2, y = (mu+1-q)/2, M = {0, -1, -2, ...}

    # case 1: x in M, y in M => well-defined transform
    mu, bias = -4.0, 1.0
    with warnings.catch_warnings(record=True) as record:
        fht(a, dln, mu, bias=bias)
        assert not record, 'fht warned about a well-defined transform'

    # case 2: x not in M, y in M => well-defined transform
    mu, bias = -2.5, 0.5
    with warnings.catch_warnings(record=True) as record:
        fht(a, dln, mu, bias=bias)
        assert not record, 'fht warned about a well-defined transform'

    # case 3: x in M, y not in M => singular transform
    mu, bias = -3.5, 0.5
    with pytest.warns(Warning) as record:
        fht(a, dln, mu, bias=bias)
        assert record, 'fht did not warn about a singular transform'

    # case 4: x not in M, y in M => singular inverse transform
    mu, bias = -2.5, 0.5
    with pytest.warns(Warning) as record:
        ifht(a, dln, mu, bias=bias)
        assert record, 'ifht did not warn about a singular transform'


@pytest.mark.parametrize('n', [64, 63])
def test_fht_exact(n, xp):
    rng = np.random.RandomState(3491349965)

    # for a(r) a power law r^\gamma, the fast Hankel transform produces the
    # exact continuous Hankel transform if biased with q = \gamma

    mu = rng.uniform(0, 3)

    # convergence of HT: -1-mu < gamma < 1/2
    gamma = rng.uniform(-1-mu, 1/2)

    r = np.logspace(-2, 2, n)
    a = xp.asarray(r**gamma)

    dln = np.log(r[1]/r[0])

    offset = fhtoffset(dln, mu, initial=0.0, bias=gamma)

    A = fht(a, dln, mu, offset=offset, bias=gamma)

    k = np.exp(offset)/r[::-1]

    # analytical result
    At = xp.asarray((2/k)**gamma * poch((mu+1-gamma)/2, gamma))

    xp_assert_close(A, At)
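
Note: the forward/inverse round trip exercised by `test_fht_identity` above can be reproduced with the public `scipy.fft` API. A minimal sketch; the grid size, order `mu`, and test signal below are illustrative choices, not values from the tests:

import numpy as np
from scipy.fft import fht, ifht, fhtoffset

# logarithmically spaced sample points and a smooth test signal
r = np.logspace(-3, 3, 128)          # arbitrary grid for illustration
dln = np.log(r[1] / r[0])            # uniform spacing in log(r)
mu = 0.5                             # order of the Hankel transform (assumed)

a = r * np.exp(-r**2 / 2)

# pick the low-ringing offset, transform, and invert with the same parameters
offset = fhtoffset(dln, mu)
A = fht(a, dln, mu, offset=offset)
a_back = ifht(A, dln, mu, offset=offset)

np.testing.assert_allclose(a_back, a)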
env-llmeval/lib/python3.10/site-packages/scipy/fft/tests/test_helper.py ADDED
@@ -0,0 +1,445 @@
"""Includes test functions for fftpack.helper module

Copied from fftpack.helper by Pearu Peterson, October 2005
Modified for Array API, 2023

"""
from scipy.fft._helper import next_fast_len, _init_nd_shape_and_axes
from numpy.testing import assert_equal
from pytest import raises as assert_raises
import pytest
import numpy as np
import sys
from scipy.conftest import array_api_compatible
from scipy._lib._array_api import xp_assert_close, SCIPY_DEVICE
from scipy import fft

pytestmark = [array_api_compatible, pytest.mark.usefixtures("skip_if_array_api")]
skip_if_array_api = pytest.mark.skip_if_array_api

_5_smooth_numbers = [
    2, 3, 4, 5, 6, 8, 9, 10,
    2 * 3 * 5,
    2**3 * 3**5,
    2**3 * 3**3 * 5**2,
]

def test_next_fast_len():
    for n in _5_smooth_numbers:
        assert_equal(next_fast_len(n), n)


def _assert_n_smooth(x, n):
    x_orig = x
    if n < 2:
        assert False

    while True:
        q, r = divmod(x, 2)
        if r != 0:
            break
        x = q

    for d in range(3, n+1, 2):
        while True:
            q, r = divmod(x, d)
            if r != 0:
                break
            x = q

    assert x == 1, \
        f'x={x_orig} is not {n}-smooth, remainder={x}'


@skip_if_array_api(np_only=True)
class TestNextFastLen:

    def test_next_fast_len(self):
        np.random.seed(1234)

        def nums():
            yield from range(1, 1000)
            yield 2**5 * 3**5 * 4**5 + 1

        for n in nums():
            m = next_fast_len(n)
            _assert_n_smooth(m, 11)
            assert m == next_fast_len(n, False)

            m = next_fast_len(n, True)
            _assert_n_smooth(m, 5)

    def test_np_integers(self):
        ITYPES = [np.int16, np.int32, np.int64, np.uint16, np.uint32, np.uint64]
        for ityp in ITYPES:
            x = ityp(12345)
            testN = next_fast_len(x)
            assert_equal(testN, next_fast_len(int(x)))

    def testnext_fast_len_small(self):
        hams = {
            1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 8, 8: 8, 14: 15, 15: 15,
            16: 16, 17: 18, 1021: 1024, 1536: 1536, 51200000: 51200000
        }
        for x, y in hams.items():
            assert_equal(next_fast_len(x, True), y)

    @pytest.mark.xfail(sys.maxsize < 2**32,
                       reason="Hamming Numbers too large for 32-bit",
                       raises=ValueError, strict=True)
    def testnext_fast_len_big(self):
        hams = {
            510183360: 510183360, 510183360 + 1: 512000000,
            511000000: 512000000,
            854296875: 854296875, 854296875 + 1: 859963392,
            196608000000: 196608000000, 196608000000 + 1: 196830000000,
            8789062500000: 8789062500000, 8789062500000 + 1: 8796093022208,
            206391214080000: 206391214080000,
            206391214080000 + 1: 206624260800000,
            470184984576000: 470184984576000,
            470184984576000 + 1: 470715894135000,
            7222041363087360: 7222041363087360,
            7222041363087360 + 1: 7230196133913600,
            # power of 5    5**23
            11920928955078125: 11920928955078125,
            11920928955078125 - 1: 11920928955078125,
            # power of 3    3**34
            16677181699666569: 16677181699666569,
            16677181699666569 - 1: 16677181699666569,
            # power of 2   2**54
            18014398509481984: 18014398509481984,
            18014398509481984 - 1: 18014398509481984,
            # above this, int(ceil(n)) == int(ceil(n+1))
            19200000000000000: 19200000000000000,
            19200000000000000 + 1: 19221679687500000,
            288230376151711744: 288230376151711744,
            288230376151711744 + 1: 288325195312500000,
            288325195312500000 - 1: 288325195312500000,
            288325195312500000: 288325195312500000,
            288325195312500000 + 1: 288555831593533440,
        }
        for x, y in hams.items():
            assert_equal(next_fast_len(x, True), y)

    def test_keyword_args(self):
        assert next_fast_len(11, real=True) == 12
        assert next_fast_len(target=7, real=False) == 7


@skip_if_array_api(cpu_only=True)
class Test_init_nd_shape_and_axes:

    def test_py_0d_defaults(self, xp):
        x = xp.asarray(4)
        shape = None
        axes = None

        shape_expected = ()
        axes_expected = []

        shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)

        assert shape_res == shape_expected
        assert axes_res == axes_expected

    def test_xp_0d_defaults(self, xp):
        x = xp.asarray(7.)
        shape = None
        axes = None

        shape_expected = ()
        axes_expected = []

        shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)

        assert shape_res == shape_expected
        assert axes_res == axes_expected

    def test_py_1d_defaults(self, xp):
        x = xp.asarray([1, 2, 3])
        shape = None
        axes = None

        shape_expected = (3,)
        axes_expected = [0]

        shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)

        assert shape_res == shape_expected
        assert axes_res == axes_expected

    def test_xp_1d_defaults(self, xp):
        x = xp.arange(0, 1, .1)
        shape = None
        axes = None

        shape_expected = (10,)
        axes_expected = [0]

        shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)

        assert shape_res == shape_expected
        assert axes_res == axes_expected

    def test_py_2d_defaults(self, xp):
        x = xp.asarray([[1, 2, 3, 4],
                        [5, 6, 7, 8]])
        shape = None
        axes = None

        shape_expected = (2, 4)
        axes_expected = [0, 1]

        shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)

        assert shape_res == shape_expected
        assert axes_res == axes_expected

    def test_xp_2d_defaults(self, xp):
        x = xp.arange(0, 1, .1)
        x = xp.reshape(x, (5, 2))
        shape = None
        axes = None

        shape_expected = (5, 2)
        axes_expected = [0, 1]

        shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)

        assert shape_res == shape_expected
        assert axes_res == axes_expected

    def test_xp_5d_defaults(self, xp):
        x = xp.zeros([6, 2, 5, 3, 4])
        shape = None
        axes = None

        shape_expected = (6, 2, 5, 3, 4)
        axes_expected = [0, 1, 2, 3, 4]

        shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)

        assert shape_res == shape_expected
        assert axes_res == axes_expected

    def test_xp_5d_set_shape(self, xp):
        x = xp.zeros([6, 2, 5, 3, 4])
        shape = [10, -1, -1, 1, 4]
        axes = None

        shape_expected = (10, 2, 5, 1, 4)
        axes_expected = [0, 1, 2, 3, 4]

        shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)

        assert shape_res == shape_expected
        assert axes_res == axes_expected

    def test_xp_5d_set_axes(self, xp):
        x = xp.zeros([6, 2, 5, 3, 4])
        shape = None
        axes = [4, 1, 2]

        shape_expected = (4, 2, 5)
        axes_expected = [4, 1, 2]

        shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)

        assert shape_res == shape_expected
        assert axes_res == axes_expected

    def test_xp_5d_set_shape_axes(self, xp):
        x = xp.zeros([6, 2, 5, 3, 4])
        shape = [10, -1, 2]
        axes = [1, 0, 3]

        shape_expected = (10, 6, 2)
        axes_expected = [1, 0, 3]

        shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)

        assert shape_res == shape_expected
        assert axes_res == axes_expected

    def test_shape_axes_subset(self, xp):
        x = xp.zeros((2, 3, 4, 5))
        shape, axes = _init_nd_shape_and_axes(x, shape=(5, 5, 5), axes=None)

        assert shape == (5, 5, 5)
        assert axes == [1, 2, 3]

    def test_errors(self, xp):
        x = xp.zeros(1)
        with assert_raises(ValueError, match="axes must be a scalar or "
                           "iterable of integers"):
            _init_nd_shape_and_axes(x, shape=None, axes=[[1, 2], [3, 4]])

        with assert_raises(ValueError, match="axes must be a scalar or "
                           "iterable of integers"):
            _init_nd_shape_and_axes(x, shape=None, axes=[1., 2., 3., 4.])

        with assert_raises(ValueError,
                           match="axes exceeds dimensionality of input"):
            _init_nd_shape_and_axes(x, shape=None, axes=[1])

        with assert_raises(ValueError,
                           match="axes exceeds dimensionality of input"):
            _init_nd_shape_and_axes(x, shape=None, axes=[-2])

        with assert_raises(ValueError,
                           match="all axes must be unique"):
            _init_nd_shape_and_axes(x, shape=None, axes=[0, 0])

        with assert_raises(ValueError, match="shape must be a scalar or "
                           "iterable of integers"):
            _init_nd_shape_and_axes(x, shape=[[1, 2], [3, 4]], axes=None)

        with assert_raises(ValueError, match="shape must be a scalar or "
                           "iterable of integers"):
            _init_nd_shape_and_axes(x, shape=[1., 2., 3., 4.], axes=None)

        with assert_raises(ValueError,
                           match="when given, axes and shape arguments"
                           " have to be of the same length"):
            _init_nd_shape_and_axes(xp.zeros([1, 1, 1, 1]),
                                    shape=[1, 2, 3], axes=[1])

        with assert_raises(ValueError,
                           match="invalid number of data points"
                           r" \(\[0\]\) specified"):
            _init_nd_shape_and_axes(x, shape=[0], axes=None)

        with assert_raises(ValueError,
                           match="invalid number of data points"
                           r" \(\[-2\]\) specified"):
            _init_nd_shape_and_axes(x, shape=-2, axes=None)


@skip_if_array_api('torch',
                   reasons=['torch.fft not yet implemented by array-api-compat'])
class TestFFTShift:

    def test_definition(self, xp):
        x = xp.asarray([0., 1, 2, 3, 4, -4, -3, -2, -1])
        y = xp.asarray([-4., -3, -2, -1, 0, 1, 2, 3, 4])
        xp_assert_close(fft.fftshift(x), y)
        xp_assert_close(fft.ifftshift(y), x)
        x = xp.asarray([0., 1, 2, 3, 4, -5, -4, -3, -2, -1])
        y = xp.asarray([-5., -4, -3, -2, -1, 0, 1, 2, 3, 4])
        xp_assert_close(fft.fftshift(x), y)
        xp_assert_close(fft.ifftshift(y), x)

    def test_inverse(self, xp):
        for n in [1, 4, 9, 100, 211]:
            x = xp.asarray(np.random.random((n,)))
            xp_assert_close(fft.ifftshift(fft.fftshift(x)), x)

    def test_axes_keyword(self, xp):
        freqs = xp.asarray([[0., 1, 2], [3, 4, -4], [-3, -2, -1]])
        shifted = xp.asarray([[-1., -3, -2], [2, 0, 1], [-4, 3, 4]])
        xp_assert_close(fft.fftshift(freqs, axes=(0, 1)), shifted)
        xp_assert_close(fft.fftshift(freqs, axes=0), fft.fftshift(freqs, axes=(0,)))
        xp_assert_close(fft.ifftshift(shifted, axes=(0, 1)), freqs)
        xp_assert_close(fft.ifftshift(shifted, axes=0),
                        fft.ifftshift(shifted, axes=(0,)))
        xp_assert_close(fft.fftshift(freqs), shifted)
        xp_assert_close(fft.ifftshift(shifted), freqs)

    def test_uneven_dims(self, xp):
        """ Test 2D input, which has uneven dimension sizes """
        freqs = xp.asarray([
            [0, 1],
            [2, 3],
            [4, 5]
        ], dtype=xp.float64)

        # shift in dimension 0
        shift_dim0 = xp.asarray([
            [4, 5],
            [0, 1],
            [2, 3]
        ], dtype=xp.float64)
        xp_assert_close(fft.fftshift(freqs, axes=0), shift_dim0)
        xp_assert_close(fft.ifftshift(shift_dim0, axes=0), freqs)
        xp_assert_close(fft.fftshift(freqs, axes=(0,)), shift_dim0)
        xp_assert_close(fft.ifftshift(shift_dim0, axes=[0]), freqs)

        # shift in dimension 1
        shift_dim1 = xp.asarray([
            [1, 0],
            [3, 2],
            [5, 4]
        ], dtype=xp.float64)
        xp_assert_close(fft.fftshift(freqs, axes=1), shift_dim1)
        xp_assert_close(fft.ifftshift(shift_dim1, axes=1), freqs)

        # shift in both dimensions
        shift_dim_both = xp.asarray([
            [5, 4],
            [1, 0],
            [3, 2]
        ], dtype=xp.float64)
        xp_assert_close(fft.fftshift(freqs, axes=(0, 1)), shift_dim_both)
        xp_assert_close(fft.ifftshift(shift_dim_both, axes=(0, 1)), freqs)
        xp_assert_close(fft.fftshift(freqs, axes=[0, 1]), shift_dim_both)
        xp_assert_close(fft.ifftshift(shift_dim_both, axes=[0, 1]), freqs)

        # axes=None (default) shift in all dimensions
        xp_assert_close(fft.fftshift(freqs, axes=None), shift_dim_both)
        xp_assert_close(fft.ifftshift(shift_dim_both, axes=None), freqs)
        xp_assert_close(fft.fftshift(freqs), shift_dim_both)
        xp_assert_close(fft.ifftshift(shift_dim_both), freqs)


@skip_if_array_api('array_api_strict', 'cupy',
                   reasons=['fft not yet implemented by array-api-strict',
                            'cupy.fft not yet implemented by array-api-compat'])
class TestFFTFreq:

    def test_definition(self, xp):
        device = SCIPY_DEVICE
        try:
            x = xp.asarray([0, 1, 2, 3, 4, -4, -3, -2, -1],
                           dtype=xp.float64, device=device)
            x2 = xp.asarray([0, 1, 2, 3, 4, -5, -4, -3, -2, -1],
                            dtype=xp.float64, device=device)
        except TypeError:
            x = xp.asarray([0, 1, 2, 3, 4, -4, -3, -2, -1], dtype=xp.float64)
            x2 = xp.asarray([0, 1, 2, 3, 4, -5, -4, -3, -2, -1],
                            dtype=xp.float64)

        y = xp.asarray(9 * fft.fftfreq(9, xp=xp), dtype=xp.float64)
        xp_assert_close(y, x)
        y = xp.asarray(9 * xp.pi * fft.fftfreq(9, xp.pi, xp=xp), dtype=xp.float64)
        xp_assert_close(y, x)

        y = xp.asarray(10 * fft.fftfreq(10, xp=xp), dtype=xp.float64)
        xp_assert_close(y, x2)
        y = xp.asarray(10 * xp.pi * fft.fftfreq(10, xp.pi, xp=xp), dtype=xp.float64)
        xp_assert_close(y, x2)


@skip_if_array_api('array_api_strict', 'cupy',
                   reasons=['fft not yet implemented by array-api-strict',
                            'cupy.fft not yet implemented by array-api-compat'])
class TestRFFTFreq:

    def test_definition(self, xp):
        device = SCIPY_DEVICE
        try:
            x = xp.asarray([0, 1, 2, 3, 4], dtype=xp.float64, device=device)
            x2 = xp.asarray([0, 1, 2, 3, 4, 5], dtype=xp.float64, device=device)
        except TypeError:
            # work around the `device` keyword not being implemented in numpy yet
            x = xp.asarray([0, 1, 2, 3, 4], dtype=xp.float64)
            x2 = xp.asarray([0, 1, 2, 3, 4, 5], dtype=xp.float64)

        y = xp.asarray(9 * fft.rfftfreq(9, xp=xp), dtype=xp.float64)
        xp_assert_close(y, x)
        y = xp.asarray(9 * xp.pi * fft.rfftfreq(9, xp.pi, xp=xp), dtype=xp.float64)
        xp_assert_close(y, x)

        y = xp.asarray(10 * fft.rfftfreq(10, xp=xp), dtype=xp.float64)
        xp_assert_close(y, x2)
        y = xp.asarray(10 * xp.pi * fft.rfftfreq(10, xp.pi, xp=xp), dtype=xp.float64)
        xp_assert_close(y, x2)
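
For context on what the lookup tables above encode: `next_fast_len` pads a transform length up to the next composite of small primes (5-smooth when ``real=True``, 11-smooth otherwise), which is what makes the subsequent FFT fast. A brief sketch of the public behaviour; the length 1021 is an arbitrary example:

import numpy as np
from scipy.fft import fft, next_fast_len

n = 1021                 # a prime length: a slow case for FFTs
m = next_fast_len(n)     # next 11-smooth length (1024 for this example)

x = np.random.randn(n)
X = fft(x, m)            # zero-pads x to the fast length before transforming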
env-llmeval/lib/python3.10/site-packages/scipy/fft/tests/test_multithreading.py ADDED
@@ -0,0 +1,83 @@
from scipy import fft
import numpy as np
import pytest
from numpy.testing import assert_allclose
import multiprocessing
import os


@pytest.fixture(scope='module')
def x():
    return np.random.randn(512, 128)  # Must be large enough to qualify for mt


@pytest.mark.parametrize("func", [
    fft.fft, fft.ifft, fft.fft2, fft.ifft2, fft.fftn, fft.ifftn,
    fft.rfft, fft.irfft, fft.rfft2, fft.irfft2, fft.rfftn, fft.irfftn,
    fft.hfft, fft.ihfft, fft.hfft2, fft.ihfft2, fft.hfftn, fft.ihfftn,
    fft.dct, fft.idct, fft.dctn, fft.idctn,
    fft.dst, fft.idst, fft.dstn, fft.idstn,
])
@pytest.mark.parametrize("workers", [2, -1])
def test_threaded_same(x, func, workers):
    expected = func(x, workers=1)
    actual = func(x, workers=workers)
    assert_allclose(actual, expected)


def _mt_fft(x):
    return fft.fft(x, workers=2)


def test_mixed_threads_processes(x):
    # Test that the fft threadpool is safe to use before & after fork

    expect = fft.fft(x, workers=2)

    with multiprocessing.Pool(2) as p:
        res = p.map(_mt_fft, [x for _ in range(4)])

    for r in res:
        assert_allclose(r, expect)

    fft.fft(x, workers=2)


def test_invalid_workers(x):
    cpus = os.cpu_count()

    fft.ifft([1], workers=-cpus)

    with pytest.raises(ValueError, match='workers must not be zero'):
        fft.fft(x, workers=0)

    with pytest.raises(ValueError, match='workers value out of range'):
        fft.ifft(x, workers=-cpus-1)


def test_set_get_workers():
    cpus = os.cpu_count()
    assert fft.get_workers() == 1
    with fft.set_workers(4):
        assert fft.get_workers() == 4

        with fft.set_workers(-1):
            assert fft.get_workers() == cpus

        assert fft.get_workers() == 4

    assert fft.get_workers() == 1

    with fft.set_workers(-cpus):
        assert fft.get_workers() == 1


def test_set_workers_invalid():

    with pytest.raises(ValueError, match='workers must not be zero'):
        with fft.set_workers(0):
            pass

    with pytest.raises(ValueError, match='workers value out of range'):
        with fft.set_workers(-os.cpu_count()-1):
            pass
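
The `set_workers` context manager exercised above is the public way to give every transform in a block a default worker count, instead of passing ``workers=`` to each call. A minimal sketch; the array shape is chosen arbitrarily for illustration:

import numpy as np
from scipy import fft

x = np.random.randn(2048, 128)   # arbitrary example data

# every transform inside the block uses 4 worker threads by default
with fft.set_workers(4):
    X = fft.fft(x, axis=0)
    y = fft.ifft(X, axis=0)

# negative values count back from os.cpu_count(); -1 means all CPUs
with fft.set_workers(-1):
    X = fft.rfft(x, axis=0)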
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_basinhopping.py ADDED
@@ -0,0 +1,753 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ basinhopping: The basinhopping global optimization algorithm
3
+ """
4
+ import numpy as np
5
+ import math
6
+ import inspect
7
+ import scipy.optimize
8
+ from scipy._lib._util import check_random_state
9
+
10
+ __all__ = ['basinhopping']
11
+
12
+
13
+ _params = (inspect.Parameter('res_new', kind=inspect.Parameter.KEYWORD_ONLY),
14
+ inspect.Parameter('res_old', kind=inspect.Parameter.KEYWORD_ONLY))
15
+ _new_accept_test_signature = inspect.Signature(parameters=_params)
16
+
17
+
18
+ class Storage:
19
+ """
20
+ Class used to store the lowest energy structure
21
+ """
22
+ def __init__(self, minres):
23
+ self._add(minres)
24
+
25
+ def _add(self, minres):
26
+ self.minres = minres
27
+ self.minres.x = np.copy(minres.x)
28
+
29
+ def update(self, minres):
30
+ if minres.success and (minres.fun < self.minres.fun
31
+ or not self.minres.success):
32
+ self._add(minres)
33
+ return True
34
+ else:
35
+ return False
36
+
37
+ def get_lowest(self):
38
+ return self.minres
39
+
40
+
41
+ class BasinHoppingRunner:
42
+ """This class implements the core of the basinhopping algorithm.
43
+
44
+ x0 : ndarray
45
+ The starting coordinates.
46
+ minimizer : callable
47
+ The local minimizer, with signature ``result = minimizer(x)``.
48
+ The return value is an `optimize.OptimizeResult` object.
49
+ step_taking : callable
50
+ This function displaces the coordinates randomly. Signature should
51
+ be ``x_new = step_taking(x)``. Note that `x` may be modified in-place.
52
+ accept_tests : list of callables
53
+ Each test is passed the kwargs `f_new`, `x_new`, `f_old` and
54
+ `x_old`. These tests will be used to judge whether or not to accept
55
+ the step. The acceptable return values are True, False, or ``"force
56
+ accept"``. If any of the tests return False then the step is rejected.
57
+ If ``"force accept"``, then this will override any other tests in
58
+ order to accept the step. This can be used, for example, to forcefully
59
+ escape from a local minimum that ``basinhopping`` is trapped in.
60
+ disp : bool, optional
61
+ Display status messages.
62
+
63
+ """
64
+ def __init__(self, x0, minimizer, step_taking, accept_tests, disp=False):
65
+ self.x = np.copy(x0)
66
+ self.minimizer = minimizer
67
+ self.step_taking = step_taking
68
+ self.accept_tests = accept_tests
69
+ self.disp = disp
70
+
71
+ self.nstep = 0
72
+
73
+ # initialize return object
74
+ self.res = scipy.optimize.OptimizeResult()
75
+ self.res.minimization_failures = 0
76
+
77
+ # do initial minimization
78
+ minres = minimizer(self.x)
79
+ if not minres.success:
80
+ self.res.minimization_failures += 1
81
+ if self.disp:
82
+ print("warning: basinhopping: local minimization failure")
83
+ self.x = np.copy(minres.x)
84
+ self.energy = minres.fun
85
+ self.incumbent_minres = minres # best minimize result found so far
86
+ if self.disp:
87
+ print("basinhopping step %d: f %g" % (self.nstep, self.energy))
88
+
89
+ # initialize storage class
90
+ self.storage = Storage(minres)
91
+
92
+ if hasattr(minres, "nfev"):
93
+ self.res.nfev = minres.nfev
94
+ if hasattr(minres, "njev"):
95
+ self.res.njev = minres.njev
96
+ if hasattr(minres, "nhev"):
97
+ self.res.nhev = minres.nhev
98
+
99
+ def _monte_carlo_step(self):
100
+ """Do one Monte Carlo iteration
101
+
102
+ Randomly displace the coordinates, minimize, and decide whether
103
+ or not to accept the new coordinates.
104
+ """
105
+ # Take a random step. Make a copy of x because the step_taking
106
+ # algorithm might change x in place
107
+ x_after_step = np.copy(self.x)
108
+ x_after_step = self.step_taking(x_after_step)
109
+
110
+ # do a local minimization
111
+ minres = self.minimizer(x_after_step)
112
+ x_after_quench = minres.x
113
+ energy_after_quench = minres.fun
114
+ if not minres.success:
115
+ self.res.minimization_failures += 1
116
+ if self.disp:
117
+ print("warning: basinhopping: local minimization failure")
118
+ if hasattr(minres, "nfev"):
119
+ self.res.nfev += minres.nfev
120
+ if hasattr(minres, "njev"):
121
+ self.res.njev += minres.njev
122
+ if hasattr(minres, "nhev"):
123
+ self.res.nhev += minres.nhev
124
+
125
+ # accept the move based on self.accept_tests. If any test is False,
126
+ # then reject the step. If any test returns the special string
127
+ # 'force accept', then accept the step regardless. This can be used
128
+ # to forcefully escape from a local minimum if normal basin hopping
129
+ # steps are not sufficient.
130
+ accept = True
131
+ for test in self.accept_tests:
132
+ if inspect.signature(test) == _new_accept_test_signature:
133
+ testres = test(res_new=minres, res_old=self.incumbent_minres)
134
+ else:
135
+ testres = test(f_new=energy_after_quench, x_new=x_after_quench,
136
+ f_old=self.energy, x_old=self.x)
137
+
138
+ if testres == 'force accept':
139
+ accept = True
140
+ break
141
+ elif testres is None:
142
+ raise ValueError("accept_tests must return True, False, or "
143
+ "'force accept'")
144
+ elif not testres:
145
+ accept = False
146
+
147
+ # Report the result of the acceptance test to the take step class.
148
+ # This is for adaptive step taking
149
+ if hasattr(self.step_taking, "report"):
150
+ self.step_taking.report(accept, f_new=energy_after_quench,
151
+ x_new=x_after_quench, f_old=self.energy,
152
+ x_old=self.x)
153
+
154
+ return accept, minres
155
+
156
+ def one_cycle(self):
157
+ """Do one cycle of the basinhopping algorithm
158
+ """
159
+ self.nstep += 1
160
+ new_global_min = False
161
+
162
+ accept, minres = self._monte_carlo_step()
163
+
164
+ if accept:
165
+ self.energy = minres.fun
166
+ self.x = np.copy(minres.x)
167
+ self.incumbent_minres = minres # best minimize result found so far
168
+ new_global_min = self.storage.update(minres)
169
+
170
+ # print some information
171
+ if self.disp:
172
+ self.print_report(minres.fun, accept)
173
+ if new_global_min:
174
+ print("found new global minimum on step %d with function"
175
+ " value %g" % (self.nstep, self.energy))
176
+
177
+ # save some variables as BasinHoppingRunner attributes
178
+ self.xtrial = minres.x
179
+ self.energy_trial = minres.fun
180
+ self.accept = accept
181
+
182
+ return new_global_min
183
+
184
+ def print_report(self, energy_trial, accept):
185
+ """print a status update"""
186
+ minres = self.storage.get_lowest()
187
+ print("basinhopping step %d: f %g trial_f %g accepted %d "
188
+ " lowest_f %g" % (self.nstep, self.energy, energy_trial,
189
+ accept, minres.fun))
190
+
191
+
192
+ class AdaptiveStepsize:
193
+ """
194
+ Class to implement adaptive stepsize.
195
+
196
+ This class wraps the step taking class and modifies the stepsize to
197
+ ensure the true acceptance rate is as close as possible to the target.
198
+
199
+ Parameters
200
+ ----------
201
+ takestep : callable
202
+ The step taking routine. Must contain modifiable attribute
203
+ takestep.stepsize
204
+ accept_rate : float, optional
205
+ The target step acceptance rate
206
+ interval : int, optional
207
+ Interval for how often to update the stepsize
208
+ factor : float, optional
209
+ The step size is multiplied or divided by this factor upon each
210
+ update.
211
+ verbose : bool, optional
212
+ Print information about each update
213
+
214
+ """
215
+ def __init__(self, takestep, accept_rate=0.5, interval=50, factor=0.9,
216
+ verbose=True):
217
+ self.takestep = takestep
218
+ self.target_accept_rate = accept_rate
219
+ self.interval = interval
220
+ self.factor = factor
221
+ self.verbose = verbose
222
+
223
+ self.nstep = 0
224
+ self.nstep_tot = 0
225
+ self.naccept = 0
226
+
227
+ def __call__(self, x):
228
+ return self.take_step(x)
229
+
230
+ def _adjust_step_size(self):
231
+ old_stepsize = self.takestep.stepsize
232
+ accept_rate = float(self.naccept) / self.nstep
233
+ if accept_rate > self.target_accept_rate:
234
+ # We're accepting too many steps. This generally means we're
235
+ # trapped in a basin. Take bigger steps.
236
+ self.takestep.stepsize /= self.factor
237
+ else:
238
+ # We're not accepting enough steps. Take smaller steps.
239
+ self.takestep.stepsize *= self.factor
240
+ if self.verbose:
241
+ print("adaptive stepsize: acceptance rate {:f} target {:f} new "
242
+ "stepsize {:g} old stepsize {:g}".format(accept_rate,
243
+ self.target_accept_rate, self.takestep.stepsize,
244
+ old_stepsize))
245
+
246
+ def take_step(self, x):
247
+ self.nstep += 1
248
+ self.nstep_tot += 1
249
+ if self.nstep % self.interval == 0:
250
+ self._adjust_step_size()
251
+ return self.takestep(x)
252
+
253
+ def report(self, accept, **kwargs):
254
+ "called by basinhopping to report the result of the step"
255
+ if accept:
256
+ self.naccept += 1
257
+
258
+
259
+ class RandomDisplacement:
260
+ """Add a random displacement of maximum size `stepsize` to each coordinate.
261
+
262
+ Calling this updates `x` in-place.
263
+
264
+ Parameters
265
+ ----------
266
+ stepsize : float, optional
267
+ Maximum stepsize in any dimension
268
+ random_gen : {None, int, `numpy.random.Generator`,
269
+ `numpy.random.RandomState`}, optional
270
+
271
+ If `seed` is None (or `np.random`), the `numpy.random.RandomState`
272
+ singleton is used.
273
+ If `seed` is an int, a new ``RandomState`` instance is used,
274
+ seeded with `seed`.
275
+ If `seed` is already a ``Generator`` or ``RandomState`` instance then
276
+ that instance is used.
277
+
278
+ """
279
+
280
+ def __init__(self, stepsize=0.5, random_gen=None):
281
+ self.stepsize = stepsize
282
+ self.random_gen = check_random_state(random_gen)
283
+
284
+ def __call__(self, x):
285
+ x += self.random_gen.uniform(-self.stepsize, self.stepsize,
286
+ np.shape(x))
287
+ return x
288
+
289
+
290
+ class MinimizerWrapper:
291
+ """
292
+ wrap a minimizer function as a minimizer class
293
+ """
294
+ def __init__(self, minimizer, func=None, **kwargs):
295
+ self.minimizer = minimizer
296
+ self.func = func
297
+ self.kwargs = kwargs
298
+
299
+ def __call__(self, x0):
300
+ if self.func is None:
301
+ return self.minimizer(x0, **self.kwargs)
302
+ else:
303
+ return self.minimizer(self.func, x0, **self.kwargs)
304
+
305
+
306
+ class Metropolis:
307
+ """Metropolis acceptance criterion.
308
+
309
+ Parameters
310
+ ----------
311
+ T : float
312
+ The "temperature" parameter for the accept or reject criterion.
313
+ random_gen : {None, int, `numpy.random.Generator`,
314
+ `numpy.random.RandomState`}, optional
315
+
316
+ If `seed` is None (or `np.random`), the `numpy.random.RandomState`
317
+ singleton is used.
318
+ If `seed` is an int, a new ``RandomState`` instance is used,
319
+ seeded with `seed`.
320
+ If `seed` is already a ``Generator`` or ``RandomState`` instance then
321
+ that instance is used.
322
+ Random number generator used for acceptance test.
323
+
324
+ """
325
+
326
+ def __init__(self, T, random_gen=None):
327
+ # Avoid ZeroDivisionError since "MBH can be regarded as a special case
328
+ # of the BH framework with the Metropolis criterion, where temperature
329
+ # T = 0." (Reject all steps that increase energy.)
330
+ self.beta = 1.0 / T if T != 0 else float('inf')
331
+ self.random_gen = check_random_state(random_gen)
332
+
333
+ def accept_reject(self, res_new, res_old):
334
+ """
335
+ Assuming the local search underlying res_new was successful:
336
+ If new energy is lower than old, it will always be accepted.
337
+ If new is higher than old, there is a chance it will be accepted,
338
+ less likely for larger differences.
339
+ """
340
+ with np.errstate(invalid='ignore'):
341
+ # The energy values being fed to Metropolis are 1-length arrays, and if
342
+ # they are equal, their difference is 0, which gets multiplied by beta,
343
+ # which is inf, and array([0]) * float('inf') causes
344
+ #
345
+ # RuntimeWarning: invalid value encountered in multiply
346
+ #
347
+ # Ignore this warning so when the algorithm is on a flat plane, it always
348
+ # accepts the step, to try to move off the plane.
349
+ prod = -(res_new.fun - res_old.fun) * self.beta
350
+ w = math.exp(min(0, prod))
351
+
352
+ rand = self.random_gen.uniform()
353
+ return w >= rand and (res_new.success or not res_old.success)
354
+
355
+ def __call__(self, *, res_new, res_old):
356
+ """
357
+ f_new and f_old are mandatory in kwargs
358
+ """
359
+ return bool(self.accept_reject(res_new, res_old))
360
+
361
+
362
+ def basinhopping(func, x0, niter=100, T=1.0, stepsize=0.5,
363
+ minimizer_kwargs=None, take_step=None, accept_test=None,
364
+ callback=None, interval=50, disp=False, niter_success=None,
365
+ seed=None, *, target_accept_rate=0.5, stepwise_factor=0.9):
366
+ """Find the global minimum of a function using the basin-hopping algorithm.
367
+
368
+ Basin-hopping is a two-phase method that combines a global stepping
369
+ algorithm with local minimization at each step. Designed to mimic
370
+ the natural process of energy minimization of clusters of atoms, it works
371
+ well for similar problems with "funnel-like, but rugged" energy landscapes
372
+ [5]_.
373
+
374
+ As the step-taking, step acceptance, and minimization methods are all
375
+ customizable, this function can also be used to implement other two-phase
376
+ methods.
377
+
378
+ Parameters
379
+ ----------
380
+ func : callable ``f(x, *args)``
381
+ Function to be optimized. ``args`` can be passed as an optional item
382
+ in the dict `minimizer_kwargs`
383
+ x0 : array_like
384
+ Initial guess.
385
+ niter : integer, optional
386
+ The number of basin-hopping iterations. There will be a total of
387
+ ``niter + 1`` runs of the local minimizer.
388
+ T : float, optional
389
+ The "temperature" parameter for the acceptance or rejection criterion.
390
+ Higher "temperatures" mean that larger jumps in function value will be
391
+ accepted. For best results `T` should be comparable to the
392
+ separation (in function value) between local minima.
393
+ stepsize : float, optional
394
+ Maximum step size for use in the random displacement.
395
+ minimizer_kwargs : dict, optional
396
+ Extra keyword arguments to be passed to the local minimizer
397
+ `scipy.optimize.minimize` Some important options could be:
398
+
399
+ method : str
400
+ The minimization method (e.g. ``"L-BFGS-B"``)
401
+ args : tuple
402
+ Extra arguments passed to the objective function (`func`) and
403
+ its derivatives (Jacobian, Hessian).
404
+
405
+ take_step : callable ``take_step(x)``, optional
406
+ Replace the default step-taking routine with this routine. The default
407
+ step-taking routine is a random displacement of the coordinates, but
408
+ other step-taking algorithms may be better for some systems.
409
+ `take_step` can optionally have the attribute ``take_step.stepsize``.
410
+ If this attribute exists, then `basinhopping` will adjust
411
+ ``take_step.stepsize`` in order to try to optimize the global minimum
412
+ search.
413
+ accept_test : callable, ``accept_test(f_new=f_new, x_new=x_new, f_old=fold, x_old=x_old)``, optional
414
+ Define a test which will be used to judge whether to accept the
415
+ step. This will be used in addition to the Metropolis test based on
416
+ "temperature" `T`. The acceptable return values are True,
417
+ False, or ``"force accept"``. If any of the tests return False
418
+ then the step is rejected. If the latter, then this will override any
419
+ other tests in order to accept the step. This can be used, for example,
420
+ to forcefully escape from a local minimum that `basinhopping` is
421
+ trapped in.
422
+ callback : callable, ``callback(x, f, accept)``, optional
423
+ A callback function which will be called for all minima found. ``x``
424
+ and ``f`` are the coordinates and function value of the trial minimum,
425
+ and ``accept`` is whether that minimum was accepted. This can
426
+ be used, for example, to save the lowest N minima found. Also,
427
+ `callback` can be used to specify a user defined stop criterion by
428
+ optionally returning True to stop the `basinhopping` routine.
429
+ interval : integer, optional
430
+ interval for how often to update the `stepsize`
431
+ disp : bool, optional
432
+ Set to True to print status messages
433
+ niter_success : integer, optional
434
+ Stop the run if the global minimum candidate remains the same for this
435
+ number of iterations.
436
+ seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
437
+
438
+ If `seed` is None (or `np.random`), the `numpy.random.RandomState`
439
+ singleton is used.
440
+ If `seed` is an int, a new ``RandomState`` instance is used,
441
+ seeded with `seed`.
442
+ If `seed` is already a ``Generator`` or ``RandomState`` instance then
443
+ that instance is used.
444
+ Specify `seed` for repeatable minimizations. The random numbers
445
+ generated with this seed only affect the default Metropolis
446
+ `accept_test` and the default `take_step`. If you supply your own
447
+ `take_step` and `accept_test`, and these functions use random
448
+ number generation, then those functions are responsible for the state
449
+ of their random number generator.
450
+ target_accept_rate : float, optional
451
+ The target acceptance rate that is used to adjust the `stepsize`.
452
+ If the current acceptance rate is greater than the target,
453
+ then the `stepsize` is increased. Otherwise, it is decreased.
454
+ Range is (0, 1). Default is 0.5.
455
+
456
+ .. versionadded:: 1.8.0
457
+
458
+ stepwise_factor : float, optional
459
+ The `stepsize` is multiplied or divided by this stepwise factor upon
460
+ each update. Range is (0, 1). Default is 0.9.
461
+
462
+ .. versionadded:: 1.8.0
463
+
464
+ Returns
465
+ -------
466
+ res : OptimizeResult
467
+ The optimization result represented as a `OptimizeResult` object.
468
+ Important attributes are: ``x`` the solution array, ``fun`` the value
469
+ of the function at the solution, and ``message`` which describes the
470
+ cause of the termination. The ``OptimizeResult`` object returned by the
471
+ selected minimizer at the lowest minimum is also contained within this
472
+ object and can be accessed through the ``lowest_optimization_result``
473
+ attribute. See `OptimizeResult` for a description of other attributes.
474
+
475
+ See Also
476
+ --------
477
+ minimize :
478
+ The local minimization function called once for each basinhopping step.
479
+ `minimizer_kwargs` is passed to this routine.
480
+
481
+ Notes
482
+ -----
483
+ Basin-hopping is a stochastic algorithm which attempts to find the global
484
+ minimum of a smooth scalar function of one or more variables [1]_ [2]_ [3]_
485
+ [4]_. The algorithm in its current form was described by David Wales and
486
+ Jonathan Doye [2]_ http://www-wales.ch.cam.ac.uk/.
487
+
488
+ The algorithm is iterative with each cycle composed of the following
489
+ features
490
+
491
+ 1) random perturbation of the coordinates
492
+
493
+ 2) local minimization
494
+
495
+ 3) accept or reject the new coordinates based on the minimized function
496
+ value
497
+
498
+ The acceptance test used here is the Metropolis criterion of standard Monte
499
+ Carlo algorithms, although there are many other possibilities [3]_.
500
+
501
+ This global minimization method has been shown to be extremely efficient
502
+ for a wide variety of problems in physics and chemistry. It is
503
+ particularly useful when the function has many minima separated by large
504
+ barriers. See the `Cambridge Cluster Database
505
+ <https://www-wales.ch.cam.ac.uk/CCD.html>`_ for databases of molecular
506
+ systems that have been optimized primarily using basin-hopping. This
507
+ database includes minimization problems exceeding 300 degrees of freedom.
508
+
509
+ See the free software program `GMIN <https://www-wales.ch.cam.ac.uk/GMIN>`_
510
+ for a Fortran implementation of basin-hopping. This implementation has many
511
+ variations of the procedure described above, including more
512
+ advanced step taking algorithms and alternate acceptance criterion.
513
+
514
+ For stochastic global optimization there is no way to determine if the true
515
+ global minimum has actually been found. Instead, as a consistency check,
516
+ the algorithm can be run from a number of different random starting points
517
+ to ensure the lowest minimum found in each example has converged to the
518
+ global minimum. For this reason, `basinhopping` will by default simply
519
+ run for the number of iterations `niter` and return the lowest minimum
520
+ found. It is left to the user to ensure that this is in fact the global
521
+ minimum.
522
+
523
+ Choosing `stepsize`: This is a crucial parameter in `basinhopping` and
524
+ depends on the problem being solved. The step is chosen uniformly in the
525
+ region from x0-stepsize to x0+stepsize, in each dimension. Ideally, it
526
+ should be comparable to the typical separation (in argument values) between
527
+ local minima of the function being optimized. `basinhopping` will, by
528
+ default, adjust `stepsize` to find an optimal value, but this may take
529
+ many iterations. You will get quicker results if you set a sensible
530
+ initial value for ``stepsize``.
531
+
532
+ Choosing `T`: The parameter `T` is the "temperature" used in the
533
+ Metropolis criterion. Basinhopping steps are always accepted if
534
+ ``func(xnew) < func(xold)``. Otherwise, they are accepted with
535
+ probability::
536
+
537
+ exp( -(func(xnew) - func(xold)) / T )
538
+
539
+ So, for best results, `T` should to be comparable to the typical
540
+ difference (in function values) between local minima. (The height of
541
+ "walls" between local minima is irrelevant.)
542
+
543
+ If `T` is 0, the algorithm becomes Monotonic Basin-Hopping, in which all
544
+ steps that increase energy are rejected.
545
+
546
+ .. versionadded:: 0.12.0
547
+
548
+ References
549
+ ----------
550
+ .. [1] Wales, David J. 2003, Energy Landscapes, Cambridge University Press,
551
+ Cambridge, UK.
552
+ .. [2] Wales, D J, and Doye J P K, Global Optimization by Basin-Hopping and
553
+ the Lowest Energy Structures of Lennard-Jones Clusters Containing up to
554
+ 110 Atoms. Journal of Physical Chemistry A, 1997, 101, 5111.
555
+ .. [3] Li, Z. and Scheraga, H. A., Monte Carlo-minimization approach to the
556
+ multiple-minima problem in protein folding, Proc. Natl. Acad. Sci. USA,
557
+ 1987, 84, 6611.
558
+ .. [4] Wales, D. J. and Scheraga, H. A., Global optimization of clusters,
559
+ crystals, and biomolecules, Science, 1999, 285, 1368.
560
+ .. [5] Olson, B., Hashmi, I., Molloy, K., and Shehu1, A., Basin Hopping as
561
+ a General and Versatile Optimization Framework for the Characterization
562
+ of Biological Macromolecules, Advances in Artificial Intelligence,
563
+ Volume 2012 (2012), Article ID 674832, :doi:`10.1155/2012/674832`
564
+
565
+ Examples
566
+ --------
567
+ The following example is a 1-D minimization problem, with many
568
+ local minima superimposed on a parabola.
569
+
570
+ >>> import numpy as np
571
+ >>> from scipy.optimize import basinhopping
572
+ >>> func = lambda x: np.cos(14.5 * x - 0.3) + (x + 0.2) * x
573
+ >>> x0 = [1.]
574
+
575
+ Basinhopping, internally, uses a local minimization algorithm. We will use
576
+ the parameter `minimizer_kwargs` to tell basinhopping which algorithm to
577
+ use and how to set up that minimizer. This parameter will be passed to
578
+ `scipy.optimize.minimize`.
579
+
580
+ >>> minimizer_kwargs = {"method": "BFGS"}
581
+ >>> ret = basinhopping(func, x0, minimizer_kwargs=minimizer_kwargs,
582
+ ... niter=200)
583
+ >>> print("global minimum: x = %.4f, f(x) = %.4f" % (ret.x, ret.fun))
584
+ global minimum: x = -0.1951, f(x) = -1.0009
585
+
586
+ Next consider a 2-D minimization problem. Also, this time, we
587
+ will use gradient information to significantly speed up the search.
588
+
589
+ >>> def func2d(x):
590
+ ... f = np.cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] + (x[0] +
591
+ ... 0.2) * x[0]
592
+ ... df = np.zeros(2)
593
+ ... df[0] = -14.5 * np.sin(14.5 * x[0] - 0.3) + 2. * x[0] + 0.2
594
+ ... df[1] = 2. * x[1] + 0.2
595
+ ... return f, df
596
+
597
+ We'll also use a different local minimization algorithm. Also, we must tell
598
+ the minimizer that our function returns both energy and gradient (Jacobian).
599
+
600
+ >>> minimizer_kwargs = {"method":"L-BFGS-B", "jac":True}
601
+ >>> x0 = [1.0, 1.0]
602
+ >>> ret = basinhopping(func2d, x0, minimizer_kwargs=minimizer_kwargs,
603
+ ... niter=200)
604
+ >>> print("global minimum: x = [%.4f, %.4f], f(x) = %.4f" % (ret.x[0],
605
+ ... ret.x[1],
606
+ ... ret.fun))
607
+ global minimum: x = [-0.1951, -0.1000], f(x) = -1.0109
608
+
609
+ Here is an example using a custom step-taking routine. Imagine you want
610
+ the first coordinate to take larger steps than the rest of the coordinates.
611
+ This can be implemented like so:
612
+
613
+ >>> class MyTakeStep:
614
+ ... def __init__(self, stepsize=0.5):
615
+ ... self.stepsize = stepsize
616
+ ... self.rng = np.random.default_rng()
617
+ ... def __call__(self, x):
618
+ ... s = self.stepsize
619
+ ... x[0] += self.rng.uniform(-2.*s, 2.*s)
620
+ ... x[1:] += self.rng.uniform(-s, s, x[1:].shape)
621
+ ... return x
622
+
623
+ Since ``MyTakeStep.stepsize`` exists, basinhopping will adjust the magnitude
624
+ of `stepsize` to optimize the search. We'll use the same 2-D function as
625
+ before
626
+
627
+ >>> mytakestep = MyTakeStep()
628
+ >>> ret = basinhopping(func2d, x0, minimizer_kwargs=minimizer_kwargs,
629
+ ... niter=200, take_step=mytakestep)
630
+ >>> print("global minimum: x = [%.4f, %.4f], f(x) = %.4f" % (ret.x[0],
631
+ ... ret.x[1],
632
+ ... ret.fun))
633
+ global minimum: x = [-0.1951, -0.1000], f(x) = -1.0109
634
+
635
+ Now, let's do an example using a custom callback function which prints the
636
+ value of every minimum found
637
+
638
+ >>> def print_fun(x, f, accepted):
639
+ ... print("at minimum %.4f accepted %d" % (f, int(accepted)))
640
+
641
+ We'll run it for only 10 basinhopping steps this time.
642
+
643
+ >>> rng = np.random.default_rng()
644
+ >>> ret = basinhopping(func2d, x0, minimizer_kwargs=minimizer_kwargs,
645
+ ... niter=10, callback=print_fun, seed=rng)
646
+ at minimum 0.4159 accepted 1
647
+ at minimum -0.4317 accepted 1
648
+ at minimum -1.0109 accepted 1
649
+ at minimum -0.9073 accepted 1
650
+ at minimum -0.4317 accepted 0
651
+ at minimum -0.1021 accepted 1
652
+ at minimum -0.7425 accepted 1
653
+ at minimum -0.9073 accepted 1
654
+ at minimum -0.4317 accepted 0
655
+ at minimum -0.7425 accepted 1
656
+ at minimum -0.9073 accepted 1
657
+
658
+ The minimum at -1.0109 is actually the global minimum, found already within
659
+ the first few iterations.
660
+
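+ As a final, hedged sketch (the ``MyBounds`` class below is illustrative and
+ not part of this module), the `accept_test` hook can veto steps outright,
+ e.g. to keep trial points inside a box. The Metropolis criterion is still
+ applied after any user-supplied test, so both must accept a step. This
+ reuses ``func2d``, ``x0``, and ``minimizer_kwargs`` from the examples above:
+
+ >>> class MyBounds:
+ ...     def __init__(self, xmax=(1.1, 1.1), xmin=(-1.1, -1.1)):
+ ...         self.xmax = np.array(xmax)
+ ...         self.xmin = np.array(xmin)
+ ...     def __call__(self, **kwargs):
+ ...         x = kwargs["x_new"]
+ ...         # accept only if every coordinate stays inside the box
+ ...         tmax = bool(np.all(x <= self.xmax))
+ ...         tmin = bool(np.all(x >= self.xmin))
+ ...         return tmax and tmin
+ >>> ret = basinhopping(func2d, x0, minimizer_kwargs=minimizer_kwargs,
+ ...                    niter=10, accept_test=MyBounds())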
661
+ """ # numpy/numpydoc#87 # noqa: E501
662
+ if target_accept_rate <= 0. or target_accept_rate >= 1.:
663
+ raise ValueError('target_accept_rate has to be in range (0, 1)')
664
+ if stepwise_factor <= 0. or stepwise_factor >= 1.:
665
+ raise ValueError('stepwise_factor has to be in range (0, 1)')
666
+
667
+ x0 = np.array(x0)
668
+
669
+ # set up the np.random generator
670
+ rng = check_random_state(seed)
671
+
672
+ # set up minimizer
673
+ if minimizer_kwargs is None:
674
+ minimizer_kwargs = dict()
675
+ wrapped_minimizer = MinimizerWrapper(scipy.optimize.minimize, func,
676
+ **minimizer_kwargs)
677
+
678
+ # set up step-taking algorithm
679
+ if take_step is not None:
680
+ if not callable(take_step):
681
+ raise TypeError("take_step must be callable")
682
+ # if take_step.stepsize exists then use AdaptiveStepsize to control
683
+ # take_step.stepsize
684
+ if hasattr(take_step, "stepsize"):
685
+ take_step_wrapped = AdaptiveStepsize(
686
+ take_step, interval=interval,
687
+ accept_rate=target_accept_rate,
688
+ factor=stepwise_factor,
689
+ verbose=disp)
690
+ else:
691
+ take_step_wrapped = take_step
692
+ else:
693
+ # use default
694
+ displace = RandomDisplacement(stepsize=stepsize, random_gen=rng)
695
+ take_step_wrapped = AdaptiveStepsize(displace, interval=interval,
696
+ accept_rate=target_accept_rate,
697
+ factor=stepwise_factor,
698
+ verbose=disp)
699
+
700
+ # set up accept tests
701
+ accept_tests = []
702
+ if accept_test is not None:
703
+ if not callable(accept_test):
704
+ raise TypeError("accept_test must be callable")
705
+ accept_tests = [accept_test]
706
+
707
+ # use default
708
+ metropolis = Metropolis(T, random_gen=rng)
709
+ accept_tests.append(metropolis)
710
+
711
+ if niter_success is None:
712
+ niter_success = niter + 2
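+ # (count is bounded by niter, so with this default the
+ # "success condition satisfied" early exit below can never trigger)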
713
+
714
+ bh = BasinHoppingRunner(x0, wrapped_minimizer, take_step_wrapped,
715
+ accept_tests, disp=disp)
716
+
717
+ # The wrapped minimizer is called once during construction of
718
+ # BasinHoppingRunner, so run the callback
719
+ if callable(callback):
720
+ callback(bh.storage.minres.x, bh.storage.minres.fun, True)
721
+
722
+ # start main iteration loop
723
+ count, i = 0, 0
724
+ message = ["requested number of basinhopping iterations completed"
725
+ " successfully"]
726
+ for i in range(niter):
727
+ new_global_min = bh.one_cycle()
728
+
729
+ if callable(callback):
730
+ # should we pass a copy of x?
731
+ val = callback(bh.xtrial, bh.energy_trial, bh.accept)
732
+ if val is not None:
733
+ if val:
734
+ message = ["callback function requested stop early by"
735
+ "returning True"]
736
+ break
737
+
738
+ count += 1
739
+ if new_global_min:
740
+ count = 0
741
+ elif count > niter_success:
742
+ message = ["success condition satisfied"]
743
+ break
744
+
745
+ # prepare return object
746
+ res = bh.res
747
+ res.lowest_optimization_result = bh.storage.get_lowest()
748
+ res.x = np.copy(res.lowest_optimization_result.x)
749
+ res.fun = res.lowest_optimization_result.fun
750
+ res.message = message
751
+ res.nit = i + 1
752
+ res.success = res.lowest_optimization_result.success
753
+ return res
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_bglu_dense.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (364 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_bracket.py ADDED
@@ -0,0 +1,663 @@
1
+ import numpy as np
2
+ import scipy._lib._elementwise_iterative_method as eim
3
+ from scipy._lib._util import _RichResult
4
+
5
+ _ELIMITS = -1 # used in _bracket_root
6
+ _ESTOPONESIDE = 2 # used in _bracket_root
7
+
8
+ def _bracket_root_iv(func, xl0, xr0, xmin, xmax, factor, args, maxiter):
9
+
10
+ if not callable(func):
11
+ raise ValueError('`func` must be callable.')
12
+
13
+ if not np.iterable(args):
14
+ args = (args,)
15
+
16
+ xl0 = np.asarray(xl0)[()]
17
+ if not np.issubdtype(xl0.dtype, np.number) or np.iscomplex(xl0).any():
18
+ raise ValueError('`xl0` must be numeric and real.')
19
+
20
+ xr0 = xl0 + 1 if xr0 is None else xr0
21
+ xmin = -np.inf if xmin is None else xmin
22
+ xmax = np.inf if xmax is None else xmax
23
+ factor = 2. if factor is None else factor
24
+ xl0, xr0, xmin, xmax, factor = np.broadcast_arrays(xl0, xr0, xmin, xmax, factor)
25
+
26
+ if not np.issubdtype(xr0.dtype, np.number) or np.iscomplex(xr0).any():
27
+ raise ValueError('`xr0` must be numeric and real.')
28
+
29
+ if not np.issubdtype(xmin.dtype, np.number) or np.iscomplex(xmin).any():
30
+ raise ValueError('`xmin` must be numeric and real.')
31
+
32
+ if not np.issubdtype(xmax.dtype, np.number) or np.iscomplex(xmax).any():
33
+ raise ValueError('`xmax` must be numeric and real.')
34
+
35
+ if not np.issubdtype(factor.dtype, np.number) or np.iscomplex(factor).any():
36
+ raise ValueError('`factor` must be numeric and real.')
37
+ if not np.all(factor > 1):
38
+ raise ValueError('All elements of `factor` must be greater than 1.')
39
+
40
+ maxiter = np.asarray(maxiter)
41
+ message = '`maxiter` must be a non-negative integer.'
42
+ if (not np.issubdtype(maxiter.dtype, np.number) or maxiter.shape != tuple()
43
+ or np.iscomplex(maxiter)):
44
+ raise ValueError(message)
45
+ maxiter_int = int(maxiter[()])
46
+ if not maxiter == maxiter_int or maxiter < 0:
47
+ raise ValueError(message)
48
+
49
+ if not np.all((xmin <= xl0) & (xl0 < xr0) & (xr0 <= xmax)):
50
+ raise ValueError('`xmin <= xl0 < xr0 <= xmax` must be True (elementwise).')
51
+
52
+ return func, xl0, xr0, xmin, xmax, factor, args, maxiter
53
+
54
+
55
+ def _bracket_root(func, xl0, xr0=None, *, xmin=None, xmax=None, factor=None,
56
+ args=(), maxiter=1000):
57
+ """Bracket the root of a monotonic scalar function of one variable
58
+
59
+ This function works elementwise when `xl0`, `xr0`, `xmin`, `xmax`, `factor`, and
60
+ the elements of `args` are broadcastable arrays.
61
+
62
+ Parameters
63
+ ----------
64
+ func : callable
65
+ The function for which the root is to be bracketed.
66
+ The signature must be::
67
+
68
+ func(x: ndarray, *args) -> ndarray
69
+
70
+ where each element of ``x`` is a finite real and ``args`` is a tuple,
71
+ which may contain an arbitrary number of arrays that are broadcastable
72
+ with `x`. ``func`` must be an elementwise function: each element
73
+ ``func(x)[i]`` must equal ``func(x[i])`` for all indices ``i``.
74
+ xl0, xr0: float array_like
75
+ Starting guess of bracket, which need not contain a root. If `xr0` is
76
+ not provided, ``xr0 = xl0 + 1``. Must be broadcastable with one another.
77
+ xmin, xmax : float array_like, optional
78
+ Minimum and maximum allowable endpoints of the bracket, inclusive. Must
79
+ be broadcastable with `xl0` and `xr0`.
80
+ factor : float array_like, default: 2
81
+ The factor used to grow the bracket. See notes for details.
82
+ args : tuple, optional
83
+ Additional positional arguments to be passed to `func`. Must be arrays
84
+ broadcastable with `xl0`, `xr0`, `xmin`, and `xmax`. If the callable to be
85
+ bracketed requires arguments that are not broadcastable with these
86
+ arrays, wrap that callable with `func` such that `func` accepts
87
+ only `x` and broadcastable arrays.
88
+ maxiter : int, optional
89
+ The maximum number of iterations of the algorithm to perform.
90
+
91
+ Returns
92
+ -------
93
+ res : _RichResult
94
+ An instance of `scipy._lib._util._RichResult` with the following
95
+ attributes. The descriptions are written as though the values will be
96
+ scalars; however, if `func` returns an array, the outputs will be
97
+ arrays of the same shape.
98
+
99
+ xl, xr : float
100
+ The lower and upper ends of the bracket, if the algorithm
101
+ terminated successfully.
102
+ fl, fr : float
103
+ The function value at the lower and upper ends of the bracket.
104
+ nfev : int
105
+ The number of function evaluations required to find the bracket.
106
+ This is distinct from the number of times `func` is *called*
107
+ because the function may evaluated at multiple points in a single
108
+ call.
109
+ nit : int
110
+ The number of iterations of the algorithm that were performed.
111
+ status : int
112
+ An integer representing the exit status of the algorithm.
113
+
114
+ - ``0`` : The algorithm produced a valid bracket.
115
+ - ``-1`` : The bracket expanded to the allowable limits without finding a bracket.
116
+ - ``-2`` : The maximum number of iterations was reached.
117
+ - ``-3`` : A non-finite value was encountered.
118
+ - ``-4`` : Iteration was terminated by `callback`.
119
+ - ``1`` : The algorithm is proceeding normally (in `callback` only).
120
+ - ``2`` : A bracket was found in the opposite search direction (in `callback` only).
121
+
122
+ success : bool
123
+ ``True`` when the algorithm terminated successfully (status ``0``).
124
+
125
+ Notes
126
+ -----
127
+ This function generalizes an algorithm found in pieces throughout
128
+ `scipy.stats`. The strategy is to iteratively grow the bracket `(l, r)`
129
+ until ``func(l) < 0 < func(r)``. The bracket grows to the left as follows.
130
+
131
+ - If `xmin` is not provided, the distance between `xl0` and `l` is iteratively
132
+ increased by `factor`.
133
+ - If `xmin` is provided, the distance between `xmin` and `l` is iteratively
134
+ decreased by `factor`. Note that this also *increases* the bracket size.
135
+
136
+ Growth of the bracket to the right is analogous.
137
+
138
+ Growth of the bracket in one direction stops when the endpoint is no longer
139
+ finite, the function value at the endpoint is no longer finite, or the
140
+ endpoint reaches its limiting value (`xmin` or `xmax`). Iteration terminates
141
+ when the bracket stops growing in both directions, the bracket surrounds
142
+ the root, or a root is found (accidentally).
143
+
144
+ If two brackets are found - that is, a bracket is found on both sides in
145
+ the same iteration, the smaller of the two is returned.
146
+ If roots of the function are found, both `l` and `r` are set to the
147
+ leftmost root.
148
+
149
+ """ # noqa: E501
150
+ # Todo:
151
+ # - find bracket with sign change in specified direction
152
+ # - Add tolerance
153
+ # - allow factor < 1?
154
+
155
+ callback = None # works; I just don't want to test it
156
+ temp = _bracket_root_iv(func, xl0, xr0, xmin, xmax, factor, args, maxiter)
157
+ func, xl0, xr0, xmin, xmax, factor, args, maxiter = temp
158
+
159
+ xs = (xl0, xr0)
160
+ temp = eim._initialize(func, xs, args)
161
+ func, xs, fs, args, shape, dtype = temp # line split for PEP8
162
+
163
+ # The approach is to treat the left and right searches as though they were
164
+ # (almost) totally independent one-sided bracket searches. (The interaction
165
+ # is considered when checking for termination and preparing the result
166
+ # object.)
167
+ # `x` is the "moving" end of the bracket
168
+ x = np.concatenate(xs)
169
+ f = np.concatenate(fs)
170
+ n = len(x) // 2
171
+
172
+ # `x_last` is the previous location of the moving end of the bracket. If
173
+ # the signs of `f` and `f_last` are different, `x` and `x_last` form a
174
+ # bracket.
175
+ x_last = np.concatenate((x[n:], x[:n]))
176
+ f_last = np.concatenate((f[n:], f[:n]))
177
+ # `x0` is the "fixed" end of the bracket.
178
+ x0 = x_last
179
+ # We don't need to retain the corresponding function value, since the
180
+ # fixed end of the bracket is only needed to compute the new value of the
181
+ # moving end; it is never returned.
182
+
183
+ xmin = np.broadcast_to(xmin, shape).astype(dtype, copy=False).ravel()
184
+ xmax = np.broadcast_to(xmax, shape).astype(dtype, copy=False).ravel()
185
+ limit = np.concatenate((xmin, xmax))
186
+
187
+ factor = np.broadcast_to(factor, shape).astype(dtype, copy=False).ravel()
188
+ factor = np.concatenate((factor, factor))
189
+
190
+ active = np.arange(2*n)
191
+ args = [np.concatenate((arg, arg)) for arg in args]
192
+
193
+ # This is needed due to inner workings of `eim._loop`.
194
+ # We're abusing it a tiny bit.
195
+ shape = shape + (2,)
196
+
197
+ # `d` is for "distance".
198
+ # For searches without a limit, the distance between the fixed end of the
199
+ # bracket `x0` and the moving end `x` will grow by `factor` each iteration.
200
+ # For searches with a limit, the distance between the `limit` and moving
201
+ # end of the bracket `x` will shrink by `factor` each iteration.
202
+ i = np.isinf(limit)
203
+ ni = ~i
204
+ d = np.zeros_like(x)
205
+ d[i] = x[i] - x0[i]
206
+ d[ni] = limit[ni] - x[ni]
207
+
208
+ status = np.full_like(x, eim._EINPROGRESS, dtype=int) # in progress
209
+ nit, nfev = 0, 1 # one function evaluation per side performed above
210
+
211
+ work = _RichResult(x=x, x0=x0, f=f, limit=limit, factor=factor,
212
+ active=active, d=d, x_last=x_last, f_last=f_last,
213
+ nit=nit, nfev=nfev, status=status, args=args,
214
+ xl=None, xr=None, fl=None, fr=None, n=n)
215
+ res_work_pairs = [('status', 'status'), ('xl', 'xl'), ('xr', 'xr'),
216
+ ('nit', 'nit'), ('nfev', 'nfev'), ('fl', 'fl'),
217
+ ('fr', 'fr'), ('x', 'x'), ('f', 'f'),
218
+ ('x_last', 'x_last'), ('f_last', 'f_last')]
219
+
220
+ def pre_func_eval(work):
221
+ # Initialize moving end of bracket
222
+ x = np.zeros_like(work.x)
223
+
224
+ # Unlimited brackets grow by `factor` by increasing distance from fixed
225
+ # end to moving end.
226
+ i = np.isinf(work.limit) # indices of unlimited brackets
227
+ work.d[i] *= work.factor[i]
228
+ x[i] = work.x0[i] + work.d[i]
229
+
230
+ # Limited brackets grow by decreasing the distance from the limit to
231
+ # the moving end.
232
+ ni = ~i # indices of limited brackets
233
+ work.d[ni] /= work.factor[ni]
234
+ x[ni] = work.limit[ni] - work.d[ni]
235
+
236
+ return x
237
+
238
+ def post_func_eval(x, f, work):
239
+ # Keep track of the previous location of the moving end so that we can
240
+ # return a narrower bracket. (The alternative is to remember the
241
+ # original fixed end, but then the bracket would be wider than needed.)
242
+ work.x_last = work.x
243
+ work.f_last = work.f
244
+ work.x = x
245
+ work.f = f
246
+
247
+ def check_termination(work):
248
+ stop = np.zeros_like(work.x, dtype=bool)
249
+
250
+ # Condition 1: a valid bracket (or the root itself) has been found
251
+ sf = np.sign(work.f)
252
+ sf_last = np.sign(work.f_last)
253
+ i = (sf_last == -sf) | (sf_last == 0) | (sf == 0)
254
+ work.status[i] = eim._ECONVERGED
255
+ stop[i] = True
256
+
257
+ # Condition 2: the other side's search found a valid bracket.
258
+ # (If we just found a bracket with the rightward search, we can stop
259
+ # the leftward search, and vice-versa.)
260
+ # To do this, we need to set the status of the other side's search;
261
+ # this is tricky because `work.status` contains only the *active*
262
+ # elements, so we don't immediately know the index of the element we
263
+ # need to set - or even if it's still there. (That search may have
264
+ # terminated already, e.g. by reaching its `limit`.)
265
+ # To facilitate this, `work.active` contains a unit integer index of
266
+ # each search. Index `k` (`k < n)` and `k + n` correspond with a
267
+ # leftward and rightward search, respectively. Elements are removed
268
+ # from `work.active` just as they are removed from `work.status`, so
269
+ # we use `work.active` to help find the right location in
270
+ # `work.status`.
271
+ # Get the integer indices of the elements that can also stop
272
+ also_stop = (work.active[i] + work.n) % (2*work.n)
273
+ # Check whether they are still active.
274
+ # To start, we need to find out where in `work.active` they would
275
+ # appear if they are indeed there.
276
+ j = np.searchsorted(work.active, also_stop)
277
+ # If the location exceeds the length of the `work.active`, they are
278
+ # not there.
279
+ j = j[j < len(work.active)]
280
+ # Check whether they are still there.
281
+ j = j[also_stop == work.active[j]]
282
+ # Now convert these to boolean indices to use with `work.status`.
283
+ i = np.zeros_like(stop)
284
+ i[j] = True # boolean indices of elements that can also stop
285
+ i = i & ~stop
286
+ work.status[i] = _ESTOPONESIDE
287
+ stop[i] = True
288
+
289
+ # Condition 3: moving end of bracket reaches limit
290
+ i = (work.x == work.limit) & ~stop
291
+ work.status[i] = _ELIMITS
292
+ stop[i] = True
293
+
294
+ # Condition 4: non-finite value encountered
295
+ i = ~(np.isfinite(work.x) & np.isfinite(work.f)) & ~stop
296
+ work.status[i] = eim._EVALUEERR
297
+ stop[i] = True
298
+
299
+ return stop
300
+
301
+ def post_termination_check(work):
302
+ pass
303
+
304
+ def customize_result(res, shape):
305
+ n = len(res['x']) // 2
306
+
307
+ # To avoid ambiguity, below we refer to `xl0`, the initial left endpoint
308
+ # as `a` and `xr0`, the initial right endpoint, as `b`.
309
+ # Because we treat the two one-sided searches as though they were
310
+ # independent, what we keep track of in `work` and what we want to
311
+ # return in `res` look quite different. Combine the results from the
312
+ # two one-sided searches before reporting the results to the user.
313
+ # - "a" refers to the leftward search (the moving end started at `a`)
314
+ # - "b" refers to the rightward search (the moving end started at `b`)
315
+ # - "l" refers to the left end of the bracket (closer to -oo)
316
+ # - "r" refers to the right end of the bracket (closer to +oo)
317
+ xal = res['x'][:n]
318
+ xar = res['x_last'][:n]
319
+ xbl = res['x_last'][n:]
320
+ xbr = res['x'][n:]
321
+
322
+ fal = res['f'][:n]
323
+ far = res['f_last'][:n]
324
+ fbl = res['f_last'][n:]
325
+ fbr = res['f'][n:]
326
+
327
+ # Initialize the brackets and corresponding function values to return
328
+ # to the user. Brackets may not be valid (e.g. there is no root,
329
+ # there weren't enough iterations, NaN encountered), but we still need
330
+ # to return something. One option would be all NaNs, but what I've
331
+ # chosen here is the left- and right-most points at which the function
332
+ # has been evaluated. This gives the user some information about what
333
+ # interval of the real line has been searched and shows that there is
334
+ # no sign change between the two ends.
335
+ xl = xal.copy()
336
+ fl = fal.copy()
337
+ xr = xbr.copy()
338
+ fr = fbr.copy()
339
+
340
+ # `status` indicates whether the bracket is valid or not. If so,
341
+ # we want to adjust the bracket we return to be the narrowest possible
342
+ # given the points at which we evaluated the function.
343
+ # For example if bracket "a" is valid and smaller than bracket "b" OR
344
+ # if bracket "a" is valid and bracket "b" is not valid, we want to
345
+ # return bracket "a" (and vice versa).
346
+ sa = res['status'][:n]
347
+ sb = res['status'][n:]
348
+
349
+ da = xar - xal
350
+ db = xbr - xbl
351
+
352
+ i1 = ((da <= db) & (sa == 0)) | ((sa == 0) & (sb != 0))
353
+ i2 = ((db <= da) & (sb == 0)) | ((sb == 0) & (sa != 0))
354
+
355
+ xr[i1] = xar[i1]
356
+ fr[i1] = far[i1]
357
+ xl[i2] = xbl[i2]
358
+ fl[i2] = fbl[i2]
359
+
360
+ # Finish assembling the result object
361
+ res['xl'] = xl
362
+ res['xr'] = xr
363
+ res['fl'] = fl
364
+ res['fr'] = fr
365
+
366
+ res['nit'] = np.maximum(res['nit'][:n], res['nit'][n:])
367
+ res['nfev'] = res['nfev'][:n] + res['nfev'][n:]
368
+ # If the status on one side is zero, the status is zero. In any case,
369
+ # report the status from one side only.
370
+ res['status'] = np.choose(sa == 0, (sb, sa))
371
+ res['success'] = (res['status'] == 0)
372
+
373
+ del res['x']
374
+ del res['f']
375
+ del res['x_last']
376
+ del res['f_last']
377
+
378
+ return shape[:-1]
379
+
380
+ return eim._loop(work, callback, shape, maxiter, func, args, dtype,
381
+ pre_func_eval, post_func_eval, check_termination,
382
+ post_termination_check, customize_result, res_work_pairs)
383
+
384
+
385
+ def _bracket_minimum_iv(func, xm0, xl0, xr0, xmin, xmax, factor, args, maxiter):
386
+
387
+ if not callable(func):
388
+ raise ValueError('`func` must be callable.')
389
+
390
+ if not np.iterable(args):
391
+ args = (args,)
392
+
393
+ xm0 = np.asarray(xm0)[()]
394
+ if not np.issubdtype(xm0.dtype, np.number) or np.iscomplex(xm0).any():
395
+ raise ValueError('`xm0` must be numeric and real.')
396
+
397
+ xmin = -np.inf if xmin is None else xmin
398
+ xmax = np.inf if xmax is None else xmax
399
+
400
+ xl0_not_supplied = False
401
+ if xl0 is None:
402
+ xl0 = xm0 - 0.5
403
+ xl0_not_supplied = True
404
+
405
+ xr0_not_supplied = False
406
+ if xr0 is None:
407
+ xr0 = xm0 + 0.5
408
+ xr0_not_supplied = True
409
+
410
+ factor = 2.0 if factor is None else factor
411
+ xl0, xm0, xr0, xmin, xmax, factor = np.broadcast_arrays(
412
+ xl0, xm0, xr0, xmin, xmax, factor
413
+ )
414
+
415
+ if not np.issubdtype(xl0.dtype, np.number) or np.iscomplex(xl0).any():
416
+ raise ValueError('`xl0` must be numeric and real.')
417
+
418
+ if not np.issubdtype(xr0.dtype, np.number) or np.iscomplex(xr0).any():
419
+ raise ValueError('`xr0` must be numeric and real.')
420
+
421
+ if not np.issubdtype(xmin.dtype, np.number) or np.iscomplex(xmin).any():
422
+ raise ValueError('`xmin` must be numeric and real.')
423
+
424
+ if not np.issubdtype(xmax.dtype, np.number) or np.iscomplex(xmax).any():
425
+ raise ValueError('`xmax` must be numeric and real.')
426
+
427
+ if not np.issubdtype(factor.dtype, np.number) or np.iscomplex(factor).any():
428
+ raise ValueError('`factor` must be numeric and real.')
429
+ if not np.all(factor > 1):
430
+ raise ValueError('All elements of `factor` must be greater than 1.')
431
+
432
+ # Default choices for xl or xr might have exceeded xmin or xmax. Adjust
433
+ # to make sure this doesn't happen. We replace with copies because xl, and xr
434
+ # are read-only views produced by broadcast_arrays.
435
+ if xl0_not_supplied:
436
+ xl0 = xl0.copy()
437
+ cond = ~np.isinf(xmin) & (xl0 < xmin)
438
+ xl0[cond] = (
439
+ xm0[cond] - xmin[cond]
440
+ ) / np.array(16, dtype=xl0.dtype)
441
+ if xr0_not_supplied:
442
+ xr0 = xr0.copy()
443
+ cond = ~np.isinf(xmax) & (xmax < xr0)
444
+ xr0[cond] = (
445
+ xmax[cond] - xm0[cond]
446
+ ) / np.array(16, dtype=xr0.dtype)
447
+
448
+ maxiter = np.asarray(maxiter)
449
+ message = '`maxiter` must be a non-negative integer.'
450
+ if (not np.issubdtype(maxiter.dtype, np.number) or maxiter.shape != tuple()
451
+ or np.iscomplex(maxiter)):
452
+ raise ValueError(message)
453
+ maxiter_int = int(maxiter[()])
454
+ if not maxiter == maxiter_int or maxiter < 0:
455
+ raise ValueError(message)
456
+
457
+ if not np.all((xmin <= xl0) & (xl0 < xm0) & (xm0 < xr0) & (xr0 <= xmax)):
458
+ raise ValueError(
459
+ '`xmin <= xl0 < xm0 < xr0 <= xmax` must be True (elementwise).'
460
+ )
461
+
462
+ return func, xm0, xl0, xr0, xmin, xmax, factor, args, maxiter
463
+
464
+
465
+ def _bracket_minimum(func, xm0, *, xl0=None, xr0=None, xmin=None, xmax=None,
466
+ factor=None, args=(), maxiter=1000):
467
+ """Bracket the minimum of a unimodal scalar function of one variable
468
+
469
+ This function works elementwise when `xm0`, `xl0`, `xr0`, `xmin`, `xmax`,
470
+ and the elements of `args` are broadcastable arrays.
471
+
472
+ Parameters
473
+ ----------
474
+ func : callable
475
+ The function for which the minimum is to be bracketed.
476
+ The signature must be::
477
+
478
+ func(x: ndarray, *args) -> ndarray
479
+
480
+ where each element of ``x`` is a finite real and ``args`` is a tuple,
481
+ which may contain an arbitrary number of arrays that are broadcastable
482
+ with ``x``. `func` must be an elementwise function: each element
483
+ ``func(x)[i]`` must equal ``func(x[i])`` for all indices `i`.
484
+ xm0: float array_like
485
+ Starting guess for middle point of bracket.
486
+ xl0, xr0: float array_like, optional
487
+ Starting guesses for left and right endpoints of the bracket. Must be
488
+ broadcastable with one another and with `xm0`.
489
+ xmin, xmax : float array_like, optional
490
+ Minimum and maximum allowable endpoints of the bracket, inclusive. Must
491
+ be broadcastable with `xl0`, `xm0`, and `xr0`.
492
+ factor : float array_like, optional
493
+ Controls expansion of bracket endpoint in downhill direction. Works
494
+ differently in the cases where a limit is set in the downhill direction
495
+ with `xmax` or `xmin`. See Notes.
496
+ args : tuple, optional
497
+ Additional positional arguments to be passed to `func`. Must be arrays
498
+ broadcastable with `xl0`, `xm0`, `xr0`, `xmin`, and `xmax`. If the
499
+ callable to be bracketed requires arguments that are not broadcastable
500
+ with these arrays, wrap that callable with `func` such that `func`
501
+ accepts only ``x`` and broadcastable arrays.
502
+ maxiter : int, optional
503
+ The maximum number of iterations of the algorithm to perform. The number
504
+ of function evaluations is three greater than the number of iterations.
505
+
506
+ Returns
507
+ -------
508
+ res : _RichResult
509
+ An instance of `scipy._lib._util._RichResult` with the following
510
+ attributes. The descriptions are written as though the values will be
511
+ scalars; however, if `func` returns an array, the outputs will be
512
+ arrays of the same shape.
513
+
514
+ xl, xm, xr : float
515
+ The left, middle, and right points of the bracket, if the algorithm
516
+ terminated successfully.
517
+ fl, fm, fr : float
518
+ The function value at the left, middle, and right points of the bracket.
519
+ nfev : int
520
+ The number of function evaluations required to find the bracket.
521
+ nit : int
522
+ The number of iterations of the algorithm that were performed.
523
+ status : int
524
+ An integer representing the exit status of the algorithm.
525
+
526
+ - ``0`` : The algorithm produced a valid bracket.
527
+ - ``-1`` : The bracket expanded to the allowable limits. Assuming
528
+ unimodality, this implies the endpoint at the limit is a
529
+ minimizer.
530
+ - ``-2`` : The maximum number of iterations was reached.
531
+ - ``-3`` : A non-finite value was encountered.
532
+
533
+ success : bool
534
+ ``True`` when the algorithm terminated successfully (status ``0``).
535
+
536
+ Notes
537
+ -----
538
+ Similar to `scipy.optimize.bracket`, this function seeks to find real
539
+ points ``xl < xm < xr`` such that ``f(xl) >= f(xm)`` and ``f(xr) >= f(xm)``,
540
+ where at least one of the inequalities is strict. Unlike `scipy.optimize.bracket`,
541
+ this function can operate in a vectorized manner on array input, so long as
542
+ the input arrays are broadcastable with each other. Also unlike
543
+ `scipy.optimize.bracket`, users may specify minimum and maximum endpoints
544
+ for the desired bracket.
545
+
546
+ Given an initial trio of points ``xl = xl0``, ``xm = xm0``, ``xr = xr0``,
547
+ the algorithm checks if these points already give a valid bracket. If not,
548
+ a new endpoint, ``w`` is chosen in the "downhill" direction, ``xm`` becomes the new
549
+ opposite endpoint, and either `xl` or `xr` becomes the new middle point,
550
+ depending on which direction is downhill. The algorithm repeats from here.
551
+
552
+ The new endpoint `w` is chosen differently depending on whether or not a
553
+ boundary `xmin` or `xmax` has been set in the downhill direction. Without
554
+ loss of generality, suppose the downhill direction is to the right, so that
555
+ ``f(xl) > f(xm) > f(xr)``. If there is no boundary to the right, then `w`
556
+ is chosen to be ``xr + factor * (xr - xm)`` where `factor` is controlled by
557
+ the user (defaults to 2.0) so that step sizes increase in geometric proportion.
558
+ If there is a boundary, `xmax` in this case, then `w` is chosen to be
559
+ ``xmax - (xmax - xr)/factor``, with steps slowing to a stop at
560
+ `xmax`. This cautious approach ensures that a minimum near but distinct from
561
+ the boundary isn't missed while also detecting whether or not the `xmax` is
562
+ a minimizer when `xmax` is reached after a finite number of steps.
563
+ """ # noqa: E501
564
+ callback = None # works; I just don't want to test it
565
+
566
+ temp = _bracket_minimum_iv(func, xm0, xl0, xr0, xmin, xmax, factor, args, maxiter)
567
+ func, xm0, xl0, xr0, xmin, xmax, factor, args, maxiter = temp
568
+
569
+ xs = (xl0, xm0, xr0)
570
+ func, xs, fs, args, shape, dtype = eim._initialize(func, xs, args)
571
+
572
+ xl0, xm0, xr0 = xs
573
+ fl0, fm0, fr0 = fs
574
+ xmin = np.broadcast_to(xmin, shape).astype(dtype, copy=False).ravel()
575
+ xmax = np.broadcast_to(xmax, shape).astype(dtype, copy=False).ravel()
576
+ # We will modify factor later on so make a copy. np.broadcast_to returns
577
+ # a read-only view.
578
+ factor = np.broadcast_to(factor, shape).astype(dtype, copy=True).ravel()
579
+
580
+ # To simplify the logic, swap xl and xr if f(xl) < f(xr). We should always be
581
+ # marching downhill in the direction from xl to xr.
582
+ comp = fl0 < fr0
583
+ xl0[comp], xr0[comp] = xr0[comp], xl0[comp]
584
+ fl0[comp], fr0[comp] = fr0[comp], fl0[comp]
585
+ # We only need the boundary in the direction we're traveling.
586
+ limit = np.where(comp, xmin, xmax)
587
+
588
+ unlimited = np.isinf(limit)
589
+ limited = ~unlimited
590
+ step = np.empty_like(xl0)
591
+
592
+ step[unlimited] = (xr0[unlimited] - xm0[unlimited])
593
+ step[limited] = (limit[limited] - xr0[limited])
594
+
595
+ # Step size is divided by factor for case where there is a limit.
596
+ factor[limited] = 1 / factor[limited]
597
+
598
+ status = np.full_like(xl0, eim._EINPROGRESS, dtype=int)
599
+ nit, nfev = 0, 3
600
+
601
+ work = _RichResult(xl=xl0, xm=xm0, xr=xr0, xr0=xr0, fl=fl0, fm=fm0, fr=fr0,
602
+ step=step, limit=limit, limited=limited, factor=factor, nit=nit,
603
+ nfev=nfev, status=status, args=args)
604
+
605
+ res_work_pairs = [('status', 'status'), ('xl', 'xl'), ('xm', 'xm'), ('xr', 'xr'),
606
+ ('nit', 'nit'), ('nfev', 'nfev'), ('fl', 'fl'), ('fm', 'fm'),
607
+ ('fr', 'fr')]
608
+
609
+ def pre_func_eval(work):
610
+ work.step *= work.factor
611
+ x = np.empty_like(work.xr)
612
+ x[~work.limited] = work.xr0[~work.limited] + work.step[~work.limited]
613
+ x[work.limited] = work.limit[work.limited] - work.step[work.limited]
614
+ # Since the new bracket endpoint is calculated from an offset with the
615
+ # limit, it may be the case that the new endpoint equals the old endpoint,
616
+ # when the old endpoint is sufficiently close to the limit. We use the
617
+ # limit itself as the new endpoint in these cases.
618
+ x[work.limited] = np.where(
619
+ x[work.limited] == work.xr[work.limited],
620
+ work.limit[work.limited],
621
+ x[work.limited],
622
+ )
623
+ return x
624
+
625
+ def post_func_eval(x, f, work):
626
+ work.xl, work.xm, work.xr = work.xm, work.xr, x
627
+ work.fl, work.fm, work.fr = work.fm, work.fr, f
628
+
629
+ def check_termination(work):
630
+ # Condition 1: A valid bracket has been found.
631
+ stop = (
632
+ (work.fl >= work.fm) & (work.fr > work.fm)
633
+ | (work.fl > work.fm) & (work.fr >= work.fm)
634
+ )
635
+ work.status[stop] = eim._ECONVERGED
636
+
637
+ # Condition 2: Moving end of bracket reaches limit.
638
+ i = (work.xr == work.limit) & ~stop
639
+ work.status[i] = _ELIMITS
640
+ stop[i] = True
641
+
642
+ # Condition 3: non-finite value encountered
643
+ i = ~(np.isfinite(work.xr) & np.isfinite(work.fr)) & ~stop
644
+ work.status[i] = eim._EVALUEERR
645
+ stop[i] = True
646
+
647
+ return stop
648
+
649
+ def post_termination_check(work):
650
+ pass
651
+
652
+ def customize_result(res, shape):
653
+ # Reorder entries of xl and xr if they were swapped due to f(xl0) < f(xr0).
654
+ comp = res['xl'] > res['xr']
655
+ res['xl'][comp], res['xr'][comp] = res['xr'][comp], res['xl'][comp]
656
+ res['fl'][comp], res['fr'][comp] = res['fr'][comp], res['fl'][comp]
657
+ return shape
658
+
659
+ return eim._loop(work, callback, shape,
660
+ maxiter, func, args, dtype,
661
+ pre_func_eval, post_func_eval,
662
+ check_termination, post_termination_check,
663
+ customize_result, res_work_pairs)
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_chandrupatla.py ADDED
@@ -0,0 +1,524 @@
1
+ import numpy as np
2
+ from ._zeros_py import _xtol, _rtol, _iter
3
+ import scipy._lib._elementwise_iterative_method as eim
4
+ from scipy._lib._util import _RichResult
5
+
6
+ def _chandrupatla(func, a, b, *, args=(), xatol=_xtol, xrtol=_rtol,
7
+ fatol=None, frtol=0, maxiter=_iter, callback=None):
8
+ """Find the root of an elementwise function using Chandrupatla's algorithm.
9
+
10
+ For each element of the output of `func`, `chandrupatla` seeks the scalar
11
+ root that makes the element 0. This function allows for `a`, `b`, and the
12
+ output of `func` to be of any broadcastable shapes.
13
+
14
+ Parameters
15
+ ----------
16
+ func : callable
17
+ The function whose root is desired. The signature must be::
18
+
19
+ func(x: ndarray, *args) -> ndarray
20
+
21
+ where each element of ``x`` is a finite real and ``args`` is a tuple,
22
+ which may contain an arbitrary number of components of any type(s).
23
+ ``func`` must be an elementwise function: each element ``func(x)[i]``
24
+ must equal ``func(x[i])`` for all indices ``i``. `_chandrupatla`
25
+ seeks an array ``x`` such that ``func(x)`` is an array of zeros.
26
+ a, b : array_like
27
+ The lower and upper bounds of the root of the function. Must be
28
+ broadcastable with one another.
29
+ args : tuple, optional
30
+ Additional positional arguments to be passed to `func`.
31
+ xatol, xrtol, fatol, frtol : float, optional
32
+ Absolute and relative tolerances on the root and function value.
33
+ See Notes for details.
34
+ maxiter : int, optional
35
+ The maximum number of iterations of the algorithm to perform.
36
+ callback : callable, optional
37
+ An optional user-supplied function to be called before the first
38
+ iteration and after each iteration.
39
+ Called as ``callback(res)``, where ``res`` is a ``_RichResult``
40
+ similar to that returned by `_chandrupatla` (but containing the current
41
+ iterate's values of all variables). If `callback` raises a
42
+ ``StopIteration``, the algorithm will terminate immediately and
43
+ `_chandrupatla` will return a result.
44
+
45
+ Returns
46
+ -------
47
+ res : _RichResult
48
+ An instance of `scipy._lib._util._RichResult` with the following
49
+ attributes. The descriptions are written as though the values will be
50
+ scalars; however, if `func` returns an array, the outputs will be
51
+ arrays of the same shape.
52
+
53
+ x : float
54
+ The root of the function, if the algorithm terminated successfully.
55
+ nfev : int
56
+ The number of times the function was called to find the root.
57
+ nit : int
58
+ The number of iterations of Chandrupatla's algorithm performed.
59
+ status : int
60
+ An integer representing the exit status of the algorithm.
61
+ ``0`` : The algorithm converged to the specified tolerances.
62
+ ``-1`` : The algorithm encountered an invalid bracket.
63
+ ``-2`` : The maximum number of iterations was reached.
64
+ ``-3`` : A non-finite value was encountered.
65
+ ``-4`` : Iteration was terminated by `callback`.
66
+ ``1`` : The algorithm is proceeding normally (in `callback` only).
67
+ success : bool
68
+ ``True`` when the algorithm terminated successfully (status ``0``).
69
+ fun : float
70
+ The value of `func` evaluated at `x`.
71
+ xl, xr : float
72
+ The lower and upper ends of the bracket.
73
+ fl, fr : float
74
+ The function value at the lower and upper ends of the bracket.
75
+
76
+ Notes
77
+ -----
78
+ Implemented based on Chandrupatla's original paper [1]_.
79
+
80
+ If ``xl`` and ``xr`` are the left and right ends of the bracket,
81
+ ``xmin = xl if abs(func(xl)) <= abs(func(xr)) else xr``,
82
+ and ``fmin0 = min(abs(func(a)), abs(func(b)))``, then the algorithm is
83
+ considered to have converged when ``abs(xr - xl) < xatol + abs(xmin) * xrtol``
84
+ or ``abs(func(xmin)) <= fatol + fmin0 * frtol``. This is equivalent to the
85
+ termination condition described in [1]_ with ``xrtol = 4e-10``,
86
+ ``xatol = 1e-5``, and ``fatol = frtol = 0``. The default values are
87
+ ``xatol = 2e-12``, ``xrtol = 4 * np.finfo(float).eps``, ``frtol = 0``,
88
+ and ``fatol`` is the smallest normal number of the ``dtype`` returned
89
+ by ``func``.
90
+
91
+ References
92
+ ----------
93
+
94
+ .. [1] Chandrupatla, Tirupathi R.
95
+ "A new hybrid quadratic/bisection algorithm for finding the zero of a
96
+ nonlinear function without using derivatives".
97
+ Advances in Engineering Software, 28(3), 145-149.
98
+ https://doi.org/10.1016/s0965-9978(96)00051-8
99
+
100
+ See Also
101
+ --------
102
+ brentq, brenth, ridder, bisect, newton
103
+
104
+ Examples
105
+ --------
106
+ >>> from scipy import optimize
107
+ >>> def f(x, c):
108
+ ... return x**3 - 2*x - c
109
+ >>> c = 5
110
+ >>> res = optimize._chandrupatla._chandrupatla(f, 0, 3, args=(c,))
111
+ >>> res.x
112
+ 2.0945514818937463
113
+
114
+ >>> c = [3, 4, 5]
115
+ >>> res = optimize._chandrupatla._chandrupatla(f, 0, 3, args=(c,))
116
+ >>> res.x
117
+ array([1.8932892 , 2. , 2.09455148])
118
+
119
+ """
120
+ res = _chandrupatla_iv(func, args, xatol, xrtol,
121
+ fatol, frtol, maxiter, callback)
122
+ func, args, xatol, xrtol, fatol, frtol, maxiter, callback = res
123
+
124
+ # Initialization
125
+ temp = eim._initialize(func, (a, b), args)
126
+ func, xs, fs, args, shape, dtype = temp
127
+ x1, x2 = xs
128
+ f1, f2 = fs
129
+ status = np.full_like(x1, eim._EINPROGRESS, dtype=int) # in progress
130
+ nit, nfev = 0, 2 # two function evaluations performed above
131
+ xatol = _xtol if xatol is None else xatol
132
+ xrtol = _rtol if xrtol is None else xrtol
133
+ fatol = np.finfo(dtype).tiny if fatol is None else fatol
134
+ frtol = frtol * np.minimum(np.abs(f1), np.abs(f2))
135
+ work = _RichResult(x1=x1, f1=f1, x2=x2, f2=f2, x3=None, f3=None, t=0.5,
136
+ xatol=xatol, xrtol=xrtol, fatol=fatol, frtol=frtol,
137
+ nit=nit, nfev=nfev, status=status)
138
+ res_work_pairs = [('status', 'status'), ('x', 'xmin'), ('fun', 'fmin'),
139
+ ('nit', 'nit'), ('nfev', 'nfev'), ('xl', 'x1'),
140
+ ('fl', 'f1'), ('xr', 'x2'), ('fr', 'f2')]
141
+
142
+ def pre_func_eval(work):
143
+ # [1] Figure 1 (first box)
144
+ x = work.x1 + work.t * (work.x2 - work.x1)
145
+ return x
146
+
147
+ def post_func_eval(x, f, work):
148
+ # [1] Figure 1 (first diamond and boxes)
149
+ # Note: y/n are reversed in figure; compare to BASIC in appendix
150
+ work.x3, work.f3 = work.x2.copy(), work.f2.copy()
151
+ j = np.sign(f) == np.sign(work.f1)
152
+ nj = ~j
153
+ work.x3[j], work.f3[j] = work.x1[j], work.f1[j]
154
+ work.x2[nj], work.f2[nj] = work.x1[nj], work.f1[nj]
155
+ work.x1, work.f1 = x, f
156
+
157
+ def check_termination(work):
158
+ # [1] Figure 1 (second diamond)
159
+ # Check for all terminal conditions and record statuses.
160
+
161
+ # See [1] Section 4 (first two sentences)
162
+ i = np.abs(work.f1) < np.abs(work.f2)
163
+ work.xmin = np.choose(i, (work.x2, work.x1))
164
+ work.fmin = np.choose(i, (work.f2, work.f1))
165
+ stop = np.zeros_like(work.x1, dtype=bool) # termination condition met
166
+
167
+ # This is the convergence criterion used in bisect. Chandrupatla's
168
+ # criterion is equivalent to this except with a factor of 4 on `xrtol`.
169
+ work.dx = abs(work.x2 - work.x1)
170
+ work.tol = abs(work.xmin) * work.xrtol + work.xatol
171
+ i = work.dx < work.tol
172
+ # Modify in place to incorporate tolerance on function value. Note that
173
+ # `frtol` has been redefined as `frtol = frtol * np.minimum(f1, f2)`,
174
+ # where `f1` and `f2` are the function evaluated at the original ends of
175
+ # the bracket.
176
+ i |= np.abs(work.fmin) <= work.fatol + work.frtol
177
+ work.status[i] = eim._ECONVERGED
178
+ stop[i] = True
179
+
180
+ i = (np.sign(work.f1) == np.sign(work.f2)) & ~stop
181
+ work.xmin[i], work.fmin[i], work.status[i] = np.nan, np.nan, eim._ESIGNERR
182
+ stop[i] = True
183
+
184
+ i = ~((np.isfinite(work.x1) & np.isfinite(work.x2)
185
+ & np.isfinite(work.f1) & np.isfinite(work.f2)) | stop)
186
+ work.xmin[i], work.fmin[i], work.status[i] = np.nan, np.nan, eim._EVALUEERR
187
+ stop[i] = True
188
+
189
+ return stop
190
+
191
+ def post_termination_check(work):
192
+ # [1] Figure 1 (third diamond and boxes / Equation 1)
193
+ xi1 = (work.x1 - work.x2) / (work.x3 - work.x2)
194
+ phi1 = (work.f1 - work.f2) / (work.f3 - work.f2)
195
+ alpha = (work.x3 - work.x1) / (work.x2 - work.x1)
196
+ j = ((1 - np.sqrt(1 - xi1)) < phi1) & (phi1 < np.sqrt(xi1))
197
+
198
+ f1j, f2j, f3j, alphaj = work.f1[j], work.f2[j], work.f3[j], alpha[j]
199
+ t = np.full_like(alpha, 0.5)
200
+ t[j] = (f1j / (f1j - f2j) * f3j / (f3j - f2j)
201
+ - alphaj * f1j / (f3j - f1j) * f2j / (f2j - f3j))
202
+
203
+ # [1] Figure 1 (last box; see also BASIC in appendix with comment
204
+ # "Adjust T Away from the Interval Boundary")
205
+ tl = 0.5 * work.tol / work.dx
206
+ work.t = np.clip(t, tl, 1 - tl)
207
+
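+ # e.g. with tol = 1e-8 and dx = 1.0, t is clipped to [5e-9, 1 - 5e-9],
+ # so the next evaluation point cannot coincide with a bracket endpoint.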
208
+ def customize_result(res, shape):
209
+ xl, xr, fl, fr = res['xl'], res['xr'], res['fl'], res['fr']
210
+ i = res['xl'] < res['xr']
211
+ res['xl'] = np.choose(i, (xr, xl))
212
+ res['xr'] = np.choose(i, (xl, xr))
213
+ res['fl'] = np.choose(i, (fr, fl))
214
+ res['fr'] = np.choose(i, (fl, fr))
215
+ return shape
216
+
217
+ return eim._loop(work, callback, shape, maxiter, func, args, dtype,
218
+ pre_func_eval, post_func_eval, check_termination,
219
+ post_termination_check, customize_result, res_work_pairs)
220
+
221
+
222
+ def _chandrupatla_iv(func, args, xatol, xrtol,
223
+ fatol, frtol, maxiter, callback):
224
+ # Input validation for `_chandrupatla`
225
+
226
+ if not callable(func):
227
+ raise ValueError('`func` must be callable.')
228
+
229
+ if not np.iterable(args):
230
+ args = (args,)
231
+
232
+ tols = np.asarray([xatol if xatol is not None else 1,
233
+ xrtol if xrtol is not None else 1,
234
+ fatol if fatol is not None else 1,
235
+ frtol if frtol is not None else 1])
236
+ if (not np.issubdtype(tols.dtype, np.number) or np.any(tols < 0)
237
+ or np.any(np.isnan(tols)) or tols.shape != (4,)):
238
+ raise ValueError('Tolerances must be non-negative scalars.')
239
+
240
+ maxiter_int = int(maxiter)
241
+ if maxiter != maxiter_int or maxiter < 0:
242
+ raise ValueError('`maxiter` must be a non-negative integer.')
243
+
244
+ if callback is not None and not callable(callback):
245
+ raise ValueError('`callback` must be callable.')
246
+
247
+ return func, args, xatol, xrtol, fatol, frtol, maxiter, callback
248
+
249
+
250
+ def _chandrupatla_minimize(func, x1, x2, x3, *, args=(), xatol=None,
251
+ xrtol=None, fatol=None, frtol=None, maxiter=100,
252
+ callback=None):
253
+ """Find the minimizer of an elementwise function.
254
+
255
+ For each element of the output of `func`, `_chandrupatla_minimize` seeks
256
+ the scalar minimizer that minimizes the element. This function allows for
257
+ `x1`, `x2`, `x3`, and the elements of `args` to be arrays of any
258
+ broadcastable shapes.
259
+
260
+ Parameters
261
+ ----------
262
+ func : callable
263
+ The function whose minimizer is desired. The signature must be::
264
+
265
+ func(x: ndarray, *args) -> ndarray
266
+
267
+ where each element of ``x`` is a finite real and ``args`` is a tuple,
268
+ which may contain an arbitrary number of arrays that are broadcastable
269
+ with `x`. ``func`` must be an elementwise function: each element
270
+ ``func(x)[i]`` must equal ``func(x[i])`` for all indices ``i``.
271
+ `_chandrupatla` seeks an array ``x`` such that ``func(x)`` is an array
272
+ of minima.
273
+ x1, x2, x3 : array_like
274
+ The abscissae of a standard scalar minimization bracket. A bracket is
275
+ valid if ``x1 < x2 < x3`` and ``func(x1) > func(x2) <= func(x3)``.
276
+ Must be broadcastable with one another and `args`.
277
+ args : tuple, optional
278
+ Additional positional arguments to be passed to `func`. Must be arrays
279
+ broadcastable with `x1`, `x2`, and `x3`. If the callable to be
280
+ differentiated requires arguments that are not broadcastable with `x`,
281
+ wrap that callable with `func` such that `func` accepts only `x` and
282
+ broadcastable arrays.
283
+ xatol, xrtol, fatol, frtol : float, optional
284
+ Absolute and relative tolerances on the minimizer and function value.
285
+ See Notes for details.
286
+ maxiter : int, optional
287
+ The maximum number of iterations of the algorithm to perform.
288
+ callback : callable, optional
289
+ An optional user-supplied function to be called before the first
290
+ iteration and after each iteration.
291
+ Called as ``callback(res)``, where ``res`` is a ``_RichResult``
292
+ similar to that returned by `_chandrupatla_minimize` (but containing
293
+ the current iterate's values of all variables). If `callback` raises a
294
+ ``StopIteration``, the algorithm will terminate immediately and
295
+ `_chandrupatla_minimize` will return a result.
296
+
297
+ Returns
298
+ -------
299
+ res : _RichResult
300
+ An instance of `scipy._lib._util._RichResult` with the following
301
+ attributes. (The descriptions are written as though the values will be
302
+ scalars; however, if `func` returns an array, the outputs will be
303
+ arrays of the same shape.)
304
+
305
+ success : bool
306
+ ``True`` when the algorithm terminated successfully (status ``0``).
307
+ status : int
308
+ An integer representing the exit status of the algorithm.
309
+ ``0`` : The algorithm converged to the specified tolerances.
310
+ ``-1`` : The algorithm encountered an invalid bracket.
311
+ ``-2`` : The maximum number of iterations was reached.
312
+ ``-3`` : A non-finite value was encountered.
313
+ ``-4`` : Iteration was terminated by `callback`.
314
+ ``1`` : The algorithm is proceeding normally (in `callback` only).
315
+ x : float
316
+ The minimizer of the function, if the algorithm terminated
317
+ successfully.
318
+ fun : float
319
+ The value of `func` evaluated at `x`.
320
+ nfev : int
321
+ The number of points at which `func` was evaluated.
322
+ nit : int
323
+ The number of iterations of the algorithm that were performed.
324
+ xl, xm, xr : float
325
+ The final three-point bracket.
326
+ fl, fm, fr : float
327
+ The function value at the bracket points.
328
+
329
+ Notes
330
+ -----
331
+ Implemented based on Chandrupatla's original paper [1]_.
332
+
333
+ If ``x1 < x2 < x3`` are the points of the bracket and ``f1 > f2 <= f3``
334
+ are the values of ``func`` at those points, then the algorithm is
335
+ considered to have converged when ``x3 - x1 <= abs(x2)*xrtol + xatol``
336
+ or ``(f1 - 2*f2 + f3)/2 <= abs(f2)*frtol + fatol``. Note that the first of
338
+ these differs from the termination conditions described in [1]_. The
339
+ default value of `xrtol` is the square root of the precision of the
340
+ appropriate dtype, and ``xatol = fatol = frtol`` is the smallest normal
340
+ number of the appropriate dtype.
341
+
342
+ References
343
+ ----------
344
+ .. [1] Chandrupatla, Tirupathi R. (1998).
345
+ "An efficient quadratic fit-sectioning algorithm for minimization
346
+ without derivatives".
347
+ Computer Methods in Applied Mechanics and Engineering, 152 (1-2),
348
+ 211-217. https://doi.org/10.1016/S0045-7825(97)00190-4
349
+
350
+ See Also
351
+ --------
352
+ golden, brent, bounded
353
+
354
+ Examples
355
+ --------
356
+ >>> from scipy.optimize._chandrupatla import _chandrupatla_minimize
357
+ >>> def f(x, args=1):
358
+ ... return (x - args)**2
359
+ >>> res = _chandrupatla_minimize(f, -5, 0, 5)
360
+ >>> res.x
361
+ 1.0
362
+ >>> c = [1, 1.5, 2]
363
+ >>> res = _chandrupatla_minimize(f, -5, 0, 5, args=(c,))
364
+ >>> res.x
365
+ array([1. , 1.5, 2. ])
366
+ """
367
+ res = _chandrupatla_iv(func, args, xatol, xrtol,
368
+ fatol, frtol, maxiter, callback)
369
+ func, args, xatol, xrtol, fatol, frtol, maxiter, callback = res
370
+
371
+ # Initialization
372
+ xs = (x1, x2, x3)
373
+ temp = eim._initialize(func, xs, args)
374
+ func, xs, fs, args, shape, dtype = temp # line split for PEP8
375
+ x1, x2, x3 = xs
376
+ f1, f2, f3 = fs
377
+ phi = dtype.type(0.5 + 0.5*5**0.5) # golden ratio
378
+ status = np.full_like(x1, eim._EINPROGRESS, dtype=int) # in progress
379
+ nit, nfev = 0, 3 # three function evaluations performed above
380
+ fatol = np.finfo(dtype).tiny if fatol is None else fatol
381
+ frtol = np.finfo(dtype).tiny if frtol is None else frtol
382
+ xatol = np.finfo(dtype).tiny if xatol is None else xatol
383
+ xrtol = np.sqrt(np.finfo(dtype).eps) if xrtol is None else xrtol
384
+
385
+ # Ensure that x1 < x2 < x3 initially.
386
+ xs, fs = np.vstack((x1, x2, x3)), np.vstack((f1, f2, f3))
387
+ i = np.argsort(xs, axis=0)
388
+ x1, x2, x3 = np.take_along_axis(xs, i, axis=0)
389
+ f1, f2, f3 = np.take_along_axis(fs, i, axis=0)
390
+ q0 = x3.copy() # "At the start, q0 is set at x3..." ([1] after (7))
391
+
392
+ work = _RichResult(x1=x1, f1=f1, x2=x2, f2=f2, x3=x3, f3=f3, phi=phi,
393
+ xatol=xatol, xrtol=xrtol, fatol=fatol, frtol=frtol,
394
+ nit=nit, nfev=nfev, status=status, q0=q0, args=args)
395
+ res_work_pairs = [('status', 'status'),
396
+ ('x', 'x2'), ('fun', 'f2'),
397
+ ('nit', 'nit'), ('nfev', 'nfev'),
398
+ ('xl', 'x1'), ('xm', 'x2'), ('xr', 'x3'),
399
+ ('fl', 'f1'), ('fm', 'f2'), ('fr', 'f3')]
400
+
401
+ def pre_func_eval(work):
402
+ # `_check_termination` is called first -> `x3 - x2 > x2 - x1`
403
+ # But let's calculate a few terms that we'll reuse
404
+ x21 = work.x2 - work.x1
405
+ x32 = work.x3 - work.x2
406
+
407
+ # [1] Section 3. "The quadratic minimum point Q1 is calculated using
408
+ # the relations developed in the previous section." [1] Section 2 (5/6)
409
+ A = x21 * (work.f3 - work.f2)
410
+ B = x32 * (work.f1 - work.f2)
411
+ C = A / (A + B)
412
+ # q1 = C * (work.x1 + work.x2) / 2 + (1 - C) * (work.x2 + work.x3) / 2
413
+ q1 = 0.5 * (C*(work.x1 - work.x3) + work.x2 + work.x3) # much faster
414
+ # this is an array, so multiplying by 0.5 does not change dtype
415
+
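+ # Quick algebra check that the two forms agree, with binary-exact
+ # illustrative numbers: x1, x2, x3 = 0., 1., 2. and f1, f2, f3 = 4., 1., 2.
+ # give A = 1, B = 3, C = 0.25, and both expressions evaluate to
+ # 0.25*0.5 + 0.75*1.5 = 0.5*(-0.5 + 3.0) = 1.25.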
416
+ # "If Q1 and Q0 are sufficiently close... Q1 is accepted if it is
417
+ # sufficiently away from the inside point x2"
418
+ i = abs(q1 - work.q0) < 0.5 * abs(x21) # [1] (7)
419
+ xi = q1[i]
420
+ # Later, after (9), "If the point Q1 is in a +/- xtol neighborhood of
421
+ # x2, the new point is chosen in the larger interval at a distance
422
+ # tol away from x2."
423
+ # See also QBASIC code after "Accept Ql adjust if close to X2".
424
+ j = abs(q1[i] - work.x2[i]) <= work.xtol[i]
425
+ xi[j] = work.x2[i][j] + np.sign(x32[i][j]) * work.xtol[i][j]
426
+
427
+ # "If condition (7) is not satisfied, golden sectioning of the larger
428
+ # interval is carried out to introduce the new point."
429
+ # (For simplicity, we go ahead and calculate it for all points, but we
430
+ # change the elements for which the condition was satisfied.)
431
+ x = work.x2 + (2 - work.phi) * x32
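+ # (2 - phi is approx. 0.381966, the classic golden-section fraction
+ # of the larger interval)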
432
+ x[i] = xi
433
+
434
+ # "We define Q0 as the value of Q1 at the previous iteration."
435
+ work.q0 = q1
436
+ return x
437
+
438
+ def post_func_eval(x, f, work):
439
+ # Standard logic for updating a three-point bracket based on a new
440
+ # point. In QBASIC code, see "IF SGN(X-X2) = SGN(X3-X2) THEN...".
441
+ # There is an awful lot of data copying going on here; this would
442
+ # probably benefit from code optimization or implementation in Pythran.
443
+ i = np.sign(x - work.x2) == np.sign(work.x3 - work.x2)
444
+ xi, x1i, x2i, x3i = x[i], work.x1[i], work.x2[i], work.x3[i]
445
+ fi, f1i, f2i, f3i = f[i], work.f1[i], work.f2[i], work.f3[i]
446
+ j = fi > f2i
447
+ x3i[j], f3i[j] = xi[j], fi[j]
448
+ j = ~j
449
+ x1i[j], f1i[j], x2i[j], f2i[j] = x2i[j], f2i[j], xi[j], fi[j]
450
+
451
+ ni = ~i
452
+ xni, x1ni, x2ni, x3ni = x[ni], work.x1[ni], work.x2[ni], work.x3[ni]
453
+ fni, f1ni, f2ni, f3ni = f[ni], work.f1[ni], work.f2[ni], work.f3[ni]
454
+ j = fni > f2ni
455
+ x1ni[j], f1ni[j] = xni[j], fni[j]
456
+ j = ~j
457
+ x3ni[j], f3ni[j], x2ni[j], f2ni[j] = x2ni[j], f2ni[j], xni[j], fni[j]
458
+
459
+ work.x1[i], work.x2[i], work.x3[i] = x1i, x2i, x3i
460
+ work.f1[i], work.f2[i], work.f3[i] = f1i, f2i, f3i
461
+ work.x1[ni], work.x2[ni], work.x3[ni] = x1ni, x2ni, x3ni
462
+ work.f1[ni], work.f2[ni], work.f3[ni] = f1ni, f2ni, f3ni
463
+
464
+ def check_termination(work):
465
+ # Check for all terminal conditions and record statuses.
466
+ stop = np.zeros_like(work.x1, dtype=bool) # termination condition met
467
+
468
+ # Bracket is invalid; stop and don't return minimizer/minimum
469
+ i = ((work.f2 > work.f1) | (work.f2 > work.f3))
470
+ work.x2[i], work.f2[i] = np.nan, np.nan
471
+ stop[i], work.status[i] = True, eim._ESIGNERR
472
+
473
+ # Non-finite values; stop and don't return minimizer/minimum
474
+ finite = np.isfinite(work.x1+work.x2+work.x3+work.f1+work.f2+work.f3)
475
+ i = ~(finite | stop)
476
+ work.x2[i], work.f2[i] = np.nan, np.nan
477
+ stop[i], work.status[i] = True, eim._EVALUEERR
478
+
479
+ # [1] Section 3 "Points 1 and 3 are interchanged if necessary to make
480
+ # the (x2, x3) the larger interval."
481
+ # Note: I had used np.choose; this is much faster. This would be a good
482
+ # place to save e.g. `work.x3 - work.x2` for reuse, but I tried and
483
+ # didn't notice a speed boost, so let's keep it simple.
484
+ i = abs(work.x3 - work.x2) < abs(work.x2 - work.x1)
485
+ temp = work.x1[i]
486
+ work.x1[i] = work.x3[i]
487
+ work.x3[i] = temp
488
+ temp = work.f1[i]
489
+ work.f1[i] = work.f3[i]
490
+ work.f3[i] = temp
491
+
492
+ # [1] Section 3 (bottom of page 212)
493
+ # "We set a tolerance value xtol..."
494
+ work.xtol = abs(work.x2) * work.xrtol + work.xatol # [1] (8)
495
+ # "The convergence based on interval is achieved when..."
496
+ # Note: Equality allowed in case of `xtol=0`
497
+ i = abs(work.x3 - work.x2) <= 2 * work.xtol # [1] (9)
498
+
499
+ # "We define ftol using..."
500
+ ftol = abs(work.f2) * work.frtol + work.fatol # [1] (10)
501
+ # "The convergence based on function values is achieved when..."
502
+ # Note 1: modify in place to incorporate tolerance on function value.
503
+ # Note 2: factor of 2 is not in the text; see QBASIC start of DO loop
504
+ i |= (work.f1 - 2 * work.f2 + work.f3) <= 2*ftol # [1] (11)
505
+ i &= ~stop
506
+ stop[i], work.status[i] = True, eim._ECONVERGED
507
+
508
+ return stop
509
+
510
+ def post_termination_check(work):
511
+ pass
512
+
513
+ def customize_result(res, shape):
514
+ xl, xr, fl, fr = res['xl'], res['xr'], res['fl'], res['fr']
515
+ i = res['xl'] < res['xr']
516
+ res['xl'] = np.choose(i, (xr, xl))
517
+ res['xr'] = np.choose(i, (xl, xr))
518
+ res['fl'] = np.choose(i, (fr, fl))
519
+ res['fr'] = np.choose(i, (fl, fr))
520
+ return shape
521
+
522
+ return eim._loop(work, callback, shape, maxiter, func, args, dtype,
523
+ pre_func_eval, post_func_eval, check_termination,
524
+ post_termination_check, customize_result, res_work_pairs)
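The vectorized bookkeeping above is easier to follow in scalar form. Below is a minimal sketch of one step of the same rule (a hypothetical helper, not part of the file); it assumes, as `check_termination` arranges, that (x2, x3) is the larger interval, and that `phi` is the golden ratio stored as `work.phi` in the solver:

import numpy as np

phi = (1 + np.sqrt(5)) / 2  # golden ratio, assumed to match work.phi

def bracket_step(x1, f1, x2, f2, x3, f3, q0, xtol):
    # Quadratic minimum through the three bracket points ([1] Section 2).
    x21, x32 = x2 - x1, x3 - x2
    A = x21 * (f3 - f2)
    B = x32 * (f1 - f2)
    C = A / (A + B)
    q1 = 0.5 * (C * (x1 - x3) + x2 + x3)
    if abs(q1 - q0) < 0.5 * abs(x21):   # [1] (7): accept the quadratic point,
        x = q1
        if abs(q1 - x2) <= xtol:        # but keep it xtol away from x2
            x = x2 + np.sign(x32) * xtol
    else:                               # otherwise golden-section the larger interval
        x = x2 + (2 - phi) * x32
    return x, q1                        # caller stores q1 as q0 for the next step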
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_cobyla.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (101 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_cobyla_py.py ADDED
@@ -0,0 +1,316 @@
1
+ """
2
+ Interface to Constrained Optimization By Linear Approximation
3
+
4
+ Functions
5
+ ---------
6
+ .. autosummary::
7
+ :toctree: generated/
8
+
9
+ fmin_cobyla
10
+
11
+ """
12
+
13
+ import functools
14
+ from threading import RLock
15
+
16
+ import numpy as np
17
+ from scipy.optimize import _cobyla as cobyla
18
+ from ._optimize import (OptimizeResult, _check_unknown_options,
19
+ _prepare_scalar_function)
20
+ try:
21
+ from itertools import izip
22
+ except ImportError:
23
+ izip = zip
24
+
25
+ __all__ = ['fmin_cobyla']
26
+
27
+ # Workaround as _cobyla.minimize is not threadsafe
28
+ # due to an unknown f2py bug and can segfault,
29
+ # see gh-9658.
30
+ _module_lock = RLock()
31
+ def synchronized(func):
32
+ @functools.wraps(func)
33
+ def wrapper(*args, **kwargs):
34
+ with _module_lock:
35
+ return func(*args, **kwargs)
36
+ return wrapper
37
+
38
+ @synchronized
39
+ def fmin_cobyla(func, x0, cons, args=(), consargs=None, rhobeg=1.0,
40
+ rhoend=1e-4, maxfun=1000, disp=None, catol=2e-4,
41
+ *, callback=None):
42
+ """
43
+ Minimize a function using the Constrained Optimization By Linear
44
+ Approximation (COBYLA) method. This method wraps a FORTRAN
45
+ implementation of the algorithm.
46
+
47
+ Parameters
48
+ ----------
49
+ func : callable
50
+ Function to minimize. In the form func(x, \\*args).
51
+ x0 : ndarray
52
+ Initial guess.
53
+ cons : sequence
54
+ Constraint functions; must all be ``>=0`` (a single function
55
+ if only 1 constraint). Each function takes the parameters `x`
56
+ as its first argument, and it can return either a single number or
57
+ an array or list of numbers.
58
+ args : tuple, optional
59
+ Extra arguments to pass to function.
60
+ consargs : tuple, optional
61
+ Extra arguments to pass to constraint functions (default of None means
62
+ use same extra arguments as those passed to func).
63
+ Use ``()`` for no extra arguments.
64
+ rhobeg : float, optional
65
+ Reasonable initial changes to the variables.
66
+ rhoend : float, optional
67
+ Final accuracy in the optimization (not precisely guaranteed). This
68
+ is a lower bound on the size of the trust region.
69
+ disp : {0, 1, 2, 3}, optional
70
+ Controls the frequency of output; 0 implies no output.
71
+ maxfun : int, optional
72
+ Maximum number of function evaluations.
73
+ catol : float, optional
74
+ Absolute tolerance for constraint violations.
75
+ callback : callable, optional
76
+ Called after each iteration, as ``callback(x)``, where ``x`` is the
77
+ current parameter vector.
78
+
79
+ Returns
80
+ -------
81
+ x : ndarray
82
+ The argument that minimizes `func`.
83
+
84
+ See also
85
+ --------
86
+ minimize: Interface to minimization algorithms for multivariate
87
+ functions. See the 'COBYLA' `method` in particular.
88
+
89
+ Notes
90
+ -----
91
+ This algorithm is based on linear approximations to the objective
92
+ function and each constraint. We briefly describe the algorithm.
93
+
94
+ Suppose the function is being minimized over k variables. At the
95
+ jth iteration the algorithm has k+1 points v_1, ..., v_(k+1),
96
+ an approximate solution x_j, and a radius RHO_j. The algorithm maintains
98
+ affine (i.e., linear plus a constant) approximations to the objective
99
+ function and constraint functions such that their values agree with those
100
+ of the true functions at the k+1 points v_1, ..., v_(k+1).
100
+ This gives a linear program to solve (where the linear approximations
101
+ of the constraint functions are constrained to be non-negative).
102
+
103
+ However, the linear approximations are likely only good
104
+ approximations near the current simplex, so the linear program is
105
+ given the further requirement that the solution, which
106
+ will become x_(j+1), must be within RHO_j from x_j. RHO_j only
107
+ decreases, never increases. The initial RHO_j is rhobeg and the
108
+ final RHO_j is rhoend. In this way COBYLA's iterations behave
109
+ like a trust region algorithm.
110
+
111
+ Additionally, the linear program may be inconsistent, or the
112
+ approximation may give poor improvement. For details about
113
+ how these issues are resolved, as well as how the points v_i are
114
+ updated, refer to the source code or the references below.
115
+
116
+
117
+ References
118
+ ----------
119
+ Powell M.J.D. (1994), "A direct search optimization method that models
120
+ the objective and constraint functions by linear interpolation.", in
121
+ Advances in Optimization and Numerical Analysis, eds. S. Gomez and
122
+ J-P Hennart, Kluwer Academic (Dordrecht), pp. 51-67
123
+
124
+ Powell M.J.D. (1998), "Direct search algorithms for optimization
125
+ calculations", Acta Numerica 7, 287-336
126
+
127
+ Powell M.J.D. (2007), "A view of algorithms for optimization without
128
+ derivatives", Cambridge University Technical Report DAMTP 2007/NA03
129
+
130
+
131
+ Examples
132
+ --------
133
+ Minimize the objective function f(x,y) = x*y subject
134
+ to the constraints x**2 + y**2 < 1 and y > 0::
135
+
136
+ >>> def objective(x):
137
+ ... return x[0]*x[1]
138
+ ...
139
+ >>> def constr1(x):
140
+ ... return 1 - (x[0]**2 + x[1]**2)
141
+ ...
142
+ >>> def constr2(x):
143
+ ... return x[1]
144
+ ...
145
+ >>> from scipy.optimize import fmin_cobyla
146
+ >>> fmin_cobyla(objective, [0.0, 0.1], [constr1, constr2], rhoend=1e-7)
147
+ array([-0.70710685, 0.70710671])
148
+
149
+ The exact solution is (-sqrt(2)/2, sqrt(2)/2).
150
+
151
+
152
+
153
+ """
154
+ err = "cons must be a sequence of callable functions or a single"\
155
+ " callable function."
156
+ try:
157
+ len(cons)
158
+ except TypeError as e:
159
+ if callable(cons):
160
+ cons = [cons]
161
+ else:
162
+ raise TypeError(err) from e
163
+ else:
164
+ for thisfunc in cons:
165
+ if not callable(thisfunc):
166
+ raise TypeError(err)
167
+
168
+ if consargs is None:
169
+ consargs = args
170
+
171
+ # build constraints
172
+ con = tuple({'type': 'ineq', 'fun': c, 'args': consargs} for c in cons)
173
+
174
+ # options
175
+ opts = {'rhobeg': rhobeg,
176
+ 'tol': rhoend,
177
+ 'disp': disp,
178
+ 'maxiter': maxfun,
179
+ 'catol': catol,
180
+ 'callback': callback}
181
+
182
+ sol = _minimize_cobyla(func, x0, args, constraints=con,
183
+ **opts)
184
+ if disp and not sol['success']:
185
+ print(f"COBYLA failed to find a solution: {sol.message}")
186
+ return sol['x']
187
+
188
+
189
+ @synchronized
190
+ def _minimize_cobyla(fun, x0, args=(), constraints=(),
191
+ rhobeg=1.0, tol=1e-4, maxiter=1000,
192
+ disp=False, catol=2e-4, callback=None, bounds=None,
193
+ **unknown_options):
194
+ """
195
+ Minimize a scalar function of one or more variables using the
196
+ Constrained Optimization BY Linear Approximation (COBYLA) algorithm.
197
+
198
+ Options
199
+ -------
200
+ rhobeg : float
201
+ Reasonable initial changes to the variables.
202
+ tol : float
203
+ Final accuracy in the optimization (not precisely guaranteed).
204
+ This is a lower bound on the size of the trust region.
205
+ disp : bool
206
+ Set to True to print convergence messages. If False,
207
+ `verbosity` is ignored and set to 0.
208
+ maxiter : int
209
+ Maximum number of function evaluations.
210
+ catol : float
211
+ Tolerance (absolute) for constraint violations
212
+
213
+ """
214
+ _check_unknown_options(unknown_options)
215
+ maxfun = maxiter
216
+ rhoend = tol
217
+ iprint = int(bool(disp))
218
+
219
+ # check constraints
220
+ if isinstance(constraints, dict):
221
+ constraints = [constraints]  # a list, so bound constraints can be appended below
222
+
223
+ if bounds:
224
+ i_lb = np.isfinite(bounds.lb)
225
+ if np.any(i_lb):
226
+ def lb_constraint(x, *args, **kwargs):
227
+ return x[i_lb] - bounds.lb[i_lb]
228
+
229
+ constraints.append({'type': 'ineq', 'fun': lb_constraint})
230
+
231
+ i_ub = np.isfinite(bounds.ub)
232
+ if np.any(i_ub):
233
+ def ub_constraint(x):
234
+ return bounds.ub[i_ub] - x[i_ub]
235
+
236
+ constraints.append({'type': 'ineq', 'fun': ub_constraint})
237
+
238
+ for ic, con in enumerate(constraints):
239
+ # check type
240
+ try:
241
+ ctype = con['type'].lower()
242
+ except KeyError as e:
243
+ raise KeyError('Constraint %d has no type defined.' % ic) from e
244
+ except TypeError as e:
245
+ raise TypeError('Constraints must be defined using a '
246
+ 'dictionary.') from e
247
+ except AttributeError as e:
248
+ raise TypeError("Constraint's type must be a string.") from e
249
+ else:
250
+ if ctype != 'ineq':
251
+ raise ValueError("Constraints of type '%s' not handled by "
252
+ "COBYLA." % con['type'])
253
+
254
+ # check function
255
+ if 'fun' not in con:
256
+ raise KeyError('Constraint %d has no function defined.' % ic)
257
+
258
+ # check extra arguments
259
+ if 'args' not in con:
260
+ con['args'] = ()
261
+
262
+ # m is the total number of constraint values
263
+ # it takes into account that some constraints may be vector-valued
264
+ cons_lengths = []
265
+ for c in constraints:
266
+ f = c['fun'](x0, *c['args'])
267
+ try:
268
+ cons_length = len(f)
269
+ except TypeError:
270
+ cons_length = 1
271
+ cons_lengths.append(cons_length)
272
+ m = sum(cons_lengths)
273
+
274
+ # create the ScalarFunction, cobyla doesn't require derivative function
275
+ def _jac(x, *args):
276
+ return None
277
+
278
+ sf = _prepare_scalar_function(fun, x0, args=args, jac=_jac)
279
+
280
+ def calcfc(x, con):
281
+ f = sf.fun(x)
282
+ i = 0
283
+ for size, c in izip(cons_lengths, constraints):
284
+ con[i: i + size] = c['fun'](x, *c['args'])
285
+ i += size
286
+ return f
287
+
288
+ def wrapped_callback(x):
289
+ if callback is not None:
290
+ callback(np.copy(x))
291
+
292
+ info = np.zeros(4, np.float64)
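+ # dinfo layout filled in by the Fortran routine and read back below:
+ # [0] exit status, [1] nfev, [2] final objective value, [3] max constraint violation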
293
+ xopt, info = cobyla.minimize(calcfc, m=m, x=np.copy(x0), rhobeg=rhobeg,
294
+ rhoend=rhoend, iprint=iprint, maxfun=maxfun,
295
+ dinfo=info, callback=wrapped_callback)
296
+
297
+ if info[3] > catol:
298
+ # Check constraint violation
299
+ info[0] = 4
300
+
301
+ return OptimizeResult(x=xopt,
302
+ status=int(info[0]),
303
+ success=info[0] == 1,
304
+ message={1: 'Optimization terminated successfully.',
305
+ 2: 'Maximum number of function evaluations '
306
+ 'has been exceeded.',
307
+ 3: 'Rounding errors are becoming damaging '
308
+ 'in COBYLA subroutine.',
309
+ 4: 'Did not converge to a solution '
310
+ 'satisfying the constraints. See '
311
+ '`maxcv` for magnitude of violation.',
312
+ 5: 'NaN result encountered.'
313
+ }.get(info[0], 'Unknown exit status.'),
314
+ nfev=int(info[1]),
315
+ fun=info[2],
316
+ maxcv=info[3])
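As a consequence of the bounds handling above, passing a `Bounds` object to `minimize(..., method='COBYLA')` is equivalent to appending the corresponding 'ineq' constraint dictionaries by hand. A small illustrative sketch (objective and numbers are made up):

import numpy as np
from scipy.optimize import minimize, Bounds

def obj(x):
    return x[0]**2 + x[1]**2

# The finite lower bound is turned into x[0] - 0.5 >= 0 internally.
res_a = minimize(obj, [1.0, 1.0], method='COBYLA',
                 bounds=Bounds([0.5, -np.inf], [np.inf, np.inf]))

# Hand-written equivalent of the constraint that _minimize_cobyla appends.
res_b = minimize(obj, [1.0, 1.0], method='COBYLA',
                 constraints=[{'type': 'ineq', 'fun': lambda x: x[0] - 0.5}])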
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_constraints.py ADDED
@@ -0,0 +1,590 @@
1
+ """Constraints definition for minimize."""
2
+ import numpy as np
3
+ from ._hessian_update_strategy import BFGS
4
+ from ._differentiable_functions import (
5
+ VectorFunction, LinearVectorFunction, IdentityVectorFunction)
6
+ from ._optimize import OptimizeWarning
7
+ from warnings import warn, catch_warnings, simplefilter, filterwarnings
8
+ from scipy.sparse import issparse
9
+
10
+
11
+ def _arr_to_scalar(x):
12
+ # If x is a numpy array, return x.item(). This will
13
+ # fail if the array has more than one element.
14
+ return x.item() if isinstance(x, np.ndarray) else x
15
+
16
+
17
+ class NonlinearConstraint:
18
+ """Nonlinear constraint on the variables.
19
+
20
+ The constraint has the general inequality form::
21
+
22
+ lb <= fun(x) <= ub
23
+
24
+ Here the vector of independent variables x is passed as ndarray of shape
25
+ (n,) and ``fun`` returns a vector with m components.
26
+
27
+ It is possible to use equal bounds to represent an equality constraint or
28
+ infinite bounds to represent a one-sided constraint.
29
+
30
+ Parameters
31
+ ----------
32
+ fun : callable
33
+ The function defining the constraint.
34
+ The signature is ``fun(x) -> array_like, shape (m,)``.
35
+ lb, ub : array_like
36
+ Lower and upper bounds on the constraint. Each array must have the
37
+ shape (m,) or be a scalar, in the latter case a bound will be the same
38
+ for all components of the constraint. Use ``np.inf`` with an
39
+ appropriate sign to specify a one-sided constraint.
40
+ Set components of `lb` and `ub` equal to represent an equality
41
+ constraint. Note that you can mix constraints of different types:
42
+ interval, one-sided or equality, by setting different components of
43
+ `lb` and `ub` as necessary.
44
+ jac : {callable, '2-point', '3-point', 'cs'}, optional
45
+ Method of computing the Jacobian matrix (an m-by-n matrix,
46
+ where element (i, j) is the partial derivative of f[i] with
47
+ respect to x[j]). The keywords {'2-point', '3-point',
48
+ 'cs'} select a finite difference scheme for the numerical estimation.
49
+ A callable must have the following signature:
50
+ ``jac(x) -> {ndarray, sparse matrix}, shape (m, n)``.
51
+ Default is '2-point'.
52
+ hess : {callable, '2-point', '3-point', 'cs', HessianUpdateStrategy, None}, optional
53
+ Method for computing the Hessian matrix. The keywords
54
+ {'2-point', '3-point', 'cs'} select a finite difference scheme for
55
+ numerical estimation. Alternatively, objects implementing
56
+ `HessianUpdateStrategy` interface can be used to approximate the
57
+ Hessian. Currently available implementations are:
58
+
59
+ - `BFGS` (default option)
60
+ - `SR1`
61
+
62
+ A callable must return the Hessian matrix of ``dot(fun, v)`` and
63
+ must have the following signature:
64
+ ``hess(x, v) -> {LinearOperator, sparse matrix, array_like}, shape (n, n)``.
65
+ Here ``v`` is ndarray with shape (m,) containing Lagrange multipliers.
66
+ keep_feasible : array_like of bool, optional
67
+ Whether to keep the constraint components feasible throughout
68
+ iterations. A single value set this property for all components.
69
+ Default is False. Has no effect for equality constraints.
70
+ finite_diff_rel_step: None or array_like, optional
71
+ Relative step size for the finite difference approximation. Default is
72
+ None, which will select a reasonable value automatically depending
73
+ on a finite difference scheme.
74
+ finite_diff_jac_sparsity: {None, array_like, sparse matrix}, optional
75
+ Defines the sparsity structure of the Jacobian matrix for finite
76
+ difference estimation, its shape must be (m, n). If the Jacobian has
77
+ only few non-zero elements in *each* row, providing the sparsity
78
+ structure will greatly speed up the computations. A zero entry means
79
+ that a corresponding element in the Jacobian is identically zero.
80
+ If provided, forces the use of 'lsmr' trust-region solver.
81
+ If None (default) then dense differencing will be used.
82
+
83
+ Notes
84
+ -----
85
+ Finite difference schemes {'2-point', '3-point', 'cs'} may be used for
86
+ approximating either the Jacobian or the Hessian. We, however, do not allow
87
+ its use for approximating both simultaneously. Hence whenever the Jacobian
88
+ is estimated via finite-differences, we require the Hessian to be estimated
89
+ using one of the quasi-Newton strategies.
90
+
91
+ The scheme 'cs' is potentially the most accurate, but requires the function
92
+ to correctly handles complex inputs and be analytically continuable to the
93
+ complex plane. The scheme '3-point' is more accurate than '2-point' but
94
+ requires twice as many operations.
95
+
96
+ Examples
97
+ --------
98
+ Constrain ``x[0] < sin(x[1]) + 1.9``
99
+
100
+ >>> from scipy.optimize import NonlinearConstraint
101
+ >>> import numpy as np
102
+ >>> con = lambda x: x[0] - np.sin(x[1])
103
+ >>> nlc = NonlinearConstraint(con, -np.inf, 1.9)
104
+
105
+ """
106
+ def __init__(self, fun, lb, ub, jac='2-point', hess=BFGS(),
107
+ keep_feasible=False, finite_diff_rel_step=None,
108
+ finite_diff_jac_sparsity=None):
109
+ self.fun = fun
110
+ self.lb = lb
111
+ self.ub = ub
112
+ self.finite_diff_rel_step = finite_diff_rel_step
113
+ self.finite_diff_jac_sparsity = finite_diff_jac_sparsity
114
+ self.jac = jac
115
+ self.hess = hess
116
+ self.keep_feasible = keep_feasible
117
+
118
+
119
+ class LinearConstraint:
120
+ """Linear constraint on the variables.
121
+
122
+ The constraint has the general inequality form::
123
+
124
+ lb <= A.dot(x) <= ub
125
+
126
+ Here the vector of independent variables x is passed as ndarray of shape
127
+ (n,) and the matrix A has shape (m, n).
128
+
129
+ It is possible to use equal bounds to represent an equality constraint or
130
+ infinite bounds to represent a one-sided constraint.
131
+
132
+ Parameters
133
+ ----------
134
+ A : {array_like, sparse matrix}, shape (m, n)
135
+ Matrix defining the constraint.
136
+ lb, ub : dense array_like, optional
137
+ Lower and upper limits on the constraint. Each array must have the
138
+ shape (m,) or be a scalar, in the latter case a bound will be the same
139
+ for all components of the constraint. Use ``np.inf`` with an
140
+ appropriate sign to specify a one-sided constraint.
141
+ Set components of `lb` and `ub` equal to represent an equality
142
+ constraint. Note that you can mix constraints of different types:
143
+ interval, one-sided or equality, by setting different components of
144
+ `lb` and `ub` as necessary. Defaults to ``lb = -np.inf``
145
+ and ``ub = np.inf`` (no limits).
146
+ keep_feasible : dense array_like of bool, optional
147
+ Whether to keep the constraint components feasible throughout
148
+ iterations. A single value set this property for all components.
149
+ Default is False. Has no effect for equality constraints.
150
+ """
151
+ def _input_validation(self):
152
+ if self.A.ndim != 2:
153
+ message = "`A` must have exactly two dimensions."
154
+ raise ValueError(message)
155
+
156
+ try:
157
+ shape = self.A.shape[0:1]
158
+ self.lb = np.broadcast_to(self.lb, shape)
159
+ self.ub = np.broadcast_to(self.ub, shape)
160
+ self.keep_feasible = np.broadcast_to(self.keep_feasible, shape)
161
+ except ValueError:
162
+ message = ("`lb`, `ub`, and `keep_feasible` must be broadcastable "
163
+ "to shape `A.shape[0:1]`")
164
+ raise ValueError(message)
165
+
166
+ def __init__(self, A, lb=-np.inf, ub=np.inf, keep_feasible=False):
167
+ if not issparse(A):
168
+ # In some cases, if the constraint is not valid, this emits a
169
+ # VisibleDeprecationWarning about ragged nested sequences
170
+ # before eventually causing an error. `scipy.optimize.milp` would
171
+ # prefer that this just error out immediately so it can handle it
172
+ # rather than concerning the user.
173
+ with catch_warnings():
174
+ simplefilter("error")
175
+ self.A = np.atleast_2d(A).astype(np.float64)
176
+ else:
177
+ self.A = A
178
+ if issparse(lb) or issparse(ub):
179
+ raise ValueError("Constraint limits must be dense arrays.")
180
+ self.lb = np.atleast_1d(lb).astype(np.float64)
181
+ self.ub = np.atleast_1d(ub).astype(np.float64)
182
+
183
+ if issparse(keep_feasible):
184
+ raise ValueError("`keep_feasible` must be a dense array.")
185
+ self.keep_feasible = np.atleast_1d(keep_feasible).astype(bool)
186
+ self._input_validation()
187
+
188
+ def residual(self, x):
189
+ """
190
+ Calculate the residual between the constraint function and the limits
191
+
192
+ For a linear constraint of the form::
193
+
194
+ lb <= A@x <= ub
195
+
196
+ the lower and upper residuals between ``A@x`` and the limits are values
197
+ ``sl`` and ``sb`` such that::
198
+
199
+ lb + sl == A@x == ub - sb
200
+
201
+ When all elements of ``sl`` and ``sb`` are positive, all elements of
202
+ the constraint are satisfied; a negative element in ``sl`` or ``sb``
203
+ indicates that the corresponding element of the constraint is not
204
+ satisfied.
205
+
206
+ Parameters
207
+ ----------
208
+ x: array_like
209
+ Vector of independent variables
210
+
211
+ Returns
212
+ -------
213
+ sl, sb : array-like
214
+ The lower and upper residuals
215
+ """
216
+ return self.A@x - self.lb, self.ub - self.A@x
217
+
218
+
219
+ class Bounds:
220
+ """Bounds constraint on the variables.
221
+
222
+ The constraint has the general inequality form::
223
+
224
+ lb <= x <= ub
225
+
226
+ It is possible to use equal bounds to represent an equality constraint or
227
+ infinite bounds to represent a one-sided constraint.
228
+
229
+ Parameters
230
+ ----------
231
+ lb, ub : dense array_like, optional
232
+ Lower and upper bounds on independent variables. `lb`, `ub`, and
233
+ `keep_feasible` must be the same shape or broadcastable.
234
+ Set components of `lb` and `ub` equal
235
+ to fix a variable. Use ``np.inf`` with an appropriate sign to disable
236
+ bounds on all or some variables. Note that you can mix constraints of
237
+ different types: interval, one-sided or equality, by setting different
238
+ components of `lb` and `ub` as necessary. Defaults to ``lb = -np.inf``
239
+ and ``ub = np.inf`` (no bounds).
240
+ keep_feasible : dense array_like of bool, optional
241
+ Whether to keep the constraint components feasible throughout
242
+ iterations. Must be broadcastable with `lb` and `ub`.
243
+ Default is False. Has no effect for equality constraints.
244
+ """
245
+ def _input_validation(self):
246
+ try:
247
+ res = np.broadcast_arrays(self.lb, self.ub, self.keep_feasible)
248
+ self.lb, self.ub, self.keep_feasible = res
249
+ except ValueError:
250
+ message = "`lb`, `ub`, and `keep_feasible` must be broadcastable."
251
+ raise ValueError(message)
252
+
253
+ def __init__(self, lb=-np.inf, ub=np.inf, keep_feasible=False):
254
+ if issparse(lb) or issparse(ub):
255
+ raise ValueError("Lower and upper bounds must be dense arrays.")
256
+ self.lb = np.atleast_1d(lb)
257
+ self.ub = np.atleast_1d(ub)
258
+
259
+ if issparse(keep_feasible):
260
+ raise ValueError("`keep_feasible` must be a dense array.")
261
+ self.keep_feasible = np.atleast_1d(keep_feasible).astype(bool)
262
+ self._input_validation()
263
+
264
+ def __repr__(self):
265
+ start = f"{type(self).__name__}({self.lb!r}, {self.ub!r}"
266
+ if np.any(self.keep_feasible):
267
+ end = f", keep_feasible={self.keep_feasible!r})"
268
+ else:
269
+ end = ")"
270
+ return start + end
271
+
272
+ def residual(self, x):
273
+ """Calculate the residual (slack) between the input and the bounds
274
+
275
+ For a bound constraint of the form::
276
+
277
+ lb <= x <= ub
278
+
279
+ the lower and upper residuals between `x` and the bounds are values
280
+ ``sl`` and ``sb`` such that::
281
+
282
+ lb + sl == x == ub - sb
283
+
284
+ When all elements of ``sl`` and ``sb`` are positive, all elements of
285
+ ``x`` lie within the bounds; a negative element in ``sl`` or ``sb``
286
+ indicates that the corresponding element of ``x`` is out of bounds.
287
+
288
+ Parameters
289
+ ----------
290
+ x: array_like
291
+ Vector of independent variables
292
+
293
+ Returns
294
+ -------
295
+ sl, sb : array-like
296
+ The lower and upper residuals
297
+ """
298
+ return x - self.lb, self.ub - x
299
+
300
+
301
+ class PreparedConstraint:
302
+ """Constraint prepared from a user defined constraint.
303
+
304
+ On creation it will check whether a constraint definition is valid and
305
+ the initial point is feasible. If created successfully, it will contain
306
+ the attributes listed below.
307
+
308
+ Parameters
309
+ ----------
310
+ constraint : {NonlinearConstraint, LinearConstraint`, Bounds}
311
+ Constraint to check and prepare.
312
+ x0 : array_like
313
+ Initial vector of independent variables.
314
+ sparse_jacobian : bool or None, optional
315
+ If bool, then the Jacobian of the constraint will be converted
316
+ to the corresponded format if necessary. If None (default), such
317
+ conversion is not made.
318
+ finite_diff_bounds : 2-tuple, optional
319
+ Lower and upper bounds on the independent variables for the finite
320
+ difference approximation, if applicable. Defaults to no bounds.
321
+
322
+ Attributes
323
+ ----------
324
+ fun : {VectorFunction, LinearVectorFunction, IdentityVectorFunction}
325
+ Function defining the constraint wrapped by one of the convenience
326
+ classes.
327
+ bounds : 2-tuple
328
+ Contains lower and upper bounds for the constraints --- lb and ub.
329
+ These are converted to ndarray and have a size equal to the number of
330
+ the constraints.
331
+ keep_feasible : ndarray
332
+ Array indicating which components must be kept feasible with a size
333
+ equal to the number of the constraints.
334
+ """
335
+ def __init__(self, constraint, x0, sparse_jacobian=None,
336
+ finite_diff_bounds=(-np.inf, np.inf)):
337
+ if isinstance(constraint, NonlinearConstraint):
338
+ fun = VectorFunction(constraint.fun, x0,
339
+ constraint.jac, constraint.hess,
340
+ constraint.finite_diff_rel_step,
341
+ constraint.finite_diff_jac_sparsity,
342
+ finite_diff_bounds, sparse_jacobian)
343
+ elif isinstance(constraint, LinearConstraint):
344
+ fun = LinearVectorFunction(constraint.A, x0, sparse_jacobian)
345
+ elif isinstance(constraint, Bounds):
346
+ fun = IdentityVectorFunction(x0, sparse_jacobian)
347
+ else:
348
+ raise ValueError("`constraint` of an unknown type is passed.")
349
+
350
+ m = fun.m
351
+
352
+ lb = np.asarray(constraint.lb, dtype=float)
353
+ ub = np.asarray(constraint.ub, dtype=float)
354
+ keep_feasible = np.asarray(constraint.keep_feasible, dtype=bool)
355
+
356
+ lb = np.broadcast_to(lb, m)
357
+ ub = np.broadcast_to(ub, m)
358
+ keep_feasible = np.broadcast_to(keep_feasible, m)
359
+
360
+ if keep_feasible.shape != (m,):
361
+ raise ValueError("`keep_feasible` has a wrong shape.")
362
+
363
+ mask = keep_feasible & (lb != ub)
364
+ f0 = fun.f
365
+ if np.any(f0[mask] < lb[mask]) or np.any(f0[mask] > ub[mask]):
366
+ raise ValueError("`x0` is infeasible with respect to some "
367
+ "inequality constraint with `keep_feasible` "
368
+ "set to True.")
369
+
370
+ self.fun = fun
371
+ self.bounds = (lb, ub)
372
+ self.keep_feasible = keep_feasible
373
+
374
+ def violation(self, x):
375
+ """How much the constraint is exceeded by.
376
+
377
+ Parameters
378
+ ----------
379
+ x : array-like
380
+ Vector of independent variables
381
+
382
+ Returns
383
+ -------
384
+ excess : array-like
385
+ How much the constraint is exceeded by, for each of the
386
+ constraints specified by `PreparedConstraint.fun`.
387
+ """
388
+ with catch_warnings():
389
+ # Ignore the following warning, it's not important when
390
+ # figuring out total violation
391
+ # UserWarning: delta_grad == 0.0. Check if the approximated
392
+ # function is linear
393
+ filterwarnings("ignore", "delta_grad", UserWarning)
394
+ ev = self.fun.fun(np.asarray(x))
395
+
396
+ excess_lb = np.maximum(self.bounds[0] - ev, 0)
397
+ excess_ub = np.maximum(ev - self.bounds[1], 0)
398
+
399
+ return excess_lb + excess_ub
400
+
401
+
402
+ def new_bounds_to_old(lb, ub, n):
403
+ """Convert the new bounds representation to the old one.
404
+
405
+ The new representation is a tuple (lb, ub) and the old one is a list
406
+ containing n tuples, ith containing lower and upper bound on a ith
407
+ variable.
408
+ If any of the entries in lb/ub are -np.inf/np.inf they are replaced by
409
+ None.
410
+ """
411
+ lb = np.broadcast_to(lb, n)
412
+ ub = np.broadcast_to(ub, n)
413
+
414
+ lb = [float(x) if x > -np.inf else None for x in lb]
415
+ ub = [float(x) if x < np.inf else None for x in ub]
416
+
417
+ return list(zip(lb, ub))
418
+
419
+
420
+ def old_bound_to_new(bounds):
421
+ """Convert the old bounds representation to the new one.
422
+
423
+ The new representation is a tuple (lb, ub) and the old one is a list
424
+ containing n tuples, ith containing lower and upper bound on a ith
425
+ variable.
426
+ If any of the entries in lb/ub are None they are replaced by
427
+ -np.inf/np.inf.
428
+ """
429
+ lb, ub = zip(*bounds)
430
+
431
+ # Convert occurrences of None to -inf or inf, and replace occurrences of
432
+ # any numpy array x with x.item(). Then wrap the results in numpy arrays.
433
+ lb = np.array([float(_arr_to_scalar(x)) if x is not None else -np.inf
434
+ for x in lb])
435
+ ub = np.array([float(_arr_to_scalar(x)) if x is not None else np.inf
436
+ for x in ub])
437
+
438
+ return lb, ub
439
+
440
+
441
+ def strict_bounds(lb, ub, keep_feasible, n_vars):
442
+ """Remove bounds which are not asked to be kept feasible."""
443
+ strict_lb = np.resize(lb, n_vars).astype(float)
444
+ strict_ub = np.resize(ub, n_vars).astype(float)
445
+ keep_feasible = np.resize(keep_feasible, n_vars)
446
+ strict_lb[~keep_feasible] = -np.inf
447
+ strict_ub[~keep_feasible] = np.inf
448
+ return strict_lb, strict_ub
449
+
450
+
451
+ def new_constraint_to_old(con, x0):
452
+ """
453
+ Converts new-style constraint objects to old-style constraint dictionaries.
454
+ """
455
+ if isinstance(con, NonlinearConstraint):
456
+ if (con.finite_diff_jac_sparsity is not None or
457
+ con.finite_diff_rel_step is not None or
458
+ not isinstance(con.hess, BFGS) or # misses user specified BFGS
459
+ con.keep_feasible):
460
+ warn("Constraint options `finite_diff_jac_sparsity`, "
461
+ "`finite_diff_rel_step`, `keep_feasible`, and `hess`"
462
+ "are ignored by this method.",
463
+ OptimizeWarning, stacklevel=3)
464
+
465
+ fun = con.fun
466
+ if callable(con.jac):
467
+ jac = con.jac
468
+ else:
469
+ jac = None
470
+
471
+ else: # LinearConstraint
472
+ if np.any(con.keep_feasible):
473
+ warn("Constraint option `keep_feasible` is ignored by this method.",
474
+ OptimizeWarning, stacklevel=3)
475
+
476
+ A = con.A
477
+ if issparse(A):
478
+ A = A.toarray()
479
+ def fun(x):
480
+ return np.dot(A, x)
481
+ def jac(x):
482
+ return A
483
+
484
+ # FIXME: when bugs in VectorFunction/LinearVectorFunction are worked out,
485
+ # use pcon.fun.fun and pcon.fun.jac. Until then, get fun/jac above.
486
+ pcon = PreparedConstraint(con, x0)
487
+ lb, ub = pcon.bounds
488
+
489
+ i_eq = lb == ub
490
+ i_bound_below = np.logical_xor(lb != -np.inf, i_eq)
491
+ i_bound_above = np.logical_xor(ub != np.inf, i_eq)
492
+ i_unbounded = np.logical_and(lb == -np.inf, ub == np.inf)
493
+
494
+ if np.any(i_unbounded):
495
+ warn("At least one constraint is unbounded above and below. Such "
496
+ "constraints are ignored.",
497
+ OptimizeWarning, stacklevel=3)
498
+
499
+ ceq = []
500
+ if np.any(i_eq):
501
+ def f_eq(x):
502
+ y = np.array(fun(x)).flatten()
503
+ return y[i_eq] - lb[i_eq]
504
+ ceq = [{"type": "eq", "fun": f_eq}]
505
+
506
+ if jac is not None:
507
+ def j_eq(x):
508
+ dy = jac(x)
509
+ if issparse(dy):
510
+ dy = dy.toarray()
511
+ dy = np.atleast_2d(dy)
512
+ return dy[i_eq, :]
513
+ ceq[0]["jac"] = j_eq
514
+
515
+ cineq = []
516
+ n_bound_below = np.sum(i_bound_below)
517
+ n_bound_above = np.sum(i_bound_above)
518
+ if n_bound_below + n_bound_above:
519
+ def f_ineq(x):
520
+ y = np.zeros(n_bound_below + n_bound_above)
521
+ y_all = np.array(fun(x)).flatten()
522
+ y[:n_bound_below] = y_all[i_bound_below] - lb[i_bound_below]
523
+ y[n_bound_below:] = -(y_all[i_bound_above] - ub[i_bound_above])
524
+ return y
525
+ cineq = [{"type": "ineq", "fun": f_ineq}]
526
+
527
+ if jac is not None:
528
+ def j_ineq(x):
529
+ dy = np.zeros((n_bound_below + n_bound_above, len(x0)))
530
+ dy_all = jac(x)
531
+ if issparse(dy_all):
532
+ dy_all = dy_all.toarray()
533
+ dy_all = np.atleast_2d(dy_all)
534
+ dy[:n_bound_below, :] = dy_all[i_bound_below]
535
+ dy[n_bound_below:, :] = -dy_all[i_bound_above]
536
+ return dy
537
+ cineq[0]["jac"] = j_ineq
538
+
539
+ old_constraints = ceq + cineq
540
+
541
+ if len(old_constraints) > 1:
542
+ warn("Equality and inequality constraints are specified in the same "
543
+ "element of the constraint list. For efficient use with this "
544
+ "method, equality and inequality constraints should be specified "
545
+ "in separate elements of the constraint list. ",
546
+ OptimizeWarning, stacklevel=3)
547
+ return old_constraints
548
+
549
+
550
+ def old_constraint_to_new(ic, con):
551
+ """
552
+ Converts old-style constraint dictionaries to new-style constraint objects.
553
+ """
554
+ # check type
555
+ try:
556
+ ctype = con['type'].lower()
557
+ except KeyError as e:
558
+ raise KeyError('Constraint %d has no type defined.' % ic) from e
559
+ except TypeError as e:
560
+ raise TypeError(
561
+ 'Constraints must be a sequence of dictionaries.'
562
+ ) from e
563
+ except AttributeError as e:
564
+ raise TypeError("Constraint's type must be a string.") from e
565
+ else:
566
+ if ctype not in ['eq', 'ineq']:
567
+ raise ValueError("Unknown constraint type '%s'." % con['type'])
568
+ if 'fun' not in con:
569
+ raise ValueError('Constraint %d has no function defined.' % ic)
570
+
571
+ lb = 0
572
+ if ctype == 'eq':
573
+ ub = 0
574
+ else:
575
+ ub = np.inf
576
+
577
+ jac = '2-point'
578
+ if 'args' in con:
579
+ args = con['args']
580
+ def fun(x):
581
+ return con["fun"](x, *args)
582
+ if 'jac' in con:
583
+ def jac(x):
584
+ return con["jac"](x, *args)
585
+ else:
586
+ fun = con['fun']
587
+ if 'jac' in con:
588
+ jac = con['jac']
589
+
590
+ return NonlinearConstraint(fun, lb, ub, jac)
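A short illustration of the residual conventions defined above (values are made up):

import numpy as np
from scipy.optimize import Bounds, LinearConstraint

b = Bounds(lb=[0, -np.inf], ub=[1, 5])
sl, sb = b.residual([0.5, 6.0])
# sl = x - lb = [0.5, inf]; sb = ub - x = [0.5, -1.0]
# sb[1] < 0 flags that x[1] lies above its upper bound.

lc = LinearConstraint(A=[[1, 1]], lb=0, ub=2)
sl, sb = lc.residual([1.0, 0.5])
# A@x = 1.5, so (A@x - lb, ub - A@x) = ([1.5], [0.5]); both positive: satisfied.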
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_dcsrch.py ADDED
@@ -0,0 +1,728 @@
1
+ import numpy as np
2
+
3
+ """
4
+ # 2023 - ported from minpack2.dcsrch, dcstep (Fortran) to Python
5
+ c MINPACK-1 Project. June 1983.
6
+ c Argonne National Laboratory.
7
+ c Jorge J. More' and David J. Thuente.
8
+ c
9
+ c MINPACK-2 Project. November 1993.
10
+ c Argonne National Laboratory and University of Minnesota.
11
+ c Brett M. Averick, Richard G. Carter, and Jorge J. More'.
12
+ """
13
+
14
+ # NOTE this file was linted by black on first commit, and can be kept that way.
15
+
16
+
17
+ class DCSRCH:
18
+ """
19
+ Parameters
20
+ ----------
21
+ phi : callable phi(alpha)
22
+ Function at point `alpha`
23
+ derphi : callable phi'(alpha)
24
+ Objective function derivative. Returns a scalar.
25
+ ftol : float
26
+ A nonnegative tolerance for the sufficient decrease condition.
27
+ gtol : float
28
+ A nonnegative tolerance for the curvature condition.
29
+ xtol : float
30
+ A nonnegative relative tolerance for an acceptable step. The
31
+ subroutine exits with a warning if the relative difference between
32
+ sty and stx is less than xtol.
33
+ stpmin : float
34
+ A nonnegative lower bound for the step.
35
+ stpmax : float
36
+ A nonnegative upper bound for the step.
37
+
38
+ Notes
39
+ -----
40
+
41
+ This subroutine finds a step that satisfies a sufficient
42
+ decrease condition and a curvature condition.
43
+
44
+ Each call of the subroutine updates an interval with
45
+ endpoints stx and sty. The interval is initially chosen
46
+ so that it contains a minimizer of the modified function
47
+
48
+ psi(stp) = f(stp) - f(0) - ftol*stp*f'(0).
49
+
50
+ If psi(stp) <= 0 and f'(stp) >= 0 for some step, then the
51
+ interval is chosen so that it contains a minimizer of f.
52
+
53
+ The algorithm is designed to find a step that satisfies
54
+ the sufficient decrease condition
55
+
56
+ f(stp) <= f(0) + ftol*stp*f'(0),
57
+
58
+ and the curvature condition
59
+
60
+ abs(f'(stp)) <= gtol*abs(f'(0)).
61
+
62
+ If ftol is less than gtol and if, for example, the function
63
+ is bounded below, then there is always a step which satisfies
64
+ both conditions.
65
+
66
+ If no step can be found that satisfies both conditions, then
67
+ the algorithm stops with a warning. In this case stp only
68
+ satisfies the sufficient decrease condition.
69
+
70
+ A typical invocation of dcsrch has the following outline:
71
+
72
+ Evaluate the function at stp = 0.0d0; store in f.
73
+ Evaluate the gradient at stp = 0.0d0; store in g.
74
+ Choose a starting step stp.
75
+
76
+ task = 'START'
77
+ 10 continue
78
+ call dcsrch(stp,f,g,ftol,gtol,xtol,task,stpmin,stpmax,
79
+ isave,dsave)
80
+ if (task .eq. 'FG') then
81
+ Evaluate the function and the gradient at stp
82
+ go to 10
83
+ end if
84
+
85
+ NOTE: The user must not alter work arrays between calls.
86
+
87
+ The subroutine statement is
88
+
89
+ subroutine dcsrch(f,g,stp,ftol,gtol,xtol,stpmin,stpmax,
90
+ task,isave,dsave)
91
+ where
92
+
93
+ stp is a double precision variable.
94
+ On entry stp is the current estimate of a satisfactory
95
+ step. On initial entry, a positive initial estimate
96
+ must be provided.
97
+ On exit stp is the current estimate of a satisfactory step
98
+ if task = 'FG'. If task = 'CONV' then stp satisfies
99
+ the sufficient decrease and curvature condition.
100
+
101
+ f is a double precision variable.
102
+ On initial entry f is the value of the function at 0.
103
+ On subsequent entries f is the value of the
104
+ function at stp.
105
+ On exit f is the value of the function at stp.
106
+
107
+ g is a double precision variable.
108
+ On initial entry g is the derivative of the function at 0.
109
+ On subsequent entries g is the derivative of the
110
+ function at stp.
111
+ On exit g is the derivative of the function at stp.
112
+
113
+ ftol is a double precision variable.
114
+ On entry ftol specifies a nonnegative tolerance for the
115
+ sufficient decrease condition.
116
+ On exit ftol is unchanged.
117
+
118
+ gtol is a double precision variable.
119
+ On entry gtol specifies a nonnegative tolerance for the
120
+ curvature condition.
121
+ On exit gtol is unchanged.
122
+
123
+ xtol is a double precision variable.
124
+ On entry xtol specifies a nonnegative relative tolerance
125
+ for an acceptable step. The subroutine exits with a
126
+ warning if the relative difference between sty and stx
127
+ is less than xtol.
128
+
129
+ On exit xtol is unchanged.
130
+
131
+ task is a character variable of length at least 60.
132
+ On initial entry task must be set to 'START'.
133
+ On exit task indicates the required action:
134
+
135
+ If task(1:2) = 'FG' then evaluate the function and
136
+ derivative at stp and call dcsrch again.
137
+
138
+ If task(1:4) = 'CONV' then the search is successful.
139
+
140
+ If task(1:4) = 'WARN' then the subroutine is not able
141
+ to satisfy the convergence conditions. The exit value of
142
+ stp contains the best point found during the search.
143
+
144
+ If task(1:5) = 'ERROR' then there is an error in the
145
+ input arguments.
146
+
147
+ On exit with convergence, a warning or an error, the
148
+ variable task contains additional information.
149
+
150
+ stpmin is a double precision variable.
151
+ On entry stpmin is a nonnegative lower bound for the step.
152
+ On exit stpmin is unchanged.
153
+
154
+ stpmax is a double precision variable.
155
+ On entry stpmax is a nonnegative upper bound for the step.
156
+ On exit stpmax is unchanged.
157
+
158
+ isave is an integer work array of dimension 2.
159
+
160
+ dsave is a double precision work array of dimension 13.
161
+
162
+ Subprograms called
163
+
164
+ MINPACK-2 ... dcstep
165
+ MINPACK-1 Project. June 1983.
166
+ Argonne National Laboratory.
167
+ Jorge J. More' and David J. Thuente.
168
+
169
+ MINPACK-2 Project. November 1993.
170
+ Argonne National Laboratory and University of Minnesota.
171
+ Brett M. Averick, Richard G. Carter, and Jorge J. More'.
172
+ """
173
+
174
+ def __init__(self, phi, derphi, ftol, gtol, xtol, stpmin, stpmax):
175
+ self.stage = None
176
+ self.ginit = None
177
+ self.gtest = None
178
+ self.gx = None
179
+ self.gy = None
180
+ self.finit = None
181
+ self.fx = None
182
+ self.fy = None
183
+ self.stx = None
184
+ self.sty = None
185
+ self.stmin = None
186
+ self.stmax = None
187
+ self.width = None
188
+ self.width1 = None
189
+
190
+ # leave all assessment of tolerances/limits to the first call of
191
+ # this object
192
+ self.ftol = ftol
193
+ self.gtol = gtol
194
+ self.xtol = xtol
195
+ self.stpmin = stpmin
196
+ self.stpmax = stpmax
197
+
198
+ self.phi = phi
199
+ self.derphi = derphi
200
+
201
+ def __call__(self, alpha1, phi0=None, derphi0=None, maxiter=100):
202
+ """
203
+ Parameters
204
+ ----------
205
+ alpha1 : float
206
+ alpha1 is the current estimate of a satisfactory
207
+ step. A positive initial estimate must be provided.
208
+ phi0 : float
209
+ the value of `phi` at 0 (if known).
210
+ derphi0 : float
211
+ the derivative of `phi` at 0 (if known).
212
+ maxiter : int
+ Maximum number of iterations to perform.
213
+
214
+ Returns
215
+ -------
216
+ alpha : float
217
+ Step size, or None if no suitable step was found.
218
+ phi : float
219
+ Value of `phi` at the new point `alpha`.
220
+ phi0 : float
221
+ Value of `phi` at `alpha=0`.
222
+ task : bytes
223
+ On exit task indicates status information.
224
+
225
+ If task[:4] == b'CONV' then the search is successful.
226
+
227
+ If task[:4] == b'WARN' then the subroutine is not able
228
+ to satisfy the convergence conditions. The exit value of
229
+ stp contains the best point found during the search.
230
+
231
+ If task[:5] == b'ERROR' then there is an error in the
232
+ input arguments.
233
+ """
234
+ if phi0 is None:
235
+ phi0 = self.phi(0.0)
236
+ if derphi0 is None:
237
+ derphi0 = self.derphi(0.0)
238
+
239
+ phi1 = phi0
240
+ derphi1 = derphi0
241
+
242
+ task = b"START"
243
+ for i in range(maxiter):
244
+ stp, phi1, derphi1, task = self._iterate(
245
+ alpha1, phi1, derphi1, task
246
+ )
247
+
248
+ if not np.isfinite(stp):
249
+ task = b"WARN"
250
+ stp = None
251
+ break
252
+
253
+ if task[:2] == b"FG":
254
+ alpha1 = stp
255
+ phi1 = self.phi(stp)
256
+ derphi1 = self.derphi(stp)
257
+ else:
258
+ break
259
+ else:
260
+ # maxiter reached, the line search did not converge
261
+ stp = None
262
+ task = b"WARNING: dcsrch did not converge within max iterations"
263
+
264
+ if task[:5] == b"ERROR" or task[:4] == b"WARN":
265
+ stp = None # failed
266
+
267
+ return stp, phi1, phi0, task
268
+
269
+ def _iterate(self, stp, f, g, task):
270
+ """
271
+ Parameters
272
+ ----------
273
+ stp : float
274
+ The current estimate of a satisfactory step. On initial entry, a
275
+ positive initial estimate must be provided.
276
+ f : float
277
+ On first call f is the value of the function at 0. On subsequent
278
+ entries f should be the value of the function at stp.
279
+ g : float
280
+ On initial entry g is the derivative of the function at 0. On
281
+ subsequent entries g is the derivative of the function at stp.
282
+ task : bytes
283
+ On initial entry task must be set to 'START'.
284
+
285
+ On exit with convergence, a warning or an error, the
286
+ variable task contains additional information.
287
+
288
+
289
+ Returns
290
+ -------
291
+ stp, f, g, task: tuple
292
+
293
+ stp : float
294
+ the current estimate of a satisfactory step if task = 'FG'. If
295
+ task = 'CONV' then stp satisfies the sufficient decrease and
296
+ curvature condition.
297
+ f : float
298
+ the value of the function at stp.
299
+ g : float
300
+ the derivative of the function at stp.
301
+ task : bytes
302
+ On exit task indicates the required action:
303
+
304
+ If task(1:2) == b'FG' then evaluate the function and
305
+ derivative at stp and call dcsrch again.
306
+
307
+ If task(1:4) == b'CONV' then the search is successful.
308
+
309
+ If task(1:4) == b'WARN' then the subroutine is not able
310
+ to satisfy the convergence conditions. The exit value of
311
+ stp contains the best point found during the search.
312
+
313
+ If task(1:5) == b'ERROR' then there is an error in the
314
+ input arguments.
315
+ """
316
+ p5 = 0.5
317
+ p66 = 0.66
318
+ xtrapl = 1.1
319
+ xtrapu = 4.0
320
+
321
+ if task[:5] == b"START":
322
+ if stp < self.stpmin:
323
+ task = b"ERROR: STP .LT. STPMIN"
324
+ if stp > self.stpmax:
325
+ task = b"ERROR: STP .GT. STPMAX"
326
+ if g >= 0:
327
+ task = b"ERROR: INITIAL G .GE. ZERO"
328
+ if self.ftol < 0:
329
+ task = b"ERROR: FTOL .LT. ZERO"
330
+ if self.gtol < 0:
331
+ task = b"ERROR: GTOL .LT. ZERO"
332
+ if self.xtol < 0:
333
+ task = b"ERROR: XTOL .LT. ZERO"
334
+ if self.stpmin < 0:
335
+ task = b"ERROR: STPMIN .LT. ZERO"
336
+ if self.stpmax < self.stpmin:
337
+ task = b"ERROR: STPMAX .LT. STPMIN"
338
+
339
+ if task[:5] == b"ERROR":
340
+ return stp, f, g, task
341
+
342
+ # Initialize local variables.
343
+
344
+ self.brackt = False
345
+ self.stage = 1
346
+ self.finit = f
347
+ self.ginit = g
348
+ self.gtest = self.ftol * self.ginit
349
+ self.width = self.stpmax - self.stpmin
350
+ self.width1 = self.width / p5
351
+
352
+ # The variables stx, fx, gx contain the values of the step,
353
+ # function, and derivative at the best step.
354
+ # The variables sty, fy, gy contain the value of the step,
355
+ # function, and derivative at sty.
356
+ # The variables stp, f, g contain the values of the step,
357
+ # function, and derivative at stp.
358
+
359
+ self.stx = 0.0
360
+ self.fx = self.finit
361
+ self.gx = self.ginit
362
+ self.sty = 0.0
363
+ self.fy = self.finit
364
+ self.gy = self.ginit
365
+ self.stmin = 0
366
+ self.stmax = stp + xtrapu * stp
367
+ task = b"FG"
368
+ return stp, f, g, task
369
+
370
+ # in the original Fortran this was a location to restore variables
371
+ # we don't need to do that because they're attributes.
372
+
373
+ # If psi(stp) <= 0 and f'(stp) >= 0 for some step, then the
374
+ # algorithm enters the second stage.
375
+ ftest = self.finit + stp * self.gtest
376
+
377
+ if self.stage == 1 and f <= ftest and g >= 0:
378
+ self.stage = 2
379
+
380
+ # test for warnings
381
+ if self.brackt and (stp <= self.stmin or stp >= self.stmax):
382
+ task = b"WARNING: ROUNDING ERRORS PREVENT PROGRESS"
383
+ if self.brackt and self.stmax - self.stmin <= self.xtol * self.stmax:
384
+ task = b"WARNING: XTOL TEST SATISFIED"
385
+ if stp == self.stpmax and f <= ftest and g <= self.gtest:
386
+ task = b"WARNING: STP = STPMAX"
387
+ if stp == self.stpmin and (f > ftest or g >= self.gtest):
388
+ task = b"WARNING: STP = STPMIN"
389
+
390
+ # test for convergence
391
+ if f <= ftest and abs(g) <= self.gtol * -self.ginit:
392
+ task = b"CONVERGENCE"
393
+
394
+ # test for termination
395
+ if task[:4] == b"WARN" or task[:4] == b"CONV":
396
+ return stp, f, g, task
397
+
398
+ # A modified function is used to predict the step during the
399
+ # first stage if a lower function value has been obtained but
400
+ # the decrease is not sufficient.
401
+ if self.stage == 1 and f <= self.fx and f > ftest:
402
+ # Define the modified function and derivative values.
403
+ fm = f - stp * self.gtest
404
+ fxm = self.fx - self.stx * self.gtest
405
+ fym = self.fy - self.sty * self.gtest
406
+ gm = g - self.gtest
407
+ gxm = self.gx - self.gtest
408
+ gym = self.gy - self.gtest
409
+
410
+ # Call dcstep to update stx, sty, and to compute the new step.
411
+ # dcstep can have several operations which can produce NaN
412
+ # e.g. inf/inf. Filter these out.
413
+ with np.errstate(invalid="ignore", over="ignore"):
414
+ tup = dcstep(
415
+ self.stx,
416
+ fxm,
417
+ gxm,
418
+ self.sty,
419
+ fym,
420
+ gym,
421
+ stp,
422
+ fm,
423
+ gm,
424
+ self.brackt,
425
+ self.stmin,
426
+ self.stmax,
427
+ )
428
+ self.stx, fxm, gxm, self.sty, fym, gym, stp, self.brackt = tup
429
+
430
+ # Reset the function and derivative values for f
431
+ self.fx = fxm + self.stx * self.gtest
432
+ self.fy = fym + self.sty * self.gtest
433
+ self.gx = gxm + self.gtest
434
+ self.gy = gym + self.gtest
435
+
436
+ else:
437
+ # Call dcstep to update stx, sty, and to compute the new step.
438
+ # dcstep can have several operations which can produce NaN
439
+ # e.g. inf/inf. Filter these out.
440
+
441
+ with np.errstate(invalid="ignore", over="ignore"):
442
+ tup = dcstep(
443
+ self.stx,
444
+ self.fx,
445
+ self.gx,
446
+ self.sty,
447
+ self.fy,
448
+ self.gy,
449
+ stp,
450
+ f,
451
+ g,
452
+ self.brackt,
453
+ self.stmin,
454
+ self.stmax,
455
+ )
456
+ (
457
+ self.stx,
458
+ self.fx,
459
+ self.gx,
460
+ self.sty,
461
+ self.fy,
462
+ self.gy,
463
+ stp,
464
+ self.brackt,
465
+ ) = tup
466
+
467
+ # Decide if a bisection step is needed
468
+ if self.brackt:
469
+ if abs(self.sty - self.stx) >= p66 * self.width1:
470
+ stp = self.stx + p5 * (self.sty - self.stx)
471
+ self.width1 = self.width
472
+ self.width = abs(self.sty - self.stx)
473
+
474
+ # Set the minimum and maximum steps allowed for stp.
475
+ if self.brackt:
476
+ self.stmin = min(self.stx, self.sty)
477
+ self.stmax = max(self.stx, self.sty)
478
+ else:
479
+ self.stmin = stp + xtrapl * (stp - self.stx)
480
+ self.stmax = stp + xtrapu * (stp - self.stx)
481
+
482
+ # Force the step to be within the bounds stpmax and stpmin.
483
+ stp = np.clip(stp, self.stpmin, self.stpmax)
484
+
485
+ # If further progress is not possible, let stp be the best
486
+ # point obtained during the search.
487
+ if (
488
+ self.brackt
489
+ and (stp <= self.stmin or stp >= self.stmax)
490
+ or (
491
+ self.brackt
492
+ and self.stmax - self.stmin <= self.xtol * self.stmax
493
+ )
494
+ ):
495
+ stp = self.stx
496
+
497
+ # Obtain another function and derivative
498
+ task = b"FG"
499
+ return stp, f, g, task
500
+
501
+
502
+ def dcstep(stx, fx, dx, sty, fy, dy, stp, fp, dp, brackt, stpmin, stpmax):
503
+ """
504
+ Subroutine dcstep
505
+
506
+ This subroutine computes a safeguarded step for a search
507
+ procedure and updates an interval that contains a step that
508
+ satisfies a sufficient decrease and a curvature condition.
509
+
510
+ The parameter stx contains the step with the least function
511
+ value. If brackt is set to .true. then a minimizer has
512
+ been bracketed in an interval with endpoints stx and sty.
513
+ The parameter stp contains the current step.
514
+ The subroutine assumes that if brackt is set to .true. then
515
+
516
+ min(stx,sty) < stp < max(stx,sty),
517
+
518
+ and that the derivative at stx is negative in the direction
519
+ of the step.
520
+
521
+ The subroutine statement is
522
+
523
+ subroutine dcstep(stx,fx,dx,sty,fy,dy,stp,fp,dp,brackt,
524
+ stpmin,stpmax)
525
+
526
+ where
527
+
528
+ stx is a double precision variable.
529
+ On entry stx is the best step obtained so far and is an
530
+ endpoint of the interval that contains the minimizer.
531
+ On exit stx is the updated best step.
532
+
533
+ fx is a double precision variable.
534
+ On entry fx is the function at stx.
535
+ On exit fx is the function at stx.
536
+
537
+ dx is a double precision variable.
538
+ On entry dx is the derivative of the function at
539
+ stx. The derivative must be negative in the direction of
540
+ the step, that is, dx and stp - stx must have opposite
541
+ signs.
542
+ On exit dx is the derivative of the function at stx.
543
+
544
+ sty is a double precision variable.
545
+ On entry sty is the second endpoint of the interval that
546
+ contains the minimizer.
547
+ On exit sty is the updated endpoint of the interval that
548
+ contains the minimizer.
549
+
550
+ fy is a double precision variable.
551
+ On entry fy is the function at sty.
552
+ On exit fy is the function at sty.
553
+
554
+ dy is a double precision variable.
555
+ On entry dy is the derivative of the function at sty.
556
+ On exit dy is the derivative of the function at the exit sty.
557
+
558
+ stp is a double precision variable.
559
+ On entry stp is the current step. If brackt is set to .true.
560
+ then on input stp must be between stx and sty.
561
+ On exit stp is a new trial step.
562
+
563
+ fp is a double precision variable.
564
+ On entry fp is the function at stp
565
+ On exit fp is unchanged.
566
+
567
+ dp is a double precision variable.
568
+ On entry dp is the derivative of the function at stp.
569
+ On exit dp is unchanged.
570
+
571
+ brackt is an logical variable.
572
+ On entry brackt specifies if a minimizer has been bracketed.
573
+ Initially brackt must be set to .false.
574
+ On exit brackt specifies if a minimizer has been bracketed.
575
+ When a minimizer is bracketed brackt is set to .true.
576
+
577
+ stpmin is a double precision variable.
578
+ On entry stpmin is a lower bound for the step.
579
+ On exit stpmin is unchanged.
580
+
581
+ stpmax is a double precision variable.
582
+ On entry stpmax is an upper bound for the step.
583
+ On exit stpmax is unchanged.
584
+
585
+ MINPACK-1 Project. June 1983
586
+ Argonne National Laboratory.
587
+ Jorge J. More' and David J. Thuente.
588
+
589
+ MINPACK-2 Project. November 1993.
590
+ Argonne National Laboratory and University of Minnesota.
591
+ Brett M. Averick and Jorge J. More'.
592
+
593
+ """
594
+ sgn_dp = np.sign(dp)
595
+ sgn_dx = np.sign(dx)
596
+
597
+ # sgnd = dp * (dx / abs(dx))
598
+ sgnd = sgn_dp * sgn_dx
599
+
600
+ # First case: A higher function value. The minimum is bracketed.
601
+ # If the cubic step is closer to stx than the quadratic step, the
602
+ # cubic step is taken, otherwise the average of the cubic and
603
+ # quadratic steps is taken.
604
+ if fp > fx:
605
+ theta = 3.0 * (fx - fp) / (stp - stx) + dx + dp
606
+ s = max(abs(theta), abs(dx), abs(dp))
607
+ gamma = s * np.sqrt((theta / s) ** 2 - (dx / s) * (dp / s))
608
+ if stp < stx:
609
+ gamma *= -1
610
+ p = (gamma - dx) + theta
611
+ q = ((gamma - dx) + gamma) + dp
612
+ r = p / q
613
+ stpc = stx + r * (stp - stx)
614
+ stpq = stx + ((dx / ((fx - fp) / (stp - stx) + dx)) / 2.0) * (stp - stx)
615
+ if abs(stpc - stx) <= abs(stpq - stx):
616
+ stpf = stpc
617
+ else:
618
+ stpf = stpc + (stpq - stpc) / 2.0
619
+ brackt = True
620
+ elif sgnd < 0.0:
621
+ # Second case: A lower function value and derivatives of opposite
622
+ # sign. The minimum is bracketed. If the cubic step is farther from
623
+ # stp than the secant step, the cubic step is taken, otherwise the
624
+ # secant step is taken.
625
+ theta = 3 * (fx - fp) / (stp - stx) + dx + dp
626
+ s = max(abs(theta), abs(dx), abs(dp))
627
+ gamma = s * np.sqrt((theta / s) ** 2 - (dx / s) * (dp / s))
628
+ if stp > stx:
629
+ gamma *= -1
630
+ p = (gamma - dp) + theta
631
+ q = ((gamma - dp) + gamma) + dx
632
+ r = p / q
633
+ stpc = stp + r * (stx - stp)
634
+ stpq = stp + (dp / (dp - dx)) * (stx - stp)
635
+ if abs(stpc - stp) > abs(stpq - stp):
636
+ stpf = stpc
637
+ else:
638
+ stpf = stpq
639
+ brackt = True
640
+ elif abs(dp) < abs(dx):
641
+ # Third case: A lower function value, derivatives of the same sign,
642
+ # and the magnitude of the derivative decreases.
643
+
644
+ # The cubic step is computed only if the cubic tends to infinity
645
+ # in the direction of the step or if the minimum of the cubic
646
+ # is beyond stp. Otherwise the cubic step is defined to be the
647
+ # secant step.
648
+ theta = 3 * (fx - fp) / (stp - stx) + dx + dp
649
+ s = max(abs(theta), abs(dx), abs(dp))
650
+
651
+ # The case gamma = 0 only arises if the cubic does not tend
652
+ # to infinity in the direction of the step.
653
+ gamma = s * np.sqrt(max(0, (theta / s) ** 2 - (dx / s) * (dp / s)))
654
+ if stp > stx:
655
+ gamma = -gamma
656
+ p = (gamma - dp) + theta
657
+ q = (gamma + (dx - dp)) + gamma
658
+ r = p / q
659
+ if r < 0 and gamma != 0:
660
+ stpc = stp + r * (stx - stp)
661
+ elif stp > stx:
662
+ stpc = stpmax
663
+ else:
664
+ stpc = stpmin
665
+ stpq = stp + (dp / (dp - dx)) * (stx - stp)
666
+
667
+ if brackt:
668
+ # A minimizer has been bracketed. If the cubic step is
669
+ # closer to stp than the secant step, the cubic step is
670
+ # taken, otherwise the secant step is taken.
671
+ if abs(stpc - stp) < abs(stpq - stp):
672
+ stpf = stpc
673
+ else:
674
+ stpf = stpq
675
+
676
+ if stp > stx:
677
+ stpf = min(stp + 0.66 * (sty - stp), stpf)
678
+ else:
679
+ stpf = max(stp + 0.66 * (sty - stp), stpf)
680
+ else:
681
+ # A minimizer has not been bracketed. If the cubic step is
682
+ # farther from stp than the secant step, the cubic step is
683
+ # taken, otherwise the secant step is taken.
684
+ if abs(stpc - stp) > abs(stpq - stp):
685
+ stpf = stpc
686
+ else:
687
+ stpf = stpq
688
+ stpf = np.clip(stpf, stpmin, stpmax)
689
+
690
+ else:
691
+ # Fourth case: A lower function value, derivatives of the same sign,
692
+ # and the magnitude of the derivative does not decrease. If the
693
+ # minimum is not bracketed, the step is either stpmin or stpmax,
694
+ # otherwise the cubic step is taken.
695
+ if brackt:
696
+ theta = 3.0 * (fp - fy) / (sty - stp) + dy + dp
697
+ s = max(abs(theta), abs(dy), abs(dp))
698
+ gamma = s * np.sqrt((theta / s) ** 2 - (dy / s) * (dp / s))
699
+ if stp > sty:
700
+ gamma = -gamma
701
+ p = (gamma - dp) + theta
702
+ q = ((gamma - dp) + gamma) + dy
703
+ r = p / q
704
+ stpc = stp + r * (sty - stp)
705
+ stpf = stpc
706
+ elif stp > stx:
707
+ stpf = stpmax
708
+ else:
709
+ stpf = stpmin
710
+
711
+ # Update the interval which contains a minimizer.
712
+ if fp > fx:
713
+ sty = stp
714
+ fy = fp
715
+ dy = dp
716
+ else:
717
+ if sgnd < 0:
718
+ sty = stx
719
+ fy = fx
720
+ dy = dx
721
+ stx = stp
722
+ fx = fp
723
+ dx = dp
724
+
725
+ # Compute the new step.
726
+ stp = stpf
727
+
728
+ return stx, fx, dx, sty, fy, dy, stp, brackt
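A quick sanity check of the first case above (``fp > fx``): a minimal sketch, assuming `dcstep` can be imported from the private module `scipy.optimize._dcsrch` (an internal API whose location may change between SciPy versions). For a quadratic, the interpolation brackets the minimizer and lands on it exactly.

```python
from scipy.optimize._dcsrch import dcstep  # assumption: private import path

def f(x):
    return (x - 2.0) ** 2       # minimizer at x = 2

def df(x):
    return 2.0 * (x - 2.0)

# Best step so far (stx) with negative derivative in the step direction,
# and a trial step (stp) with a higher function value -> first case.
stx, fx, dx = 0.0, f(0.0), df(0.0)
sty, fy, dy = 0.0, f(0.0), df(0.0)  # second endpoint, initially == stx
stp, fp, dp = 5.0, f(5.0), df(5.0)

stx, fx, dx, sty, fy, dy, stp, brackt = dcstep(
    stx, fx, dx, sty, fy, dy, stp, fp, dp,
    brackt=False, stpmin=0.0, stpmax=10.0)

print(brackt, stp)  # True 2.0 -- bracketed, new trial step at the minimizer
```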
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_differentiable_functions.py ADDED
@@ -0,0 +1,646 @@
1
+ import numpy as np
2
+ import scipy.sparse as sps
3
+ from ._numdiff import approx_derivative, group_columns
4
+ from ._hessian_update_strategy import HessianUpdateStrategy
5
+ from scipy.sparse.linalg import LinearOperator
6
+ from scipy._lib._array_api import atleast_nd, array_namespace
7
+
8
+
9
+ FD_METHODS = ('2-point', '3-point', 'cs')
10
+
11
+
12
+ class ScalarFunction:
13
+ """Scalar function and its derivatives.
14
+
15
+ This class defines a scalar function F: R^n->R and methods for
16
+ computing or approximating its first and second derivatives.
17
+
18
+ Parameters
19
+ ----------
20
+ fun : callable
21
+ evaluates the scalar function. Must be of the form ``fun(x, *args)``,
22
+ where ``x`` is the argument in the form of a 1-D array and ``args`` is
23
+ a tuple of any additional fixed parameters needed to completely specify
24
+ the function. Should return a scalar.
25
+ x0 : array-like
26
+ Provides an initial set of variables for evaluating fun. Array of real
27
+ elements of size (n,), where 'n' is the number of independent
28
+ variables.
29
+ args : tuple, optional
30
+ Any additional fixed parameters needed to completely specify the scalar
31
+ function.
32
+ grad : {callable, '2-point', '3-point', 'cs'}
33
+ Method for computing the gradient vector.
34
+ If it is a callable, it should be a function that returns the gradient
35
+ vector:
36
+
37
+ ``grad(x, *args) -> array_like, shape (n,)``
38
+
39
+ where ``x`` is an array with shape (n,) and ``args`` is a tuple with
40
+ the fixed parameters.
41
+ Alternatively, the keywords {'2-point', '3-point', 'cs'} can be used
42
+ to select a finite difference scheme for numerical estimation of the
43
+ gradient with a relative step size. These finite difference schemes
44
+ obey any specified `bounds`.
45
+ hess : {callable, '2-point', '3-point', 'cs', HessianUpdateStrategy}
46
+ Method for computing the Hessian matrix. If it is callable, it should
47
+ return the Hessian matrix:
48
+
49
+ ``hess(x, *args) -> {LinearOperator, spmatrix, array}, (n, n)``
50
+
51
+ where x is a (n,) ndarray and `args` is a tuple with the fixed
52
+ parameters. Alternatively, the keywords {'2-point', '3-point', 'cs'}
53
+ select a finite difference scheme for numerical estimation. Or, objects
54
+ implementing `HessianUpdateStrategy` interface can be used to
55
+ approximate the Hessian.
56
+ Whenever the gradient is estimated via finite-differences, the Hessian
57
+ cannot be estimated with options {'2-point', '3-point', 'cs'} and needs
58
+ to be estimated using one of the quasi-Newton strategies.
59
+ finite_diff_rel_step : None or array_like
60
+ Relative step size to use. The absolute step size is computed as
61
+ ``h = finite_diff_rel_step * sign(x0) * max(1, abs(x0))``, possibly
62
+ adjusted to fit into the bounds. For ``method='3-point'`` the sign
63
+ of `h` is ignored. If None then finite_diff_rel_step is selected
64
+ automatically.
65
+ finite_diff_bounds : tuple of array_like
66
+ Lower and upper bounds on independent variables. Defaults to no bounds,
67
+ (-np.inf, np.inf). Each bound must match the size of `x0` or be a
68
+ scalar, in the latter case the bound will be the same for all
69
+ variables. Use it to limit the range of function evaluation.
70
+ epsilon : None or array_like, optional
71
+ Absolute step size to use, possibly adjusted to fit into the bounds.
72
+ For ``method='3-point'`` the sign of `epsilon` is ignored. By default
73
+ relative steps are used; only if ``epsilon is not None`` are absolute
74
+ steps used.
75
+
76
+ Notes
77
+ -----
78
+ This class implements a memoization logic. There are methods `fun`,
79
+ `grad`, `hess` and corresponding attributes `f`, `g` and `H`. The following
80
+ things should be considered:
81
+
82
+ 1. Use only public methods `fun`, `grad` and `hess`.
83
+ 2. After one of the methods is called, the corresponding attribute
84
+ will be set. However, a subsequent call with a different argument
85
+ of *any* of the methods may overwrite the attribute.
86
+ """
87
+ def __init__(self, fun, x0, args, grad, hess, finite_diff_rel_step,
88
+ finite_diff_bounds, epsilon=None):
89
+ if not callable(grad) and grad not in FD_METHODS:
90
+ raise ValueError(
91
+ f"`grad` must be either callable or one of {FD_METHODS}."
92
+ )
93
+
94
+ if not (callable(hess) or hess in FD_METHODS
95
+ or isinstance(hess, HessianUpdateStrategy)):
96
+ raise ValueError(
97
+ f"`hess` must be either callable, HessianUpdateStrategy"
98
+ f" or one of {FD_METHODS}."
99
+ )
100
+
101
+ if grad in FD_METHODS and hess in FD_METHODS:
102
+ raise ValueError("Whenever the gradient is estimated via "
103
+ "finite-differences, we require the Hessian "
104
+ "to be estimated using one of the "
105
+ "quasi-Newton strategies.")
106
+
107
+ self.xp = xp = array_namespace(x0)
108
+ _x = atleast_nd(x0, ndim=1, xp=xp)
109
+ _dtype = xp.float64
110
+ if xp.isdtype(_x.dtype, "real floating"):
111
+ _dtype = _x.dtype
112
+
113
+ # promotes to floating
114
+ self.x = xp.astype(_x, _dtype)
115
+ self.x_dtype = _dtype
116
+ self.n = self.x.size
117
+ self.nfev = 0
118
+ self.ngev = 0
119
+ self.nhev = 0
120
+ self.f_updated = False
121
+ self.g_updated = False
122
+ self.H_updated = False
123
+
124
+ self._lowest_x = None
125
+ self._lowest_f = np.inf
126
+
127
+ finite_diff_options = {}
128
+ if grad in FD_METHODS:
129
+ finite_diff_options["method"] = grad
130
+ finite_diff_options["rel_step"] = finite_diff_rel_step
131
+ finite_diff_options["abs_step"] = epsilon
132
+ finite_diff_options["bounds"] = finite_diff_bounds
133
+ if hess in FD_METHODS:
134
+ finite_diff_options["method"] = hess
135
+ finite_diff_options["rel_step"] = finite_diff_rel_step
136
+ finite_diff_options["abs_step"] = epsilon
137
+ finite_diff_options["as_linear_operator"] = True
138
+
139
+ # Function evaluation
140
+ def fun_wrapped(x):
141
+ self.nfev += 1
142
+ # Send a copy because the user may overwrite it.
143
+ # Overwriting results in undefined behaviour because
144
+ # fun(self.x) will change self.x, with the two no longer linked.
145
+ fx = fun(np.copy(x), *args)
146
+ # Make sure the function returns a true scalar
147
+ if not np.isscalar(fx):
148
+ try:
149
+ fx = np.asarray(fx).item()
150
+ except (TypeError, ValueError) as e:
151
+ raise ValueError(
152
+ "The user-provided objective function "
153
+ "must return a scalar value."
154
+ ) from e
155
+
156
+ if fx < self._lowest_f:
157
+ self._lowest_x = x
158
+ self._lowest_f = fx
159
+
160
+ return fx
161
+
162
+ def update_fun():
163
+ self.f = fun_wrapped(self.x)
164
+
165
+ self._update_fun_impl = update_fun
166
+ self._update_fun()
167
+
168
+ # Gradient evaluation
169
+ if callable(grad):
170
+ def grad_wrapped(x):
171
+ self.ngev += 1
172
+ return np.atleast_1d(grad(np.copy(x), *args))
173
+
174
+ def update_grad():
175
+ self.g = grad_wrapped(self.x)
176
+
177
+ elif grad in FD_METHODS:
178
+ def update_grad():
179
+ self._update_fun()
180
+ self.ngev += 1
181
+ self.g = approx_derivative(fun_wrapped, self.x, f0=self.f,
182
+ **finite_diff_options)
183
+
184
+ self._update_grad_impl = update_grad
185
+ self._update_grad()
186
+
187
+ # Hessian Evaluation
188
+ if callable(hess):
189
+ self.H = hess(np.copy(x0), *args)
190
+ self.H_updated = True
191
+ self.nhev += 1
192
+
193
+ if sps.issparse(self.H):
194
+ def hess_wrapped(x):
195
+ self.nhev += 1
196
+ return sps.csr_matrix(hess(np.copy(x), *args))
197
+ self.H = sps.csr_matrix(self.H)
198
+
199
+ elif isinstance(self.H, LinearOperator):
200
+ def hess_wrapped(x):
201
+ self.nhev += 1
202
+ return hess(np.copy(x), *args)
203
+
204
+ else:
205
+ def hess_wrapped(x):
206
+ self.nhev += 1
207
+ return np.atleast_2d(np.asarray(hess(np.copy(x), *args)))
208
+ self.H = np.atleast_2d(np.asarray(self.H))
209
+
210
+ def update_hess():
211
+ self.H = hess_wrapped(self.x)
212
+
213
+ elif hess in FD_METHODS:
214
+ def update_hess():
215
+ self._update_grad()
216
+ self.H = approx_derivative(grad_wrapped, self.x, f0=self.g,
217
+ **finite_diff_options)
218
+ return self.H
219
+
220
+ update_hess()
221
+ self.H_updated = True
222
+ elif isinstance(hess, HessianUpdateStrategy):
223
+ self.H = hess
224
+ self.H.initialize(self.n, 'hess')
225
+ self.H_updated = True
226
+ self.x_prev = None
227
+ self.g_prev = None
228
+
229
+ def update_hess():
230
+ self._update_grad()
231
+ self.H.update(self.x - self.x_prev, self.g - self.g_prev)
232
+
233
+ self._update_hess_impl = update_hess
234
+
235
+ if isinstance(hess, HessianUpdateStrategy):
236
+ def update_x(x):
237
+ self._update_grad()
238
+ self.x_prev = self.x
239
+ self.g_prev = self.g
240
+ # ensure that self.x is a copy of x. Don't store a reference
241
+ # otherwise the memoization doesn't work properly.
242
+
243
+ _x = atleast_nd(x, ndim=1, xp=self.xp)
244
+ self.x = self.xp.astype(_x, self.x_dtype)
245
+ self.f_updated = False
246
+ self.g_updated = False
247
+ self.H_updated = False
248
+ self._update_hess()
249
+ else:
250
+ def update_x(x):
251
+ # ensure that self.x is a copy of x. Don't store a reference
252
+ # otherwise the memoization doesn't work properly.
253
+ _x = atleast_nd(x, ndim=1, xp=self.xp)
254
+ self.x = self.xp.astype(_x, self.x_dtype)
255
+ self.f_updated = False
256
+ self.g_updated = False
257
+ self.H_updated = False
258
+ self._update_x_impl = update_x
259
+
260
+ def _update_fun(self):
261
+ if not self.f_updated:
262
+ self._update_fun_impl()
263
+ self.f_updated = True
264
+
265
+ def _update_grad(self):
266
+ if not self.g_updated:
267
+ self._update_grad_impl()
268
+ self.g_updated = True
269
+
270
+ def _update_hess(self):
271
+ if not self.H_updated:
272
+ self._update_hess_impl()
273
+ self.H_updated = True
274
+
275
+ def fun(self, x):
276
+ if not np.array_equal(x, self.x):
277
+ self._update_x_impl(x)
278
+ self._update_fun()
279
+ return self.f
280
+
281
+ def grad(self, x):
282
+ if not np.array_equal(x, self.x):
283
+ self._update_x_impl(x)
284
+ self._update_grad()
285
+ return self.g
286
+
287
+ def hess(self, x):
288
+ if not np.array_equal(x, self.x):
289
+ self._update_x_impl(x)
290
+ self._update_hess()
291
+ return self.H
292
+
293
+ def fun_and_grad(self, x):
294
+ if not np.array_equal(x, self.x):
295
+ self._update_x_impl(x)
296
+ self._update_fun()
297
+ self._update_grad()
298
+ return self.f, self.g
299
+
300
+
301
+ class VectorFunction:
302
+ """Vector function and its derivatives.
303
+
304
+ This class defines a vector function F: R^n->R^m and methods for
305
+ computing or approximating its first and second derivatives.
306
+
307
+ Notes
308
+ -----
309
+ This class implements a memoization logic. There are methods `fun`,
310
+ `jac`, `hess` and corresponding attributes `f`, `J` and `H`. The following
311
+ things should be considered:
312
+
313
+ 1. Use only public methods `fun`, `jac` and `hess`.
314
+ 2. After one of the methods is called, the corresponding attribute
315
+ will be set. However, a subsequent call with a different argument
316
+ of *any* of the methods may overwrite the attribute.
317
+ """
318
+ def __init__(self, fun, x0, jac, hess,
319
+ finite_diff_rel_step, finite_diff_jac_sparsity,
320
+ finite_diff_bounds, sparse_jacobian):
321
+ if not callable(jac) and jac not in FD_METHODS:
322
+ raise ValueError(f"`jac` must be either callable or one of {FD_METHODS}.")
323
+
324
+ if not (callable(hess) or hess in FD_METHODS
325
+ or isinstance(hess, HessianUpdateStrategy)):
326
+ raise ValueError("`hess` must be either callable, "
327
+ f"HessianUpdateStrategy or one of {FD_METHODS}.")
328
+
329
+ if jac in FD_METHODS and hess in FD_METHODS:
330
+ raise ValueError("Whenever the Jacobian is estimated via "
331
+ "finite-differences, we require the Hessian to "
332
+ "be estimated using one of the quasi-Newton "
333
+ "strategies.")
334
+
335
+ self.xp = xp = array_namespace(x0)
336
+ _x = atleast_nd(x0, ndim=1, xp=xp)
337
+ _dtype = xp.float64
338
+ if xp.isdtype(_x.dtype, "real floating"):
339
+ _dtype = _x.dtype
340
+
341
+ # promotes to floating
342
+ self.x = xp.astype(_x, _dtype)
343
+ self.x_dtype = _dtype
344
+
345
+ self.n = self.x.size
346
+ self.nfev = 0
347
+ self.njev = 0
348
+ self.nhev = 0
349
+ self.f_updated = False
350
+ self.J_updated = False
351
+ self.H_updated = False
352
+
353
+ finite_diff_options = {}
354
+ if jac in FD_METHODS:
355
+ finite_diff_options["method"] = jac
356
+ finite_diff_options["rel_step"] = finite_diff_rel_step
357
+ if finite_diff_jac_sparsity is not None:
358
+ sparsity_groups = group_columns(finite_diff_jac_sparsity)
359
+ finite_diff_options["sparsity"] = (finite_diff_jac_sparsity,
360
+ sparsity_groups)
361
+ finite_diff_options["bounds"] = finite_diff_bounds
362
+ self.x_diff = np.copy(self.x)
363
+ if hess in FD_METHODS:
364
+ finite_diff_options["method"] = hess
365
+ finite_diff_options["rel_step"] = finite_diff_rel_step
366
+ finite_diff_options["as_linear_operator"] = True
367
+ self.x_diff = np.copy(self.x)
368
+ if jac in FD_METHODS and hess in FD_METHODS:
369
+ raise ValueError("Whenever the Jacobian is estimated via "
370
+ "finite-differences, we require the Hessian to "
371
+ "be estimated using one of the quasi-Newton "
372
+ "strategies.")
373
+
374
+ # Function evaluation
375
+ def fun_wrapped(x):
376
+ self.nfev += 1
377
+ return np.atleast_1d(fun(x))
378
+
379
+ def update_fun():
380
+ self.f = fun_wrapped(self.x)
381
+
382
+ self._update_fun_impl = update_fun
383
+ update_fun()
384
+
385
+ self.v = np.zeros_like(self.f)
386
+ self.m = self.v.size
387
+
388
+ # Jacobian Evaluation
389
+ if callable(jac):
390
+ self.J = jac(self.x)
391
+ self.J_updated = True
392
+ self.njev += 1
393
+
394
+ if (sparse_jacobian or
395
+ sparse_jacobian is None and sps.issparse(self.J)):
396
+ def jac_wrapped(x):
397
+ self.njev += 1
398
+ return sps.csr_matrix(jac(x))
399
+ self.J = sps.csr_matrix(self.J)
400
+ self.sparse_jacobian = True
401
+
402
+ elif sps.issparse(self.J):
403
+ def jac_wrapped(x):
404
+ self.njev += 1
405
+ return jac(x).toarray()
406
+ self.J = self.J.toarray()
407
+ self.sparse_jacobian = False
408
+
409
+ else:
410
+ def jac_wrapped(x):
411
+ self.njev += 1
412
+ return np.atleast_2d(jac(x))
413
+ self.J = np.atleast_2d(self.J)
414
+ self.sparse_jacobian = False
415
+
416
+ def update_jac():
417
+ self.J = jac_wrapped(self.x)
418
+
419
+ elif jac in FD_METHODS:
420
+ self.J = approx_derivative(fun_wrapped, self.x, f0=self.f,
421
+ **finite_diff_options)
422
+ self.J_updated = True
423
+
424
+ if (sparse_jacobian or
425
+ sparse_jacobian is None and sps.issparse(self.J)):
426
+ def update_jac():
427
+ self._update_fun()
428
+ self.J = sps.csr_matrix(
429
+ approx_derivative(fun_wrapped, self.x, f0=self.f,
430
+ **finite_diff_options))
431
+ self.J = sps.csr_matrix(self.J)
432
+ self.sparse_jacobian = True
433
+
434
+ elif sps.issparse(self.J):
435
+ def update_jac():
436
+ self._update_fun()
437
+ self.J = approx_derivative(fun_wrapped, self.x, f0=self.f,
438
+ **finite_diff_options).toarray()
439
+ self.J = self.J.toarray()
440
+ self.sparse_jacobian = False
441
+
442
+ else:
443
+ def update_jac():
444
+ self._update_fun()
445
+ self.J = np.atleast_2d(
446
+ approx_derivative(fun_wrapped, self.x, f0=self.f,
447
+ **finite_diff_options))
448
+ self.J = np.atleast_2d(self.J)
449
+ self.sparse_jacobian = False
450
+
451
+ self._update_jac_impl = update_jac
452
+
453
+ # Define Hessian
454
+ if callable(hess):
455
+ self.H = hess(self.x, self.v)
456
+ self.H_updated = True
457
+ self.nhev += 1
458
+
459
+ if sps.issparse(self.H):
460
+ def hess_wrapped(x, v):
461
+ self.nhev += 1
462
+ return sps.csr_matrix(hess(x, v))
463
+ self.H = sps.csr_matrix(self.H)
464
+
465
+ elif isinstance(self.H, LinearOperator):
466
+ def hess_wrapped(x, v):
467
+ self.nhev += 1
468
+ return hess(x, v)
469
+
470
+ else:
471
+ def hess_wrapped(x, v):
472
+ self.nhev += 1
473
+ return np.atleast_2d(np.asarray(hess(x, v)))
474
+ self.H = np.atleast_2d(np.asarray(self.H))
475
+
476
+ def update_hess():
477
+ self.H = hess_wrapped(self.x, self.v)
478
+ elif hess in FD_METHODS:
479
+ def jac_dot_v(x, v):
480
+ return jac_wrapped(x).T.dot(v)
481
+
482
+ def update_hess():
483
+ self._update_jac()
484
+ self.H = approx_derivative(jac_dot_v, self.x,
485
+ f0=self.J.T.dot(self.v),
486
+ args=(self.v,),
487
+ **finite_diff_options)
488
+ update_hess()
489
+ self.H_updated = True
490
+ elif isinstance(hess, HessianUpdateStrategy):
491
+ self.H = hess
492
+ self.H.initialize(self.n, 'hess')
493
+ self.H_updated = True
494
+ self.x_prev = None
495
+ self.J_prev = None
496
+
497
+ def update_hess():
498
+ self._update_jac()
499
+ # When v is updated before x has been updated, x_prev and
500
+ # J_prev are None and we need this check.
501
+ if self.x_prev is not None and self.J_prev is not None:
502
+ delta_x = self.x - self.x_prev
503
+ delta_g = self.J.T.dot(self.v) - self.J_prev.T.dot(self.v)
504
+ self.H.update(delta_x, delta_g)
505
+
506
+ self._update_hess_impl = update_hess
507
+
508
+ if isinstance(hess, HessianUpdateStrategy):
509
+ def update_x(x):
510
+ self._update_jac()
511
+ self.x_prev = self.x
512
+ self.J_prev = self.J
513
+ _x = atleast_nd(x, ndim=1, xp=self.xp)
514
+ self.x = self.xp.astype(_x, self.x_dtype)
515
+ self.f_updated = False
516
+ self.J_updated = False
517
+ self.H_updated = False
518
+ self._update_hess()
519
+ else:
520
+ def update_x(x):
521
+ _x = atleast_nd(x, ndim=1, xp=self.xp)
522
+ self.x = self.xp.astype(_x, self.x_dtype)
523
+ self.f_updated = False
524
+ self.J_updated = False
525
+ self.H_updated = False
526
+
527
+ self._update_x_impl = update_x
528
+
529
+ def _update_v(self, v):
530
+ if not np.array_equal(v, self.v):
531
+ self.v = v
532
+ self.H_updated = False
533
+
534
+ def _update_x(self, x):
535
+ if not np.array_equal(x, self.x):
536
+ self._update_x_impl(x)
537
+
538
+ def _update_fun(self):
539
+ if not self.f_updated:
540
+ self._update_fun_impl()
541
+ self.f_updated = True
542
+
543
+ def _update_jac(self):
544
+ if not self.J_updated:
545
+ self._update_jac_impl()
546
+ self.J_updated = True
547
+
548
+ def _update_hess(self):
549
+ if not self.H_updated:
550
+ self._update_hess_impl()
551
+ self.H_updated = True
552
+
553
+ def fun(self, x):
554
+ self._update_x(x)
555
+ self._update_fun()
556
+ return self.f
557
+
558
+ def jac(self, x):
559
+ self._update_x(x)
560
+ self._update_jac()
561
+ return self.J
562
+
563
+ def hess(self, x, v):
564
+ # v should be updated before x.
565
+ self._update_v(v)
566
+ self._update_x(x)
567
+ self._update_hess()
568
+ return self.H
569
+
570
+
571
+ class LinearVectorFunction:
572
+ """Linear vector function and its derivatives.
573
+
574
+ Defines a linear function F = A x, where x is an N-D vector and
575
+ A is an m-by-n matrix. The Jacobian is constant and equals A. The Hessian
576
+ is identically zero and it is returned as a csr matrix.
577
+ """
578
+ def __init__(self, A, x0, sparse_jacobian):
579
+ if sparse_jacobian or sparse_jacobian is None and sps.issparse(A):
580
+ self.J = sps.csr_matrix(A)
581
+ self.sparse_jacobian = True
582
+ elif sps.issparse(A):
583
+ self.J = A.toarray()
584
+ self.sparse_jacobian = False
585
+ else:
586
+ # np.asarray makes sure A is ndarray and not matrix
587
+ self.J = np.atleast_2d(np.asarray(A))
588
+ self.sparse_jacobian = False
589
+
590
+ self.m, self.n = self.J.shape
591
+
592
+ self.xp = xp = array_namespace(x0)
593
+ _x = atleast_nd(x0, ndim=1, xp=xp)
594
+ _dtype = xp.float64
595
+ if xp.isdtype(_x.dtype, "real floating"):
596
+ _dtype = _x.dtype
597
+
598
+ # promotes to floating
599
+ self.x = xp.astype(_x, _dtype)
600
+ self.x_dtype = _dtype
601
+
602
+ self.f = self.J.dot(self.x)
603
+ self.f_updated = True
604
+
605
+ self.v = np.zeros(self.m, dtype=float)
606
+ self.H = sps.csr_matrix((self.n, self.n))
607
+
608
+ def _update_x(self, x):
609
+ if not np.array_equal(x, self.x):
610
+ _x = atleast_nd(x, ndim=1, xp=self.xp)
611
+ self.x = self.xp.astype(_x, self.x_dtype)
612
+ self.f_updated = False
613
+
614
+ def fun(self, x):
615
+ self._update_x(x)
616
+ if not self.f_updated:
617
+ self.f = self.J.dot(x)
618
+ self.f_updated = True
619
+ return self.f
620
+
621
+ def jac(self, x):
622
+ self._update_x(x)
623
+ return self.J
624
+
625
+ def hess(self, x, v):
626
+ self._update_x(x)
627
+ self.v = v
628
+ return self.H
629
+
630
+
631
+ class IdentityVectorFunction(LinearVectorFunction):
632
+ """Identity vector function and its derivatives.
633
+
634
+ The Jacobian is the identity matrix, returned as a dense array when
635
+ `sparse_jacobian=False` and as a csr matrix otherwise. The Hessian is
636
+ identically zero and it is returned as a csr matrix.
637
+ """
638
+ def __init__(self, x0, sparse_jacobian):
639
+ n = len(x0)
640
+ if sparse_jacobian or sparse_jacobian is None:
641
+ A = sps.eye(n, format='csr')
642
+ sparse_jacobian = True
643
+ else:
644
+ A = np.eye(n)
645
+ sparse_jacobian = False
646
+ super().__init__(A, x0, sparse_jacobian)
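A minimal sketch of the memoization logic described in the docstrings above, assuming the private import path `scipy.optimize._differentiable_functions` (an internal API that may change between SciPy versions). Repeated evaluation at the same point reuses the cached value; a new point invalidates the cache.

```python
import numpy as np
from scipy.optimize import BFGS
from scipy.optimize._differentiable_functions import ScalarFunction

def f(x):
    return np.sum(x ** 2)

x0 = np.array([1.0, 2.0])
# The gradient is estimated by finite differences, so the Hessian must be
# a quasi-Newton strategy (FD gradient + FD Hessian raises ValueError).
sf = ScalarFunction(f, x0, (), '3-point', BFGS(),
                    None, (-np.inf, np.inf))

n_before = sf.nfev
sf.fun(x0)            # same x: cached value returned, no new evaluation
sf.fun(x0)
assert sf.nfev == n_before

sf.fun(x0 + 1.0)      # new x invalidates the cache and re-evaluates
assert sf.nfev > n_before
```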
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_differentiate.py ADDED
@@ -0,0 +1,669 @@
1
+ # mypy: disable-error-code="attr-defined"
2
+ import numpy as np
3
+ import scipy._lib._elementwise_iterative_method as eim
4
+ from scipy._lib._util import _RichResult
5
+
6
+ _EERRORINCREASE = -1 # used in _differentiate
7
+
8
+ def _differentiate_iv(func, x, args, atol, rtol, maxiter, order, initial_step,
9
+ step_factor, step_direction, preserve_shape, callback):
10
+ # Input validation for `_differentiate`
11
+
12
+ if not callable(func):
13
+ raise ValueError('`func` must be callable.')
14
+
15
+ # x has more complex IV that is taken care of during initialization
16
+ x = np.asarray(x)
17
+ dtype = x.dtype if np.issubdtype(x.dtype, np.inexact) else np.float64
18
+
19
+ if not np.iterable(args):
20
+ args = (args,)
21
+
22
+ if atol is None:
23
+ atol = np.finfo(dtype).tiny
24
+
25
+ if rtol is None:
26
+ rtol = np.sqrt(np.finfo(dtype).eps)
27
+
28
+ message = 'Tolerances and step parameters must be non-negative scalars.'
29
+ tols = np.asarray([atol, rtol, initial_step, step_factor])
30
+ if (not np.issubdtype(tols.dtype, np.number)
31
+ or np.any(tols < 0)
32
+ or tols.shape != (4,)):
33
+ raise ValueError(message)
34
+ initial_step, step_factor = tols[2:].astype(dtype)
35
+
36
+ maxiter_int = int(maxiter)
37
+ if maxiter != maxiter_int or maxiter <= 0:
38
+ raise ValueError('`maxiter` must be a positive integer.')
39
+
40
+ order_int = int(order)
41
+ if order_int != order or order <= 0:
42
+ raise ValueError('`order` must be a positive integer.')
43
+
44
+ step_direction = np.sign(step_direction).astype(dtype)
45
+ x, step_direction = np.broadcast_arrays(x, step_direction)
46
+ x, step_direction = x[()], step_direction[()]
47
+
48
+ message = '`preserve_shape` must be True or False.'
49
+ if preserve_shape not in {True, False}:
50
+ raise ValueError(message)
51
+
52
+ if callback is not None and not callable(callback):
53
+ raise ValueError('`callback` must be callable.')
54
+
55
+ return (func, x, args, atol, rtol, maxiter_int, order_int, initial_step,
56
+ step_factor, step_direction, preserve_shape, callback)
57
+
58
+
59
+ def _differentiate(func, x, *, args=(), atol=None, rtol=None, maxiter=10,
60
+ order=8, initial_step=0.5, step_factor=2.0,
61
+ step_direction=0, preserve_shape=False, callback=None):
62
+ """Evaluate the derivative of an elementwise scalar function numerically.
63
+
64
+ Parameters
65
+ ----------
66
+ func : callable
67
+ The function whose derivative is desired. The signature must be::
68
+
69
+ func(x: ndarray, *fargs) -> ndarray
70
+
71
+ where each element of ``x`` is a finite real and ``fargs`` is a tuple,
72
+ which may contain an arbitrary number of arrays that are broadcastable
73
+ with `x`. ``func`` must be an elementwise function: each element
74
+ ``func(x)[i]`` must equal ``func(x[i])`` for all indices ``i``.
75
+ x : array_like
76
+ Abscissae at which to evaluate the derivative.
77
+ args : tuple, optional
78
+ Additional positional arguments to be passed to `func`. Must be arrays
79
+ broadcastable with `x`. If the callable to be differentiated requires
80
+ arguments that are not broadcastable with `x`, wrap that callable with
81
+ `func`. See Examples.
82
+ atol, rtol : float, optional
83
+ Absolute and relative tolerances for the stopping condition: iteration
84
+ will stop when ``res.error < atol + rtol * abs(res.df)``. The default
85
+ `atol` is the smallest normal number of the appropriate dtype, and
86
+ the default `rtol` is the square root of the precision of the
87
+ appropriate dtype.
88
+ order : int, default: 8
89
+ The (positive integer) order of the finite difference formula to be
90
+ used. Odd integers will be rounded up to the next even integer.
91
+ initial_step : float, default: 0.5
92
+ The (absolute) initial step size for the finite difference derivative
93
+ approximation.
94
+ step_factor : float, default: 2.0
95
+ The factor by which the step size is *reduced* in each iteration; i.e.
96
+ the step size in iteration 1 is ``initial_step/step_factor``. If
97
+ ``step_factor < 1``, subsequent steps will be greater than the initial
98
+ step; this may be useful if steps smaller than some threshold are
99
+ undesirable (e.g. due to subtractive cancellation error).
100
+ maxiter : int, default: 10
101
+ The maximum number of iterations of the algorithm to perform. See
102
+ notes.
103
+ step_direction : array_like
104
+ An array representing the direction of the finite difference steps (for
105
+ use when `x` lies near the boundary of the domain of the function).
106
+ Must be broadcastable with `x` and all `args`.
107
+ Where 0 (default), central differences are used; where negative (e.g.
108
+ -1), steps are non-positive; and where positive (e.g. 1), all steps are
109
+ non-negative.
110
+ preserve_shape : bool, default: False
111
+ In the following, "arguments of `func`" refers to the array ``x`` and
112
+ any arrays within ``fargs``. Let ``shape`` be the broadcasted shape
113
+ of `x` and all elements of `args` (which is conceptually
114
+ distinct from ``fargs`` passed into `f`).
115
+
116
+ - When ``preserve_shape=False`` (default), `f` must accept arguments
117
+ of *any* broadcastable shapes.
118
+
119
+ - When ``preserve_shape=True``, `f` must accept arguments of shape
120
+ ``shape`` *or* ``shape + (n,)``, where ``(n,)`` is the number of
121
+ abscissae at which the function is being evaluated.
122
+
123
+ In either case, for each scalar element ``xi`` within `x`, the array
124
+ returned by `f` must include the scalar ``f(xi)`` at the same index.
125
+ Consequently, the shape of the output is always the shape of the input
126
+ ``x``.
127
+
128
+ See Examples.
129
+ callback : callable, optional
130
+ An optional user-supplied function to be called before the first
131
+ iteration and after each iteration.
132
+ Called as ``callback(res)``, where ``res`` is a ``_RichResult``
133
+ similar to that returned by `_differentiate` (but containing the
134
+ current iterate's values of all variables). If `callback` raises a
135
+ ``StopIteration``, the algorithm will terminate immediately and
136
+ `_differentiate` will return a result.
137
+
138
+ Returns
139
+ -------
140
+ res : _RichResult
141
+ An instance of `scipy._lib._util._RichResult` with the following
142
+ attributes. (The descriptions are written as though the values will be
143
+ scalars; however, if `func` returns an array, the outputs will be
144
+ arrays of the same shape.)
145
+
146
+ success : bool
147
+ ``True`` when the algorithm terminated successfully (status ``0``).
148
+ status : int
149
+ An integer representing the exit status of the algorithm.
150
+ ``0`` : The algorithm converged to the specified tolerances.
151
+ ``-1`` : The error estimate increased, so iteration was terminated.
152
+ ``-2`` : The maximum number of iterations was reached.
153
+ ``-3`` : A non-finite value was encountered.
154
+ ``-4`` : Iteration was terminated by `callback`.
155
+ ``1`` : The algorithm is proceeding normally (in `callback` only).
156
+ df : float
157
+ The derivative of `func` at `x`, if the algorithm terminated
158
+ successfully.
159
+ error : float
160
+ An estimate of the error: the magnitude of the difference between
161
+ the current estimate of the derivative and the estimate in the
162
+ previous iteration.
163
+ nit : int
164
+ The number of iterations performed.
165
+ nfev : int
166
+ The number of points at which `func` was evaluated.
167
+ x : float
168
+ The value at which the derivative of `func` was evaluated
169
+ (after broadcasting with `args` and `step_direction`).
170
+
171
+ Notes
172
+ -----
173
+ The implementation was inspired by jacobi [1]_, numdifftools [2]_, and
174
+ DERIVEST [3]_, but the implementation follows the theory of Taylor series
175
+ more straightforwardly (and arguably naively so).
176
+ In the first iteration, the derivative is estimated using a finite
177
+ difference formula of order `order` with maximum step size `initial_step`.
178
+ Each subsequent iteration, the maximum step size is reduced by
179
+ `step_factor`, and the derivative is estimated again until a termination
180
+ condition is reached. The error estimate is the magnitude of the difference
181
+ between the current derivative approximation and that of the previous
182
+ iteration.
183
+
184
+ The stencils of the finite difference formulae are designed such that
185
+ abscissae are "nested": after `func` is evaluated at ``order + 1``
186
+ points in the first iteration, `func` is evaluated at only two new points
187
+ in each subsequent iteration; ``order - 1`` previously evaluated function
188
+ values required by the finite difference formula are reused, and two
189
+ function values (evaluations at the points furthest from `x`) are unused.
190
+
191
+ Step sizes are absolute. When the step size is small relative to the
192
+ magnitude of `x`, precision is lost; for example, if `x` is ``1e20``, the
193
+ default initial step size of ``0.5`` cannot be resolved. Accordingly,
194
+ consider using larger initial step sizes for large magnitudes of `x`.
195
+
196
+ The default tolerances are challenging to satisfy at points where the
197
+ true derivative is exactly zero. If the derivative may be exactly zero,
198
+ consider specifying an absolute tolerance (e.g. ``atol=1e-16``) to
199
+ improve convergence.
200
+
201
+ References
202
+ ----------
203
+ .. [1] Hans Dembinski (@HDembinski). jacobi.
204
+ https://github.com/HDembinski/jacobi
205
+ .. [2] Per A. Brodtkorb and John D'Errico. numdifftools.
206
+ https://numdifftools.readthedocs.io/en/latest/
207
+ .. [3] John D'Errico. DERIVEST: Adaptive Robust Numerical Differentiation.
208
+ https://www.mathworks.com/matlabcentral/fileexchange/13490-adaptive-robust-numerical-differentiation
209
+ .. [4] Numerical Differentiation. Wikipedia.
210
+ https://en.wikipedia.org/wiki/Numerical_differentiation
211
+
212
+ Examples
213
+ --------
214
+ Evaluate the derivative of ``np.exp`` at several points ``x``.
215
+
216
+ >>> import numpy as np
217
+ >>> from scipy.optimize._differentiate import _differentiate
218
+ >>> f = np.exp
219
+ >>> df = np.exp # true derivative
220
+ >>> x = np.linspace(1, 2, 5)
221
+ >>> res = _differentiate(f, x)
222
+ >>> res.df # approximation of the derivative
223
+ array([2.71828183, 3.49034296, 4.48168907, 5.75460268, 7.3890561 ])
224
+ >>> res.error # estimate of the error
225
+ array(
226
+ [7.12940817e-12, 9.16688947e-12, 1.17594823e-11, 1.50972568e-11, 1.93942640e-11]
227
+ )
228
+ >>> abs(res.df - df(x)) # true error
229
+ array(
230
+ [3.06421555e-14, 3.01980663e-14, 5.06261699e-14, 6.30606678e-14, 8.34887715e-14]
231
+ )
232
+
233
+ Show the convergence of the approximation as the step size is reduced.
234
+ Each iteration, the step size is reduced by `step_factor`, so for
235
+ sufficiently small initial step, each iteration reduces the error by a
236
+ factor of ``1/step_factor**order`` until finite precision arithmetic
237
+ inhibits further improvement.
238
+
239
+ >>> import matplotlib.pyplot as plt
+ >>> iter = list(range(1, 12)) # maximum iterations
240
+ >>> hfac = 2 # step size reduction per iteration
241
+ >>> hdir = [-1, 0, 1] # compare left-, central-, and right- steps
242
+ >>> order = 4 # order of differentiation formula
243
+ >>> x = 1
244
+ >>> ref = df(x)
245
+ >>> errors = [] # true error
246
+ >>> for i in iter:
247
+ ... res = _differentiate(f, x, maxiter=i, step_factor=hfac,
248
+ ... step_direction=hdir, order=order,
249
+ ... atol=0, rtol=0) # prevent early termination
250
+ ... errors.append(abs(res.df - ref))
251
+ >>> errors = np.array(errors)
252
+ >>> plt.semilogy(iter, errors[:, 0], label='left differences')
253
+ >>> plt.semilogy(iter, errors[:, 1], label='central differences')
254
+ >>> plt.semilogy(iter, errors[:, 2], label='right differences')
255
+ >>> plt.xlabel('iteration')
256
+ >>> plt.ylabel('error')
257
+ >>> plt.legend()
258
+ >>> plt.show()
259
+ >>> (errors[1, 1] / errors[0, 1], 1 / hfac**order)
260
+ (0.06215223140159822, 0.0625)
261
+
262
+ The implementation is vectorized over `x`, `step_direction`, and `args`.
263
+ The function is evaluated once before the first iteration to perform input
264
+ validation and standardization, and once per iteration thereafter.
265
+
266
+ >>> def f(x, p):
268
+ ... f.nit += 1
269
+ ... return x**p
270
+ >>> f.nit = 0
271
+ >>> def df(x, p):
272
+ ... return p*x**(p-1)
273
+ >>> x = np.arange(1, 5)
274
+ >>> p = np.arange(1, 6).reshape((-1, 1))
275
+ >>> hdir = np.arange(-1, 2).reshape((-1, 1, 1))
276
+ >>> res = _differentiate(f, x, args=(p,), step_direction=hdir, maxiter=1)
277
+ >>> np.allclose(res.df, df(x, p))
278
+ True
279
+ >>> res.df.shape
280
+ (3, 5, 4)
281
+ >>> f.nit
282
+ 2
283
+
284
+ By default, `preserve_shape` is False, and therefore the callable
285
+ `f` may be called with arrays of any broadcastable shapes.
286
+ For example:
287
+
288
+ >>> shapes = []
289
+ >>> def f(x, c):
290
+ ... shape = np.broadcast_shapes(x.shape, c.shape)
291
+ ... shapes.append(shape)
292
+ ... return np.sin(c*x)
293
+ >>>
294
+ >>> c = [1, 5, 10, 20]
295
+ >>> res = _differentiate(f, 0, args=(c,))
296
+ >>> shapes
297
+ [(4,), (4, 8), (4, 2), (3, 2), (2, 2), (1, 2)]
298
+
299
+ To understand where these shapes are coming from - and to better
300
+ understand how `_differentiate` computes accurate results - note that
301
+ higher values of ``c`` correspond with higher frequency sinusoids.
302
+ The higher frequency sinusoids make the function's derivative change
303
+ faster, so more function evaluations are required to achieve the target
304
+ accuracy:
305
+
306
+ >>> res.nfev
307
+ array([11, 13, 15, 17])
308
+
309
+ The initial ``shape``, ``(4,)``, corresponds with evaluating the
310
+ function at a single abscissa and all four frequencies; this is used
311
+ for input validation and to determine the size and dtype of the arrays
312
+ that store results. The next shape corresponds with evaluating the
313
+ function at an initial grid of abscissae and all four frequencies.
314
+ Successive calls to the function evaluate the function at two more
315
+ abscissae, increasing the effective order of the approximation by two.
316
+ However, in later function evaluations, the function is evaluated at
317
+ fewer frequencies because the corresponding derivative has already
318
+ converged to the required tolerance. This saves function evaluations to
319
+ improve performance, but it requires the function to accept arguments of
320
+ any shape.
321
+
322
+ "Vector-valued" functions are unlikely to satisfy this requirement.
323
+ For example, consider
324
+
325
+ >>> def f(x):
326
+ ... return [x, np.sin(3*x), x+np.sin(10*x), np.sin(20*x)*(x-1)**2]
327
+
328
+ This function is not compatible with `_differentiate` as written; for instance,
329
+ the shape of the output will not be the same as the shape of ``x``. Such a
330
+ function *could* be converted to a compatible form with the introduction of
331
+ additional parameters, but this would be inconvenient. In such cases,
332
+ a simpler solution would be to use `preserve_shape`.
333
+
334
+ >>> shapes = []
335
+ >>> def f(x):
336
+ ... shapes.append(x.shape)
337
+ ... x0, x1, x2, x3 = x
338
+ ... return [x0, np.sin(3*x1), x2+np.sin(10*x2), np.sin(20*x3)*(x3-1)**2]
339
+ >>>
340
+ >>> x = np.zeros(4)
341
+ >>> res = _differentiate(f, x, preserve_shape=True)
342
+ >>> shapes
343
+ [(4,), (4, 8), (4, 2), (4, 2), (4, 2), (4, 2)]
344
+
345
+ Here, the shape of ``x`` is ``(4,)``. With ``preserve_shape=True``, the
346
+ function may be called with argument ``x`` of shape ``(4,)`` or ``(4, n)``,
347
+ and this is what we observe.
348
+
349
+ """
350
+ # TODO (followup):
351
+ # - investigate behavior at saddle points
352
+ # - array initial_step / step_factor?
353
+ # - multivariate functions?
354
+
355
+ res = _differentiate_iv(func, x, args, atol, rtol, maxiter, order, initial_step,
356
+ step_factor, step_direction, preserve_shape, callback)
357
+ (func, x, args, atol, rtol, maxiter, order,
358
+ h0, fac, hdir, preserve_shape, callback) = res
359
+
360
+ # Initialization
361
+ # Since f(x) (no step) is not needed for central differences, it may be
362
+ # possible to eliminate this function evaluation. However, it's useful for
363
+ # input validation and standardization, and everything else is designed to
364
+ # reduce function calls, so let's keep it simple.
365
+ temp = eim._initialize(func, (x,), args, preserve_shape=preserve_shape)
366
+ func, xs, fs, args, shape, dtype = temp
367
+ x, f = xs[0], fs[0]
368
+ df = np.full_like(f, np.nan)
369
+ # Ideally we'd broadcast the shape of `hdir` in `_elementwise_algo_init`, but
370
+ # it's simpler to do it here than to generalize `_elementwise_algo_init` further.
371
+ # `hdir` and `x` are already broadcasted in `_differentiate_iv`, so we know
372
+ # that `hdir` can be broadcasted to the final shape.
373
+ hdir = np.broadcast_to(hdir, shape).flatten()
374
+
375
+ status = np.full_like(x, eim._EINPROGRESS, dtype=int) # in progress
376
+ nit, nfev = 0, 1 # one function evaluation performed above
377
+ # Boolean indices of left, central, right, and (all) one-sided steps
378
+ il = hdir < 0
379
+ ic = hdir == 0
380
+ ir = hdir > 0
381
+ io = il | ir
382
+
383
+ # Most of these attributes are reasonably obvious, but:
384
+ # - `fs` holds all the function values of all active `x`. The zeroth
385
+ # axis corresponds with active points `x`, the first axis corresponds
386
+ # with the different steps (in the order described in
387
+ # `_differentiate_weights`).
388
+ # - `terms` (which could probably use a better name) is half the `order`,
389
+ # which is always even.
390
+ work = _RichResult(x=x, df=df, fs=f[:, np.newaxis], error=np.nan, h=h0,
391
+ df_last=np.nan, error_last=np.nan, h0=h0, fac=fac,
392
+ atol=atol, rtol=rtol, nit=nit, nfev=nfev,
393
+ status=status, dtype=dtype, terms=(order+1)//2,
394
+ hdir=hdir, il=il, ic=ic, ir=ir, io=io)
395
+ # This is the correspondence between terms in the `work` object and the
396
+ # final result. In this case, the mapping is trivial. Note that `success`
397
+ # is prepended automatically.
398
+ res_work_pairs = [('status', 'status'), ('df', 'df'), ('error', 'error'),
399
+ ('nit', 'nit'), ('nfev', 'nfev'), ('x', 'x')]
400
+
401
+ def pre_func_eval(work):
402
+ """Determine the abscissae at which the function needs to be evaluated.
403
+
404
+ See `_differentiate_weights` for a description of the stencil (pattern
405
+ of the abscissae).
406
+
407
+ In the first iteration, there is only one stored function value in
408
+ `work.fs`, `f(x)`, so we need to evaluate at `order` new points. In
409
+ subsequent iterations, we evaluate at two new points. Note that
410
+ `work.x` is always flattened into a 1D array after broadcasting with
411
+ all `args`, so we add a new axis at the end and evaluate all points
412
+ in one call to the function.
413
+
414
+ For improvement:
415
+ - Consider measuring the step size actually taken, since `(x + h) - x`
416
+ is not identically equal to `h` with floating point arithmetic.
417
+ - Adjust the step size automatically if `x` is too big to resolve the
418
+ step.
419
+ - We could probably save some work if there are no central difference
420
+ steps or no one-sided steps.
421
+ """
422
+ n = work.terms # half the order
423
+ h = work.h # step size
424
+ c = work.fac # step reduction factor
425
+ d = c**0.5 # square root of step reduction factor (one-sided stencil)
426
+ # Note - no need to be careful about dtypes until we allocate `x_eval`
427
+
428
+ if work.nit == 0:
429
+ hc = h / c**np.arange(n)
430
+ hc = np.concatenate((-hc[::-1], hc))
431
+ else:
432
+ hc = np.asarray([-h, h]) / c**(n-1)
433
+
434
+ if work.nit == 0:
435
+ hr = h / d**np.arange(2*n)
436
+ else:
437
+ hr = np.asarray([h, h/d]) / c**(n-1)
438
+
439
+ n_new = 2*n if work.nit == 0 else 2 # number of new abscissae
440
+ x_eval = np.zeros((len(work.hdir), n_new), dtype=work.dtype)
441
+ il, ic, ir = work.il, work.ic, work.ir
442
+ x_eval[ir] = work.x[ir, np.newaxis] + hr
443
+ x_eval[ic] = work.x[ic, np.newaxis] + hc
444
+ x_eval[il] = work.x[il, np.newaxis] - hr
445
+ return x_eval
446
+
447
+ def post_func_eval(x, f, work):
448
+ """ Estimate the derivative and error from the function evaluations
449
+
450
+ As in `pre_func_eval`: in the first iteration, there is only one stored
451
+ function value in `work.fs`, `f(x)`, so we need to add the `order` new
452
+ points. In subsequent iterations, we add two new points. The tricky
453
+ part is getting the order to match that of the weights, which is
454
+ described in `_differentiate_weights`.
455
+
456
+ For improvement:
457
+ - Change the order of the weights (and steps in `pre_func_eval`) to
458
+ simplify `work_fc` concatenation and eliminate `fc` concatenation.
459
+ - It would be simple to do one-step Richardson extrapolation with `df`
460
+ and `df_last` to increase the order of the estimate and/or improve
461
+ the error estimate.
462
+ - Process the function evaluations in a more numerically favorable
463
+ way. For instance, combining the pairs of central difference evals
464
+ into a second-order approximation and using Richardson extrapolation
465
+ to produce a higher order approximation seemed to retain accuracy up
466
+ to very high order.
467
+ - Alternatively, we could use `polyfit` like Jacobi. An advantage of
468
+ fitting polynomial to more points than necessary is improved noise
469
+ tolerance.
470
+ """
471
+ n = work.terms
472
+ n_new = n if work.nit == 0 else 1
473
+ il, ic, io = work.il, work.ic, work.io
474
+
475
+ # Central difference
476
+ # `work_fc` is *all* the points at which the function has been evaluated
477
+ # `fc` is the points we're using *this iteration* to produce the estimate
478
+ work_fc = (f[ic, :n_new], work.fs[ic, :], f[ic, -n_new:])
479
+ work_fc = np.concatenate(work_fc, axis=-1)
480
+ if work.nit == 0:
481
+ fc = work_fc
482
+ else:
483
+ fc = (work_fc[:, :n], work_fc[:, n:n+1], work_fc[:, -n:])
484
+ fc = np.concatenate(fc, axis=-1)
485
+
486
+ # One-sided difference
487
+ work_fo = np.concatenate((work.fs[io, :], f[io, :]), axis=-1)
488
+ if work.nit == 0:
489
+ fo = work_fo
490
+ else:
491
+ fo = np.concatenate((work_fo[:, 0:1], work_fo[:, -2*n:]), axis=-1)
492
+
493
+ work.fs = np.zeros((len(ic), work.fs.shape[-1] + 2*n_new))
494
+ work.fs[ic] = work_fc
495
+ work.fs[io] = work_fo
496
+
497
+ wc, wo = _differentiate_weights(work, n)
498
+ work.df_last = work.df.copy()
499
+ work.df[ic] = fc @ wc / work.h
500
+ work.df[io] = fo @ wo / work.h
501
+ work.df[il] *= -1
502
+
503
+ work.h /= work.fac
504
+ work.error_last = work.error
505
+ # Simple error estimate - the difference in derivative estimates between
506
+ # this iteration and the last. This is typically conservative because if
507
+ # convergence has begun, the true error is much closer to the difference
508
+ # between the current estimate and the *next* error estimate. However,
509
+ # we could use Richardson extrapolation to produce an error estimate that
510
+ # is one order higher, and take the difference between that and
511
+ # `work.df` (which would just be a constant factor that depends on `fac`).
512
+ work.error = abs(work.df - work.df_last)
513
+
514
+ def check_termination(work):
515
+ """Terminate due to convergence, non-finite values, or error increase"""
516
+ stop = np.zeros_like(work.df).astype(bool)
517
+
518
+ i = work.error < work.atol + work.rtol*abs(work.df)
519
+ work.status[i] = eim._ECONVERGED
520
+ stop[i] = True
521
+
522
+ if work.nit > 0:
523
+ i = ~((np.isfinite(work.x) & np.isfinite(work.df)) | stop)
524
+ work.df[i], work.status[i] = np.nan, eim._EVALUEERR
525
+ stop[i] = True
526
+
527
+ # With infinite precision, there is a step size below which
528
+ # all smaller step sizes will reduce the error. But in floating point
529
+ # arithmetic, catastrophic cancellation will begin to cause the error
530
+ # to increase again. This heuristic tries to avoid step sizes that are
531
+ # too small. There may be more theoretically sound approaches for
532
+ # detecting a step size that minimizes the total error, but this
533
+ # heuristic seems simple and effective.
534
+ i = (work.error > work.error_last*10) & ~stop
535
+ work.status[i] = _EERRORINCREASE
536
+ stop[i] = True
537
+
538
+ return stop
539
+
540
+ def post_termination_check(work):
541
+ return
542
+
543
+ def customize_result(res, shape):
544
+ return shape
545
+
546
+ return eim._loop(work, callback, shape, maxiter, func, args, dtype,
547
+ pre_func_eval, post_func_eval, check_termination,
548
+ post_termination_check, customize_result, res_work_pairs,
549
+ preserve_shape)
550
+
551
+
552
+ def _differentiate_weights(work, n):
553
+ # This produces the weights of the finite difference formula for a given
554
+ # stencil. In experiments, use of a second-order central difference formula
555
+ # with Richardson extrapolation was more accurate numerically, but it was
556
+ # more complicated, and it would have become even more complicated when
557
+ # adding support for one-sided differences. However, now that all the
558
+ # function evaluation values are stored, they can be processed in whatever
559
+ # way is desired to produce the derivative estimate. We leave alternative
560
+ # approaches to future work. To be more self-contained, here is the theory
561
+ # for deriving the weights below.
562
+ #
563
+ # Recall that the Taylor expansion of a univariate, scalar-valued function
564
+ # about a point `x` may be expressed as:
565
+ # f(x + h) = f(x) + f'(x)*h + f''(x)/2!*h**2 + O(h**3)
566
+ # Suppose we evaluate f(x), f(x+h), and f(x-h). We have:
567
+ # f(x) = f(x)
568
+ # f(x + h) = f(x) + f'(x)*h + f''(x)/2!*h**2 + O(h**3)
569
+ # f(x - h) = f(x) - f'(x)*h + f''(x)/2!*h**2 + O(h**3)
570
+ # We can solve for weights `wi` such that:
571
+ # w1*f(x) = w1*(f(x))
572
+ # + w2*f(x + h) = w2*(f(x) + f'(x)*h + f''(x)/2!*h**2) + O(h**3)
573
+ # + w3*f(x - h) = w3*(f(x) - f'(x)*h + f''(x)/2!*h**2) + O(h**3)
574
+ # = 0 + f'(x)*h + 0 + O(h**3)
575
+ # Then
576
+ # f'(x) ~ (w1*f(x) + w2*f(x+h) + w3*f(x-h))/h
577
+ # is a finite difference derivative approximation with error O(h**2),
578
+ # and so it is said to be a "second-order" approximation. Under certain
579
+ # conditions (e.g. well-behaved function, `h` sufficiently small), the
580
+ # error in the approximation will decrease with h**2; that is, if `h` is
581
+ # reduced by a factor of 2, the error is reduced by a factor of 4.
582
+ #
583
+ # By default, we use eighth-order formulae. Our central-difference formula
584
+ # uses abscissae:
585
+ # x-h/c**3, x-h/c**2, x-h/c, x-h, x, x+h, x+h/c, x+h/c**2, x+h/c**3
586
+ # where `c` is the step factor. (Typically, the step factor is greater than
587
+ # one, so the outermost points - as written above - are actually closest to
588
+ # `x`.) This "stencil" is chosen so that each iteration, the step can be
589
+ # reduced by the factor `c`, and most of the function evaluations can be
590
+ # reused with the new step size. For example, in the next iteration, we
591
+ # will have:
592
+ # x-h/c**4, x-h/c**3, x-h/c**2, x-h/c, x, x+h/c, x+h/c**2, x+h/c**3, x+h/c**4
593
+ # We do not reuse `x-h` and `x+h` for the new derivative estimate.
594
+ # While this would increase the order of the formula and thus the
595
+ # theoretical convergence rate, it is also less stable numerically.
596
+ # (As noted above, there are other ways of processing the values that are
597
+ # more stable. Thus, even now we store `f(x-h)` and `f(x+h)` in `work.fs`
598
+ # to simplify future development of this sort of improvement.)
599
+ #
600
+ # The (right) one-sided formula is produced similarly using abscissae
601
+ # x, x+h, x+h/d, x+h/d**2, ..., x+h/d**6, x+h/d**7
602
+ # where `d` is the square root of `c`. (The left one-sided formula simply
603
+ # uses -h.) When the step size is reduced by factor `c = d**2`, we have
604
+ # abscissae:
605
+ # x, x+h/d**2, x+h/d**3, ..., x+h/d**8, x+h/d**9
606
+ # `d` is chosen as the square root of `c` so that the rate of the step-size
607
+ # reduction is the same per iteration as in the central difference case.
608
+ # Note that because the central difference formulas are inherently of even
609
+ # order, for simplicity, we use only even-order formulas for one-sided
610
+ # differences, too.
611
+
612
+ # It's possible for the user to specify `fac` in, say, double precision but
613
+ # `x` and `args` in single precision. `fac` gets converted to single
614
+ # precision, but we should always use double precision for the intermediate
615
+ # calculations here to avoid additional error in the weights.
616
+ fac = work.fac.astype(np.float64)
617
+
618
+ # Note that if the user switches back to floating point precision with
619
+ # `x` and `args`, then `fac` will not necessarily equal the (lower
620
+ # precision) cached `_differentiate_weights.fac`, and the weights will
621
+ # need to be recalculated. This could be fixed, but it's late, and of
622
+ # low consequence.
623
+ if fac != _differentiate_weights.fac:
624
+ _differentiate_weights.central = []
625
+ _differentiate_weights.right = []
626
+ _differentiate_weights.fac = fac
627
+
628
+ if len(_differentiate_weights.central) != 2*n + 1:
629
+ # Central difference weights. Consider refactoring this; it could
630
+ # probably be more compact.
631
+ i = np.arange(-n, n + 1)
632
+ p = np.abs(i) - 1. # center point has power `p` -1, but sign `s` is 0
633
+ s = np.sign(i)
634
+
635
+ h = s / fac ** p
636
+ A = np.vander(h, increasing=True).T
637
+ b = np.zeros(2*n + 1)
638
+ b[1] = 1
639
+ weights = np.linalg.solve(A, b)
640
+
641
+ # Enforce identities to improve accuracy
642
+ weights[n] = 0
643
+ for i in range(n):
644
+ weights[-i-1] = -weights[i]
645
+
646
+ # Cache the weights. We only need to calculate them once unless
647
+ # the step factor changes.
648
+ _differentiate_weights.central = weights
649
+
650
+ # One-sided difference weights. The left one-sided weights (with
651
+ # negative steps) are simply the negative of the right one-sided
652
+ # weights, so no need to compute them separately.
653
+ i = np.arange(2*n + 1)
654
+ p = i - 1.
655
+ s = np.sign(i)
656
+
657
+ h = s / np.sqrt(fac) ** p
658
+ A = np.vander(h, increasing=True).T
659
+ b = np.zeros(2 * n + 1)
660
+ b[1] = 1
661
+ weights = np.linalg.solve(A, b)
662
+
663
+ _differentiate_weights.right = weights
664
+
665
+ return (_differentiate_weights.central.astype(work.dtype, copy=False),
666
+ _differentiate_weights.right.astype(work.dtype, copy=False))
667
+ _differentiate_weights.central = []
668
+ _differentiate_weights.right = []
669
+ _differentiate_weights.fac = None
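
A minimal sketch (not part of the file above; `np.sin` is just an illustrative test function) of the Vandermonde weight construction that `_differentiate_weights` performs, specialized to the simplest 3-point central stencil:

    import numpy as np

    # Offsets of the 3-point central stencil, in units of the step size.
    h = np.array([-1., 0., 1.])
    A = np.vander(h, increasing=True).T   # row k holds h**k
    b = np.zeros(3)
    b[1] = 1                              # match the f'(x)*h Taylor term
    w = np.linalg.solve(A, b)             # -> [-0.5, 0., 0.5]

    x, dx = 1.0, 1e-5
    approx = w @ np.sin(x + h*dx) / dx    # second-order central difference
    print(w, approx - np.cos(x))          # error on the order of dx**2
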
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_direct.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (43.5 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_direct_py.py ADDED
@@ -0,0 +1,278 @@
1
+ from __future__ import annotations
2
+ from typing import ( # noqa: UP035
3
+ Any, Callable, Iterable, TYPE_CHECKING
4
+ )
5
+
6
+ import numpy as np
7
+ from scipy.optimize import OptimizeResult
8
+ from ._constraints import old_bound_to_new, Bounds
9
+ from ._direct import direct as _direct # type: ignore
10
+
11
+ if TYPE_CHECKING:
12
+ import numpy.typing as npt
13
+
14
+ __all__ = ['direct']
15
+
16
+ ERROR_MESSAGES = (
17
+ "Number of function evaluations done is larger than maxfun={}",
18
+ "Number of iterations is larger than maxiter={}",
19
+ "u[i] < l[i] for some i",
20
+ "maxfun is too large",
21
+ "Initialization failed",
22
+ "There was an error in the creation of the sample points",
23
+ "An error occurred while the function was sampled",
24
+ "Maximum number of levels has been reached.",
25
+ "Forced stop",
26
+ "Invalid arguments",
27
+ "Out of memory",
28
+ )
29
+
30
+ SUCCESS_MESSAGES = (
31
+ ("The best function value found is within a relative error={} "
32
+ "of the (known) global optimum f_min"),
33
+ ("The volume of the hyperrectangle containing the lowest function value "
34
+ "found is below vol_tol={}"),
35
+ ("The side length measure of the hyperrectangle containing the lowest "
36
+ "function value found is below len_tol={}"),
37
+ )
38
+
39
+
40
+ def direct(
41
+ func: Callable[[npt.ArrayLike, tuple[Any]], float],
42
+ bounds: Iterable | Bounds,
43
+ *,
44
+ args: tuple = (),
45
+ eps: float = 1e-4,
46
+ maxfun: int | None = None,
47
+ maxiter: int = 1000,
48
+ locally_biased: bool = True,
49
+ f_min: float = -np.inf,
50
+ f_min_rtol: float = 1e-4,
51
+ vol_tol: float = 1e-16,
52
+ len_tol: float = 1e-6,
53
+ callback: Callable[[npt.ArrayLike], None] | None = None
54
+ ) -> OptimizeResult:
55
+ """
56
+ Finds the global minimum of a function using the
57
+ DIRECT algorithm.
58
+
59
+ Parameters
60
+ ----------
61
+ func : callable
62
+ The objective function to be minimized.
63
+ ``func(x, *args) -> float``
64
+ where ``x`` is a 1-D array with shape (n,) and ``args`` is a tuple of
65
+ the fixed parameters needed to completely specify the function.
66
+ bounds : sequence or `Bounds`
67
+ Bounds for variables. There are two ways to specify the bounds:
68
+
69
+ 1. Instance of `Bounds` class.
70
+ 2. ``(min, max)`` pairs for each element in ``x``.
71
+
72
+ args : tuple, optional
73
+ Any additional fixed parameters needed to
74
+ completely specify the objective function.
75
+ eps : float, optional
76
+ Minimal required difference of the objective function values
77
+ between the current best hyperrectangle and the next potentially
78
+ optimal hyperrectangle to be divided. In consequence, `eps` serves as a
79
+ tradeoff between local and global search: the smaller, the more local
80
+ the search becomes. Default is 1e-4.
81
+ maxfun : int or None, optional
82
+ Approximate upper bound on objective function evaluations.
83
+ If `None`, will be automatically set to ``1000 * N`` where ``N``
84
+ represents the number of dimensions. Will be capped if necessary to
85
+ limit DIRECT's RAM usage to approximately 1 GiB. This will only occur for very
86
+ high dimensional problems and excessive `maxfun`. Default is `None`.
87
+ maxiter : int, optional
88
+ Maximum number of iterations. Default is 1000.
89
+ locally_biased : bool, optional
90
+ If `True` (default), use the locally biased variant of the
91
+ algorithm known as DIRECT_L. If `False`, use the original unbiased
92
+ DIRECT algorithm. For hard problems with many local minima,
93
+ `False` is recommended.
94
+ f_min : float, optional
95
+ Function value of the global optimum. Set this value only if the
96
+ global optimum is known. Default is ``-np.inf``, so that this
97
+ termination criterion is deactivated.
98
+ f_min_rtol : float, optional
99
+ Terminate the optimization once the relative error between the
100
+ current best minimum `f` and the supplied global minimum `f_min`
101
+ is smaller than `f_min_rtol`. This parameter is only used if
102
+ `f_min` is also set. Must lie between 0 and 1. Default is 1e-4.
103
+ vol_tol : float, optional
104
+ Terminate the optimization once the volume of the hyperrectangle
105
+ containing the lowest function value is smaller than `vol_tol`
106
+ of the complete search space. Must lie between 0 and 1.
107
+ Default is 1e-16.
108
+ len_tol : float, optional
109
+ If `locally_biased=True`, terminate the optimization once half of
110
+ the normalized maximal side length of the hyperrectangle containing
111
+ the lowest function value is smaller than `len_tol`.
112
+ If `locally_biased=False`, terminate the optimization once half of
113
+ the normalized diagonal of the hyperrectangle containing the lowest
114
+ function value is smaller than `len_tol`. Must lie between 0 and 1.
115
+ Default is 1e-6.
116
+ callback : callable, optional
117
+ A callback function with signature ``callback(xk)`` where ``xk``
118
+ represents the best solution found so far.
119
+
120
+ Returns
121
+ -------
122
+ res : OptimizeResult
123
+ The optimization result represented as a ``OptimizeResult`` object.
124
+ Important attributes are: ``x`` the solution array, ``success`` a
125
+ Boolean flag indicating if the optimizer exited successfully and
126
+ ``message`` which describes the cause of the termination. See
127
+ `OptimizeResult` for a description of other attributes.
128
+
129
+ Notes
130
+ -----
131
+ DIviding RECTangles (DIRECT) is a deterministic global
132
+ optimization algorithm capable of minimizing a black box function with
133
+ its variables subject to lower and upper bound constraints by sampling
134
+ potential solutions in the search space [1]_. The algorithm starts by
135
+ normalising the search space to an n-dimensional unit hypercube.
136
+ It samples the function at the center of this hypercube and at 2n
137
+ (n is the number of variables) more points, 2 in each coordinate
138
+ direction. Using these function values, DIRECT then divides the
139
+ domain into hyperrectangles, each having exactly one of the sampling
140
+ points as its center. In each iteration, DIRECT chooses, using the `eps`
141
+ parameter which defaults to 1e-4, some of the existing hyperrectangles
142
+ to be further divided. This division process continues until either the
143
+ maximum number of iterations or maximum function evaluations allowed
144
+ are exceeded, or the hyperrectangle containing the minimal value found
145
+ so far becomes small enough. If `f_min` is specified, the optimization
146
+ will stop once this function value is reached within a relative tolerance.
147
+ The locally biased variant of DIRECT (originally called DIRECT_L) [2]_ is
148
+ used by default. It makes the search more locally biased and more
149
+ efficient for cases with only a few local minima.
150
+
151
+ A note about termination criteria: `vol_tol` refers to the volume of the
152
+ hyperrectangle containing the lowest function value found so far. This
153
+ volume decreases exponentially with increasing dimensionality of the
154
+ problem. Therefore `vol_tol` should be decreased to avoid premature
155
+ termination of the algorithm for higher dimensions. This does not hold
156
+ for `len_tol`: it refers either to half of the maximal side length
157
+ (for ``locally_biased=True``) or half of the diagonal of the
158
+ hyperrectangle (for ``locally_biased=False``).
159
+
160
+ This code is based on the DIRECT 2.0.4 Fortran code by Gablonsky et al. at
161
+ https://ctk.math.ncsu.edu/SOFTWARE/DIRECTv204.tar.gz .
162
+ This original version was initially converted via f2c and then cleaned up
163
+ and reorganized by Steven G. Johnson, August 2007, for the NLopt project.
164
+ The `direct` function wraps the C implementation.
165
+
166
+ .. versionadded:: 1.9.0
167
+
168
+ References
169
+ ----------
170
+ .. [1] Jones, D.R., Perttunen, C.D. & Stuckman, B.E. Lipschitzian
171
+ optimization without the Lipschitz constant. J Optim Theory Appl
172
+ 79, 157-181 (1993).
173
+ .. [2] Gablonsky, J., Kelley, C. A Locally-Biased form of the DIRECT
174
+ Algorithm. Journal of Global Optimization 21, 27-37 (2001).
175
+
176
+ Examples
177
+ --------
178
+ The following example is a 2-D problem with four local minima: minimizing
179
+ the Styblinski-Tang function
180
+ (https://en.wikipedia.org/wiki/Test_functions_for_optimization).
181
+
182
+ >>> from scipy.optimize import direct, Bounds
183
+ >>> def styblinski_tang(pos):
184
+ ... x, y = pos
185
+ ... return 0.5 * (x**4 - 16*x**2 + 5*x + y**4 - 16*y**2 + 5*y)
186
+ >>> bounds = Bounds([-4., -4.], [4., 4.])
187
+ >>> result = direct(styblinski_tang, bounds)
188
+ >>> result.x, result.fun, result.nfev
189
+ array([-2.90321597, -2.90321597]), -78.3323279095383, 2011
190
+
191
+ The correct global minimum was found but with a huge number of function
192
+ evaluations (2011). Loosening the termination tolerances `vol_tol` and
193
+ `len_tol` can be used to stop DIRECT earlier.
194
+
195
+ >>> result = direct(styblinski_tang, bounds, len_tol=1e-3)
196
+ >>> result.x, result.fun, result.nfev
197
+ array([-2.9044353, -2.9044353]), -78.33230330754142, 207
198
+
199
+ """
200
+ # convert bounds to new Bounds class if necessary
201
+ if not isinstance(bounds, Bounds):
202
+ if isinstance(bounds, list) or isinstance(bounds, tuple):
203
+ lb, ub = old_bound_to_new(bounds)
204
+ bounds = Bounds(lb, ub)
205
+ else:
206
+ message = ("bounds must be a sequence or "
207
+ "instance of Bounds class")
208
+ raise ValueError(message)
209
+
210
+ lb = np.ascontiguousarray(bounds.lb, dtype=np.float64)
211
+ ub = np.ascontiguousarray(bounds.ub, dtype=np.float64)
212
+
213
+ # validate bounds
214
+ # check that lower bounds are smaller than upper bounds
215
+ if not np.all(lb < ub):
216
+ raise ValueError('Bounds are not consistent min < max')
217
+ # check for infs
218
+ if (np.any(np.isinf(lb)) or np.any(np.isinf(ub))):
219
+ raise ValueError("Bounds must not be inf.")
220
+
221
+ # validate tolerances
222
+ if (vol_tol < 0 or vol_tol > 1):
223
+ raise ValueError("vol_tol must be between 0 and 1.")
224
+ if (len_tol < 0 or len_tol > 1):
225
+ raise ValueError("len_tol must be between 0 and 1.")
226
+ if (f_min_rtol < 0 or f_min_rtol > 1):
227
+ raise ValueError("f_min_rtol must be between 0 and 1.")
228
+
229
+ # validate maxfun and maxiter
230
+ if maxfun is None:
231
+ maxfun = 1000 * lb.shape[0]
232
+ if not isinstance(maxfun, int):
233
+ raise ValueError("maxfun must be of type int.")
234
+ if maxfun < 0:
235
+ raise ValueError("maxfun must be > 0.")
236
+ if not isinstance(maxiter, int):
237
+ raise ValueError("maxiter must be of type int.")
238
+ if maxiter < 0:
239
+ raise ValueError("maxiter must be > 0.")
240
+
241
+ # validate boolean parameters
242
+ if not isinstance(locally_biased, bool):
243
+ raise ValueError("locally_biased must be True or False.")
244
+
245
+ def _func_wrap(x, args=None):
246
+ x = np.asarray(x)
247
+ if args is None:
248
+ f = func(x)
249
+ else:
250
+ f = func(x, *args)
251
+ # always return a float
252
+ return np.asarray(f).item()
253
+
254
+ # TODO: fix disp argument
255
+ x, fun, ret_code, nfev, nit = _direct(
256
+ _func_wrap,
257
+ np.asarray(lb), np.asarray(ub),
258
+ args,
259
+ False, eps, maxfun, maxiter,
260
+ locally_biased,
261
+ f_min, f_min_rtol,
262
+ vol_tol, len_tol, callback
263
+ )
264
+
265
+ format_val = (maxfun, maxiter, f_min_rtol, vol_tol, len_tol)
266
+ if ret_code > 2:
267
+ message = SUCCESS_MESSAGES[ret_code - 3].format(
268
+ format_val[ret_code - 1])
269
+ elif 0 < ret_code <= 2:
270
+ message = ERROR_MESSAGES[ret_code - 1].format(format_val[ret_code - 1])
271
+ elif 0 > ret_code > -100:
272
+ message = ERROR_MESSAGES[abs(ret_code) + 1]
273
+ else:
274
+ message = ERROR_MESSAGES[ret_code + 99]
275
+
276
+ return OptimizeResult(x=np.asarray(x), fun=fun, status=ret_code,
277
+ success=ret_code > 2, message=message,
278
+ nfev=nfev, nit=nit)
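
A minimal usage sketch (SciPy >= 1.9 assumed; the 8-D problem and the particular `vol_tol` value are illustrative, not prescriptive) of the `vol_tol` note in the docstring above: because the hyperrectangle volume shrinks exponentially with dimension, `vol_tol` may need to be decreased for higher-dimensional problems to avoid premature termination:

    import numpy as np
    from scipy.optimize import direct, Bounds

    def styblinski_tang(x):
        return 0.5 * np.sum(x**4 - 16*x**2 + 5*x)

    n = 8
    bounds = Bounds([-4.]*n, [4.]*n)
    # The default vol_tol=1e-16 can trigger early in 8-D; decrease it.
    res = direct(styblinski_tang, bounds, vol_tol=1e-24)
    print(res.x, res.fun, res.message)
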
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_dual_annealing.py ADDED
@@ -0,0 +1,715 @@
1
+ # Dual Annealing implementation.
2
+ # Copyright (c) 2018 Sylvain Gubian <[email protected]>,
3
+ # Yang Xiang <[email protected]>
4
+ # Author: Sylvain Gubian, Yang Xiang, PMP S.A.
5
+
6
+ """
7
+ A Dual Annealing global optimization algorithm
8
+ """
9
+
10
+ import numpy as np
11
+ from scipy.optimize import OptimizeResult
12
+ from scipy.optimize import minimize, Bounds
13
+ from scipy.special import gammaln
14
+ from scipy._lib._util import check_random_state
15
+ from scipy.optimize._constraints import new_bounds_to_old
16
+
17
+ __all__ = ['dual_annealing']
18
+
19
+
20
+ class VisitingDistribution:
21
+ """
22
+ Class used to generate new coordinates based on the distorted
23
+ Cauchy-Lorentz distribution. Depending on the steps within the strategy
24
+ chain, the class implements the strategy for generating new location
25
+ changes.
26
+
27
+ Parameters
28
+ ----------
29
+ lb : array_like
30
+ A 1-D NumPy ndarray containing lower bounds of the generated
31
+ components. Neither NaN nor inf is allowed.
32
+ ub : array_like
33
+ A 1-D NumPy ndarray containing upper bounds for the generated
34
+ components. Neither NaN nor inf is allowed.
35
+ visiting_param : float
36
+ Parameter for visiting distribution. Default value is 2.62.
37
+ Higher values give the visiting distribution a heavier tail; this
38
+ makes the algorithm jump to a more distant region.
39
+ The value range is (1, 3]. Its value is fixed for the life of the
40
+ object.
41
+ rand_gen : {`~numpy.random.RandomState`, `~numpy.random.Generator`}
42
+ A `~numpy.random.RandomState`, `~numpy.random.Generator` object
43
+ for using the current state of the created random generator container.
44
+
45
+ """
46
+ TAIL_LIMIT = 1.e8
47
+ MIN_VISIT_BOUND = 1.e-10
48
+
49
+ def __init__(self, lb, ub, visiting_param, rand_gen):
50
+ # if you wish to make _visiting_param adjustable during the life of
51
+ # the object then _factor2, _factor3, _factor5, _d1, _factor6 will
52
+ # have to be dynamically calculated in `visit_fn`. They're factored
53
+ # out here so they don't need to be recalculated all the time.
54
+ self._visiting_param = visiting_param
55
+ self.rand_gen = rand_gen
56
+ self.lower = lb
57
+ self.upper = ub
58
+ self.bound_range = ub - lb
59
+
60
+ # these are invariant numbers unless visiting_param changes
61
+ self._factor2 = np.exp((4.0 - self._visiting_param) * np.log(
62
+ self._visiting_param - 1.0))
63
+ self._factor3 = np.exp((2.0 - self._visiting_param) * np.log(2.0)
64
+ / (self._visiting_param - 1.0))
65
+ self._factor4_p = np.sqrt(np.pi) * self._factor2 / (self._factor3 * (
66
+ 3.0 - self._visiting_param))
67
+
68
+ self._factor5 = 1.0 / (self._visiting_param - 1.0) - 0.5
69
+ self._d1 = 2.0 - self._factor5
70
+ self._factor6 = np.pi * (1.0 - self._factor5) / np.sin(
71
+ np.pi * (1.0 - self._factor5)) / np.exp(gammaln(self._d1))
72
+
73
+ def visiting(self, x, step, temperature):
74
+ """ Based on the step in the strategy chain, new coordinates are
75
+ generated by changing all components at the same time or only
76
+ one of them; the new values are computed with the visit_fn method
77
+ """
78
+ dim = x.size
79
+ if step < dim:
80
+ # Changing all coordinates with a new visiting value
81
+ visits = self.visit_fn(temperature, dim)
82
+ upper_sample, lower_sample = self.rand_gen.uniform(size=2)
83
+ visits[visits > self.TAIL_LIMIT] = self.TAIL_LIMIT * upper_sample
84
+ visits[visits < -self.TAIL_LIMIT] = -self.TAIL_LIMIT * lower_sample
85
+ x_visit = visits + x
86
+ a = x_visit - self.lower
87
+ b = np.fmod(a, self.bound_range) + self.bound_range
88
+ x_visit = np.fmod(b, self.bound_range) + self.lower
89
+ x_visit[np.fabs(
90
+ x_visit - self.lower) < self.MIN_VISIT_BOUND] += 1.e-10
91
+ else:
92
+ # Changing only one coordinate at a time based on strategy
93
+ # chain step
94
+ x_visit = np.copy(x)
95
+ visit = self.visit_fn(temperature, 1)[0]
96
+ if visit > self.TAIL_LIMIT:
97
+ visit = self.TAIL_LIMIT * self.rand_gen.uniform()
98
+ elif visit < -self.TAIL_LIMIT:
99
+ visit = -self.TAIL_LIMIT * self.rand_gen.uniform()
100
+ index = step - dim
101
+ x_visit[index] = visit + x[index]
102
+ a = x_visit[index] - self.lower[index]
103
+ b = np.fmod(a, self.bound_range[index]) + self.bound_range[index]
104
+ x_visit[index] = np.fmod(b, self.bound_range[
105
+ index]) + self.lower[index]
106
+ if np.fabs(x_visit[index] - self.lower[
107
+ index]) < self.MIN_VISIT_BOUND:
108
+ x_visit[index] += self.MIN_VISIT_BOUND
109
+ return x_visit
110
+
111
+ def visit_fn(self, temperature, dim):
112
+ """ Formula Visita from p. 405 of reference [2] """
113
+ x, y = self.rand_gen.normal(size=(dim, 2)).T
114
+
115
+ factor1 = np.exp(np.log(temperature) / (self._visiting_param - 1.0))
116
+ factor4 = self._factor4_p * factor1
117
+
118
+ # sigmax
119
+ x *= np.exp(-(self._visiting_param - 1.0) * np.log(
120
+ self._factor6 / factor4) / (3.0 - self._visiting_param))
121
+
122
+ den = np.exp((self._visiting_param - 1.0) * np.log(np.fabs(y)) /
123
+ (3.0 - self._visiting_param))
124
+
125
+ return x / den
126
+
127
+
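
A minimal sketch (illustrative only; `VisitingDistribution` is a private helper of this module, not a supported API) of the docstring's claim that a larger `visiting_param` gives the visiting distribution a heavier tail:

    import numpy as np

    rng = np.random.default_rng(0)
    lb, ub = np.array([-5.]), np.array([5.])
    for qv in (1.5, 2.62):
        vd = VisitingDistribution(lb, ub, qv, rng)
        jumps = np.abs(vd.visit_fn(100.0, 100_000))
        # Heavier tail -> larger extreme quantiles at the same temperature.
        print(qv, np.percentile(jumps, 99))
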
128
+ class EnergyState:
129
+ """
130
+ Class used to record the energy state. At any time, it knows what is the
131
+ currently used coordinates and the most recent best location.
132
+
133
+ Parameters
134
+ ----------
135
+ lower : array_like
136
+ A 1-D NumPy ndarray containing lower bounds for generating the initial
137
+ random components in the `reset` method.
138
+ upper : array_like
139
+ A 1-D NumPy ndarray containing upper bounds for generating the initial
140
+ random components in the `reset` method.
141
+ Neither NaN nor inf is allowed.
142
+ callback : callable, ``callback(x, f, context)``, optional
143
+ A callback function which will be called for all minima found.
144
+ ``x`` and ``f`` are the coordinates and function value of the
145
+ latest minimum found, and `context` has value in [0, 1, 2]
146
+ """
147
+ # Maximum number of trials for generating a valid starting point
148
+ MAX_REINIT_COUNT = 1000
149
+
150
+ def __init__(self, lower, upper, callback=None):
151
+ self.ebest = None
152
+ self.current_energy = None
153
+ self.current_location = None
154
+ self.xbest = None
155
+ self.lower = lower
156
+ self.upper = upper
157
+ self.callback = callback
158
+
159
+ def reset(self, func_wrapper, rand_gen, x0=None):
160
+ """
161
+ Initialize current location is the search domain. If `x0` is not
162
+ provided, a random location within the bounds is generated.
163
+ """
164
+ if x0 is None:
165
+ self.current_location = rand_gen.uniform(self.lower, self.upper,
166
+ size=len(self.lower))
167
+ else:
168
+ self.current_location = np.copy(x0)
169
+ init_error = True
170
+ reinit_counter = 0
171
+ while init_error:
172
+ self.current_energy = func_wrapper.fun(self.current_location)
173
+ if self.current_energy is None:
174
+ raise ValueError('Objective function is returning None')
175
+ if (not np.isfinite(self.current_energy) or np.isnan(
176
+ self.current_energy)):
177
+ if reinit_counter >= EnergyState.MAX_REINIT_COUNT:
178
+ init_error = False
179
+ message = (
180
+ 'Stopping algorithm because the function '
181
+ 'creates NaN or (+/-) infinity values even when '
182
+ 'trying new random parameters'
183
+ )
184
+ raise ValueError(message)
185
+ self.current_location = rand_gen.uniform(self.lower,
186
+ self.upper,
187
+ size=self.lower.size)
188
+ reinit_counter += 1
189
+ else:
190
+ init_error = False
191
+ # If first time reset, initialize ebest and xbest
192
+ if self.ebest is None and self.xbest is None:
193
+ self.ebest = self.current_energy
194
+ self.xbest = np.copy(self.current_location)
195
+ # Otherwise, we keep them in case of reannealing reset
196
+
197
+ def update_best(self, e, x, context):
198
+ self.ebest = e
199
+ self.xbest = np.copy(x)
200
+ if self.callback is not None:
201
+ val = self.callback(x, e, context)
202
+ if val is not None:
203
+ if val:
204
+ return ('Callback function requested to stop early by '
205
+ 'returning True')
206
+
207
+ def update_current(self, e, x):
208
+ self.current_energy = e
209
+ self.current_location = np.copy(x)
210
+
211
+
212
+ class StrategyChain:
213
+ """
214
+ Class that implements within a Markov chain the strategy for location
215
+ acceptance and local search decision making.
216
+
217
+ Parameters
218
+ ----------
219
+ acceptance_param : float
220
+ Parameter for acceptance distribution. It is used to control the
221
+ probability of acceptance. The lower the acceptance parameter, the
222
+ smaller the probability of acceptance. Default value is -5.0 with
223
+ a range (-1e4, -5].
224
+ visit_dist : VisitingDistribution
225
+ Instance of `VisitingDistribution` class.
226
+ func_wrapper : ObjectiveFunWrapper
227
+ Instance of `ObjectiveFunWrapper` class.
228
+ minimizer_wrapper: LocalSearchWrapper
229
+ Instance of `LocalSearchWrapper` class.
230
+ rand_gen : {None, int, `numpy.random.Generator`,
231
+ `numpy.random.RandomState`}, optional
232
+
233
+ If `seed` is None (or `np.random`), the `numpy.random.RandomState`
234
+ singleton is used.
235
+ If `seed` is an int, a new ``RandomState`` instance is used,
236
+ seeded with `seed`.
237
+ If `seed` is already a ``Generator`` or ``RandomState`` instance then
238
+ that instance is used.
239
+ energy_state: EnergyState
240
+ Instance of `EnergyState` class.
241
+
242
+ """
243
+
244
+ def __init__(self, acceptance_param, visit_dist, func_wrapper,
245
+ minimizer_wrapper, rand_gen, energy_state):
246
+ # Local strategy chain minimum energy and location
247
+ self.emin = energy_state.current_energy
248
+ self.xmin = np.array(energy_state.current_location)
249
+ # Global optimizer state
250
+ self.energy_state = energy_state
251
+ # Acceptance parameter
252
+ self.acceptance_param = acceptance_param
253
+ # Visiting distribution instance
254
+ self.visit_dist = visit_dist
255
+ # Wrapper to objective function
256
+ self.func_wrapper = func_wrapper
257
+ # Wrapper to the local minimizer
258
+ self.minimizer_wrapper = minimizer_wrapper
259
+ self.not_improved_idx = 0
260
+ self.not_improved_max_idx = 1000
261
+ self._rand_gen = rand_gen
262
+ self.temperature_step = 0
263
+ self.K = 100 * len(energy_state.current_location)
264
+
265
+ def accept_reject(self, j, e, x_visit):
266
+ r = self._rand_gen.uniform()
267
+ pqv_temp = 1.0 - ((1.0 - self.acceptance_param) *
268
+ (e - self.energy_state.current_energy) / self.temperature_step)
269
+ if pqv_temp <= 0.:
270
+ pqv = 0.
271
+ else:
272
+ pqv = np.exp(np.log(pqv_temp) / (
273
+ 1. - self.acceptance_param))
274
+
275
+ if r <= pqv:
276
+ # We accept the new location and update state
277
+ self.energy_state.update_current(e, x_visit)
278
+ self.xmin = np.copy(self.energy_state.current_location)
279
+
280
+ # No improvement for a long time
281
+ if self.not_improved_idx >= self.not_improved_max_idx:
282
+ if j == 0 or self.energy_state.current_energy < self.emin:
283
+ self.emin = self.energy_state.current_energy
284
+ self.xmin = np.copy(self.energy_state.current_location)
285
+
286
+ def run(self, step, temperature):
287
+ self.temperature_step = temperature / float(step + 1)
288
+ self.not_improved_idx += 1
289
+ for j in range(self.energy_state.current_location.size * 2):
290
+ if j == 0:
291
+ if step == 0:
292
+ self.energy_state_improved = True
293
+ else:
294
+ self.energy_state_improved = False
295
+ x_visit = self.visit_dist.visiting(
296
+ self.energy_state.current_location, j, temperature)
297
+ # Calling the objective function
298
+ e = self.func_wrapper.fun(x_visit)
299
+ if e < self.energy_state.current_energy:
300
+ # We have got a better energy value
301
+ self.energy_state.update_current(e, x_visit)
302
+ if e < self.energy_state.ebest:
303
+ val = self.energy_state.update_best(e, x_visit, 0)
304
+ if val is not None:
305
+ if val:
306
+ return val
307
+ self.energy_state_improved = True
308
+ self.not_improved_idx = 0
309
+ else:
310
+ # We have not improved but do we accept the new location?
311
+ self.accept_reject(j, e, x_visit)
312
+ if self.func_wrapper.nfev >= self.func_wrapper.maxfun:
313
+ return ('Maximum number of function calls reached '
314
+ 'during annealing')
315
+ # End of StrategyChain loop
316
+
317
+ def local_search(self):
318
+ # Decision making for performing a local search
319
+ # based on strategy chain results
320
+ # If the energy has improved, or there has been no improvement for
321
+ # too long, perform a local search from the best strategy chain location
322
+ if self.energy_state_improved:
323
+ # Global energy has improved, let's see if LS improves further
324
+ e, x = self.minimizer_wrapper.local_search(self.energy_state.xbest,
325
+ self.energy_state.ebest)
326
+ if e < self.energy_state.ebest:
327
+ self.not_improved_idx = 0
328
+ val = self.energy_state.update_best(e, x, 1)
329
+ if val is not None:
330
+ if val:
331
+ return val
332
+ self.energy_state.update_current(e, x)
333
+ if self.func_wrapper.nfev >= self.func_wrapper.maxfun:
334
+ return ('Maximum number of function calls reached '
335
+ 'during local search')
336
+ # Check probability of a need to perform a LS even if no improvement
337
+ do_ls = False
338
+ if self.K < 90 * len(self.energy_state.current_location):
339
+ pls = np.exp(self.K * (
340
+ self.energy_state.ebest - self.energy_state.current_energy) /
341
+ self.temperature_step)
342
+ if pls >= self._rand_gen.uniform():
343
+ do_ls = True
344
+ # Global energy not improved, let's see what LS gives
345
+ # on the best strategy chain location
346
+ if self.not_improved_idx >= self.not_improved_max_idx:
347
+ do_ls = True
348
+ if do_ls:
349
+ e, x = self.minimizer_wrapper.local_search(self.xmin, self.emin)
350
+ self.xmin = np.copy(x)
351
+ self.emin = e
352
+ self.not_improved_idx = 0
353
+ self.not_improved_max_idx = self.energy_state.current_location.size
354
+ if e < self.energy_state.ebest:
355
+ val = self.energy_state.update_best(
356
+ self.emin, self.xmin, 2)
357
+ if val is not None:
358
+ if val:
359
+ return val
360
+ self.energy_state.update_current(e, x)
361
+ if self.func_wrapper.nfev >= self.func_wrapper.maxfun:
362
+ return ('Maximum number of function calls reached '
363
+ 'during dual annealing')
364
+
365
+
366
+ class ObjectiveFunWrapper:
367
+
368
+ def __init__(self, func, maxfun=1e7, *args):
369
+ self.func = func
370
+ self.args = args
371
+ # Number of objective function evaluations
372
+ self.nfev = 0
373
+ # Number of gradient function evaluation if used
374
+ self.ngev = 0
375
+ # Number of hessian of the objective function if used
376
+ self.nhev = 0
377
+ self.maxfun = maxfun
378
+
379
+ def fun(self, x):
380
+ self.nfev += 1
381
+ return self.func(x, *self.args)
382
+
383
+
384
+ class LocalSearchWrapper:
385
+ """
386
+ Class used to wrap around the minimizer used for local search
387
+ Default local minimizer is SciPy minimizer L-BFGS-B
388
+ """
389
+
390
+ LS_MAXITER_RATIO = 6
391
+ LS_MAXITER_MIN = 100
392
+ LS_MAXITER_MAX = 1000
393
+
394
+ def __init__(self, search_bounds, func_wrapper, *args, **kwargs):
395
+ self.func_wrapper = func_wrapper
396
+ self.kwargs = kwargs
397
+ self.jac = self.kwargs.get('jac', None)
398
+ self.minimizer = minimize
399
+ bounds_list = list(zip(*search_bounds))
400
+ self.lower = np.array(bounds_list[0])
401
+ self.upper = np.array(bounds_list[1])
402
+
403
+ # If no minimizer specified, use SciPy minimize with 'L-BFGS-B' method
404
+ if not self.kwargs:
405
+ n = len(self.lower)
406
+ ls_max_iter = min(max(n * self.LS_MAXITER_RATIO,
407
+ self.LS_MAXITER_MIN),
408
+ self.LS_MAXITER_MAX)
409
+ self.kwargs['method'] = 'L-BFGS-B'
410
+ self.kwargs['options'] = {
411
+ 'maxiter': ls_max_iter,
412
+ }
413
+ self.kwargs['bounds'] = list(zip(self.lower, self.upper))
414
+ elif callable(self.jac):
415
+ def wrapped_jac(x):
416
+ return self.jac(x, *args)
417
+ self.kwargs['jac'] = wrapped_jac
418
+
419
+ def local_search(self, x, e):
420
+ # Run local search from the given x location where energy value is e
421
+ x_tmp = np.copy(x)
422
+ mres = self.minimizer(self.func_wrapper.fun, x, **self.kwargs)
423
+ if 'njev' in mres:
424
+ self.func_wrapper.ngev += mres.njev
425
+ if 'nhev' in mres:
426
+ self.func_wrapper.nhev += mres.nhev
427
+ # Check if is valid value
428
+ is_finite = np.all(np.isfinite(mres.x)) and np.isfinite(mres.fun)
429
+ in_bounds = np.all(mres.x >= self.lower) and np.all(
430
+ mres.x <= self.upper)
431
+ is_valid = is_finite and in_bounds
432
+
433
+ # Use the new point only if it is valid and yields a better result
434
+ if is_valid and mres.fun < e:
435
+ return mres.fun, mres.x
436
+ else:
437
+ return e, x_tmp
438
+
439
+
440
+ def dual_annealing(func, bounds, args=(), maxiter=1000,
441
+ minimizer_kwargs=None, initial_temp=5230.,
442
+ restart_temp_ratio=2.e-5, visit=2.62, accept=-5.0,
443
+ maxfun=1e7, seed=None, no_local_search=False,
444
+ callback=None, x0=None):
445
+ """
446
+ Find the global minimum of a function using Dual Annealing.
447
+
448
+ Parameters
449
+ ----------
450
+ func : callable
451
+ The objective function to be minimized. Must be in the form
452
+ ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array
453
+ and ``args`` is a tuple of any additional fixed parameters needed to
454
+ completely specify the function.
455
+ bounds : sequence or `Bounds`
456
+ Bounds for variables. There are two ways to specify the bounds:
457
+
458
+ 1. Instance of `Bounds` class.
459
+ 2. Sequence of ``(min, max)`` pairs for each element in `x`.
460
+
461
+ args : tuple, optional
462
+ Any additional fixed parameters needed to completely specify the
463
+ objective function.
464
+ maxiter : int, optional
465
+ The maximum number of global search iterations. Default value is 1000.
466
+ minimizer_kwargs : dict, optional
467
+ Extra keyword arguments to be passed to the local minimizer
468
+ (`minimize`). Some important options could be:
469
+ ``method`` for the minimizer method to use and ``args`` for
470
+ objective function additional arguments.
471
+ initial_temp : float, optional
472
+ The initial temperature; use higher values to facilitate a wider
473
+ search of the energy landscape, allowing dual_annealing to escape
474
+ local minima that it is trapped in. Default value is 5230. Range is
475
+ (0.01, 5.e4].
476
+ restart_temp_ratio : float, optional
477
+ During the annealing process, the temperature is decreasing; when it
478
+ reaches ``initial_temp * restart_temp_ratio``, the reannealing process
479
+ is triggered. Default value of the ratio is 2e-5. Range is (0, 1).
480
+ visit : float, optional
481
+ Parameter for visiting distribution. Default value is 2.62. Higher
482
+ values give the visiting distribution a heavier tail, which makes
483
+ the algorithm jump to a more distant region. The value range is (1, 3].
484
+ accept : float, optional
485
+ Parameter for acceptance distribution. It is used to control the
486
+ probability of acceptance. The lower the acceptance parameter, the
487
+ smaller the probability of acceptance. Default value is -5.0 with
488
+ a range (-1e4, -5].
489
+ maxfun : int, optional
490
+ Soft limit for the number of objective function calls. If the
491
+ algorithm is in the middle of a local search, this number will be
492
+ exceeded, and the algorithm will stop just after the local search is
493
+ done. Default value is 1e7.
494
+ seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
495
+ If `seed` is None (or `np.random`), the `numpy.random.RandomState`
496
+ singleton is used.
497
+ If `seed` is an int, a new ``RandomState`` instance is used,
498
+ seeded with `seed`.
499
+ If `seed` is already a ``Generator`` or ``RandomState`` instance then
500
+ that instance is used.
501
+ Specify `seed` for repeatable minimizations. The random numbers
502
+ generated with this seed only affect the visiting distribution function
503
+ and new coordinates generation.
504
+ no_local_search : bool, optional
505
+ If `no_local_search` is set to True, a traditional Generalized
506
+ Simulated Annealing will be performed with no local search
507
+ strategy applied.
508
+ callback : callable, optional
509
+ A callback function with signature ``callback(x, f, context)``,
510
+ which will be called for all minima found.
511
+ ``x`` and ``f`` are the coordinates and function value of the
512
+ latest minimum found, and ``context`` has value in [0, 1, 2], with the
513
+ following meaning:
514
+
515
+ - 0: minimum detected in the annealing process.
516
+ - 1: detection occurred in the local search process.
517
+ - 2: detection done in the dual annealing process.
518
+
519
+ If the callback implementation returns True, the algorithm will stop.
520
+ x0 : ndarray, shape(n,), optional
521
+ Coordinates of a single N-D starting point.
522
+
523
+ Returns
524
+ -------
525
+ res : OptimizeResult
526
+ The optimization result represented as a `OptimizeResult` object.
527
+ Important attributes are: ``x`` the solution array, ``fun`` the value
528
+ of the function at the solution, and ``message`` which describes the
529
+ cause of the termination.
530
+ See `OptimizeResult` for a description of other attributes.
531
+
532
+ Notes
533
+ -----
534
+ This function implements the Dual Annealing optimization. This stochastic
535
+ approach derived from [3]_ combines the generalization of CSA (Classical
536
+ Simulated Annealing) and FSA (Fast Simulated Annealing) [1]_ [2]_ coupled
537
+ to a strategy for applying a local search on accepted locations [4]_.
538
+ An alternative implementation of this same algorithm is described in [5]_
539
+ and benchmarks are presented in [6]_. This approach introduces an advanced
540
+ method to refine the solution found by the generalized annealing
541
+ process. This algorithm uses a distorted Cauchy-Lorentz visiting
542
+ distribution, with its shape controlled by the parameter :math:`q_{v}`
543
+
544
+ .. math::
545
+
546
+ g_{q_{v}}(\\Delta x(t)) \\propto \\frac{ \\
547
+ \\left[T_{q_{v}}(t) \\right]^{-\\frac{D}{3-q_{v}}}}{ \\
548
+ \\left[{1+(q_{v}-1)\\frac{(\\Delta x(t))^{2}} { \\
549
+ \\left[T_{q_{v}}(t)\\right]^{\\frac{2}{3-q_{v}}}}}\\right]^{ \\
550
+ \\frac{1}{q_{v}-1}+\\frac{D-1}{2}}}
551
+
552
+ Where :math:`t` is the artificial time. This visiting distribution is used
553
+ to generate a trial jump distance :math:`\\Delta x(t)` of variable
554
+ :math:`x(t)` under artificial temperature :math:`T_{q_{v}}(t)`.
555
+
556
+ From the starting point, after calling the visiting distribution
557
+ function, the acceptance probability is computed as follows:
558
+
559
+ .. math::
560
+
561
+ p_{q_{a}} = \\min{\\{1,\\left[1-(1-q_{a}) \\beta \\Delta E \\right]^{ \\
562
+ \\frac{1}{1-q_{a}}}\\}}
563
+
564
+ Where :math:`q_{a}` is an acceptance parameter. For :math:`q_{a}<1`, zero
565
+ acceptance probability is assigned to the cases where
566
+
567
+ .. math::
568
+
569
+ [1-(1-q_{a}) \\beta \\Delta E] < 0
570
+
571
+ The artificial temperature :math:`T_{q_{v}}(t)` is decreased according to
572
+
573
+ .. math::
574
+
575
+ T_{q_{v}}(t) = T_{q_{v}}(1) \\frac{2^{q_{v}-1}-1}{\\left( \\
576
+ 1 + t\\right)^{q_{v}-1}-1}
577
+
578
+ Where :math:`q_{v}` is the visiting parameter.
579
+
580
+ .. versionadded:: 1.2.0
581
+
582
+ References
583
+ ----------
584
+ .. [1] Tsallis C. Possible generalization of Boltzmann-Gibbs
585
+ statistics. Journal of Statistical Physics, 52, 479-487 (1998).
586
+ .. [2] Tsallis C, Stariolo DA. Generalized Simulated Annealing.
587
+ Physica A, 233, 395-406 (1996).
588
+ .. [3] Xiang Y, Sun DY, Fan W, Gong XG. Generalized Simulated
589
+ Annealing Algorithm and Its Application to the Thomson Model.
590
+ Physics Letters A, 233, 216-220 (1997).
591
+ .. [4] Xiang Y, Gong XG. Efficiency of Generalized Simulated
592
+ Annealing. Physical Review E, 62, 4473 (2000).
593
+ .. [5] Xiang Y, Gubian S, Suomela B, Hoeng J. Generalized
594
+ Simulated Annealing for Efficient Global Optimization: the GenSA
595
+ Package for R. The R Journal, Volume 5/1 (2013).
596
+ .. [6] Mullen, K. Continuous Global Optimization in R. Journal of
597
+ Statistical Software, 60(6), 1 - 45, (2014).
598
+ :doi:`10.18637/jss.v060.i06`
599
+
600
+ Examples
601
+ --------
602
+ The following example is a 10-D problem, with many local minima.
603
+ The function involved is called Rastrigin
604
+ (https://en.wikipedia.org/wiki/Rastrigin_function)
605
+
606
+ >>> import numpy as np
607
+ >>> from scipy.optimize import dual_annealing
608
+ >>> func = lambda x: np.sum(x*x - 10*np.cos(2*np.pi*x)) + 10*np.size(x)
609
+ >>> lw = [-5.12] * 10
610
+ >>> up = [5.12] * 10
611
+ >>> ret = dual_annealing(func, bounds=list(zip(lw, up)))
612
+ >>> ret.x
613
+ array([-4.26437714e-09, -3.91699361e-09, -1.86149218e-09, -3.97165720e-09,
614
+ -6.29151648e-09, -6.53145322e-09, -3.93616815e-09, -6.55623025e-09,
615
+ -6.05775280e-09, -5.00668935e-09]) # random
616
+ >>> ret.fun
617
+ 0.000000
618
+
619
+ """
620
+
621
+ if isinstance(bounds, Bounds):
622
+ bounds = new_bounds_to_old(bounds.lb, bounds.ub, len(bounds.lb))
623
+
624
+ if x0 is not None and not len(x0) == len(bounds):
625
+ raise ValueError('Bounds size does not match x0')
626
+
627
+ lu = list(zip(*bounds))
628
+ lower = np.array(lu[0])
629
+ upper = np.array(lu[1])
630
+ # Check that restart temperature ratio is correct
631
+ if restart_temp_ratio <= 0. or restart_temp_ratio >= 1.:
632
+ raise ValueError('Restart temperature ratio has to be in range (0, 1)')
633
+ # Checking bounds are valid
634
+ if (np.any(np.isinf(lower)) or np.any(np.isinf(upper)) or np.any(
635
+ np.isnan(lower)) or np.any(np.isnan(upper))):
636
+ raise ValueError('Some bounds values are inf values or nan values')
637
+ # Checking that bounds are consistent
638
+ if not np.all(lower < upper):
639
+ raise ValueError('Bounds are not consistent min < max')
640
+ # Checking that bounds are the same length
641
+ if not len(lower) == len(upper):
642
+ raise ValueError('Bounds do not have the same dimensions')
643
+
644
+ # Wrapper for the objective function
645
+ func_wrapper = ObjectiveFunWrapper(func, maxfun, *args)
646
+
647
+ # minimizer_kwargs has to be a dict, not None
648
+ minimizer_kwargs = minimizer_kwargs or {}
649
+
650
+ minimizer_wrapper = LocalSearchWrapper(
651
+ bounds, func_wrapper, *args, **minimizer_kwargs)
652
+
653
+ # Initialization of random Generator for reproducible runs if seed provided
654
+ rand_state = check_random_state(seed)
655
+ # Initialization of the energy state
656
+ energy_state = EnergyState(lower, upper, callback)
657
+ energy_state.reset(func_wrapper, rand_state, x0)
658
+ # Minimum value of annealing temperature reached to perform
659
+ # re-annealing
660
+ temperature_restart = initial_temp * restart_temp_ratio
661
+ # VisitingDistribution instance
662
+ visit_dist = VisitingDistribution(lower, upper, visit, rand_state)
663
+ # Strategy chain instance
664
+ strategy_chain = StrategyChain(accept, visit_dist, func_wrapper,
665
+ minimizer_wrapper, rand_state, energy_state)
666
+ need_to_stop = False
667
+ iteration = 0
668
+ message = []
669
+ # OptimizeResult object to be returned
670
+ optimize_res = OptimizeResult()
671
+ optimize_res.success = True
672
+ optimize_res.status = 0
673
+
674
+ t1 = np.exp((visit - 1) * np.log(2.0)) - 1.0
675
+ # Run the search loop
676
+ while not need_to_stop:
677
+ for i in range(maxiter):
678
+ # Compute temperature for this step
679
+ s = float(i) + 2.0
680
+ t2 = np.exp((visit - 1) * np.log(s)) - 1.0
681
+ temperature = initial_temp * t1 / t2
682
+ if iteration >= maxiter:
683
+ message.append("Maximum number of iteration reached")
684
+ need_to_stop = True
685
+ break
686
+ # Need a re-annealing process?
687
+ if temperature < temperature_restart:
688
+ energy_state.reset(func_wrapper, rand_state)
689
+ break
690
+ # starting strategy chain
691
+ val = strategy_chain.run(i, temperature)
692
+ if val is not None:
693
+ message.append(val)
694
+ need_to_stop = True
695
+ optimize_res.success = False
696
+ break
697
+ # Possible local search at the end of the strategy chain
698
+ if not no_local_search:
699
+ val = strategy_chain.local_search()
700
+ if val is not None:
701
+ message.append(val)
702
+ need_to_stop = True
703
+ optimize_res.success = False
704
+ break
705
+ iteration += 1
706
+
707
+ # Setting the OptimizeResult values
708
+ optimize_res.x = energy_state.xbest
709
+ optimize_res.fun = energy_state.ebest
710
+ optimize_res.nit = iteration
711
+ optimize_res.nfev = func_wrapper.nfev
712
+ optimize_res.njev = func_wrapper.ngev
713
+ optimize_res.nhev = func_wrapper.nhev
714
+ optimize_res.message = message
715
+ return optimize_res
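
A minimal sketch (not part of the module) evaluating the visiting temperature schedule T_qv(t) exactly as the search loop above computes it, with the default ``visit=2.62`` and ``initial_temp=5230.``:

    import numpy as np

    visit, initial_temp = 2.62, 5230.
    t1 = np.exp((visit - 1) * np.log(2.0)) - 1.0   # 2**(qv-1) - 1
    for i in (0, 10, 100, 1000):
        t2 = np.exp((visit - 1) * np.log(float(i) + 2.0)) - 1.0
        print(i, initial_temp * t1 / t2)           # monotonically decreasing
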
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_group_columns.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (96 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_hessian_update_strategy.py ADDED
@@ -0,0 +1,430 @@
1
+ """Hessian update strategies for quasi-Newton optimization methods."""
2
+ import numpy as np
3
+ from numpy.linalg import norm
4
+ from scipy.linalg import get_blas_funcs
5
+ from warnings import warn
6
+
7
+
8
+ __all__ = ['HessianUpdateStrategy', 'BFGS', 'SR1']
9
+
10
+
11
+ class HessianUpdateStrategy:
12
+ """Interface for implementing Hessian update strategies.
13
+
14
+ Many optimization methods make use of Hessian (or inverse Hessian)
15
+ approximations, such as the quasi-Newton methods BFGS, SR1, L-BFGS.
16
+ Some of these approximations, however, do not actually need to store
17
+ the entire matrix or can compute the internal matrix product with a
18
+ given vector in a very efficient manner. This class serves as an
19
+ abstract interface between the optimization algorithm and the
20
+ quasi-Newton update strategies, giving freedom of implementation
21
+ to store and update the internal matrix as efficiently as possible.
22
+ Different choices of initialization and update procedure will result
23
+ in different quasi-Newton strategies.
24
+
25
+ Four methods should be implemented in derived classes: ``initialize``,
26
+ ``update``, ``dot`` and ``get_matrix``.
27
+
28
+ Notes
29
+ -----
30
+ Any instance of a class that implements this interface
31
+ can be accepted by the method ``minimize`` and used by
32
+ the compatible solvers to approximate the Hessian (or
33
+ inverse Hessian) used by the optimization algorithms.
34
+ """
35
+
36
+ def initialize(self, n, approx_type):
37
+ """Initialize internal matrix.
38
+
39
+ Allocate internal memory for storing and updating
40
+ the Hessian or its inverse.
41
+
42
+ Parameters
43
+ ----------
44
+ n : int
45
+ Problem dimension.
46
+ approx_type : {'hess', 'inv_hess'}
47
+ Selects either the Hessian or the inverse Hessian.
48
+ When set to 'hess' the Hessian will be stored and updated.
49
+ When set to 'inv_hess' its inverse will be used instead.
50
+ """
51
+ raise NotImplementedError("The method ``initialize(n, approx_type)``"
52
+ " is not implemented.")
53
+
54
+ def update(self, delta_x, delta_grad):
55
+ """Update internal matrix.
56
+
57
+ Update Hessian matrix or its inverse (depending on how 'approx_type'
58
+ is defined) using information about the last evaluated points.
59
+
60
+ Parameters
61
+ ----------
62
+ delta_x : ndarray
63
+ The difference between two points the gradient
64
+ function has been evaluated at: ``delta_x = x2 - x1``.
65
+ delta_grad : ndarray
66
+ The difference between the gradients:
67
+ ``delta_grad = grad(x2) - grad(x1)``.
68
+ """
69
+ raise NotImplementedError("The method ``update(delta_x, delta_grad)``"
70
+ " is not implemented.")
71
+
72
+ def dot(self, p):
73
+ """Compute the product of the internal matrix with the given vector.
74
+
75
+ Parameters
76
+ ----------
77
+ p : array_like
78
+ 1-D array representing a vector.
79
+
80
+ Returns
81
+ -------
82
+ Hp : array
83
+ 1-D array representing the result of multiplying the approximation matrix
84
+ by vector p.
85
+ """
86
+ raise NotImplementedError("The method ``dot(p)``"
87
+ " is not implemented.")
88
+
89
+ def get_matrix(self):
90
+ """Return current internal matrix.
91
+
92
+ Returns
93
+ -------
94
+ H : ndarray, shape (n, n)
95
+ Dense matrix containing either the Hessian
96
+ or its inverse (depending on how 'approx_type'
97
+ is defined).
98
+ """
99
+ raise NotImplementedError("The method ``get_matrix(p)``"
100
+ " is not implemented.")
101
+
102
+
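
A minimal sketch (hypothetical; `ScaledIdentity` is not part of SciPy) of the four-method interface above, modeling the Hessian as a fixed multiple of the identity:

    import numpy as np
    from scipy.optimize import HessianUpdateStrategy

    class ScaledIdentity(HessianUpdateStrategy):
        """Hypothetical strategy: the approximation is always scale * I."""
        def __init__(self, scale=1.0):
            self.scale = scale

        def initialize(self, n, approx_type):
            self.n, self.approx_type = n, approx_type

        def update(self, delta_x, delta_grad):
            pass  # a real strategy would refine the approximation here

        def dot(self, p):
            return self.scale * np.asarray(p)

        def get_matrix(self):
            return self.scale * np.eye(self.n)
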
103
+ class FullHessianUpdateStrategy(HessianUpdateStrategy):
104
+ """Hessian update strategy with full dimensional internal representation.
105
+ """
106
+ _syr = get_blas_funcs('syr', dtype='d') # Symmetric rank 1 update
107
+ _syr2 = get_blas_funcs('syr2', dtype='d') # Symmetric rank 2 update
108
+ # Symmetric matrix-vector product
109
+ _symv = get_blas_funcs('symv', dtype='d')
110
+
111
+ def __init__(self, init_scale='auto'):
112
+ self.init_scale = init_scale
113
+ # Until initialize is called we can't really use the class,
114
+ # so it makes sense to set everything to None.
115
+ self.first_iteration = None
116
+ self.approx_type = None
117
+ self.B = None
118
+ self.H = None
119
+
120
+ def initialize(self, n, approx_type):
121
+ """Initialize internal matrix.
122
+
123
+ Allocate internal memory for storing and updating
124
+ the Hessian or its inverse.
125
+
126
+ Parameters
127
+ ----------
128
+ n : int
129
+ Problem dimension.
130
+ approx_type : {'hess', 'inv_hess'}
131
+ Selects either the Hessian or the inverse Hessian.
132
+ When set to 'hess' the Hessian will be stored and updated.
133
+ When set to 'inv_hess' its inverse will be used instead.
134
+ """
135
+ self.first_iteration = True
136
+ self.n = n
137
+ self.approx_type = approx_type
138
+ if approx_type not in ('hess', 'inv_hess'):
139
+ raise ValueError("`approx_type` must be 'hess' or 'inv_hess'.")
140
+ # Create matrix
141
+ if self.approx_type == 'hess':
142
+ self.B = np.eye(n, dtype=float)
143
+ else:
144
+ self.H = np.eye(n, dtype=float)
145
+
146
+ def _auto_scale(self, delta_x, delta_grad):
147
+ # Heuristic to scale matrix at first iteration.
148
+ # Described in Nocedal and Wright "Numerical Optimization"
149
+ # p.143 formula (6.20).
150
+ s_norm2 = np.dot(delta_x, delta_x)
151
+ y_norm2 = np.dot(delta_grad, delta_grad)
152
+ ys = np.abs(np.dot(delta_grad, delta_x))
153
+ if ys == 0.0 or y_norm2 == 0 or s_norm2 == 0:
154
+ return 1
155
+ if self.approx_type == 'hess':
156
+ return y_norm2 / ys
157
+ else:
158
+ return ys / y_norm2
159
+
160
+ def _update_implementation(self, delta_x, delta_grad):
161
+ raise NotImplementedError("The method ``_update_implementation``"
162
+ " is not implemented.")
163
+
164
+ def update(self, delta_x, delta_grad):
165
+ """Update internal matrix.
166
+
167
+ Update Hessian matrix or its inverse (depending on how 'approx_type'
168
+ is defined) using information about the last evaluated points.
169
+
170
+ Parameters
171
+ ----------
172
+ delta_x : ndarray
173
+ The difference between two points the gradient
174
+ function has been evaluated at: ``delta_x = x2 - x1``.
175
+ delta_grad : ndarray
176
+ The difference between the gradients:
177
+ ``delta_grad = grad(x2) - grad(x1)``.
178
+ """
179
+ if np.all(delta_x == 0.0):
180
+ return
181
+ if np.all(delta_grad == 0.0):
182
+ warn('delta_grad == 0.0. Check if the approximated '
183
+ 'function is linear. If the function is linear '
184
+ 'better results can be obtained by defining the '
185
+ 'Hessian as zero instead of using quasi-Newton '
186
+ 'approximations.',
187
+ UserWarning, stacklevel=2)
188
+ return
189
+ if self.first_iteration:
190
+ # Get user specific scale
191
+ if self.init_scale == "auto":
192
+ scale = self._auto_scale(delta_x, delta_grad)
193
+ else:
194
+ scale = float(self.init_scale)
195
+ # Scale initial matrix with ``scale * np.eye(n)``
196
+ if self.approx_type == 'hess':
197
+ self.B *= scale
198
+ else:
199
+ self.H *= scale
200
+ self.first_iteration = False
201
+ self._update_implementation(delta_x, delta_grad)
202
+
203
+ def dot(self, p):
204
+ """Compute the product of the internal matrix with the given vector.
205
+
206
+ Parameters
207
+ ----------
208
+ p : array_like
209
+ 1-D array representing a vector.
210
+
211
+ Returns
212
+ -------
213
+ Hp : array
214
+ 1-D array representing the result of multiplying the approximation matrix
215
+ by vector p.
216
+ """
217
+ if self.approx_type == 'hess':
218
+ return self._symv(1, self.B, p)
219
+ else:
220
+ return self._symv(1, self.H, p)
221
+
222
+ def get_matrix(self):
223
+ """Return the current internal matrix.
224
+
225
+ Returns
226
+ -------
227
+ M : ndarray, shape (n, n)
228
+ Dense matrix containing either the Hessian or its inverse
229
+ (depending on how `approx_type` was defined).
230
+ """
231
+ if self.approx_type == 'hess':
232
+ M = np.copy(self.B)
233
+ else:
234
+ M = np.copy(self.H)
235
+ li = np.tril_indices_from(M, k=-1)
236
+ M[li] = M.T[li]
237
+ return M
238
+
239
+
240
+ class BFGS(FullHessianUpdateStrategy):
241
+ """Broyden-Fletcher-Goldfarb-Shanno (BFGS) Hessian update strategy.
242
+
243
+ Parameters
244
+ ----------
245
+ exception_strategy : {'skip_update', 'damp_update'}, optional
246
+ Define how to proceed when the curvature condition is violated.
247
+ Set it to 'skip_update' to just skip the update. Or, alternatively,
248
+ set it to 'damp_update' to interpolate between the actual BFGS
249
+ result and the unmodified matrix. Both exceptions strategies
250
+ are explained in [1]_, p.536-537.
251
+ min_curvature : float
252
+ This number, scaled by a normalization factor, defines the
253
+ minimum curvature ``dot(delta_grad, delta_x)`` allowed to go
254
+ unaffected by the exception strategy. By default is equal to
255
+ 1e-8 when ``exception_strategy = 'skip_update'`` and equal
256
+ to 0.2 when ``exception_strategy = 'damp_update'``.
257
+ init_scale : {float, 'auto'}
258
+ Matrix scale at first iteration. At the first
259
+ iteration the Hessian matrix or its inverse will be initialized
260
+ with ``init_scale*np.eye(n)``, where ``n`` is the problem dimension.
261
+ Set it to 'auto' in order to use an automatic heuristic for choosing
262
+ the initial scale. The heuristic is described in [1]_, p.143.
263
+ By default uses 'auto'.
264
+
265
+ Notes
266
+ -----
267
+ The update is based on the description in [1]_, p.140.
268
+
269
+ References
270
+ ----------
271
+ .. [1] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization"
272
+ Second Edition (2006).
273
+ """
274
+
275
+ def __init__(self, exception_strategy='skip_update', min_curvature=None,
276
+ init_scale='auto'):
277
+ if exception_strategy == 'skip_update':
278
+ if min_curvature is not None:
279
+ self.min_curvature = min_curvature
280
+ else:
281
+ self.min_curvature = 1e-8
282
+ elif exception_strategy == 'damp_update':
283
+ if min_curvature is not None:
284
+ self.min_curvature = min_curvature
285
+ else:
286
+ self.min_curvature = 0.2
287
+ else:
288
+ raise ValueError("`exception_strategy` must be 'skip_update' "
289
+ "or 'damp_update'.")
290
+
291
+ super().__init__(init_scale)
292
+ self.exception_strategy = exception_strategy
293
+
294
+ def _update_inverse_hessian(self, ys, Hy, yHy, s):
295
+ """Update the inverse Hessian matrix.
296
+
297
+ BFGS update using the formula:
298
+
299
+ ``H <- H + ((H*y).T*y + s.T*y)/(s.T*y)^2 * (s*s.T)
300
+ - 1/(s.T*y) * ((H*y)*s.T + s*(H*y).T)``
301
+
302
+ where ``s = delta_x`` and ``y = delta_grad``. This formula is
303
+ equivalent to (6.17) in [1]_ written in a more efficient way
304
+ for implementation.
305
+
306
+ References
307
+ ----------
308
+ .. [1] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization"
309
+ Second Edition (2006).
310
+ """
311
+ self.H = self._syr2(-1.0 / ys, s, Hy, a=self.H)
312
+ self.H = self._syr((ys+yHy)/ys**2, s, a=self.H)
313
+
314
+ def _update_hessian(self, ys, Bs, sBs, y):
315
+ """Update the Hessian matrix.
316
+
317
+ BFGS update using the formula:
318
+
319
+ ``B <- B - (B*s)*(B*s).T/s.T*(B*s) + y*y^T/s.T*y``
320
+
321
+ where ``s`` is short for ``delta_x`` and ``y`` is short
322
+ for ``delta_grad``. Formula (6.19) in [1]_.
323
+
324
+ References
325
+ ----------
326
+ .. [1] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization"
327
+ Second Edition (2006).
328
+ """
329
+ self.B = self._syr(1.0 / ys, y, a=self.B)
330
+ self.B = self._syr(-1.0 / sBs, Bs, a=self.B)
331
+
332
+ def _update_implementation(self, delta_x, delta_grad):
333
+ # Auxiliary variables w and z
334
+ if self.approx_type == 'hess':
335
+ w = delta_x
336
+ z = delta_grad
337
+ else:
338
+ w = delta_grad
339
+ z = delta_x
340
+ # Do some common operations
341
+ wz = np.dot(w, z)
342
+ Mw = self.dot(w)
343
+ wMw = Mw.dot(w)
344
+ # Guarantee that wMw > 0 by reinitializing matrix.
345
+ # While this is always true in exact arithmetic,
346
+ # an indefinite matrix may appear due to roundoff errors.
347
+ if wMw <= 0.0:
348
+ scale = self._auto_scale(delta_x, delta_grad)
349
+ # Reinitialize matrix
350
+ if self.approx_type == 'hess':
351
+ self.B = scale * np.eye(self.n, dtype=float)
352
+ else:
353
+ self.H = scale * np.eye(self.n, dtype=float)
354
+ # Do common operations for new matrix
355
+ Mw = self.dot(w)
356
+ wMw = Mw.dot(w)
357
+ # Check if curvature condition is violated
358
+ if wz <= self.min_curvature * wMw:
359
+ # If the option 'skip_update' is set
360
+ # we just skip the update when the condition
361
+ # is violated.
362
+ if self.exception_strategy == 'skip_update':
363
+ return
364
+ # If the option 'damp_update' is set we
365
+ # interpolate between the actual BFGS
366
+ # result and the unmodified matrix.
367
+ elif self.exception_strategy == 'damp_update':
368
+ update_factor = (1-self.min_curvature) / (1 - wz/wMw)
369
+ z = update_factor*z + (1-update_factor)*Mw
370
+ wz = np.dot(w, z)
371
+ # Update matrix
372
+ if self.approx_type == 'hess':
373
+ self._update_hessian(wz, Mw, wMw, z)
374
+ else:
375
+ self._update_inverse_hessian(wz, Mw, wMw, z)
376
+
377
+
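A usage sketch for this class: `scipy.optimize.minimize` with ``method='trust-constr'`` accepts a `BFGS` instance as its Hessian approximation. The Rosenbrock function here is just a stock test problem:

```python
# Usage sketch: BFGS as the quasi-Newton Hessian approximation inside
# ``minimize(method='trust-constr')``.
import numpy as np
from scipy.optimize import minimize, BFGS

def rosen(x):
    return 100.0 * (x[1] - x[0]**2)**2 + (1.0 - x[0])**2

def rosen_grad(x):
    return np.array([-400.0 * x[0] * (x[1] - x[0]**2) - 2.0 * (1.0 - x[0]),
                     200.0 * (x[1] - x[0]**2)])

res = minimize(rosen, np.array([-1.2, 1.0]), jac=rosen_grad,
               method='trust-constr', hess=BFGS())
print(res.x)  # converges toward [1.0, 1.0]
```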
378
+ class SR1(FullHessianUpdateStrategy):
379
+ """Symmetric-rank-1 Hessian update strategy.
380
+
381
+ Parameters
382
+ ----------
383
+ min_denominator : float
384
+ This number, scaled by a normalization factor,
385
+ defines the minimum denominator magnitude allowed
386
+ in the update. When the condition is violated we skip
387
+ the update. By default uses ``1e-8``.
388
+ init_scale : {float, 'auto'}, optional
389
+ Matrix scale at first iteration. At the first
390
+ iteration the Hessian matrix or its inverse will be initialized
391
+ with ``init_scale*np.eye(n)``, where ``n`` is the problem dimension.
392
+ Set it to 'auto' in order to use an automatic heuristic for choosing
393
+ the initial scale. The heuristic is described in [1]_, p.143.
394
+ By default uses 'auto'.
395
+
396
+ Notes
397
+ -----
398
+ The update is based on the description in [1]_, p.144-146.
399
+
400
+ References
401
+ ----------
402
+ .. [1] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization"
403
+ Second Edition (2006).
404
+ """
405
+
406
+ def __init__(self, min_denominator=1e-8, init_scale='auto'):
407
+ self.min_denominator = min_denominator
408
+ super().__init__(init_scale)
409
+
410
+ def _update_implementation(self, delta_x, delta_grad):
411
+ # Auxiliary variables w and z
412
+ if self.approx_type == 'hess':
413
+ w = delta_x
414
+ z = delta_grad
415
+ else:
416
+ w = delta_grad
417
+ z = delta_x
418
+ # Do some common operations
419
+ Mw = self.dot(w)
420
+ z_minus_Mw = z - Mw
421
+ denominator = np.dot(w, z_minus_Mw)
422
+ # If the denominator is too small
423
+ # we just skip the update.
424
+ if np.abs(denominator) <= self.min_denominator*norm(w)*norm(z_minus_Mw):
425
+ return
426
+ # Update matrix
427
+ if self.approx_type == 'hess':
428
+ self.B = self._syr(1/denominator, z_minus_Mw, a=self.B)
429
+ else:
430
+ self.H = self._syr(1/denominator, z_minus_Mw, a=self.H)
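As a quick illustrative check of the rank-1 update: after a single ``update(delta_x, delta_grad)`` from an identity start, the SR1 matrix satisfies the secant equation ``B @ delta_x == delta_grad`` exactly. This uses only the public ``initialize``/``update``/``get_matrix`` interface; random data here almost surely avoids the small-denominator skip.

```python
# Illustrative check: one SR1 update enforces the secant equation exactly.
import numpy as np
from scipy.optimize import SR1

rng = np.random.default_rng(1)
B = SR1(init_scale=1.0)
B.initialize(n=3, approx_type='hess')
delta_x = rng.standard_normal(3)
delta_grad = rng.standard_normal(3)
B.update(delta_x, delta_grad)
assert np.allclose(B.get_matrix() @ delta_x, delta_grad)
```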
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_highs/_highs_constants.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (36.1 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsLp.pxd ADDED
@@ -0,0 +1,46 @@
1
+ # cython: language_level=3
2
+
3
+ from libcpp cimport bool
4
+ from libcpp.string cimport string
5
+ from libcpp.vector cimport vector
6
+
7
+ from .HConst cimport HighsBasisStatus, ObjSense, HighsVarType
8
+ from .HighsSparseMatrix cimport HighsSparseMatrix
9
+
10
+
11
+ cdef extern from "HighsLp.h" nogil:
12
+ # From HiGHS/src/lp_data/HighsLp.h
13
+ cdef cppclass HighsLp:
14
+ int num_col_
15
+ int num_row_
16
+
17
+ vector[double] col_cost_
18
+ vector[double] col_lower_
19
+ vector[double] col_upper_
20
+ vector[double] row_lower_
21
+ vector[double] row_upper_
22
+
23
+ HighsSparseMatrix a_matrix_
24
+
25
+ ObjSense sense_
26
+ double offset_
27
+
28
+ string model_name_
29
+
30
+ vector[string] row_names_
31
+ vector[string] col_names_
32
+
33
+ vector[HighsVarType] integrality_
34
+
35
+ bool isMip() const
36
+
37
+ cdef cppclass HighsSolution:
38
+ vector[double] col_value
39
+ vector[double] col_dual
40
+ vector[double] row_value
41
+ vector[double] row_dual
42
+
43
+ cdef cppclass HighsBasis:
44
+ bool valid_
45
+ vector[HighsBasisStatus] col_status
46
+ vector[HighsBasisStatus] row_status
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lbfgsb.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (125 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lbfgsb_py.py ADDED
@@ -0,0 +1,543 @@
1
+ """
2
+ Functions
3
+ ---------
4
+ .. autosummary::
5
+ :toctree: generated/
6
+
7
+ fmin_l_bfgs_b
8
+
9
+ """
10
+
11
+ ## License for the Python wrapper
12
+ ## ==============================
13
+
14
+ ## Copyright (c) 2004 David M. Cooke <[email protected]>
15
+
16
+ ## Permission is hereby granted, free of charge, to any person obtaining a
17
+ ## copy of this software and associated documentation files (the "Software"),
18
+ ## to deal in the Software without restriction, including without limitation
19
+ ## the rights to use, copy, modify, merge, publish, distribute, sublicense,
20
+ ## and/or sell copies of the Software, and to permit persons to whom the
21
+ ## Software is furnished to do so, subject to the following conditions:
22
+
23
+ ## The above copyright notice and this permission notice shall be included in
24
+ ## all copies or substantial portions of the Software.
25
+
26
+ ## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
27
+ ## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
28
+ ## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
29
+ ## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
30
+ ## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
31
+ ## FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
32
+ ## DEALINGS IN THE SOFTWARE.
33
+
34
+ ## Modifications by Travis Oliphant and Enthought, Inc. for inclusion in SciPy
35
+
36
+ import numpy as np
37
+ from numpy import array, asarray, float64, zeros
38
+ from . import _lbfgsb
39
+ from ._optimize import (MemoizeJac, OptimizeResult, _call_callback_maybe_halt,
40
+ _wrap_callback, _check_unknown_options,
41
+ _prepare_scalar_function)
42
+ from ._constraints import old_bound_to_new
43
+
44
+ from scipy.sparse.linalg import LinearOperator
45
+
46
+ __all__ = ['fmin_l_bfgs_b', 'LbfgsInvHessProduct']
47
+
48
+
49
+ def fmin_l_bfgs_b(func, x0, fprime=None, args=(),
50
+ approx_grad=0,
51
+ bounds=None, m=10, factr=1e7, pgtol=1e-5,
52
+ epsilon=1e-8,
53
+ iprint=-1, maxfun=15000, maxiter=15000, disp=None,
54
+ callback=None, maxls=20):
55
+ """
56
+ Minimize a function func using the L-BFGS-B algorithm.
57
+
58
+ Parameters
59
+ ----------
60
+ func : callable f(x,*args)
61
+ Function to minimize.
62
+ x0 : ndarray
63
+ Initial guess.
64
+ fprime : callable fprime(x,*args), optional
65
+ The gradient of `func`. If None, then `func` returns the function
66
+ value and the gradient (``f, g = func(x, *args)``), unless
67
+ `approx_grad` is True in which case `func` returns only ``f``.
68
+ args : sequence, optional
69
+ Arguments to pass to `func` and `fprime`.
70
+ approx_grad : bool, optional
71
+ Whether to approximate the gradient numerically (in which case
72
+ `func` returns only the function value).
73
+ bounds : list, optional
74
+ ``(min, max)`` pairs for each element in ``x``, defining
75
+ the bounds on that parameter. Use None or +-inf for one of ``min`` or
76
+ ``max`` when there is no bound in that direction.
77
+ m : int, optional
78
+ The maximum number of variable metric corrections
79
+ used to define the limited memory matrix. (The limited memory BFGS
80
+ method does not store the full hessian but uses this many terms in an
81
+ approximation to it.)
82
+ factr : float, optional
83
+ The iteration stops when
84
+ ``(f^k - f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= factr * eps``,
85
+ where ``eps`` is the machine precision, which is automatically
86
+ generated by the code. Typical values for `factr` are: 1e12 for
87
+ low accuracy; 1e7 for moderate accuracy; 10.0 for extremely
88
+ high accuracy. See Notes for relationship to `ftol`, which is exposed
89
+ (instead of `factr`) by the `scipy.optimize.minimize` interface to
90
+ L-BFGS-B.
91
+ pgtol : float, optional
92
+ The iteration will stop when
93
+ ``max{|proj g_i | i = 1, ..., n} <= pgtol``
94
+ where ``proj g_i`` is the i-th component of the projected gradient.
95
+ epsilon : float, optional
96
+ Step size used when `approx_grad` is True, for numerically
97
+ calculating the gradient
98
+ iprint : int, optional
99
+ Controls the frequency of output. ``iprint < 0`` means no output;
100
+ ``iprint = 0`` print only one line at the last iteration;
101
+ ``0 < iprint < 99`` print also f and ``|proj g|`` every iprint iterations;
102
+ ``iprint = 99`` print details of every iteration except n-vectors;
103
+ ``iprint = 100`` print also the changes of active set and final x;
104
+ ``iprint > 100`` print details of every iteration including x and g.
105
+ disp : int, optional
106
+ If zero, then no output. If a positive number, then this over-rides
107
+ `iprint` (i.e., `iprint` gets the value of `disp`).
108
+ maxfun : int, optional
109
+ Maximum number of function evaluations. Note that this function
110
+ may violate the limit because of evaluating gradients by numerical
111
+ differentiation.
112
+ maxiter : int, optional
113
+ Maximum number of iterations.
114
+ callback : callable, optional
115
+ Called after each iteration, as ``callback(xk)``, where ``xk`` is the
116
+ current parameter vector.
117
+ maxls : int, optional
118
+ Maximum number of line search steps (per iteration). Default is 20.
119
+
120
+ Returns
121
+ -------
122
+ x : array_like
123
+ Estimated position of the minimum.
124
+ f : float
125
+ Value of `func` at the minimum.
126
+ d : dict
127
+ Information dictionary.
128
+
129
+ * d['warnflag'] is
130
+
131
+ - 0 if converged,
132
+ - 1 if too many function evaluations or too many iterations,
133
+ - 2 if stopped for another reason, given in d['task']
134
+
135
+ * d['grad'] is the gradient at the minimum (should be close to 0)
136
+ * d['funcalls'] is the number of function calls made.
137
+ * d['nit'] is the number of iterations.
138
+
139
+ See also
140
+ --------
141
+ minimize: Interface to minimization algorithms for multivariate
142
+ functions. See the 'L-BFGS-B' `method` in particular. Note that the
143
+ `ftol` option is made available via that interface, while `factr` is
144
+ provided via this interface, where `factr` is the factor multiplying
145
+ the default machine floating-point precision to arrive at `ftol`:
146
+ ``ftol = factr * numpy.finfo(float).eps``.
147
+
148
+ Notes
149
+ -----
150
+ License of L-BFGS-B (FORTRAN code):
151
+
152
+ The version included here (in fortran code) is 3.0
153
+ (released April 25, 2011). It was written by Ciyou Zhu, Richard Byrd,
154
+ and Jorge Nocedal <[email protected]>. It carries the following
155
+ condition for use:
156
+
157
+ This software is freely available, but we expect that all publications
158
+ describing work using this software, or all commercial products using it,
159
+ quote at least one of the references given below. This software is released
160
+ under the BSD License.
161
+
162
+ References
163
+ ----------
164
+ * R. H. Byrd, P. Lu and J. Nocedal. A Limited Memory Algorithm for Bound
165
+ Constrained Optimization, (1995), SIAM Journal on Scientific and
166
+ Statistical Computing, 16, 5, pp. 1190-1208.
167
+ * C. Zhu, R. H. Byrd and J. Nocedal. L-BFGS-B: Algorithm 778: L-BFGS-B,
168
+ FORTRAN routines for large scale bound constrained optimization (1997),
169
+ ACM Transactions on Mathematical Software, 23, 4, pp. 550 - 560.
170
+ * J.L. Morales and J. Nocedal. L-BFGS-B: Remark on Algorithm 778: L-BFGS-B,
171
+ FORTRAN routines for large scale bound constrained optimization (2011),
172
+ ACM Transactions on Mathematical Software, 38, 1.
173
+
174
+ Examples
175
+ --------
176
+ Solve a linear regression problem via `fmin_l_bfgs_b`. To do this, first we define
177
+ an objective function ``f(m, b) = (y - y_model)**2``, where `y` describes the
178
+ observations and `y_model` the prediction of the linear model as
179
+ ``y_model = m*x + b``. The bounds for the parameters, ``m`` and ``b``, are arbitrarily
180
+ chosen as ``(0,5)`` and ``(5,10)`` for this example.
181
+
182
+ >>> import numpy as np
183
+ >>> from scipy.optimize import fmin_l_bfgs_b
184
+ >>> X = np.arange(0, 10, 1)
185
+ >>> M = 2
186
+ >>> B = 3
187
+ >>> Y = M * X + B
188
+ >>> def func(parameters, *args):
189
+ ... x = args[0]
190
+ ... y = args[1]
191
+ ... m, b = parameters
192
+ ... y_model = m*x + b
193
+ ... error = sum(np.power((y - y_model), 2))
194
+ ... return error
195
+
196
+ >>> initial_values = np.array([0.0, 1.0])
197
+
198
+ >>> x_opt, f_opt, info = fmin_l_bfgs_b(func, x0=initial_values, args=(X, Y),
199
+ ... approx_grad=True)
200
+ >>> x_opt, f_opt
201
+ (array([1.99999999, 3.00000006]), 1.7746231151323805e-14) # may vary
202
+
203
+ The optimized parameters in ``x_opt`` agree with the ground truth parameters
204
+ ``m`` and ``b``. Next, let us perform a bound constrained optimization using the `bounds`
205
+ parameter.
206
+
207
+ >>> bounds = [(0, 5), (5, 10)]
208
+ >>> x_opt, f_opt, info = fmin_l_bfgs_b(func, x0=initial_values, args=(X, Y),
209
+ ... approx_grad=True, bounds=bounds)
210
+ >>> x_opt, f_opt
211
+ (array([1.65990508, 5.31649385]), 15.721334516453945) # may vary
212
+ """
213
+ # handle fprime/approx_grad
214
+ if approx_grad:
215
+ fun = func
216
+ jac = None
217
+ elif fprime is None:
218
+ fun = MemoizeJac(func)
219
+ jac = fun.derivative
220
+ else:
221
+ fun = func
222
+ jac = fprime
223
+
224
+ # build options
225
+ callback = _wrap_callback(callback)
226
+ opts = {'disp': disp,
227
+ 'iprint': iprint,
228
+ 'maxcor': m,
229
+ 'ftol': factr * np.finfo(float).eps,
230
+ 'gtol': pgtol,
231
+ 'eps': epsilon,
232
+ 'maxfun': maxfun,
233
+ 'maxiter': maxiter,
234
+ 'callback': callback,
235
+ 'maxls': maxls}
236
+
237
+ res = _minimize_lbfgsb(fun, x0, args=args, jac=jac, bounds=bounds,
238
+ **opts)
239
+ d = {'grad': res['jac'],
240
+ 'task': res['message'],
241
+ 'funcalls': res['nfev'],
242
+ 'nit': res['nit'],
243
+ 'warnflag': res['status']}
244
+ f = res['fun']
245
+ x = res['x']
246
+
247
+ return x, f, d
248
+
249
+
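The ``factr``/``ftol`` relationship noted in the docstring above is a one-line conversion; an illustrative computation:

```python
# Illustrative conversion between the ``factr`` exposed here and the
# ``ftol`` exposed by ``minimize(method='L-BFGS-B')``:
# ftol = factr * machine epsilon.
import numpy as np

factr = 1e7                          # "moderate accuracy" per the docstring
ftol = factr * np.finfo(float).eps
print(ftol)                          # ~2.22e-09, the default ftol
```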
250
+ def _minimize_lbfgsb(fun, x0, args=(), jac=None, bounds=None,
251
+ disp=None, maxcor=10, ftol=2.2204460492503131e-09,
252
+ gtol=1e-5, eps=1e-8, maxfun=15000, maxiter=15000,
253
+ iprint=-1, callback=None, maxls=20,
254
+ finite_diff_rel_step=None, **unknown_options):
255
+ """
256
+ Minimize a scalar function of one or more variables using the L-BFGS-B
257
+ algorithm.
258
+
259
+ Options
260
+ -------
261
+ disp : None or int
262
+ If `disp is None` (the default), then the supplied version of `iprint`
263
+ is used. If `disp is not None`, then it overrides the supplied
+ `iprint` (i.e., `iprint` gets the value of `disp`).
265
+ maxcor : int
266
+ The maximum number of variable metric corrections used to
267
+ define the limited memory matrix. (The limited memory BFGS
268
+ method does not store the full hessian but uses this many terms
269
+ in an approximation to it.)
270
+ ftol : float
271
+ The iteration stops when ``(f^k -
272
+ f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= ftol``.
273
+ gtol : float
274
+ The iteration will stop when ``max{|proj g_i | i = 1, ..., n}
275
+ <= gtol`` where ``proj g_i`` is the i-th component of the
276
+ projected gradient.
277
+ eps : float or ndarray
278
+ If `jac is None` the absolute step size used for numerical
279
+ approximation of the jacobian via forward differences.
280
+ maxfun : int
281
+ Maximum number of function evaluations. Note that this function
282
+ may violate the limit because of evaluating gradients by numerical
283
+ differentiation.
284
+ maxiter : int
285
+ Maximum number of iterations.
286
+ iprint : int, optional
287
+ Controls the frequency of output. ``iprint < 0`` means no output;
288
+ ``iprint = 0`` print only one line at the last iteration;
289
+ ``0 < iprint < 99`` print also f and ``|proj g|`` every iprint iterations;
290
+ ``iprint = 99`` print details of every iteration except n-vectors;
291
+ ``iprint = 100`` print also the changes of active set and final x;
292
+ ``iprint > 100`` print details of every iteration including x and g.
293
+ maxls : int, optional
294
+ Maximum number of line search steps (per iteration). Default is 20.
295
+ finite_diff_rel_step : None or array_like, optional
296
+ If `jac in ['2-point', '3-point', 'cs']` the relative step size to
297
+ use for numerical approximation of the jacobian. The absolute step
298
+ size is computed as ``h = rel_step * sign(x) * max(1, abs(x))``,
299
+ possibly adjusted to fit into the bounds. For ``method='3-point'``
300
+ the sign of `h` is ignored. If None (default) then step is selected
301
+ automatically.
302
+
303
+ Notes
304
+ -----
305
+ The option `ftol` is exposed via the `scipy.optimize.minimize` interface,
306
+ but calling `scipy.optimize.fmin_l_bfgs_b` directly exposes `factr`. The
307
+ relationship between the two is ``ftol = factr * numpy.finfo(float).eps``.
308
+ I.e., `factr` multiplies the default machine floating-point precision to
309
+ arrive at `ftol`.
310
+
311
+ """
312
+ _check_unknown_options(unknown_options)
313
+ m = maxcor
314
+ pgtol = gtol
315
+ factr = ftol / np.finfo(float).eps
316
+
317
+ x0 = asarray(x0).ravel()
318
+ n, = x0.shape
319
+
320
+ # historically old-style bounds were/are expected by lbfgsb.
321
+ # That's still the case but we'll deal with new-style from here on,
322
+ # it's easier
323
+ if bounds is None:
324
+ pass
325
+ elif len(bounds) != n:
326
+ raise ValueError('length of x0 != length of bounds')
327
+ else:
328
+ bounds = np.array(old_bound_to_new(bounds))
329
+
330
+ # check bounds
331
+ if (bounds[0] > bounds[1]).any():
332
+ raise ValueError(
333
+ "LBFGSB - one of the lower bounds is greater than an upper bound."
334
+ )
335
+
336
+ # initial vector must lie within the bounds. Otherwise ScalarFunction and
337
+ # approx_derivative will cause problems
338
+ x0 = np.clip(x0, bounds[0], bounds[1])
339
+
340
+ if disp is not None:
341
+ if disp == 0:
342
+ iprint = -1
343
+ else:
344
+ iprint = disp
345
+
346
+ # _prepare_scalar_function can use bounds=None to represent no bounds
347
+ sf = _prepare_scalar_function(fun, x0, jac=jac, args=args, epsilon=eps,
348
+ bounds=bounds,
349
+ finite_diff_rel_step=finite_diff_rel_step)
350
+
351
+ func_and_grad = sf.fun_and_grad
352
+
353
+ fortran_int = _lbfgsb.types.intvar.dtype
354
+
355
+ nbd = zeros(n, fortran_int)
356
+ low_bnd = zeros(n, float64)
357
+ upper_bnd = zeros(n, float64)
358
+ bounds_map = {(-np.inf, np.inf): 0,
359
+ (1, np.inf): 1,
360
+ (1, 1): 2,
361
+ (-np.inf, 1): 3}
362
+
363
+ if bounds is not None:
364
+ for i in range(0, n):
365
+ l, u = bounds[0, i], bounds[1, i]
366
+ if not np.isinf(l):
367
+ low_bnd[i] = l
368
+ l = 1
369
+ if not np.isinf(u):
370
+ upper_bnd[i] = u
371
+ u = 1
372
+ nbd[i] = bounds_map[l, u]
373
+
374
+ if not maxls > 0:
375
+ raise ValueError('maxls must be positive.')
376
+
377
+ x = array(x0, float64)
378
+ f = array(0.0, float64)
379
+ g = zeros((n,), float64)
380
+ wa = zeros(2*m*n + 5*n + 11*m*m + 8*m, float64)
381
+ iwa = zeros(3*n, fortran_int)
382
+ task = zeros(1, 'S60')
383
+ csave = zeros(1, 'S60')
384
+ lsave = zeros(4, fortran_int)
385
+ isave = zeros(44, fortran_int)
386
+ dsave = zeros(29, float64)
387
+
388
+ task[:] = 'START'
389
+
390
+ n_iterations = 0
391
+
392
+ while 1:
393
+ # g may become float32 if a user provides a function that calculates
394
+ # the Jacobian in float32 (see gh-18730). The underlying Fortran code
395
+ # expects float64, so upcast it
396
+ g = g.astype(np.float64)
397
+ # x, f, g, wa, iwa, task, csave, lsave, isave, dsave = \
398
+ _lbfgsb.setulb(m, x, low_bnd, upper_bnd, nbd, f, g, factr,
399
+ pgtol, wa, iwa, task, iprint, csave, lsave,
400
+ isave, dsave, maxls)
401
+ task_str = task.tobytes()
402
+ if task_str.startswith(b'FG'):
403
+ # The minimization routine wants f and g at the current x.
404
+ # Note that interruptions due to maxfun are postponed
405
+ # until the completion of the current minimization iteration.
406
+ # Overwrite f and g:
407
+ f, g = func_and_grad(x)
408
+ elif task_str.startswith(b'NEW_X'):
409
+ # new iteration
410
+ n_iterations += 1
411
+
412
+ intermediate_result = OptimizeResult(x=x, fun=f)
413
+ if _call_callback_maybe_halt(callback, intermediate_result):
414
+ task[:] = 'STOP: CALLBACK REQUESTED HALT'
415
+ if n_iterations >= maxiter:
416
+ task[:] = 'STOP: TOTAL NO. of ITERATIONS REACHED LIMIT'
417
+ elif sf.nfev > maxfun:
418
+ task[:] = ('STOP: TOTAL NO. of f AND g EVALUATIONS '
419
+ 'EXCEEDS LIMIT')
420
+ else:
421
+ break
422
+
423
+ task_str = task.tobytes().strip(b'\x00').strip()
424
+ if task_str.startswith(b'CONV'):
425
+ warnflag = 0
426
+ elif sf.nfev > maxfun or n_iterations >= maxiter:
427
+ warnflag = 1
428
+ else:
429
+ warnflag = 2
430
+
431
+ # These two portions of the workspace are described in the mainlb
432
+ # subroutine in lbfgsb.f. See line 363.
433
+ s = wa[0: m*n].reshape(m, n)
434
+ y = wa[m*n: 2*m*n].reshape(m, n)
435
+
436
+ # See lbfgsb.f line 160 for this portion of the workspace.
437
+ # isave(31) = the total number of BFGS updates prior the current iteration;
438
+ n_bfgs_updates = isave[30]
439
+
440
+ n_corrs = min(n_bfgs_updates, maxcor)
441
+ hess_inv = LbfgsInvHessProduct(s[:n_corrs], y[:n_corrs])
442
+
443
+ task_str = task_str.decode()
444
+ return OptimizeResult(fun=f, jac=g, nfev=sf.nfev,
445
+ njev=sf.ngev,
446
+ nit=n_iterations, status=warnflag, message=task_str,
447
+ x=x, success=(warnflag == 0), hess_inv=hess_inv)
448
+
449
+
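For reference, the same bound-constrained fit shown in the ``fmin_l_bfgs_b`` docstring can be run through the `minimize` front end that dispatches to ``_minimize_lbfgsb``; a hedged usage sketch:

```python
# Usage sketch: the docstring's linear-regression fit, via ``minimize``.
import numpy as np
from scipy.optimize import minimize

X = np.arange(0, 10, 1.0)
Y = 2.0 * X + 3.0

def func(p, x, y):
    m, b = p
    return np.sum((y - (m * x + b))**2)

res = minimize(func, x0=[0.0, 1.0], args=(X, Y), method='L-BFGS-B',
               bounds=[(0, 5), (5, 10)])
print(res.x, res.fun)   # the bound b >= 5 keeps the fit constrained
```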
450
+ class LbfgsInvHessProduct(LinearOperator):
451
+ """Linear operator for the L-BFGS approximate inverse Hessian.
452
+
453
+ This operator computes the product of a vector with the approximate inverse
454
+ of the Hessian of the objective function, using the L-BFGS limited
455
+ memory approximation to the inverse Hessian, accumulated during the
456
+ optimization.
457
+
458
+ Objects of this class implement the ``scipy.sparse.linalg.LinearOperator``
459
+ interface.
460
+
461
+ Parameters
462
+ ----------
463
+ sk : array_like, shape=(n_corr, n)
464
+ Array of `n_corr` most recent updates to the solution vector.
465
+ (See [1]).
466
+ yk : array_like, shape=(n_corr, n)
467
+ Array of `n_corr` most recent updates to the gradient. (See [1]).
468
+
469
+ References
470
+ ----------
471
+ .. [1] Nocedal, Jorge. "Updating quasi-Newton matrices with limited
472
+ storage." Mathematics of computation 35.151 (1980): 773-782.
473
+
474
+ """
475
+
476
+ def __init__(self, sk, yk):
477
+ """Construct the operator."""
478
+ if sk.shape != yk.shape or sk.ndim != 2:
479
+ raise ValueError('sk and yk must have matching shape, (n_corrs, n)')
480
+ n_corrs, n = sk.shape
481
+
482
+ super().__init__(dtype=np.float64, shape=(n, n))
483
+
484
+ self.sk = sk
485
+ self.yk = yk
486
+ self.n_corrs = n_corrs
487
+ self.rho = 1 / np.einsum('ij,ij->i', sk, yk)
488
+
489
+ def _matvec(self, x):
490
+ """Efficient matrix-vector multiply with the BFGS matrices.
491
+
492
+ This calculation is described in Section (4) of [1].
493
+
494
+ Parameters
495
+ ----------
496
+ x : ndarray
497
+ An array with shape (n,) or (n,1).
498
+
499
+ Returns
500
+ -------
501
+ y : ndarray
502
+ The matrix-vector product
503
+
504
+ """
505
+ s, y, n_corrs, rho = self.sk, self.yk, self.n_corrs, self.rho
506
+ q = np.array(x, dtype=self.dtype, copy=True)
507
+ if q.ndim == 2 and q.shape[1] == 1:
508
+ q = q.reshape(-1)
509
+
510
+ alpha = np.empty(n_corrs)
511
+
512
+ for i in range(n_corrs-1, -1, -1):
513
+ alpha[i] = rho[i] * np.dot(s[i], q)
514
+ q = q - alpha[i]*y[i]
515
+
516
+ r = q
517
+ for i in range(n_corrs):
518
+ beta = rho[i] * np.dot(y[i], r)
519
+ r = r + s[i] * (alpha[i] - beta)
520
+
521
+ return r
522
+
523
+ def todense(self):
524
+ """Return a dense array representation of this operator.
525
+
526
+ Returns
527
+ -------
528
+ arr : ndarray, shape=(n, n)
529
+ An array with the same shape and containing
530
+ the same data represented by this `LinearOperator`.
531
+
532
+ """
533
+ s, y, n_corrs, rho = self.sk, self.yk, self.n_corrs, self.rho
534
+ I = np.eye(*self.shape, dtype=self.dtype)
535
+ Hk = I
536
+
537
+ for i in range(n_corrs):
538
+ A1 = I - s[i][:, np.newaxis] * y[i][np.newaxis, :] * rho[i]
539
+ A2 = I - y[i][:, np.newaxis] * s[i][np.newaxis, :] * rho[i]
540
+
541
+ Hk = np.dot(A1, np.dot(Hk, A2)) + (rho[i] * s[i][:, np.newaxis] *
542
+ s[i][np.newaxis, :])
543
+ return Hk
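An illustrative consistency check between the two code paths above: the two-loop recursion in ``_matvec`` and the dense matrix assembled by ``todense`` agree. The random ``sk``/``yk`` pairs are chosen so that each ``s_i . y_i > 0``, keeping ``rho`` well defined:

```python
# Illustrative check: the two-loop recursion matches the dense operator.
import numpy as np
from scipy.optimize import LbfgsInvHessProduct

rng = np.random.default_rng(2)
sk = rng.standard_normal((3, 5))
yk = sk + 0.1 * rng.standard_normal((3, 5))   # keeps s_i . y_i > 0
op = LbfgsInvHessProduct(sk, yk)
v = rng.standard_normal(5)
assert np.allclose(op @ v, op.todense() @ v)
```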
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_linesearch.py ADDED
@@ -0,0 +1,897 @@
1
+ """
2
+ Functions
3
+ ---------
4
+ .. autosummary::
5
+ :toctree: generated/
6
+
7
+ line_search_armijo
8
+ line_search_wolfe1
9
+ line_search_wolfe2
10
+ scalar_search_wolfe1
11
+ scalar_search_wolfe2
12
+
13
+ """
14
+ from warnings import warn
15
+
16
+ from scipy.optimize import _minpack2 as minpack2 # noqa: F401
17
+ from ._dcsrch import DCSRCH
18
+ import numpy as np
19
+
20
+ __all__ = ['LineSearchWarning', 'line_search_wolfe1', 'line_search_wolfe2',
21
+ 'scalar_search_wolfe1', 'scalar_search_wolfe2',
22
+ 'line_search_armijo']
23
+
24
+ class LineSearchWarning(RuntimeWarning):
25
+ pass
26
+
27
+
28
+ def _check_c1_c2(c1, c2):
29
+ if not (0 < c1 < c2 < 1):
30
+ raise ValueError("'c1' and 'c2' do not satisfy "
31
+ "'0 < c1 < c2 < 1'.")
32
+
33
+
34
+ #------------------------------------------------------------------------------
35
+ # Minpack's Wolfe line and scalar searches
36
+ #------------------------------------------------------------------------------
37
+
38
+ def line_search_wolfe1(f, fprime, xk, pk, gfk=None,
39
+ old_fval=None, old_old_fval=None,
40
+ args=(), c1=1e-4, c2=0.9, amax=50, amin=1e-8,
41
+ xtol=1e-14):
42
+ """
43
+ As `scalar_search_wolfe1` but do a line search to direction `pk`
44
+
45
+ Parameters
46
+ ----------
47
+ f : callable
48
+ Function `f(x)`
49
+ fprime : callable
50
+ Gradient of `f`
51
+ xk : array_like
52
+ Current point
53
+ pk : array_like
54
+ Search direction
55
+ gfk : array_like, optional
56
+ Gradient of `f` at point `xk`
57
+ old_fval : float, optional
58
+ Value of `f` at point `xk`
59
+ old_old_fval : float, optional
60
+ Value of `f` at point preceding `xk`
61
+
62
+ The rest of the parameters are the same as for `scalar_search_wolfe1`.
63
+
64
+ Returns
65
+ -------
66
+ stp : float
+ Step length, or None if no suitable step was found
+ f_count : int
+ Number of function evaluations made
+ g_count : int
+ Number of gradient evaluations made
+ fval : float
+ Function value at the step found
+ old_fval : float
+ Function value at the starting point
68
+ gval : array
69
+ Gradient of `f` at the final point
70
+
71
+ Notes
72
+ -----
73
+ Parameters `c1` and `c2` must satisfy ``0 < c1 < c2 < 1``.
74
+
75
+ """
76
+ if gfk is None:
77
+ gfk = fprime(xk, *args)
78
+
79
+ gval = [gfk]
80
+ gc = [0]
81
+ fc = [0]
82
+
83
+ def phi(s):
84
+ fc[0] += 1
85
+ return f(xk + s*pk, *args)
86
+
87
+ def derphi(s):
88
+ gval[0] = fprime(xk + s*pk, *args)
89
+ gc[0] += 1
90
+ return np.dot(gval[0], pk)
91
+
92
+ derphi0 = np.dot(gfk, pk)
93
+
94
+ stp, fval, old_fval = scalar_search_wolfe1(
95
+ phi, derphi, old_fval, old_old_fval, derphi0,
96
+ c1=c1, c2=c2, amax=amax, amin=amin, xtol=xtol)
97
+
98
+ return stp, fc[0], gc[0], fval, old_fval, gval[0]
99
+
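An illustrative run of ``line_search_wolfe1`` on a quadratic bowl. Note this is a private helper; the public entry point is `scipy.optimize.line_search`, so the module path below is an internal detail:

```python
# Illustrative example: Wolfe line search along the steepest-descent
# direction of f(x) = x . x.
import numpy as np
from scipy.optimize._linesearch import line_search_wolfe1

f = lambda x: np.dot(x, x)
fprime = lambda x: 2.0 * x
xk = np.array([2.0, -1.0])
pk = -fprime(xk)                      # a descent direction
stp, fc, gc, fval, old_fval, gval = line_search_wolfe1(f, fprime, xk, pk)
print(stp, fval)                      # fval < f(xk) = 5.0
```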
100
+
101
+ def scalar_search_wolfe1(phi, derphi, phi0=None, old_phi0=None, derphi0=None,
102
+ c1=1e-4, c2=0.9,
103
+ amax=50, amin=1e-8, xtol=1e-14):
104
+ """
105
+ Scalar line search for an alpha satisfying the strong Wolfe conditions.
+
+ The search direction is assumed to be a descent direction (``derphi(0) < 0``).
108
+
109
+ Parameters
110
+ ----------
111
+ phi : callable phi(alpha)
112
+ Function at point `alpha`
113
+ derphi : callable phi'(alpha)
114
+ Objective function derivative. Returns a scalar.
115
+ phi0 : float, optional
116
+ Value of phi at 0
117
+ old_phi0 : float, optional
118
+ Value of phi at previous point
119
+ derphi0 : float, optional
120
+ Value derphi at 0
121
+ c1 : float, optional
122
+ Parameter for Armijo condition rule.
123
+ c2 : float, optional
124
+ Parameter for curvature condition rule.
125
+ amax, amin : float, optional
126
+ Maximum and minimum step size
127
+ xtol : float, optional
128
+ Relative tolerance for an acceptable step.
129
+
130
+ Returns
131
+ -------
132
+ alpha : float
133
+ Step size, or None if no suitable step was found
134
+ phi : float
135
+ Value of `phi` at the new point `alpha`
136
+ phi0 : float
137
+ Value of `phi` at `alpha=0`
138
+
139
+ Notes
140
+ -----
141
+ Uses routine DCSRCH from MINPACK.
142
+
143
+ Parameters `c1` and `c2` must satisfy ``0 < c1 < c2 < 1`` as described in [1]_.
144
+
145
+ References
146
+ ----------
147
+
148
+ .. [1] Nocedal, J., & Wright, S. J. (2006). Numerical optimization.
149
+ In Springer Series in Operations Research and Financial Engineering.
150
+ (Springer Series in Operations Research and Financial Engineering).
151
+ Springer Nature.
152
+
153
+ """
154
+ _check_c1_c2(c1, c2)
155
+
156
+ if phi0 is None:
157
+ phi0 = phi(0.)
158
+ if derphi0 is None:
159
+ derphi0 = derphi(0.)
160
+
161
+ if old_phi0 is not None and derphi0 != 0:
162
+ alpha1 = min(1.0, 1.01*2*(phi0 - old_phi0)/derphi0)
163
+ if alpha1 < 0:
164
+ alpha1 = 1.0
165
+ else:
166
+ alpha1 = 1.0
167
+
168
+ maxiter = 100
169
+
170
+ dcsrch = DCSRCH(phi, derphi, c1, c2, xtol, amin, amax)
171
+ stp, phi1, phi0, task = dcsrch(
172
+ alpha1, phi0=phi0, derphi0=derphi0, maxiter=maxiter
173
+ )
174
+
175
+ return stp, phi1, phi0
176
+
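And the scalar form driven directly, on a 1-D model whose exact minimizer satisfies the Wolfe conditions (illustrative only; again a private module path):

```python
# Illustrative example: scalar Wolfe search on phi(a) = (a - 1)**2.
from scipy.optimize._linesearch import scalar_search_wolfe1

phi = lambda a: (a - 1.0)**2
derphi = lambda a: 2.0 * (a - 1.0)
stp, phi1, phi0 = scalar_search_wolfe1(phi, derphi)
print(stp, phi1)   # stp near 1.0, phi1 near 0.0
```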
177
+
178
+ line_search = line_search_wolfe1
179
+
180
+
181
+ #------------------------------------------------------------------------------
182
+ # Pure-Python Wolfe line and scalar searches
183
+ #------------------------------------------------------------------------------
184
+
185
+ # Note: `line_search_wolfe2` is the public `scipy.optimize.line_search`
186
+
187
+ def line_search_wolfe2(f, myfprime, xk, pk, gfk=None, old_fval=None,
188
+ old_old_fval=None, args=(), c1=1e-4, c2=0.9, amax=None,
189
+ extra_condition=None, maxiter=10):
190
+ """Find alpha that satisfies strong Wolfe conditions.
191
+
192
+ Parameters
193
+ ----------
194
+ f : callable f(x,*args)
195
+ Objective function.
196
+ myfprime : callable f'(x,*args)
197
+ Objective function gradient.
198
+ xk : ndarray
199
+ Starting point.
200
+ pk : ndarray
201
+ Search direction. The search direction must be a descent direction
202
+ for the algorithm to converge.
203
+ gfk : ndarray, optional
204
+ Gradient value for x=xk (xk being the current parameter
205
+ estimate). Will be recomputed if omitted.
206
+ old_fval : float, optional
207
+ Function value for x=xk. Will be recomputed if omitted.
208
+ old_old_fval : float, optional
209
+ Function value for the point preceding x=xk.
210
+ args : tuple, optional
211
+ Additional arguments passed to objective function.
212
+ c1 : float, optional
213
+ Parameter for Armijo condition rule.
214
+ c2 : float, optional
215
+ Parameter for curvature condition rule.
216
+ amax : float, optional
217
+ Maximum step size
218
+ extra_condition : callable, optional
219
+ A callable of the form ``extra_condition(alpha, x, f, g)``
220
+ returning a boolean. Arguments are the proposed step ``alpha``
221
+ and the corresponding ``x``, ``f`` and ``g`` values. The line search
222
+ accepts the value of ``alpha`` only if this
223
+ callable returns ``True``. If the callable returns ``False``
224
+ for the step length, the algorithm will continue with
225
+ new iterates. The callable is only called for iterates
226
+ satisfying the strong Wolfe conditions.
227
+ maxiter : int, optional
228
+ Maximum number of iterations to perform.
229
+
230
+ Returns
231
+ -------
232
+ alpha : float or None
233
+ Alpha for which ``x_new = x0 + alpha * pk``,
234
+ or None if the line search algorithm did not converge.
235
+ fc : int
236
+ Number of function evaluations made.
237
+ gc : int
238
+ Number of gradient evaluations made.
239
+ new_fval : float or None
240
+ New function value ``f(x_new)=f(x0+alpha*pk)``,
241
+ or None if the line search algorithm did not converge.
242
+ old_fval : float
243
+ Old function value ``f(x0)``.
244
+ new_slope : float or None
245
+ The local slope along the search direction at the
246
+ new value ``<myfprime(x_new), pk>``,
247
+ or None if the line search algorithm did not converge.
248
+
249
+
250
+ Notes
251
+ -----
252
+ Uses the line search algorithm to enforce strong Wolfe
253
+ conditions. See Wright and Nocedal, 'Numerical Optimization',
254
+ 1999, pp. 59-61.
255
+
256
+ The search direction `pk` must be a descent direction (e.g.
257
+ ``-myfprime(xk)``) to find a step length that satisfies the strong Wolfe
258
+ conditions. If the search direction is not a descent direction (e.g.
259
+ ``myfprime(xk)``), then `alpha`, `new_fval`, and `new_slope` will be None.
260
+
261
+ Examples
262
+ --------
263
+ >>> import numpy as np
264
+ >>> from scipy.optimize import line_search
265
+
266
+ A objective function and its gradient are defined.
267
+
268
+ >>> def obj_func(x):
269
+ ... return (x[0])**2+(x[1])**2
270
+ >>> def obj_grad(x):
271
+ ... return [2*x[0], 2*x[1]]
272
+
273
+ We can find alpha that satisfies strong Wolfe conditions.
274
+
275
+ >>> start_point = np.array([1.8, 1.7])
276
+ >>> search_gradient = np.array([-1.0, -1.0])
277
+ >>> line_search(obj_func, obj_grad, start_point, search_gradient)
278
+ (1.0, 2, 1, 1.1300000000000001, 6.13, [1.6, 1.4])
279
+
280
+ """
281
+ fc = [0]
282
+ gc = [0]
283
+ gval = [None]
284
+ gval_alpha = [None]
285
+
286
+ def phi(alpha):
287
+ fc[0] += 1
288
+ return f(xk + alpha * pk, *args)
289
+
290
+ fprime = myfprime
291
+
292
+ def derphi(alpha):
293
+ gc[0] += 1
294
+ gval[0] = fprime(xk + alpha * pk, *args) # store for later use
295
+ gval_alpha[0] = alpha
296
+ return np.dot(gval[0], pk)
297
+
298
+ if gfk is None:
299
+ gfk = fprime(xk, *args)
300
+ derphi0 = np.dot(gfk, pk)
301
+
302
+ if extra_condition is not None:
303
+ # Add the current gradient as argument, to avoid needless
304
+ # re-evaluation
305
+ def extra_condition2(alpha, phi):
306
+ if gval_alpha[0] != alpha:
307
+ derphi(alpha)
308
+ x = xk + alpha * pk
309
+ return extra_condition(alpha, x, phi, gval[0])
310
+ else:
311
+ extra_condition2 = None
312
+
313
+ alpha_star, phi_star, old_fval, derphi_star = scalar_search_wolfe2(
314
+ phi, derphi, old_fval, old_old_fval, derphi0, c1, c2, amax,
315
+ extra_condition2, maxiter=maxiter)
316
+
317
+ if derphi_star is None:
318
+ warn('The line search algorithm did not converge',
319
+ LineSearchWarning, stacklevel=2)
320
+ else:
321
+ # derphi_star is a number (derphi) -- so use the most recently
322
+ # calculated gradient used in computing it derphi = gfk*pk
323
+ # this is the gradient at the next step no need to compute it
324
+ # again in the outer loop.
325
+ derphi_star = gval[0]
326
+
327
+ return alpha_star, fc[0], gc[0], phi_star, old_fval, derphi_star
328
+
329
+
330
+ def scalar_search_wolfe2(phi, derphi, phi0=None,
331
+ old_phi0=None, derphi0=None,
332
+ c1=1e-4, c2=0.9, amax=None,
333
+ extra_condition=None, maxiter=10):
334
+ """Find alpha that satisfies strong Wolfe conditions.
335
+
336
+ alpha > 0 is assumed to be a descent direction.
337
+
338
+ Parameters
339
+ ----------
340
+ phi : callable phi(alpha)
341
+ Objective scalar function.
342
+ derphi : callable phi'(alpha)
343
+ Objective function derivative. Returns a scalar.
344
+ phi0 : float, optional
345
+ Value of phi at 0.
346
+ old_phi0 : float, optional
347
+ Value of phi at previous point.
348
+ derphi0 : float, optional
349
+ Value of derphi at 0
350
+ c1 : float, optional
351
+ Parameter for Armijo condition rule.
352
+ c2 : float, optional
353
+ Parameter for curvature condition rule.
354
+ amax : float, optional
355
+ Maximum step size.
356
+ extra_condition : callable, optional
357
+ A callable of the form ``extra_condition(alpha, phi_value)``
358
+ returning a boolean. The line search accepts the value
359
+ of ``alpha`` only if this callable returns ``True``.
360
+ If the callable returns ``False`` for the step length,
361
+ the algorithm will continue with new iterates.
362
+ The callable is only called for iterates satisfying
363
+ the strong Wolfe conditions.
364
+ maxiter : int, optional
365
+ Maximum number of iterations to perform.
366
+
367
+ Returns
368
+ -------
369
+ alpha_star : float or None
370
+ Best alpha, or None if the line search algorithm did not converge.
371
+ phi_star : float
372
+ phi at alpha_star.
373
+ phi0 : float
374
+ phi at 0.
375
+ derphi_star : float or None
376
+ derphi at alpha_star, or None if the line search algorithm
377
+ did not converge.
378
+
379
+ Notes
380
+ -----
381
+ Uses the line search algorithm to enforce strong Wolfe
382
+ conditions. See Wright and Nocedal, 'Numerical Optimization',
383
+ 1999, pp. 59-61.
384
+
385
+ """
386
+ _check_c1_c2(c1, c2)
387
+
388
+ if phi0 is None:
389
+ phi0 = phi(0.)
390
+
391
+ if derphi0 is None:
392
+ derphi0 = derphi(0.)
393
+
394
+ alpha0 = 0
395
+ if old_phi0 is not None and derphi0 != 0:
396
+ alpha1 = min(1.0, 1.01*2*(phi0 - old_phi0)/derphi0)
397
+ else:
398
+ alpha1 = 1.0
399
+
400
+ if alpha1 < 0:
401
+ alpha1 = 1.0
402
+
403
+ if amax is not None:
404
+ alpha1 = min(alpha1, amax)
405
+
406
+ phi_a1 = phi(alpha1)
407
+ #derphi_a1 = derphi(alpha1) evaluated below
408
+
409
+ phi_a0 = phi0
410
+ derphi_a0 = derphi0
411
+
412
+ if extra_condition is None:
413
+ def extra_condition(alpha, phi):
414
+ return True
415
+
416
+ for i in range(maxiter):
417
+ if alpha1 == 0 or (amax is not None and alpha0 > amax):
418
+ # alpha1 == 0: This shouldn't happen. Perhaps the increment has
419
+ # slipped below machine precision?
420
+ alpha_star = None
421
+ phi_star = phi0
422
+ phi0 = old_phi0
423
+ derphi_star = None
424
+
425
+ if alpha1 == 0:
426
+ msg = 'Rounding errors prevent the line search from converging'
427
+ else:
428
+ msg = "The line search algorithm could not find a solution " + \
429
+ "less than or equal to amax: %s" % amax
430
+
431
+ warn(msg, LineSearchWarning, stacklevel=2)
432
+ break
433
+
434
+ not_first_iteration = i > 0
435
+ if (phi_a1 > phi0 + c1 * alpha1 * derphi0) or \
436
+ ((phi_a1 >= phi_a0) and not_first_iteration):
437
+ alpha_star, phi_star, derphi_star = \
438
+ _zoom(alpha0, alpha1, phi_a0,
439
+ phi_a1, derphi_a0, phi, derphi,
440
+ phi0, derphi0, c1, c2, extra_condition)
441
+ break
442
+
443
+ derphi_a1 = derphi(alpha1)
444
+ if (abs(derphi_a1) <= -c2*derphi0):
445
+ if extra_condition(alpha1, phi_a1):
446
+ alpha_star = alpha1
447
+ phi_star = phi_a1
448
+ derphi_star = derphi_a1
449
+ break
450
+
451
+ if (derphi_a1 >= 0):
452
+ alpha_star, phi_star, derphi_star = \
453
+ _zoom(alpha1, alpha0, phi_a1,
454
+ phi_a0, derphi_a1, phi, derphi,
455
+ phi0, derphi0, c1, c2, extra_condition)
456
+ break
457
+
458
+ alpha2 = 2 * alpha1 # increase by factor of two on each iteration
459
+ if amax is not None:
460
+ alpha2 = min(alpha2, amax)
461
+ alpha0 = alpha1
462
+ alpha1 = alpha2
463
+ phi_a0 = phi_a1
464
+ phi_a1 = phi(alpha1)
465
+ derphi_a0 = derphi_a1
466
+
467
+ else:
468
+ # stopping test maxiter reached
469
+ alpha_star = alpha1
470
+ phi_star = phi_a1
471
+ derphi_star = None
472
+ warn('The line search algorithm did not converge',
473
+ LineSearchWarning, stacklevel=2)
474
+
475
+ return alpha_star, phi_star, phi0, derphi_star
476
+
477
+
478
+ def _cubicmin(a, fa, fpa, b, fb, c, fc):
479
+ """
480
+ Finds the minimizer for a cubic polynomial that goes through the
481
+ points (a,fa), (b,fb), and (c,fc) with derivative at a of fpa.
482
+
483
+ If no minimizer can be found, return None.
484
+
485
+ """
486
+ # f(x) = A *(x-a)^3 + B*(x-a)^2 + C*(x-a) + D
487
+
488
+ with np.errstate(divide='raise', over='raise', invalid='raise'):
489
+ try:
490
+ C = fpa
491
+ db = b - a
492
+ dc = c - a
493
+ denom = (db * dc) ** 2 * (db - dc)
494
+ d1 = np.empty((2, 2))
495
+ d1[0, 0] = dc ** 2
496
+ d1[0, 1] = -db ** 2
497
+ d1[1, 0] = -dc ** 3
498
+ d1[1, 1] = db ** 3
499
+ [A, B] = np.dot(d1, np.asarray([fb - fa - C * db,
500
+ fc - fa - C * dc]).flatten())
501
+ A /= denom
502
+ B /= denom
503
+ radical = B * B - 3 * A * C
504
+ xmin = a + (-B + np.sqrt(radical)) / (3 * A)
505
+ except ArithmeticError:
506
+ return None
507
+ if not np.isfinite(xmin):
508
+ return None
509
+ return xmin
510
+
511
+
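A quick illustrative check of ``_cubicmin``: when the three points and the derivative come from an actual cubic, the interpolant is that cubic, so its local minimizer is recovered exactly. Here f(x) = x**3 - 3x has a local minimum at x = 1:

```python
# Illustrative check of the cubic-interpolation helper (private module path).
from scipy.optimize._linesearch import _cubicmin

f = lambda x: x**3 - 3.0 * x
fp = lambda x: 3.0 * x**2 - 3.0
xmin = _cubicmin(0.0, f(0.0), fp(0.0), 2.0, f(2.0), 0.5, f(0.5))
print(xmin)   # 1.0 (up to roundoff)
```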
512
+ def _quadmin(a, fa, fpa, b, fb):
513
+ """
514
+ Finds the minimizer for a quadratic polynomial that goes through
515
+ the points (a,fa), (b,fb) with derivative at a of fpa.
516
+
517
+ """
518
+ # f(x) = B*(x-a)^2 + C*(x-a) + D
519
+ with np.errstate(divide='raise', over='raise', invalid='raise'):
520
+ try:
521
+ D = fa
522
+ C = fpa
523
+ db = b - a * 1.0
524
+ B = (fb - D - C * db) / (db * db)
525
+ xmin = a - C / (2.0 * B)
526
+ except ArithmeticError:
527
+ return None
528
+ if not np.isfinite(xmin):
529
+ return None
530
+ return xmin
531
+
532
+
533
+ def _zoom(a_lo, a_hi, phi_lo, phi_hi, derphi_lo,
534
+ phi, derphi, phi0, derphi0, c1, c2, extra_condition):
535
+ """Zoom stage of approximate linesearch satisfying strong Wolfe conditions.
536
+
537
+ Part of the optimization algorithm in `scalar_search_wolfe2`.
538
+
539
+ Notes
540
+ -----
541
+ Implements Algorithm 3.6 (zoom) in Wright and Nocedal,
542
+ 'Numerical Optimization', 1999, pp. 61.
543
+
544
+ """
545
+
546
+ maxiter = 10
547
+ i = 0
548
+ delta1 = 0.2 # cubic interpolant check
549
+ delta2 = 0.1 # quadratic interpolant check
550
+ phi_rec = phi0
551
+ a_rec = 0
552
+ while True:
553
+ # interpolate to find a trial step length between a_lo and
554
+ # a_hi Need to choose interpolation here. Use cubic
555
+ # interpolation and then if the result is within delta *
556
+ # dalpha or outside of the interval bounded by a_lo or a_hi
557
+ # then use quadratic interpolation, if the result is still too
558
+ # close, then use bisection
559
+
560
+ dalpha = a_hi - a_lo
561
+ if dalpha < 0:
562
+ a, b = a_hi, a_lo
563
+ else:
564
+ a, b = a_lo, a_hi
565
+
566
+ # minimizer of cubic interpolant
567
+ # (uses phi_lo, derphi_lo, phi_hi, and the most recent value of phi)
568
+ #
569
+ # if the result is too close to the end points (or out of the
570
+ # interval), then use quadratic interpolation with phi_lo,
571
+ # derphi_lo and phi_hi if the result is still too close to the
572
+ # end points (or out of the interval) then use bisection
573
+
574
+ if (i > 0):
575
+ cchk = delta1 * dalpha
576
+ a_j = _cubicmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi,
577
+ a_rec, phi_rec)
578
+ if (i == 0) or (a_j is None) or (a_j > b - cchk) or (a_j < a + cchk):
579
+ qchk = delta2 * dalpha
580
+ a_j = _quadmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi)
581
+ if (a_j is None) or (a_j > b-qchk) or (a_j < a+qchk):
582
+ a_j = a_lo + 0.5*dalpha
583
+
584
+ # Check new value of a_j
585
+
586
+ phi_aj = phi(a_j)
587
+ if (phi_aj > phi0 + c1*a_j*derphi0) or (phi_aj >= phi_lo):
588
+ phi_rec = phi_hi
589
+ a_rec = a_hi
590
+ a_hi = a_j
591
+ phi_hi = phi_aj
592
+ else:
593
+ derphi_aj = derphi(a_j)
594
+ if abs(derphi_aj) <= -c2*derphi0 and extra_condition(a_j, phi_aj):
595
+ a_star = a_j
596
+ val_star = phi_aj
597
+ valprime_star = derphi_aj
598
+ break
599
+ if derphi_aj*(a_hi - a_lo) >= 0:
600
+ phi_rec = phi_hi
601
+ a_rec = a_hi
602
+ a_hi = a_lo
603
+ phi_hi = phi_lo
604
+ else:
605
+ phi_rec = phi_lo
606
+ a_rec = a_lo
607
+ a_lo = a_j
608
+ phi_lo = phi_aj
609
+ derphi_lo = derphi_aj
610
+ i += 1
611
+ if (i > maxiter):
612
+ # Failed to find a conforming step size
613
+ a_star = None
614
+ val_star = None
615
+ valprime_star = None
616
+ break
617
+ return a_star, val_star, valprime_star
618
+
619
+
620
+ #------------------------------------------------------------------------------
621
+ # Armijo line and scalar searches
622
+ #------------------------------------------------------------------------------
623
+
624
+ def line_search_armijo(f, xk, pk, gfk, old_fval, args=(), c1=1e-4, alpha0=1):
625
+ """Minimize over alpha, the function ``f(xk+alpha pk)``.
626
+
627
+ Parameters
628
+ ----------
629
+ f : callable
630
+ Function to be minimized.
631
+ xk : array_like
632
+ Current point.
633
+ pk : array_like
634
+ Search direction.
635
+ gfk : array_like
636
+ Gradient of `f` at point `xk`.
637
+ old_fval : float
638
+ Value of `f` at point `xk`.
639
+ args : tuple, optional
640
+ Optional arguments.
641
+ c1 : float, optional
642
+ Value to control stopping criterion.
643
+ alpha0 : scalar, optional
644
+ Value of `alpha` at start of the optimization.
645
+
646
+ Returns
647
+ -------
648
+ alpha
649
+ f_count
650
+ f_val_at_alpha
651
+
652
+ Notes
653
+ -----
654
+ Uses the interpolation algorithm (Armijo backtracking) as suggested by
655
+ Wright and Nocedal in 'Numerical Optimization', 1999, pp. 56-57
656
+
657
+ """
658
+ xk = np.atleast_1d(xk)
659
+ fc = [0]
660
+
661
+ def phi(alpha1):
662
+ fc[0] += 1
663
+ return f(xk + alpha1*pk, *args)
664
+
665
+ if old_fval is None:
666
+ phi0 = phi(0.)
667
+ else:
668
+ phi0 = old_fval # compute f(xk) -- done in past loop
669
+
670
+ derphi0 = np.dot(gfk, pk)
671
+ alpha, phi1 = scalar_search_armijo(phi, phi0, derphi0, c1=c1,
672
+ alpha0=alpha0)
673
+ return alpha, fc[0], phi1
674
+
675
+
676
+ def line_search_BFGS(f, xk, pk, gfk, old_fval, args=(), c1=1e-4, alpha0=1):
677
+ """
678
+ Compatibility wrapper for `line_search_armijo`
679
+ """
680
+ r = line_search_armijo(f, xk, pk, gfk, old_fval, args=args, c1=c1,
681
+ alpha0=alpha0)
682
+ return r[0], r[1], 0, r[2]
683
+
684
+
685
+ def scalar_search_armijo(phi, phi0, derphi0, c1=1e-4, alpha0=1, amin=0):
686
+ """Minimize over alpha, the function ``phi(alpha)``.
687
+
688
+ Uses the interpolation algorithm (Armijo backtracking) as suggested by
689
+ Wright and Nocedal in 'Numerical Optimization', 1999, pp. 56-57
690
+
691
+ The search direction is assumed to be a descent direction (``derphi0 < 0``).
692
+
693
+ Returns
694
+ -------
695
+ alpha
696
+ phi1
697
+
698
+ """
699
+ phi_a0 = phi(alpha0)
700
+ if phi_a0 <= phi0 + c1*alpha0*derphi0:
701
+ return alpha0, phi_a0
702
+
703
+ # Otherwise, compute the minimizer of a quadratic interpolant:
704
+
705
+ alpha1 = -(derphi0) * alpha0**2 / 2.0 / (phi_a0 - phi0 - derphi0 * alpha0)
706
+ phi_a1 = phi(alpha1)
707
+
708
+ if (phi_a1 <= phi0 + c1*alpha1*derphi0):
709
+ return alpha1, phi_a1
710
+
711
+ # Otherwise, loop with cubic interpolation until we find an alpha which
712
+ # satisfies the first Wolfe condition (since we are backtracking, we will
713
+ # assume that the value of alpha is not too small and satisfies the second
714
+ # condition.
715
+
716
+ while alpha1 > amin: # assumes the search direction is a descent direction
717
+ factor = alpha0**2 * alpha1**2 * (alpha1-alpha0)
718
+ a = alpha0**2 * (phi_a1 - phi0 - derphi0*alpha1) - \
719
+ alpha1**2 * (phi_a0 - phi0 - derphi0*alpha0)
720
+ a = a / factor
721
+ b = -alpha0**3 * (phi_a1 - phi0 - derphi0*alpha1) + \
722
+ alpha1**3 * (phi_a0 - phi0 - derphi0*alpha0)
723
+ b = b / factor
724
+
725
+ alpha2 = (-b + np.sqrt(abs(b**2 - 3 * a * derphi0))) / (3.0*a)
726
+ phi_a2 = phi(alpha2)
727
+
728
+ if (phi_a2 <= phi0 + c1*alpha2*derphi0):
729
+ return alpha2, phi_a2
730
+
731
+ if (alpha1 - alpha2) > alpha1 / 2.0 or (1 - alpha2/alpha1) < 0.96:
732
+ alpha2 = alpha1 / 2.0
733
+
734
+ alpha0 = alpha1
735
+ alpha1 = alpha2
736
+ phi_a0 = phi_a1
737
+ phi_a1 = phi_a2
738
+
739
+ # Failed to find a suitable step length
740
+ return None, phi_a1
741
+
742
+
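An illustrative run of the Armijo backtracking above: with phi(a) = (a - 0.1)**2 the first trial a = 1 fails the sufficient-decrease test, and the quadratic interpolant lands on the minimizer in a single backtracking step:

```python
# Illustrative example: one quadratic-interpolation backtrack.
from scipy.optimize._linesearch import scalar_search_armijo

phi = lambda a: (a - 0.1)**2
alpha, phi1 = scalar_search_armijo(phi, phi0=phi(0.0), derphi0=-0.2)
print(alpha, phi1)   # alpha == 0.1, phi1 == 0.0
```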
743
+ #------------------------------------------------------------------------------
744
+ # Non-monotone line search for DF-SANE
745
+ #------------------------------------------------------------------------------
746
+
747
+ def _nonmonotone_line_search_cruz(f, x_k, d, prev_fs, eta,
748
+ gamma=1e-4, tau_min=0.1, tau_max=0.5):
749
+ """
750
+ Nonmonotone backtracking line search as described in [1]_
751
+
752
+ Parameters
753
+ ----------
754
+ f : callable
755
+ Function returning a tuple ``(f, F)`` where ``f`` is the value
756
+ of a merit function and ``F`` the residual.
757
+ x_k : ndarray
758
+ Initial position.
759
+ d : ndarray
760
+ Search direction.
761
+ prev_fs : sequence of float
762
+ List of previous merit function values. Should have ``len(prev_fs) <= M``
763
+ where ``M`` is the nonmonotonicity window parameter.
764
+ eta : float
765
+ Allowed merit function increase, see [1]_
766
+ gamma, tau_min, tau_max : float, optional
767
+ Search parameters, see [1]_
768
+
769
+ Returns
770
+ -------
771
+ alpha : float
772
+ Step length
773
+ xp : ndarray
774
+ Next position
775
+ fp : float
776
+ Merit function value at next position
777
+ Fp : ndarray
778
+ Residual at next position
779
+
780
+ References
781
+ ----------
782
+ .. [1] "Spectral residual method without gradient information for solving
783
+ large-scale nonlinear systems of equations." W. La Cruz,
784
+ J.M. Martinez, M. Raydan. Math. Comp. **75**, 1429 (2006).
785
+
786
+ """
787
+ f_k = prev_fs[-1]
788
+ f_bar = max(prev_fs)
789
+
790
+ alpha_p = 1
791
+ alpha_m = 1
792
+ alpha = 1
793
+
794
+ while True:
795
+ xp = x_k + alpha_p * d
796
+ fp, Fp = f(xp)
797
+
798
+ if fp <= f_bar + eta - gamma * alpha_p**2 * f_k:
799
+ alpha = alpha_p
800
+ break
801
+
802
+ alpha_tp = alpha_p**2 * f_k / (fp + (2*alpha_p - 1)*f_k)
803
+
804
+ xp = x_k - alpha_m * d
805
+ fp, Fp = f(xp)
806
+
807
+ if fp <= f_bar + eta - gamma * alpha_m**2 * f_k:
808
+ alpha = -alpha_m
809
+ break
810
+
811
+ alpha_tm = alpha_m**2 * f_k / (fp + (2*alpha_m - 1)*f_k)
812
+
813
+ alpha_p = np.clip(alpha_tp, tau_min * alpha_p, tau_max * alpha_p)
814
+ alpha_m = np.clip(alpha_tm, tau_min * alpha_m, tau_max * alpha_m)
815
+
816
+ return alpha, xp, fp, Fp
817
+
818
+
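An illustrative call of the Cruz et al. search above: ``f`` returns the squared residual norm as the merit value together with the residual itself. All names and the crude choice of search direction here are just for the example:

```python
# Illustrative example: nonmonotone search on the residual of x**2 = 1.
import numpy as np
from scipy.optimize._linesearch import _nonmonotone_line_search_cruz

def f(x):
    F = x**2 - 1.0                   # residual
    return np.dot(F, F), F           # (merit value, residual)

x_k = np.array([2.0, 0.5])
f_k, F_k = f(x_k)
d = -F_k                             # a crude search direction
alpha, xp, fp, Fp = _nonmonotone_line_search_cruz(f, x_k, d, [f_k],
                                                  eta=0.5 * f_k)
print(alpha, fp)                     # fp well below f_k
```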
819
+ def _nonmonotone_line_search_cheng(f, x_k, d, f_k, C, Q, eta,
820
+ gamma=1e-4, tau_min=0.1, tau_max=0.5,
821
+ nu=0.85):
822
+ """
823
+ Nonmonotone line search from [1]
824
+
825
+ Parameters
826
+ ----------
827
+ f : callable
828
+ Function returning a tuple ``(f, F)`` where ``f`` is the value
829
+ of a merit function and ``F`` the residual.
830
+ x_k : ndarray
831
+ Initial position.
832
+ d : ndarray
833
+ Search direction.
834
+ f_k : float
835
+ Initial merit function value.
836
+ C, Q : float
837
+ Control parameters. On the first iteration, give values
838
+ Q=1.0, C=f_k
839
+ eta : float
840
+ Allowed merit function increase, see [1]_
841
+ nu, gamma, tau_min, tau_max : float, optional
842
+ Search parameters, see [1]_
843
+
844
+ Returns
845
+ -------
846
+ alpha : float
847
+ Step length
848
+ xp : ndarray
849
+ Next position
850
+ fp : float
851
+ Merit function value at next position
852
+ Fp : ndarray
853
+ Residual at next position
854
+ C : float
855
+ New value for the control parameter C
856
+ Q : float
857
+ New value for the control parameter Q
858
+
859
+ References
860
+ ----------
861
+ .. [1] W. Cheng & D.-H. Li, ''A derivative-free nonmonotone line
862
+ search and its application to the spectral residual
863
+ method'', IMA J. Numer. Anal. 29, 814 (2009).
864
+
865
+ """
866
+ alpha_p = 1
867
+ alpha_m = 1
868
+ alpha = 1
869
+
870
+ while True:
871
+ xp = x_k + alpha_p * d
872
+ fp, Fp = f(xp)
873
+
874
+ if fp <= C + eta - gamma * alpha_p**2 * f_k:
875
+ alpha = alpha_p
876
+ break
877
+
878
+ alpha_tp = alpha_p**2 * f_k / (fp + (2*alpha_p - 1)*f_k)
879
+
880
+ xp = x_k - alpha_m * d
881
+ fp, Fp = f(xp)
882
+
883
+ if fp <= C + eta - gamma * alpha_m**2 * f_k:
884
+ alpha = -alpha_m
885
+ break
886
+
887
+ alpha_tm = alpha_m**2 * f_k / (fp + (2*alpha_m - 1)*f_k)
888
+
889
+ alpha_p = np.clip(alpha_tp, tau_min * alpha_p, tau_max * alpha_p)
890
+ alpha_m = np.clip(alpha_tm, tau_min * alpha_m, tau_max * alpha_m)
891
+
892
+ # Update C and Q
893
+ Q_next = nu * Q + 1
894
+ C = (nu * Q * (C + eta) + fp) / Q_next
895
+ Q = Q_next
896
+
897
+ return alpha, xp, fp, Fp, C, Q
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_linprog.py ADDED
@@ -0,0 +1,714 @@
1
+ """
2
+ A top-level linear programming interface.
3
+
4
+ .. versionadded:: 0.15.0
5
+
6
+ Functions
7
+ ---------
8
+ .. autosummary::
9
+ :toctree: generated/
10
+
11
+ linprog
12
+ linprog_verbose_callback
13
+ linprog_terse_callback
14
+
15
+ """
16
+
17
+ import numpy as np
18
+
19
+ from ._optimize import OptimizeResult, OptimizeWarning
20
+ from warnings import warn
21
+ from ._linprog_highs import _linprog_highs
22
+ from ._linprog_ip import _linprog_ip
23
+ from ._linprog_simplex import _linprog_simplex
24
+ from ._linprog_rs import _linprog_rs
25
+ from ._linprog_doc import (_linprog_highs_doc, _linprog_ip_doc, # noqa: F401
26
+ _linprog_rs_doc, _linprog_simplex_doc,
27
+ _linprog_highs_ipm_doc, _linprog_highs_ds_doc)
28
+ from ._linprog_util import (
29
+ _parse_linprog, _presolve, _get_Abc, _LPProblem, _autoscale,
30
+ _postsolve, _check_result, _display_summary)
31
+ from copy import deepcopy
32
+
33
+ __all__ = ['linprog', 'linprog_verbose_callback', 'linprog_terse_callback']
34
+
35
+ __docformat__ = "restructuredtext en"
36
+
37
+ LINPROG_METHODS = [
38
+ 'simplex', 'revised simplex', 'interior-point', 'highs', 'highs-ds', 'highs-ipm'
39
+ ]
40
+
41
+
42
+ def linprog_verbose_callback(res):
43
+ """
44
+ A sample callback function demonstrating the linprog callback interface.
45
+ This callback produces detailed output to sys.stdout before each iteration
46
+ and after the final iteration of the simplex algorithm.
47
+
48
+ Parameters
49
+ ----------
50
+ res : A `scipy.optimize.OptimizeResult` consisting of the following fields:
51
+
52
+ x : 1-D array
53
+ The independent variable vector which optimizes the linear
54
+ programming problem.
55
+ fun : float
56
+ Value of the objective function.
57
+ success : bool
58
+ True if the algorithm succeeded in finding an optimal solution.
59
+ slack : 1-D array
60
+ The values of the slack variables. Each slack variable corresponds
61
+ to an inequality constraint. If the slack is zero, then the
62
+ corresponding constraint is active.
63
+ con : 1-D array
64
+ The (nominally zero) residuals of the equality constraints, that is,
65
+ ``b - A_eq @ x``.
66
+ phase : int
67
+ The phase of the optimization being executed. In phase 1 a basic
68
+ feasible solution is sought and the tableau ``T`` has an additional row
69
+ representing an alternate objective function.
70
+ status : int
71
+ An integer representing the exit status of the optimization::
72
+
73
+ 0 : Optimization terminated successfully
74
+ 1 : Iteration limit reached
75
+ 2 : Problem appears to be infeasible
76
+ 3 : Problem appears to be unbounded
77
+ 4 : Serious numerical difficulties encountered
78
+
79
+ nit : int
80
+ The number of iterations performed.
81
+ message : str
82
+ A string descriptor of the exit status of the optimization.
83
+ """
84
+ x = res['x']
85
+ fun = res['fun']
86
+ phase = res['phase']
87
+ status = res['status']
88
+ nit = res['nit']
89
+ message = res['message']
90
+ complete = res['complete']
91
+
92
+ saved_printoptions = np.get_printoptions()
93
+ np.set_printoptions(linewidth=500,
94
+ formatter={'float': lambda x: f"{x: 12.4f}"})
95
+ if status:
96
+ print('--------- Simplex Early Exit -------\n')
97
+ print(f'The simplex method exited early with status {status:d}')
98
+ print(message)
99
+ elif complete:
100
+ print('--------- Simplex Complete --------\n')
101
+ print(f'Iterations required: {nit}')
102
+ else:
103
+ print(f'--------- Iteration {nit:d} ---------\n')
104
+
105
+ if nit > 0:
106
+ if phase == 1:
107
+ print('Current Pseudo-Objective Value:')
108
+ else:
109
+ print('Current Objective Value:')
110
+ print('f = ', fun)
111
+ print()
112
+ print('Current Solution Vector:')
113
+ print('x = ', x)
114
+ print()
115
+
116
+ np.set_printoptions(**saved_printoptions)
117
+
118
+
119
+ def linprog_terse_callback(res):
120
+ """
121
+ A sample callback function demonstrating the linprog callback interface.
122
+ This callback produces brief output to sys.stdout before each iteration
123
+ and after the final iteration of the simplex algorithm.
124
+
125
+ Parameters
126
+ ----------
127
+ res : A `scipy.optimize.OptimizeResult` consisting of the following fields:
128
+
129
+ x : 1-D array
130
+ The independent variable vector which optimizes the linear
131
+ programming problem.
132
+ fun : float
133
+ Value of the objective function.
134
+ success : bool
135
+ True if the algorithm succeeded in finding an optimal solution.
136
+ slack : 1-D array
137
+ The values of the slack variables. Each slack variable corresponds
138
+ to an inequality constraint. If the slack is zero, then the
139
+ corresponding constraint is active.
140
+ con : 1-D array
141
+ The (nominally zero) residuals of the equality constraints, that is,
142
+ ``b - A_eq @ x``.
143
+ phase : int
144
+ The phase of the optimization being executed. In phase 1 a basic
145
+ feasible solution is sought and the tableau ``T`` has an additional row
146
+ representing an alternate objective function.
147
+ status : int
148
+ An integer representing the exit status of the optimization::
149
+
150
+ 0 : Optimization terminated successfully
151
+ 1 : Iteration limit reached
152
+ 2 : Problem appears to be infeasible
153
+ 3 : Problem appears to be unbounded
154
+ 4 : Serious numerical difficulties encountered
155
+
156
+ nit : int
157
+ The number of iterations performed.
158
+ message : str
159
+ A string descriptor of the exit status of the optimization.
160
+ """
161
+ nit = res['nit']
162
+ x = res['x']
163
+
164
+ if nit == 0:
165
+ print("Iter: X:")
166
+ print(f"{nit: <5d} ", end="")
167
+ print(x)
168
+
169
+
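A minimal sketch of wiring one of these sample callbacks into a solve (hypothetical problem data; only the legacy methods accept callbacks, and they emit a DeprecationWarning as noted in `linprog` below):

from scipy.optimize import linprog, linprog_terse_callback

# minimize 2*x0 + 3*x1 subject to x0 + x1 <= 4 and x >= 0;
# the callback prints the iterate before each simplex iteration.
res = linprog([2, 3], A_ub=[[1, 1]], b_ub=[4],
              method='simplex', callback=linprog_terse_callback)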
170
+ def linprog(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,
171
+ bounds=(0, None), method='highs', callback=None,
172
+ options=None, x0=None, integrality=None):
173
+ r"""
174
+ Linear programming: minimize a linear objective function subject to linear
175
+ equality and inequality constraints.
176
+
177
+ Linear programming solves problems of the following form:
178
+
179
+ .. math::
180
+
181
+ \min_x \ & c^T x \\
182
+ \mbox{such that} \ & A_{ub} x \leq b_{ub},\\
183
+ & A_{eq} x = b_{eq},\\
184
+ & l \leq x \leq u ,
185
+
186
+ where :math:`x` is a vector of decision variables; :math:`c`,
187
+ :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and
188
+ :math:`A_{ub}` and :math:`A_{eq}` are matrices.
189
+
190
+ Alternatively, that's:
191
+
192
+ - minimize ::
193
+
194
+ c @ x
195
+
196
+ - such that ::
197
+
198
+ A_ub @ x <= b_ub
199
+ A_eq @ x == b_eq
200
+ lb <= x <= ub
201
+
202
+ Note that by default ``lb = 0`` and ``ub = None``. Other bounds can be
203
+ specified with ``bounds``.
204
+
205
+ Parameters
206
+ ----------
207
+ c : 1-D array
208
+ The coefficients of the linear objective function to be minimized.
209
+ A_ub : 2-D array, optional
210
+ The inequality constraint matrix. Each row of ``A_ub`` specifies the
211
+ coefficients of a linear inequality constraint on ``x``.
212
+ b_ub : 1-D array, optional
213
+ The inequality constraint vector. Each element represents an
214
+ upper bound on the corresponding value of ``A_ub @ x``.
215
+ A_eq : 2-D array, optional
216
+ The equality constraint matrix. Each row of ``A_eq`` specifies the
217
+ coefficients of a linear equality constraint on ``x``.
218
+ b_eq : 1-D array, optional
219
+ The equality constraint vector. Each element of ``A_eq @ x`` must equal
220
+ the corresponding element of ``b_eq``.
221
+ bounds : sequence, optional
222
+ A sequence of ``(min, max)`` pairs for each element in ``x``, defining
223
+ the minimum and maximum values of that decision variable.
224
+ If a single tuple ``(min, max)`` is provided, then ``min`` and ``max``
225
+ will serve as bounds for all decision variables.
226
+ Use ``None`` to indicate that there is no bound. For instance, the
227
+ default bound ``(0, None)`` means that all decision variables are
228
+ non-negative, and the pair ``(None, None)`` means no bounds at all,
229
+ i.e., all variables are allowed to take any real value.
230
+ method : str, optional
231
+ The algorithm used to solve the standard form problem.
232
+ :ref:`'highs' <optimize.linprog-highs>` (default),
233
+ :ref:`'highs-ds' <optimize.linprog-highs-ds>`,
234
+ :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`,
235
+ :ref:`'interior-point' <optimize.linprog-interior-point>` (legacy),
236
+ :ref:`'revised simplex' <optimize.linprog-revised_simplex>` (legacy),
237
+ and
238
+ :ref:`'simplex' <optimize.linprog-simplex>` (legacy) are supported.
239
+ The legacy methods are deprecated and will be removed in SciPy 1.11.0.
240
+ callback : callable, optional
241
+ If a callback function is provided, it will be called at least once per
242
+ iteration of the algorithm. The callback function must accept a single
243
+ `scipy.optimize.OptimizeResult` consisting of the following fields:
244
+
245
+ x : 1-D array
246
+ The current solution vector.
247
+ fun : float
248
+ The current value of the objective function ``c @ x``.
249
+ success : bool
250
+ ``True`` when the algorithm has completed successfully.
251
+ slack : 1-D array
252
+ The (nominally positive) values of the slack,
253
+ ``b_ub - A_ub @ x``.
254
+ con : 1-D array
255
+ The (nominally zero) residuals of the equality constraints,
256
+ ``b_eq - A_eq @ x``.
257
+ phase : int
258
+ The phase of the algorithm being executed.
259
+ status : int
260
+ An integer representing the status of the algorithm.
261
+
262
+ ``0`` : Optimization proceeding nominally.
263
+
264
+ ``1`` : Iteration limit reached.
265
+
266
+ ``2`` : Problem appears to be infeasible.
267
+
268
+ ``3`` : Problem appears to be unbounded.
269
+
270
+ ``4`` : Numerical difficulties encountered.
271
+
272
+ nit : int
273
+ The current iteration number.
274
+ message : str
275
+ A string descriptor of the algorithm status.
276
+
277
+ Callback functions are not currently supported by the HiGHS methods.
278
+
279
+ options : dict, optional
280
+ A dictionary of solver options. All methods accept the following
281
+ options:
282
+
283
+ maxiter : int
284
+ Maximum number of iterations to perform.
285
+ Default: see method-specific documentation.
286
+ disp : bool
287
+ Set to ``True`` to print convergence messages.
288
+ Default: ``False``.
289
+ presolve : bool
290
+ Set to ``False`` to disable automatic presolve.
291
+ Default: ``True``.
292
+
293
+ All methods except the HiGHS solvers also accept:
294
+
295
+ tol : float
296
+ A tolerance which determines when a residual is "close enough" to
297
+ zero to be considered exactly zero.
298
+ autoscale : bool
299
+ Set to ``True`` to automatically perform equilibration.
300
+ Consider using this option if the numerical values in the
301
+ constraints are separated by several orders of magnitude.
302
+ Default: ``False``.
303
+ rr : bool
304
+ Set to ``False`` to disable automatic redundancy removal.
305
+ Default: ``True``.
306
+ rr_method : str
307
+ Method used to identify and remove redundant rows from the
308
+ equality constraint matrix after presolve. For problems with
309
+ dense input, the available methods for redundancy removal are:
310
+
311
+ "SVD":
312
+ Repeatedly performs singular value decomposition on
313
+ the matrix, detecting redundant rows based on nonzeros
314
+ in the left singular vectors that correspond with
315
+ zero singular values. May be fast when the matrix is
316
+ nearly full rank.
317
+ "pivot":
318
+ Uses the algorithm presented in [5]_ to identify
319
+ redundant rows.
320
+ "ID":
321
+ Uses a randomized interpolative decomposition.
322
+ Identifies columns of the matrix transpose not used in
323
+ a full-rank interpolative decomposition of the matrix.
324
+ None:
325
+ Uses "svd" if the matrix is nearly full rank, that is,
326
+ the difference between the matrix rank and the number
327
+ of rows is less than five. If not, uses "pivot". The
328
+ behavior of this default is subject to change without
329
+ prior notice.
330
+
331
+ Default: None.
332
+ For problems with sparse input, this option is ignored, and the
333
+ pivot-based algorithm presented in [5]_ is used.
334
+
335
+ For method-specific options, see
336
+ :func:`show_options('linprog') <show_options>`.
337
+
338
+ x0 : 1-D array, optional
339
+ Guess values of the decision variables, which will be refined by
340
+ the optimization algorithm. This argument is currently used only by the
341
+ 'revised simplex' method, and can only be used if `x0` represents a
342
+ basic feasible solution.
343
+
344
+ integrality : 1-D array or int, optional
345
+ Indicates the type of integrality constraint on each decision variable.
346
+
347
+ ``0`` : Continuous variable; no integrality constraint.
348
+
349
+ ``1`` : Integer variable; decision variable must be an integer
350
+ within `bounds`.
351
+
352
+ ``2`` : Semi-continuous variable; decision variable must be within
353
+ `bounds` or take value ``0``.
354
+
355
+ ``3`` : Semi-integer variable; decision variable must be an integer
356
+ within `bounds` or take value ``0``.
357
+
358
+ By default, all variables are continuous.
359
+
360
+ For mixed integrality constraints, supply an array of shape `c.shape`.
361
+ To infer a constraint on each decision variable from shorter inputs,
362
+ the argument will be broadcast to `c.shape` using `np.broadcast_to`.
363
+
364
+ This argument is currently used only by the ``'highs'`` method and
365
+ ignored otherwise.
366
+
367
+ Returns
368
+ -------
369
+ res : OptimizeResult
370
+ A :class:`scipy.optimize.OptimizeResult` consisting of the fields
371
+ below. Note that the return types of the fields may depend on whether
372
+ the optimization was successful, therefore it is recommended to check
373
+ `OptimizeResult.status` before relying on the other fields:
374
+
375
+ x : 1-D array
376
+ The values of the decision variables that minimize the
377
+ objective function while satisfying the constraints.
378
+ fun : float
379
+ The optimal value of the objective function ``c @ x``.
380
+ slack : 1-D array
381
+ The (nominally positive) values of the slack variables,
382
+ ``b_ub - A_ub @ x``.
383
+ con : 1-D array
384
+ The (nominally zero) residuals of the equality constraints,
385
+ ``b_eq - A_eq @ x``.
386
+ success : bool
387
+ ``True`` when the algorithm succeeds in finding an optimal
388
+ solution.
389
+ status : int
390
+ An integer representing the exit status of the algorithm.
391
+
392
+ ``0`` : Optimization terminated successfully.
393
+
394
+ ``1`` : Iteration limit reached.
395
+
396
+ ``2`` : Problem appears to be infeasible.
397
+
398
+ ``3`` : Problem appears to be unbounded.
399
+
400
+ ``4`` : Numerical difficulties encountered.
401
+
402
+ nit : int
403
+ The total number of iterations performed in all phases.
404
+ message : str
405
+ A string descriptor of the exit status of the algorithm.
406
+
407
+ See Also
408
+ --------
409
+ show_options : Additional options accepted by the solvers.
410
+
411
+ Notes
412
+ -----
413
+ This section describes the available solvers that can be selected by the
414
+ 'method' parameter.
415
+
416
+ `'highs-ds'` and
417
+ `'highs-ipm'` are interfaces to the
418
+ HiGHS simplex and interior-point method solvers [13]_, respectively.
419
+ `'highs'` (default) chooses between
420
+ the two automatically. These are the fastest linear
421
+ programming solvers in SciPy, especially for large, sparse problems;
422
+ which of these two is faster is problem-dependent.
423
+ The other solvers (`'interior-point'`, `'revised simplex'`, and
424
+ `'simplex'`) are legacy methods and will be removed in SciPy 1.11.0.
425
+
426
+ Method *highs-ds* is a wrapper of the C++ high performance dual
427
+ revised simplex implementation (HSOL) [13]_, [14]_. Method *highs-ipm*
428
+ is a wrapper of a C++ implementation of an **i**\ nterior-\ **p**\ oint
429
+ **m**\ ethod [13]_; it features a crossover routine, so it is as accurate
430
+ as a simplex solver. Method *highs* chooses between the two automatically.
431
+ For new code involving `linprog`, we recommend explicitly choosing one of
432
+ these three method values.
433
+
434
+ .. versionadded:: 1.6.0
435
+
436
+ Method *interior-point* uses the primal-dual path following algorithm
437
+ as outlined in [4]_. This algorithm supports sparse constraint matrices and
438
+ is typically faster than the simplex methods, especially for large, sparse
439
+ problems. Note, however, that the solution returned may be slightly less
440
+ accurate than those of the simplex methods and will not, in general,
441
+ correspond with a vertex of the polytope defined by the constraints.
442
+
443
+ .. versionadded:: 1.0.0
444
+
445
+ Method *revised simplex* uses the revised simplex method as described in
446
+ [9]_, except that a factorization [11]_ of the basis matrix, rather than
447
+ its inverse, is efficiently maintained and used to solve the linear systems
448
+ at each iteration of the algorithm.
449
+
450
+ .. versionadded:: 1.3.0
451
+
452
+ Method *simplex* uses a traditional, full-tableau implementation of
453
+ Dantzig's simplex algorithm [1]_, [2]_ (*not* the
454
+ Nelder-Mead simplex). This algorithm is included for backwards
455
+ compatibility and educational purposes.
456
+
457
+ .. versionadded:: 0.15.0
458
+
459
+ Before applying *interior-point*, *revised simplex*, or *simplex*,
460
+ a presolve procedure based on [8]_ attempts
461
+ to identify trivial infeasibilities, trivial unboundedness, and potential
462
+ problem simplifications. Specifically, it checks for:
463
+
464
+ - rows of zeros in ``A_eq`` or ``A_ub``, representing trivial constraints;
465
+ - columns of zeros in ``A_eq`` `and` ``A_ub``, representing unconstrained
466
+ variables;
467
+ - column singletons in ``A_eq``, representing fixed variables; and
468
+ - column singletons in ``A_ub``, representing simple bounds.
469
+
470
+ If presolve reveals that the problem is unbounded (e.g. an unconstrained
471
+ and unbounded variable has negative cost) or infeasible (e.g., a row of
472
+ zeros in ``A_eq`` corresponds with a nonzero in ``b_eq``), the solver
473
+ terminates with the appropriate status code. Note that presolve terminates
474
+ as soon as any sign of unboundedness is detected; consequently, a problem
475
+ may be reported as unbounded when in reality the problem is infeasible
476
+ (but infeasibility has not been detected yet). Therefore, if it is
477
+ important to know whether the problem is actually infeasible, solve the
478
+ problem again with option ``presolve=False``.
479
+
480
+ If neither infeasibility nor unboundedness is detected in a single pass
481
+ of the presolve, bounds are tightened where possible and fixed
482
+ variables are removed from the problem. Then, linearly dependent rows
483
+ of the ``A_eq`` matrix are removed (unless they represent an
484
+ infeasibility) to avoid numerical difficulties in the primary solve
485
+ routine. Note that rows that are nearly linearly dependent (within a
486
+ prescribed tolerance) may also be removed, which can change the optimal
487
+ solution in rare cases. If this is a concern, eliminate redundancy from
488
+ your problem formulation and run with option ``rr=False`` or
489
+ ``presolve=False``.
490
+
491
+ Several potential improvements can be made here: additional presolve
492
+ checks outlined in [8]_ should be implemented, the presolve routine should
493
+ be run multiple times (until no further simplifications can be made), and
494
+ more of the efficiency improvements from [5]_ should be implemented in the
495
+ redundancy removal routines.
496
+
497
+ After presolve, the problem is transformed to standard form by converting
498
+ the (tightened) simple bounds to upper bound constraints, introducing
499
+ non-negative slack variables for inequality constraints, and expressing
500
+ unbounded variables as the difference between two non-negative variables.
501
+ Optionally, the problem is automatically scaled via equilibration [12]_.
502
+ The selected algorithm solves the standard form problem, and a
503
+ postprocessing routine converts the result to a solution to the original
504
+ problem.
505
+
506
+ References
507
+ ----------
508
+ .. [1] Dantzig, George B., Linear programming and extensions. Rand
509
+ Corporation Research Study Princeton Univ. Press, Princeton, NJ,
510
+ 1963
511
+ .. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to
512
+ Mathematical Programming", McGraw-Hill, Chapter 4.
513
+ .. [3] Bland, Robert G. New finite pivoting rules for the simplex method.
514
+ Mathematics of Operations Research (2), 1977: pp. 103-107.
515
+ .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
516
+ optimizer for linear programming: an implementation of the
517
+ homogeneous algorithm." High performance optimization. Springer US,
518
+ 2000. 197-232.
519
+ .. [5] Andersen, Erling D. "Finding all linearly dependent rows in
520
+ large-scale linear programming." Optimization Methods and Software
521
+ 6.3 (1995): 219-227.
522
+ .. [6] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear
523
+ Programming based on Newton's Method." Unpublished Course Notes,
524
+ March 2004. Available 2/25/2017 at
525
+ https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf
526
+ .. [7] Fourer, Robert. "Solving Linear Programs by Interior-Point Methods."
527
+ Unpublished Course Notes, August 26, 2005. Available 2/25/2017 at
528
+ http://www.4er.org/CourseNotes/Book%20B/B-III.pdf
529
+ .. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear
530
+ programming." Mathematical Programming 71.2 (1995): 221-245.
531
+ .. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear
532
+ programming." Athena Scientific 1 (1997): 997.
533
+ .. [10] Andersen, Erling D., et al. Implementation of interior point
534
+ methods for large scale linear programming. HEC/Universite de
535
+ Geneve, 1996.
536
+ .. [11] Bartels, Richard H. "A stabilization of the simplex method."
537
+ Journal in Numerische Mathematik 16.5 (1971): 414-434.
538
+ .. [12] Tomlin, J. A. "On scaling linear programming problems."
539
+ Mathematical Programming Study 4 (1975): 146-166.
540
+ .. [13] Huangfu, Q., Galabova, I., Feldmeier, M., and Hall, J. A. J.
541
+ "HiGHS - high performance software for linear optimization."
542
+ https://highs.dev/
543
+ .. [14] Huangfu, Q. and Hall, J. A. J. "Parallelizing the dual revised
544
+ simplex method." Mathematical Programming Computation, 10 (1),
545
+ 119-142, 2018. DOI: 10.1007/s12532-017-0130-5
546
+
547
+ Examples
548
+ --------
549
+ Consider the following problem:
550
+
551
+ .. math::
552
+
553
+ \min_{x_0, x_1} \ -x_0 + 4x_1 & \\
554
+ \mbox{such that} \ -3x_0 + x_1 & \leq 6,\\
555
+ -x_0 - 2x_1 & \geq -4,\\
556
+ x_1 & \geq -3.
557
+
558
+ The problem is not presented in the form accepted by `linprog`. This is
559
+ easily remedied by converting the "greater than" inequality
560
+ constraint to a "less than" inequality constraint by
561
+ multiplying both sides by a factor of :math:`-1`. Note also that the last
562
+ constraint is really the simple bound :math:`-3 \leq x_1 \leq \infty`.
563
+ Finally, since there are no bounds on :math:`x_0`, we must explicitly
564
+ specify the bounds :math:`-\infty \leq x_0 \leq \infty`, as the
565
+ default is for variables to be non-negative. After collecting coeffecients
566
+ into arrays and tuples, the input for this problem is:
567
+
568
+ >>> from scipy.optimize import linprog
569
+ >>> c = [-1, 4]
570
+ >>> A = [[-3, 1], [1, 2]]
571
+ >>> b = [6, 4]
572
+ >>> x0_bounds = (None, None)
573
+ >>> x1_bounds = (-3, None)
574
+ >>> res = linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds])
575
+ >>> res.fun
576
+ -22.0
577
+ >>> res.x
578
+ array([10., -3.])
579
+ >>> res.message
580
+ 'Optimization terminated successfully. (HiGHS Status 7: Optimal)'
581
+
582
+ The marginals (AKA dual values / shadow prices / Lagrange multipliers)
583
+ and residuals (slacks) are also available.
584
+
585
+ >>> res.ineqlin
586
+ residual: [ 3.900e+01 0.000e+00]
587
+ marginals: [-0.000e+00 -1.000e+00]
588
+
589
+ For example, because the marginal associated with the second inequality
590
+ constraint is -1, we expect the optimal value of the objective function
591
+ to decrease by ``eps`` if we add a small amount ``eps`` to the right hand
592
+ side of the second inequality constraint:
593
+
594
+ >>> eps = 0.05
595
+ >>> b[1] += eps
596
+ >>> linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds]).fun
597
+ -22.05
598
+
599
+ Also, because the residual on the first inequality constraint is 39, we
600
+ can decrease the right hand side of the first constraint by 39 without
601
+ affecting the optimal solution.
602
+
603
+ >>> b = [6, 4] # reset to original values
604
+ >>> b[0] -= 39
605
+ >>> linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds]).fun
606
+ -22.0
607
+
608
+ """
609
+
610
+ meth = method.lower()
611
+ methods = {"highs", "highs-ds", "highs-ipm",
612
+ "simplex", "revised simplex", "interior-point"}
613
+
614
+ if meth not in methods:
615
+ raise ValueError(f"Unknown solver '{method}'")
616
+
617
+ if x0 is not None and meth != "revised simplex":
618
+ warning_message = "x0 is used only when method is 'revised simplex'. "
619
+ warn(warning_message, OptimizeWarning, stacklevel=2)
620
+
621
+ if np.any(integrality) and not meth == "highs":
622
+ integrality = None
623
+ warning_message = ("Only `method='highs'` supports integer "
624
+ "constraints. Ignoring `integrality`.")
625
+ warn(warning_message, OptimizeWarning, stacklevel=2)
626
+ elif np.any(integrality):
627
+ integrality = np.broadcast_to(integrality, np.shape(c))
628
+
629
+ lp = _LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality)
630
+ lp, solver_options = _parse_linprog(lp, options, meth)
631
+ tol = solver_options.get('tol', 1e-9)
632
+
633
+ # Give unmodified problem to HiGHS
634
+ if meth.startswith('highs'):
635
+ if callback is not None:
636
+ raise NotImplementedError("HiGHS solvers do not support the "
637
+ "callback interface.")
638
+ highs_solvers = {'highs-ipm': 'ipm', 'highs-ds': 'simplex',
639
+ 'highs': None}
640
+
641
+ sol = _linprog_highs(lp, solver=highs_solvers[meth],
642
+ **solver_options)
643
+ sol['status'], sol['message'] = (
644
+ _check_result(sol['x'], sol['fun'], sol['status'], sol['slack'],
645
+ sol['con'], lp.bounds, tol, sol['message'],
646
+ integrality))
647
+ sol['success'] = sol['status'] == 0
648
+ return OptimizeResult(sol)
649
+
650
+ warn(f"`method='{meth}'` is deprecated and will be removed in SciPy "
651
+ "1.11.0. Please use one of the HiGHS solvers (e.g. "
652
+ "`method='highs'`) in new code.", DeprecationWarning, stacklevel=2)
653
+
654
+ iteration = 0
655
+ complete = False # will become True if solved in presolve
656
+ undo = []
657
+
658
+ # Keep the original arrays to calculate slack/residuals for original
659
+ # problem.
660
+ lp_o = deepcopy(lp)
661
+
662
+ # Solve trivial problem, eliminate variables, tighten bounds, etc.
663
+ rr_method = solver_options.pop('rr_method', None) # need to pop these;
664
+ rr = solver_options.pop('rr', True) # they're not passed to methods
665
+ c0 = 0 # we might get a constant term in the objective
666
+ if solver_options.pop('presolve', True):
667
+ (lp, c0, x, undo, complete, status, message) = _presolve(lp, rr,
668
+ rr_method,
669
+ tol)
670
+
671
+ C, b_scale = 1, 1 # for trivial unscaling if autoscale is not used
672
+ postsolve_args = (lp_o._replace(bounds=lp.bounds), undo, C, b_scale)
673
+
674
+ if not complete:
675
+ A, b, c, c0, x0 = _get_Abc(lp, c0)
676
+ if solver_options.pop('autoscale', False):
677
+ A, b, c, x0, C, b_scale = _autoscale(A, b, c, x0)
678
+ postsolve_args = postsolve_args[:-2] + (C, b_scale)
679
+
680
+ if meth == 'simplex':
681
+ x, status, message, iteration = _linprog_simplex(
682
+ c, c0=c0, A=A, b=b, callback=callback,
683
+ postsolve_args=postsolve_args, **solver_options)
684
+ elif meth == 'interior-point':
685
+ x, status, message, iteration = _linprog_ip(
686
+ c, c0=c0, A=A, b=b, callback=callback,
687
+ postsolve_args=postsolve_args, **solver_options)
688
+ elif meth == 'revised simplex':
689
+ x, status, message, iteration = _linprog_rs(
690
+ c, c0=c0, A=A, b=b, x0=x0, callback=callback,
691
+ postsolve_args=postsolve_args, **solver_options)
692
+
693
+ # Eliminate artificial variables, re-introduce presolved variables, etc.
694
+ disp = solver_options.get('disp', False)
695
+
696
+ x, fun, slack, con = _postsolve(x, postsolve_args, complete)
697
+
698
+ status, message = _check_result(x, fun, status, slack, con, lp_o.bounds,
699
+ tol, message, integrality)
700
+
701
+ if disp:
702
+ _display_summary(message, status, fun, iteration)
703
+
704
+ sol = {
705
+ 'x': x,
706
+ 'fun': fun,
707
+ 'slack': slack,
708
+ 'con': con,
709
+ 'status': status,
710
+ 'message': message,
711
+ 'nit': iteration,
712
+ 'success': status == 0}
713
+
714
+ return OptimizeResult(sol)
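A brief sketch of the ``integrality`` option documented above (hypothetical problem data; requires one of the HiGHS methods): maximize ``x0 + 2*x1`` subject to ``x0 + x1 <= 3.5`` with ``x1`` restricted to integer values:

from scipy.optimize import linprog

c = [-1, -2]  # linprog minimizes, so negate the objective to maximize
res = linprog(c, A_ub=[[1, 1]], b_ub=[3.5],
              bounds=[(0, None), (0, None)],
              integrality=[0, 1], method='highs')
# x1 is integer-constrained while x0 stays continuous;
# the optimum should be res.x == [0.5, 3.0] with res.fun == -6.5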
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_linprog_ip.py ADDED
@@ -0,0 +1,1126 @@
1
+ """Interior-point method for linear programming
2
+
3
+ The *interior-point* method uses the primal-dual path following algorithm
4
+ outlined in [1]_. This algorithm supports sparse constraint matrices and
5
+ is typically faster than the simplex methods, especially for large, sparse
6
+ problems. Note, however, that the solution returned may be slightly less
7
+ accurate than those of the simplex methods and will not, in general,
8
+ correspond with a vertex of the polytope defined by the constraints.
9
+
10
+ .. versionadded:: 1.0.0
11
+
12
+ References
13
+ ----------
14
+ .. [1] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
15
+ optimizer for linear programming: an implementation of the
16
+ homogeneous algorithm." High performance optimization. Springer US,
17
+ 2000. 197-232.
18
+ """
19
+ # Author: Matt Haberland
20
+
21
+ import numpy as np
22
+ import scipy as sp
23
+ import scipy.sparse as sps
24
+ from warnings import warn
25
+ from scipy.linalg import LinAlgError
26
+ from ._optimize import OptimizeWarning, OptimizeResult, _check_unknown_options
27
+ from ._linprog_util import _postsolve
28
+ has_umfpack = True
29
+ has_cholmod = True
30
+ try:
31
+ import sksparse # noqa: F401
32
+ from sksparse.cholmod import cholesky as cholmod # noqa: F401
33
+ from sksparse.cholmod import analyze as cholmod_analyze
34
+ except ImportError:
35
+ has_cholmod = False
36
+ try:
37
+ import scikits.umfpack # test whether to use factorized # noqa: F401
38
+ except ImportError:
39
+ has_umfpack = False
40
+
41
+
42
+ def _get_solver(M, sparse=False, lstsq=False, sym_pos=True,
43
+ cholesky=True, permc_spec='MMD_AT_PLUS_A'):
44
+ """
45
+ Given solver options, return a handle to the appropriate linear system
46
+ solver.
47
+
48
+ Parameters
49
+ ----------
50
+ M : 2-D array
51
+ As defined in [4] Equation 8.31
52
+ sparse : bool (default = False)
53
+ True if the system to be solved is sparse. This is typically set
54
+ True when the original ``A_ub`` and ``A_eq`` arrays are sparse.
55
+ lstsq : bool (default = False)
56
+ True if the system is ill-conditioned and/or (nearly) singular and
57
+ thus a more robust least-squares solver is desired. This is sometimes
58
+ needed as the solution is approached.
59
+ sym_pos : bool (default = True)
60
+ True if the system matrix is symmetric positive definite.
61
+ Sometimes this needs to be set false as the solution is approached,
62
+ even when the system should be symmetric positive definite, due to
63
+ numerical difficulties.
64
+ cholesky : bool (default = True)
65
+ True if the system is to be solved by Cholesky, rather than LU,
66
+ decomposition. This is typically faster unless the problem is very
67
+ small or prone to numerical difficulties.
68
+ permc_spec : str (default = 'MMD_AT_PLUS_A')
69
+ Sparsity preservation strategy used by SuperLU. Acceptable values are:
70
+
71
+ - ``NATURAL``: natural ordering.
72
+ - ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
73
+ - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
74
+ - ``COLAMD``: approximate minimum degree column ordering.
75
+
76
+ See SuperLU documentation.
77
+
78
+ Returns
79
+ -------
80
+ solve : function
81
+ Handle to the appropriate solver function
82
+
83
+ """
84
+ try:
85
+ if sparse:
86
+ if lstsq:
87
+ def solve(r, sym_pos=False):
88
+ return sps.linalg.lsqr(M, r)[0]
89
+ elif cholesky:
90
+ try:
91
+ # Will raise an exception in the first call,
92
+ # or when the matrix changes due to a new problem
93
+ _get_solver.cholmod_factor.cholesky_inplace(M)
94
+ except Exception:
95
+ _get_solver.cholmod_factor = cholmod_analyze(M)
96
+ _get_solver.cholmod_factor.cholesky_inplace(M)
97
+ solve = _get_solver.cholmod_factor
98
+ else:
99
+ if has_umfpack and sym_pos:
100
+ solve = sps.linalg.factorized(M)
101
+ else: # factorized doesn't pass permc_spec
102
+ solve = sps.linalg.splu(M, permc_spec=permc_spec).solve
103
+
104
+ else:
105
+ if lstsq: # sometimes necessary as solution is approached
106
+ def solve(r):
107
+ return sp.linalg.lstsq(M, r)[0]
108
+ elif cholesky:
109
+ L = sp.linalg.cho_factor(M)
110
+
111
+ def solve(r):
112
+ return sp.linalg.cho_solve(L, r)
113
+ else:
114
+ # this seems to cache the matrix factorization, so solving
115
+ # with multiple right hand sides is much faster
116
+ def solve(r, sym_pos=sym_pos):
117
+ if sym_pos:
118
+ return sp.linalg.solve(M, r, assume_a="pos")
119
+ else:
120
+ return sp.linalg.solve(M, r)
121
+ # There are many things that can go wrong here, and it's hard to say
122
+ # what all of them are. It doesn't really matter: if the matrix can't be
123
+ # factorized, return None. get_solver will be called again with different
124
+ # inputs, and a new routine will try to factorize the matrix.
125
+ except KeyboardInterrupt:
126
+ raise
127
+ except Exception:
128
+ return None
129
+ return solve
130
+
131
+
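For orientation, a sketch (hypothetical problem data, not a scipy test) of how the flags above are reached from the public API: they are forwarded through ``options`` when the legacy interior-point method is selected.

from scipy.optimize import linprog

# Emits a DeprecationWarning: 'interior-point' is a legacy method.
res = linprog([1.0, 1.0], A_eq=[[1.0, -1.0]], b_eq=[0.0],
              bounds=[(0, 1), (0, 1)],
              method='interior-point',
              options={'sparse': True, 'permc_spec': 'COLAMD'})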
132
+ def _get_delta(A, b, c, x, y, z, tau, kappa, gamma, eta, sparse=False,
133
+ lstsq=False, sym_pos=True, cholesky=True, pc=True, ip=False,
134
+ permc_spec='MMD_AT_PLUS_A'):
135
+ """
136
+ Given standard form problem defined by ``A``, ``b``, and ``c``;
137
+ current variable estimates ``x``, ``y``, ``z``, ``tau``, and ``kappa``;
138
+ algorithmic parameters ``gamma`` and ``eta``;
139
+ and options ``sparse``, ``lstsq``, ``sym_pos``, ``cholesky``, ``pc``
140
+ (predictor-corrector), and ``ip`` (initial point improvement),
141
+ get the search direction for increments to the variable estimates.
142
+
143
+ Parameters
144
+ ----------
145
+ As defined in [4], except:
146
+ sparse : bool
147
+ True if the system to be solved is sparse. This is typically set
148
+ True when the original ``A_ub`` and ``A_eq`` arrays are sparse.
149
+ lstsq : bool
150
+ True if the system is ill-conditioned and/or (nearly) singular and
151
+ thus a more robust least-squares solver is desired. This is sometimes
152
+ needed as the solution is approached.
153
+ sym_pos : bool
154
+ True if the system matrix is symmetric positive definite.
155
+ Sometimes this needs to be set false as the solution is approached,
156
+ even when the system should be symmetric positive definite, due to
157
+ numerical difficulties.
158
+ cholesky : bool
159
+ True if the system is to be solved by Cholesky, rather than LU,
160
+ decomposition. This is typically faster unless the problem is very
161
+ small or prone to numerical difficulties.
162
+ pc : bool
163
+ True if the predictor-corrector method of Mehrotra is to be used. This
164
+ is almost always (if not always) beneficial. Even though it requires
165
+ the solution of an additional linear system, the factorization
166
+ is typically (implicitly) reused, so the extra solve is cheap, and the
167
+ number of algorithm iterations is typically reduced.
168
+ ip : bool
169
+ True if the improved initial point suggestion due to [4] section 4.3
170
+ is desired. It's unclear whether this is beneficial.
171
+ permc_spec : str (default = 'MMD_AT_PLUS_A')
172
+ (Has effect only with ``sparse = True``, ``lstsq = False``, ``sym_pos =
173
+ True``.) A matrix is factorized in each iteration of the algorithm.
174
+ This option specifies how to permute the columns of the matrix for
175
+ sparsity preservation. Acceptable values are:
176
+
177
+ - ``NATURAL``: natural ordering.
178
+ - ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
179
+ - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
180
+ - ``COLAMD``: approximate minimum degree column ordering.
181
+
182
+ This option can impact the convergence of the
183
+ interior point algorithm; test different values to determine which
184
+ performs best for your problem. For more information, refer to
185
+ ``scipy.sparse.linalg.splu``.
186
+
187
+ Returns
188
+ -------
189
+ Search directions as defined in [4]
190
+
191
+ References
192
+ ----------
193
+ .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
194
+ optimizer for linear programming: an implementation of the
195
+ homogeneous algorithm." High performance optimization. Springer US,
196
+ 2000. 197-232.
197
+
198
+ """
199
+ if A.shape[0] == 0:
200
+ # If there are no constraints, some solvers fail (understandably)
201
+ # rather than returning empty solution. This gets the job done.
202
+ sparse, lstsq, sym_pos, cholesky = False, False, True, False
203
+ n_x = len(x)
204
+
205
+ # [4] Equation 8.8
206
+ r_P = b * tau - A.dot(x)
207
+ r_D = c * tau - A.T.dot(y) - z
208
+ r_G = c.dot(x) - b.transpose().dot(y) + kappa
209
+ mu = (x.dot(z) + tau * kappa) / (n_x + 1)
210
+
211
+ # Assemble M from [4] Equation 8.31
212
+ Dinv = x / z
213
+
214
+ if sparse:
215
+ M = A.dot(sps.diags(Dinv, 0, format="csc").dot(A.T))
216
+ else:
217
+ M = A.dot(Dinv.reshape(-1, 1) * A.T)
218
+ solve = _get_solver(M, sparse, lstsq, sym_pos, cholesky, permc_spec)
219
+
220
+ # pc: "predictor-corrector" [4] Section 4.1
221
+ # In development this option could be turned off
222
+ # but it always seems to improve performance substantially
223
+ n_corrections = 1 if pc else 0
224
+
225
+ i = 0
226
+ alpha, d_x, d_z, d_tau, d_kappa = 0, 0, 0, 0, 0
227
+ while i <= n_corrections:
228
+ # Reference [4] Eq. 8.6
229
+ rhatp = eta(gamma) * r_P
230
+ rhatd = eta(gamma) * r_D
231
+ rhatg = eta(gamma) * r_G
232
+
233
+ # Reference [4] Eq. 8.7
234
+ rhatxs = gamma * mu - x * z
235
+ rhattk = gamma * mu - tau * kappa
236
+
237
+ if i == 1:
238
+ if ip: # if the correction is to get "initial point"
239
+ # Reference [4] Eq. 8.23
240
+ rhatxs = ((1 - alpha) * gamma * mu -
241
+ x * z - alpha**2 * d_x * d_z)
242
+ rhattk = ((1 - alpha) * gamma * mu -
243
+ tau * kappa -
244
+ alpha**2 * d_tau * d_kappa)
245
+ else: # if the correction is for "predictor-corrector"
246
+ # Reference [4] Eq. 8.13
247
+ rhatxs -= d_x * d_z
248
+ rhattk -= d_tau * d_kappa
249
+
250
+ # sometimes numerical difficulties arise as the solution is approached
251
+ # this loop tries to solve the equations using a sequence of functions
252
+ # for solve. For dense systems, the order is:
253
+ # 1. scipy.linalg.cho_factor/scipy.linalg.cho_solve,
254
+ # 2. scipy.linalg.solve w/ sym_pos = True,
255
+ # 3. scipy.linalg.solve w/ sym_pos = False, and if all else fails
256
+ # 4. scipy.linalg.lstsq
257
+ # For sparse systems, the order is:
258
+ # 1. sksparse.cholmod.cholesky (if available)
259
+ # 2. scipy.sparse.linalg.factorized (if umfpack available)
260
+ # 3. scipy.sparse.linalg.splu
261
+ # 4. scipy.sparse.linalg.lsqr
262
+ solved = False
263
+ while not solved:
264
+ try:
265
+ # [4] Equation 8.28
266
+ p, q = _sym_solve(Dinv, A, c, b, solve)
267
+ # [4] Equation 8.29
268
+ u, v = _sym_solve(Dinv, A, rhatd -
269
+ (1 / x) * rhatxs, rhatp, solve)
270
+ if np.any(np.isnan(p)) or np.any(np.isnan(q)):
271
+ raise LinAlgError
272
+ solved = True
273
+ except (LinAlgError, ValueError, TypeError) as e:
274
+ # Usually this doesn't happen. If it does, it happens when
275
+ # there are redundant constraints or when approaching the
276
+ # solution. If so, change solver.
277
+ if cholesky:
278
+ cholesky = False
279
+ warn(
280
+ "Solving system with option 'cholesky':True "
281
+ "failed. It is normal for this to happen "
282
+ "occasionally, especially as the solution is "
283
+ "approached. However, if you see this frequently, "
284
+ "consider setting option 'cholesky' to False.",
285
+ OptimizeWarning, stacklevel=5)
286
+ elif sym_pos:
287
+ sym_pos = False
288
+ warn(
289
+ "Solving system with option 'sym_pos':True "
290
+ "failed. It is normal for this to happen "
291
+ "occasionally, especially as the solution is "
292
+ "approached. However, if you see this frequently, "
293
+ "consider setting option 'sym_pos' to False.",
294
+ OptimizeWarning, stacklevel=5)
295
+ elif not lstsq:
296
+ lstsq = True
297
+ warn(
298
+ "Solving system with option 'sym_pos':False "
299
+ "failed. This may happen occasionally, "
300
+ "especially as the solution is "
301
+ "approached. However, if you see this frequently, "
302
+ "your problem may be numerically challenging. "
303
+ "If you cannot improve the formulation, consider "
304
+ "setting 'lstsq' to True. Consider also setting "
305
+ "`presolve` to True, if it is not already.",
306
+ OptimizeWarning, stacklevel=5)
307
+ else:
308
+ raise e
309
+ solve = _get_solver(M, sparse, lstsq, sym_pos,
310
+ cholesky, permc_spec)
311
+ # [4] Results after 8.29
312
+ d_tau = ((rhatg + 1 / tau * rhattk - (-c.dot(u) + b.dot(v))) /
313
+ (1 / tau * kappa + (-c.dot(p) + b.dot(q))))
314
+ d_x = u + p * d_tau
315
+ d_y = v + q * d_tau
316
+
317
+ # [4] Relations between after 8.25 and 8.26
318
+ d_z = (1 / x) * (rhatxs - z * d_x)
319
+ d_kappa = 1 / tau * (rhattk - kappa * d_tau)
320
+
321
+ # [4] 8.12 and "Let alpha be the maximal possible step..." before 8.23
322
+ alpha = _get_step(x, d_x, z, d_z, tau, d_tau, kappa, d_kappa, 1)
323
+ if ip: # initial point - see [4] 4.4
324
+ gamma = 10
325
+ else: # predictor-corrector, [4] definition after 8.12
326
+ beta1 = 0.1 # [4] pg. 220 (Table 8.1)
327
+ gamma = (1 - alpha)**2 * min(beta1, (1 - alpha))
328
+ i += 1
329
+
330
+ return d_x, d_y, d_z, d_tau, d_kappa
331
+
332
+
333
+ def _sym_solve(Dinv, A, r1, r2, solve):
334
+ """
335
+ An implementation of [4] equation 8.31 and 8.32
336
+
337
+ References
338
+ ----------
339
+ .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
340
+ optimizer for linear programming: an implementation of the
341
+ homogeneous algorithm." High performance optimization. Springer US,
342
+ 2000. 197-232.
343
+
344
+ """
345
+ # [4] 8.31
346
+ r = r2 + A.dot(Dinv * r1)
347
+ v = solve(r)
348
+ # [4] 8.32
349
+ u = Dinv * (A.T.dot(v) - r1)
350
+ return u, v
351
+
352
+
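In compact form (read off from the code above, with :math:`D = z/x`, the inverse of ``Dinv``), ``_sym_solve`` performs block elimination on the system

.. math::

    A^T v - D u = r_1, \qquad A u = r_2,

solving the normal equations :math:`M v = r_2 + A D^{-1} r_1` with :math:`M = A D^{-1} A^T`, and then back-substituting :math:`u = D^{-1}(A^T v - r_1)`.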
353
+ def _get_step(x, d_x, z, d_z, tau, d_tau, kappa, d_kappa, alpha0):
354
+ """
355
+ An implementation of [4] equation 8.21
356
+
357
+ References
358
+ ----------
359
+ .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
360
+ optimizer for linear programming: an implementation of the
361
+ homogeneous algorithm." High performance optimization. Springer US,
362
+ 2000. 197-232.
363
+
364
+ """
365
+ # [4] 4.3 Equation 8.21, ignoring 8.20 requirement
366
+ # same step is taken in primal and dual spaces
367
+ # alpha0 is basically beta3 from [4] Table 8.1, but instead of beta3
368
+ # the value 1 is used in Mehrotra's corrector and initial point correction
369
+ i_x = d_x < 0
370
+ i_z = d_z < 0
371
+ alpha_x = alpha0 * np.min(x[i_x] / -d_x[i_x]) if np.any(i_x) else 1
372
+ alpha_tau = alpha0 * tau / -d_tau if d_tau < 0 else 1
373
+ alpha_z = alpha0 * np.min(z[i_z] / -d_z[i_z]) if np.any(i_z) else 1
374
+ alpha_kappa = alpha0 * kappa / -d_kappa if d_kappa < 0 else 1
375
+ alpha = np.min([1, alpha_x, alpha_tau, alpha_z, alpha_kappa])
376
+ return alpha
377
+
378
+
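A quick sanity check of the ratio test in ``_get_step`` (hypothetical numbers): below only ``d_x`` is negative, so the step is limited by ``x / -d_x = 0.5``, scaled by ``alpha0``.

import numpy as np

alpha = _get_step(x=np.array([1.0]), d_x=np.array([-2.0]),
                  z=np.array([1.0]), d_z=np.array([0.5]),
                  tau=1.0, d_tau=0.1, kappa=1.0, d_kappa=0.2,
                  alpha0=0.99995)
# alpha == 0.99995 * min(1.0, 0.5) == 0.499975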
379
+ def _get_message(status):
380
+ """
381
+ Given problem status code, return a more detailed message.
382
+
383
+ Parameters
384
+ ----------
385
+ status : int
386
+ An integer representing the exit status of the optimization::
387
+
388
+ 0 : Optimization terminated successfully
389
+ 1 : Iteration limit reached
390
+ 2 : Problem appears to be infeasible
391
+ 3 : Problem appears to be unbounded
392
+ 4 : Serious numerical difficulties encountered
393
+
394
+ Returns
395
+ -------
396
+ message : str
397
+ A string descriptor of the exit status of the optimization.
398
+
399
+ """
400
+ messages = (
401
+ ["Optimization terminated successfully.",
402
+ "The iteration limit was reached before the algorithm converged.",
403
+ "The algorithm terminated successfully and determined that the "
404
+ "problem is infeasible.",
405
+ "The algorithm terminated successfully and determined that the "
406
+ "problem is unbounded.",
407
+ "Numerical difficulties were encountered before the problem "
408
+ "converged. Please check your problem formulation for errors, "
409
+ "independence of linear equality constraints, and reasonable "
410
+ "scaling and matrix condition numbers. If you continue to "
411
+ "encounter this error, please submit a bug report."
412
+ ])
413
+ return messages[status]
414
+
415
+
416
+ def _do_step(x, y, z, tau, kappa, d_x, d_y, d_z, d_tau, d_kappa, alpha):
417
+ """
418
+ An implementation of [4] Equation 8.9
419
+
420
+ References
421
+ ----------
422
+ .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
423
+ optimizer for linear programming: an implementation of the
424
+ homogeneous algorithm." High performance optimization. Springer US,
425
+ 2000. 197-232.
426
+
427
+ """
428
+ x = x + alpha * d_x
429
+ tau = tau + alpha * d_tau
430
+ z = z + alpha * d_z
431
+ kappa = kappa + alpha * d_kappa
432
+ y = y + alpha * d_y
433
+ return x, y, z, tau, kappa
434
+
435
+
436
+ def _get_blind_start(shape):
437
+ """
438
+ Return the starting point from [4] 4.4
439
+
440
+ References
441
+ ----------
442
+ .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
443
+ optimizer for linear programming: an implementation of the
444
+ homogeneous algorithm." High performance optimization. Springer US,
445
+ 2000. 197-232.
446
+
447
+ """
448
+ m, n = shape
449
+ x0 = np.ones(n)
450
+ y0 = np.zeros(m)
451
+ z0 = np.ones(n)
452
+ tau0 = 1
453
+ kappa0 = 1
454
+ return x0, y0, z0, tau0, kappa0
455
+
456
+
457
+ def _indicators(A, b, c, c0, x, y, z, tau, kappa):
458
+ """
459
+ Implementation of several equations from [4] used as indicators of
460
+ the status of optimization.
461
+
462
+ References
463
+ ----------
464
+ .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
465
+ optimizer for linear programming: an implementation of the
466
+ homogeneous algorithm." High performance optimization. Springer US,
467
+ 2000. 197-232.
468
+
469
+ """
470
+
471
+ # residuals for termination are relative to initial values
472
+ x0, y0, z0, tau0, kappa0 = _get_blind_start(A.shape)
473
+
474
+ # See [4], Section 4 - The Homogeneous Algorithm, Equation 8.8
475
+ def r_p(x, tau):
476
+ return b * tau - A.dot(x)
477
+
478
+ def r_d(y, z, tau):
479
+ return c * tau - A.T.dot(y) - z
480
+
481
+ def r_g(x, y, kappa):
482
+ return kappa + c.dot(x) - b.dot(y)
483
+
484
+ # np.dot unpacks if they are arrays of size one
485
+ def mu(x, tau, z, kappa):
486
+ return (x.dot(z) + np.dot(tau, kappa)) / (len(x) + 1)
487
+
488
+ obj = c.dot(x / tau) + c0
489
+
490
+ def norm(a):
491
+ return np.linalg.norm(a)
492
+
493
+ # See [4], Section 4.5 - The Stopping Criteria
494
+ r_p0 = r_p(x0, tau0)
495
+ r_d0 = r_d(y0, z0, tau0)
496
+ r_g0 = r_g(x0, y0, kappa0)
497
+ mu_0 = mu(x0, tau0, z0, kappa0)
498
+ rho_A = norm(c.T.dot(x) - b.T.dot(y)) / (tau + norm(b.T.dot(y)))
499
+ rho_p = norm(r_p(x, tau)) / max(1, norm(r_p0))
500
+ rho_d = norm(r_d(y, z, tau)) / max(1, norm(r_d0))
501
+ rho_g = norm(r_g(x, y, kappa)) / max(1, norm(r_g0))
502
+ rho_mu = mu(x, tau, z, kappa) / mu_0
503
+ return rho_p, rho_d, rho_A, rho_g, rho_mu, obj
504
+
505
+
506
+ def _display_iter(rho_p, rho_d, rho_g, alpha, rho_mu, obj, header=False):
507
+ """
508
+ Print indicators of optimization status to the console.
509
+
510
+ Parameters
511
+ ----------
512
+ rho_p : float
513
+ The (normalized) primal feasibility, see [4] 4.5
514
+ rho_d : float
515
+ The (normalized) dual feasibility, see [4] 4.5
516
+ rho_g : float
517
+ The (normalized) duality gap, see [4] 4.5
518
+ alpha : float
519
+ The step size, see [4] 4.3
520
+ rho_mu : float
521
+ The (normalized) path parameter, see [4] 4.5
522
+ obj : float
523
+ The objective function value of the current iterate
524
+ header : bool
525
+ True if a header is to be printed
526
+
527
+ References
528
+ ----------
529
+ .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
530
+ optimizer for linear programming: an implementation of the
531
+ homogeneous algorithm." High performance optimization. Springer US,
532
+ 2000. 197-232.
533
+
534
+ """
535
+ if header:
536
+ print("Primal Feasibility ",
537
+ "Dual Feasibility ",
538
+ "Duality Gap ",
539
+ "Step ",
540
+ "Path Parameter ",
541
+ "Objective ")
542
+
543
+ # fixed-width fields keep the columns aligned with the header row
544
+ fmt = '{0:<20.13}{1:<20.13}{2:<20.13}{3:<17.13}{4:<20.13}{5:<20.13}'
545
+ print(fmt.format(
546
+ float(rho_p),
547
+ float(rho_d),
548
+ float(rho_g),
549
+ alpha if isinstance(alpha, str) else float(alpha),
550
+ float(rho_mu),
551
+ float(obj)))
552
+
553
+
554
+ def _ip_hsd(A, b, c, c0, alpha0, beta, maxiter, disp, tol, sparse, lstsq,
555
+ sym_pos, cholesky, pc, ip, permc_spec, callback, postsolve_args):
556
+ r"""
557
+ Solve a linear programming problem in standard form:
558
+
559
+ Minimize::
560
+
561
+ c @ x
562
+
563
+ Subject to::
564
+
565
+ A @ x == b
566
+ x >= 0
567
+
568
+ using the interior point method of [4].
569
+
570
+ Parameters
571
+ ----------
572
+ A : 2-D array
573
+ 2-D array such that ``A @ x``, gives the values of the equality
574
+ constraints at ``x``.
575
+ b : 1-D array
576
+ 1-D array of values representing the RHS of each equality constraint
577
+ (row) in ``A`` (for standard form problem).
578
+ c : 1-D array
579
+ Coefficients of the linear objective function to be minimized (for
580
+ standard form problem).
581
+ c0 : float
582
+ Constant term in objective function due to fixed (and eliminated)
583
+ variables. (Purely for display.)
584
+ alpha0 : float
585
+ The maximal step size for Mehrotra's predictor-corrector search
586
+ direction; see :math:`\beta_3` of [4] Table 8.1.
587
+ beta : float
588
+ The desired reduction of the path parameter :math:`\mu` (see [6]_)
589
+ maxiter : int
590
+ The maximum number of iterations of the algorithm.
591
+ disp : bool
592
+ Set to ``True`` if indicators of optimization status are to be printed
593
+ to the console each iteration.
594
+ tol : float
595
+ Termination tolerance; see [4]_ Section 4.5.
596
+ sparse : bool
597
+ Set to ``True`` if the problem is to be treated as sparse. However,
598
+ the inputs ``A_eq`` and ``A_ub`` should nonetheless be provided as
599
+ (dense) arrays rather than sparse matrices.
600
+ lstsq : bool
601
+ Set to ``True`` if the problem is expected to be very poorly
602
+ conditioned. This should always be left as ``False`` unless severe
603
+ numerical difficulties are frequently encountered, and a better option
604
+ would be to improve the formulation of the problem.
605
+ sym_pos : bool
606
+ Leave ``True`` if the problem is expected to yield a well conditioned
607
+ symmetric positive definite normal equation matrix (almost always).
608
+ cholesky : bool
609
+ Set to ``True`` if the normal equations are to be solved by explicit
610
+ Cholesky decomposition followed by explicit forward/backward
611
+ substitution. This is typically faster for moderate, dense problems
612
+ that are numerically well-behaved.
613
+ pc : bool
614
+ Leave ``True`` if the predictor-corrector method of Mehrotra is to be
615
+ used. This is almost always (if not always) beneficial.
616
+ ip : bool
617
+ Set to ``True`` if the improved initial point suggestion due to [4]_
618
+ Section 4.3 is desired. It's unclear whether this is beneficial.
619
+ permc_spec : str (default = 'MMD_AT_PLUS_A')
620
+ (Has effect only with ``sparse = True``, ``lstsq = False``, ``sym_pos =
621
+ True``.) A matrix is factorized in each iteration of the algorithm.
622
+ This option specifies how to permute the columns of the matrix for
623
+ sparsity preservation. Acceptable values are:
624
+
625
+ - ``NATURAL``: natural ordering.
626
+ - ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
627
+ - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
628
+ - ``COLAMD``: approximate minimum degree column ordering.
629
+
630
+ This option can impact the convergence of the
631
+ interior point algorithm; test different values to determine which
632
+ performs best for your problem. For more information, refer to
633
+ ``scipy.sparse.linalg.splu``.
634
+ callback : callable, optional
635
+ If a callback function is provided, it will be called within each
636
+ iteration of the algorithm. The callback function must accept a single
637
+ `scipy.optimize.OptimizeResult` consisting of the following fields:
638
+
639
+ x : 1-D array
640
+ Current solution vector
641
+ fun : float
642
+ Current value of the objective function
643
+ success : bool
644
+ True only when an algorithm has completed successfully,
645
+ so this is always False as the callback function is called
646
+ only while the algorithm is still iterating.
647
+ slack : 1-D array
648
+ The values of the slack variables. Each slack variable
649
+ corresponds to an inequality constraint. If the slack is zero,
650
+ the corresponding constraint is active.
651
+ con : 1-D array
652
+ The (nominally zero) residuals of the equality constraints,
653
+ that is, ``b - A_eq @ x``
654
+ phase : int
655
+ The phase of the algorithm being executed. This is always
656
+ 1 for the interior-point method because it has only one phase.
657
+ status : int
658
+ For revised simplex, this is always 0 because if a different
659
+ status is detected, the algorithm terminates.
660
+ nit : int
661
+ The number of iterations performed.
662
+ message : str
663
+ A string descriptor of the exit status of the optimization.
664
+ postsolve_args : tuple
665
+ Data needed by _postsolve to convert the solution to the standard-form
666
+ problem into the solution to the original problem.
667
+
668
+ Returns
669
+ -------
670
+ x_hat : float
671
+ Solution vector (for standard form problem).
672
+ status : int
673
+ An integer representing the exit status of the optimization::
674
+
675
+ 0 : Optimization terminated successfully
676
+ 1 : Iteration limit reached
677
+ 2 : Problem appears to be infeasible
678
+ 3 : Problem appears to be unbounded
679
+ 4 : Serious numerical difficulties encountered
680
+
681
+ message : str
682
+ A string descriptor of the exit status of the optimization.
683
+ iteration : int
684
+ The number of iterations taken to solve the problem
685
+
686
+ References
687
+ ----------
688
+ .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
689
+ optimizer for linear programming: an implementation of the
690
+ homogeneous algorithm." High performance optimization. Springer US,
691
+ 2000. 197-232.
692
+ .. [6] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear
693
+ Programming based on Newton's Method." Unpublished Course Notes,
694
+ March 2004. Available 2/25/2017 at:
695
+ https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf
696
+
697
+ """
698
+ 
+     iteration = 0
+ 
+     # default initial point
+     x, y, z, tau, kappa = _get_blind_start(A.shape)
+ 
+     # first iteration is special improvement of initial point
+     ip = ip if pc else False
+ 
+     # [4] 4.5
+     rho_p, rho_d, rho_A, rho_g, rho_mu, obj = _indicators(
+         A, b, c, c0, x, y, z, tau, kappa)
+     go = rho_p > tol or rho_d > tol or rho_A > tol  # we might get lucky : )
+ 
+     if disp:
+         _display_iter(rho_p, rho_d, rho_g, "-", rho_mu, obj, header=True)
+     if callback is not None:
+         x_o, fun, slack, con = _postsolve(x/tau, postsolve_args)
+         res = OptimizeResult({'x': x_o, 'fun': fun, 'slack': slack,
+                               'con': con, 'nit': iteration, 'phase': 1,
+                               'complete': False, 'status': 0,
+                               'message': "", 'success': False})
+         callback(res)
+ 
+     status = 0
+     message = "Optimization terminated successfully."
+ 
+     if sparse:
+         A = sps.csc_matrix(A)
+ 
+     while go:
+ 
+         iteration += 1
+ 
+         if ip:  # initial point
+             # [4] Section 4.4
+             gamma = 1
+ 
+             def eta(g):
+                 return 1
+         else:
+             # gamma = 0 in predictor step according to [4] 4.1
+             # if predictor/corrector is off, use mean of complementarity [6]
+             # 5.1 / [4] Below Figure 10-4
+             gamma = 0 if pc else beta * np.mean(z * x)
+             # [4] Section 4.1
+ 
+             def eta(g=gamma):
+                 return 1 - g
+ 
+         try:
+             # Solve [4] 8.6 and 8.7/8.13/8.23
+             d_x, d_y, d_z, d_tau, d_kappa = _get_delta(
+                 A, b, c, x, y, z, tau, kappa, gamma, eta,
+                 sparse, lstsq, sym_pos, cholesky, pc, ip, permc_spec)
+ 
+             if ip:  # initial point
+                 # [4] 4.4
+                 # Formula after 8.23 takes a full step regardless of whether
+                 # this will take it negative
+                 alpha = 1.0
+                 x, y, z, tau, kappa = _do_step(
+                     x, y, z, tau, kappa, d_x, d_y,
+                     d_z, d_tau, d_kappa, alpha)
+                 x[x < 1] = 1
+                 z[z < 1] = 1
+                 tau = max(1, tau)
+                 kappa = max(1, kappa)
+                 ip = False  # done with initial point
+             else:
+                 # [4] Section 4.3
+                 alpha = _get_step(x, d_x, z, d_z, tau,
+                                   d_tau, kappa, d_kappa, alpha0)
+                 # [4] Equation 8.9
+                 x, y, z, tau, kappa = _do_step(
+                     x, y, z, tau, kappa, d_x, d_y, d_z, d_tau, d_kappa, alpha)
+ 
+         except (LinAlgError, FloatingPointError,
+                 ValueError, ZeroDivisionError):
+             # this can happen when sparse solver is used and presolve
+             # is turned off. Also observed ValueError in AppVeyor Python 3.6
+             # Win32 build (PR #8676). I've never seen it otherwise.
+             status = 4
+             message = _get_message(status)
+             break
+ 
+         # [4] 4.5
+         rho_p, rho_d, rho_A, rho_g, rho_mu, obj = _indicators(
+             A, b, c, c0, x, y, z, tau, kappa)
+         go = rho_p > tol or rho_d > tol or rho_A > tol
+ 
+         if disp:
+             _display_iter(rho_p, rho_d, rho_g, alpha, rho_mu, obj)
+         if callback is not None:
+             x_o, fun, slack, con = _postsolve(x/tau, postsolve_args)
+             res = OptimizeResult({'x': x_o, 'fun': fun, 'slack': slack,
+                                   'con': con, 'nit': iteration, 'phase': 1,
+                                   'complete': False, 'status': 0,
+                                   'message': "", 'success': False})
+             callback(res)
+ 
+         # [4] 4.5
+         inf1 = (rho_p < tol and rho_d < tol and rho_g < tol and tau < tol *
+                 max(1, kappa))
+         inf2 = rho_mu < tol and tau < tol * min(1, kappa)
+         if inf1 or inf2:
+             # [4] Lemma 8.4 / Theorem 8.3
+             if b.transpose().dot(y) > tol:
+                 status = 2
+             else:  # elif c.T.dot(x) < tol: ? Probably not necessary.
+                 status = 3
+             message = _get_message(status)
+             break
+         elif iteration >= maxiter:
+             status = 1
+             message = _get_message(status)
+             break
+ 
+     x_hat = x / tau
+     # [4] Statement after Theorem 8.2
+     return x_hat, status, message, iteration
+ 
+ 
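+ # Illustrative sketch, not part of the SciPy source: _ip_hsd above starts
+ # from the "blind" default initial point of [4] Equation 8.22
+ # (x = e, y = 0, z = e, tau = kappa = 1), so every complementarity product
+ # x_i * z_i begins at 1. A minimal standalone version, assuming only the
+ # module-level numpy import, could look like this:
+ def _blind_start_sketch(shape):
+     m, n = shape               # m equality constraints, n variables
+     x = np.ones(n)             # primal variables
+     y = np.zeros(m)            # duals of the equality constraints
+     z = np.ones(n)             # dual slacks
+     tau, kappa = 1.0, 1.0      # homogenization variables
+     return x, y, z, tau, kappa
+ 
+ 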
+ def _linprog_ip(c, c0, A, b, callback, postsolve_args, maxiter=1000, tol=1e-8,
+                 disp=False, alpha0=.99995, beta=0.1, sparse=False, lstsq=False,
+                 sym_pos=True, cholesky=None, pc=True, ip=False,
+                 permc_spec='MMD_AT_PLUS_A', **unknown_options):
+     r"""
+     Minimize a linear objective function subject to linear
+     equality and non-negativity constraints using the interior point method
+     of [4]_. Linear programming is intended to solve problems
+     of the following form:
+ 
+     Minimize::
+ 
+         c @ x
+ 
+     Subject to::
+ 
+         A @ x == b
+         x >= 0
+ 
+     User-facing documentation is in _linprog_doc.py.
+ 
+     Parameters
+     ----------
+     c : 1-D array
+         Coefficients of the linear objective function to be minimized.
+     c0 : float
+         Constant term in objective function due to fixed (and eliminated)
+         variables. (Purely for display.)
+     A : 2-D array
+         2-D array such that ``A @ x`` gives the values of the equality
+         constraints at ``x``.
+     b : 1-D array
+         1-D array of values representing the right hand side of each equality
+         constraint (row) in ``A``.
+     callback : callable, optional
+         Callback function to be executed once per iteration.
+     postsolve_args : tuple
+         Data needed by _postsolve to convert the solution to the standard-form
+         problem into the solution to the original problem.
+ 
+     Options
+     -------
+     maxiter : int (default = 1000)
+         The maximum number of iterations of the algorithm.
+     tol : float (default = 1e-8)
+         Termination tolerance to be used for all termination criteria;
+         see [4]_ Section 4.5.
+     disp : bool (default = False)
+         Set to ``True`` if indicators of optimization status are to be printed
+         to the console each iteration.
+     alpha0 : float (default = 0.99995)
+         The maximal step size for Mehrotra's predictor-corrector search
+         direction; see :math:`\beta_{3}` of [4]_ Table 8.1.
+     beta : float (default = 0.1)
+         The desired reduction of the path parameter :math:`\mu` (see [6]_)
+         when Mehrotra's predictor-corrector is not in use (uncommon).
+     sparse : bool (default = False)
+         Set to ``True`` if the problem is to be treated as sparse after
+         presolve. If either ``A_eq`` or ``A_ub`` is a sparse matrix,
+         this option will automatically be set ``True``, and the problem
+         will be treated as sparse even during presolve. If your constraint
+         matrices contain mostly zeros and the problem is not very small (less
+         than about 100 constraints or variables), consider setting ``True``
+         or providing ``A_eq`` and ``A_ub`` as sparse matrices.
+     lstsq : bool (default = False)
+         Set to ``True`` if the problem is expected to be very poorly
+         conditioned. This should always be left ``False`` unless severe
+         numerical difficulties are encountered. Leave this at the default
+         unless you receive a warning message suggesting otherwise.
+     sym_pos : bool (default = True)
+         Leave ``True`` if the problem is expected to yield a well conditioned
+         symmetric positive definite normal equation matrix
+         (almost always). Leave this at the default unless you receive
+         a warning message suggesting otherwise.
+     cholesky : bool (default = True)
+         Set to ``True`` if the normal equations are to be solved by explicit
+         Cholesky decomposition followed by explicit forward/backward
+         substitution. This is typically faster for problems
+         that are numerically well-behaved.
+     pc : bool (default = True)
+         Leave ``True`` if the predictor-corrector method of Mehrotra is to be
+         used. This is almost always (if not always) beneficial.
+     ip : bool (default = False)
+         Set to ``True`` if the improved initial point suggestion due to [4]_
+         Section 4.3 is desired. Whether this is beneficial or not
+         depends on the problem.
+     permc_spec : str (default = 'MMD_AT_PLUS_A')
+         (Has effect only with ``sparse = True``, ``lstsq = False``, ``sym_pos =
+         True``, and no SuiteSparse.)
+         A matrix is factorized in each iteration of the algorithm.
+         This option specifies how to permute the columns of the matrix for
+         sparsity preservation. Acceptable values are:
+ 
+         - ``NATURAL``: natural ordering.
+         - ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
+         - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
+         - ``COLAMD``: approximate minimum degree column ordering.
+ 
+         This option can impact the convergence of the
+         interior point algorithm; test different values to determine which
+         performs best for your problem. For more information, refer to
+         ``scipy.sparse.linalg.splu``.
+     unknown_options : dict
+         Optional arguments not used by this particular solver. If
+         `unknown_options` is non-empty, a warning is issued listing all
+         unused options.
+ 
+     Returns
+     -------
+     x : 1-D array
+         Solution vector.
+     status : int
+         An integer representing the exit status of the optimization::
+ 
+             0 : Optimization terminated successfully
+             1 : Iteration limit reached
+             2 : Problem appears to be infeasible
+             3 : Problem appears to be unbounded
+             4 : Serious numerical difficulties encountered
+ 
+     message : str
+         A string descriptor of the exit status of the optimization.
+     iteration : int
+         The number of iterations taken to solve the problem.
+ 
+     Notes
+     -----
+     This method implements the algorithm outlined in [4]_ with ideas from [8]_
+     and a structure inspired by the simpler methods of [6]_.
+ 
+     The primal-dual path following method begins with initial 'guesses' of
+     the primal and dual variables of the standard form problem and iteratively
+     attempts to solve the (nonlinear) Karush-Kuhn-Tucker conditions for the
+     problem with a gradually reduced logarithmic barrier term added to the
+     objective. This particular implementation uses a homogeneous self-dual
+     formulation, which provides certificates of infeasibility or unboundedness
+     where applicable.
+ 
+     The default initial point for the primal and dual variables is that
+     defined in [4]_ Section 4.4 Equation 8.22. Optionally (by setting initial
+     point option ``ip=True``), an alternate (potentially improved) starting
+     point can be calculated according to the additional recommendations of
+     [4]_ Section 4.4.
+ 
+     A search direction is calculated using the predictor-corrector method
+     (single correction) proposed by Mehrotra and detailed in [4]_ Section 4.1.
+     (A potential improvement would be to implement the method of multiple
+     corrections described in [4]_ Section 4.2.) In practice, this is
+     accomplished by solving the normal equations, [4]_ Section 5.1 Equations
+     8.31 and 8.32, derived from the Newton equations [4]_ Section 5 Equations
+     8.25 (compare to [4]_ Section 4 Equations 8.6-8.8). The advantage of
+     solving the normal equations rather than 8.25 directly is that the
+     matrices involved are symmetric positive definite, so Cholesky
+     decomposition can be used rather than the more expensive LU factorization.
+ 
+     With default options, the solver used to perform the factorization depends
+     on third-party software availability and the conditioning of the problem.
+ 
+     For dense problems, solvers are tried in the following order:
+ 
+     1. ``scipy.linalg.cho_factor``
+ 
+     2. ``scipy.linalg.solve`` with option ``sym_pos=True``
+ 
+     3. ``scipy.linalg.solve`` with option ``sym_pos=False``
+ 
+     4. ``scipy.linalg.lstsq``
+ 
+     For sparse problems:
+ 
+     1. ``sksparse.cholmod.cholesky`` (if scikit-sparse and SuiteSparse are installed)
+ 
+     2. ``scipy.sparse.linalg.factorized``
+        (if scikit-umfpack and SuiteSparse are installed)
+ 
+     3. ``scipy.sparse.linalg.splu`` (which uses SuperLU distributed with SciPy)
+ 
+     4. ``scipy.sparse.linalg.lsqr``
+ 
+     If the solver fails for any reason, successively more robust (but slower)
+     solvers are attempted in the order indicated. Attempting, failing, and
+     re-starting factorization can be time consuming, so if the problem is
+     numerically challenging, options can be set to bypass solvers that are
+     failing. Setting ``cholesky=False`` skips to solver 2,
+     ``sym_pos=False`` skips to solver 3, and ``lstsq=True`` skips
+     to solver 4 for both sparse and dense problems.
+ 
+     Potential improvements for combating issues associated with dense
+     columns in otherwise sparse problems are outlined in [4]_ Section 5.3 and
+     [10]_ Section 4.1-4.2; the latter also discusses the alleviation of
+     accuracy issues associated with the substitution approach to free
+     variables.
+ 
+     After calculating the search direction, the maximum possible step size
+     that does not activate the non-negativity constraints is calculated, and
+     the smaller of this step size and unity is applied (as in [4]_ Section
+     4.1). [4]_ Section 4.3 suggests improvements for choosing the step size.
+ 
+     The new point is tested according to the termination conditions of [4]_
+     Section 4.5. The same tolerance, which can be set using the ``tol`` option,
+     is used for all checks. (A potential improvement would be to expose
+     the different tolerances to be set independently.) If optimality,
+     unboundedness, or infeasibility is detected, the solve procedure
+     terminates; otherwise it repeats.
+ 
+     The expected problem formulation differs between the top-level ``linprog``
+     module and the method-specific solvers. The method-specific solvers expect
+     a problem in standard form:
+ 
+     Minimize::
+ 
+         c @ x
+ 
+     Subject to::
+ 
+         A @ x == b
+         x >= 0
+ 
+     Whereas the top-level ``linprog`` module expects a problem of form:
+ 
+     Minimize::
+ 
+         c @ x
+ 
+     Subject to::
+ 
+         A_ub @ x <= b_ub
+         A_eq @ x == b_eq
+         lb <= x <= ub
+ 
+     where ``lb = 0`` and ``ub = None`` unless set in ``bounds``.
+ 
+     The original problem contains equality, upper-bound and variable
+     constraints whereas the method-specific solver requires equality
+     constraints and variable non-negativity.
+ 
+     The ``linprog`` module converts the original problem to standard form by
+     converting the simple bounds to upper bound constraints, introducing
+     non-negative slack variables for inequality constraints, and expressing
+     unbounded variables as the difference between two non-negative variables.
+ 
+     References
+     ----------
+     .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
+            optimizer for linear programming: an implementation of the
+            homogeneous algorithm." High performance optimization. Springer US,
+            2000. 197-232.
+     .. [6] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear
+            Programming based on Newton's Method." Unpublished Course Notes,
+            March 2004. Available 2/25/2017 at
+            https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf
+     .. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear
+            programming." Mathematical Programming 71.2 (1995): 221-245.
+     .. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear
+            programming." Athena Scientific 1 (1997): 997.
+     .. [10] Andersen, Erling D., et al. Implementation of interior point
+             methods for large scale linear programming. HEC/Universite de
+             Geneve, 1996.
+ 
+     """
+ 
+     _check_unknown_options(unknown_options)
+ 
+     # These should be warnings, not errors
+     if (cholesky or cholesky is None) and sparse and not has_cholmod:
+         if cholesky:
+             warn("Sparse cholesky is only available with scikit-sparse. "
+                  "Setting `cholesky = False`",
+                  OptimizeWarning, stacklevel=3)
+         cholesky = False
+ 
+     if sparse and lstsq:
+         warn("Option combination 'sparse':True and 'lstsq':True "
+              "is not recommended.",
+              OptimizeWarning, stacklevel=3)
+ 
+     if lstsq and cholesky:
+         warn("Invalid option combination 'lstsq':True "
+              "and 'cholesky':True; option 'cholesky' has no effect when "
+              "'lstsq' is set True.",
+              OptimizeWarning, stacklevel=3)
+ 
+     valid_permc_spec = ('NATURAL', 'MMD_ATA', 'MMD_AT_PLUS_A', 'COLAMD')
+     if permc_spec.upper() not in valid_permc_spec:
+         warn("Invalid permc_spec option: '" + str(permc_spec) + "'. "
+              "Acceptable values are 'NATURAL', 'MMD_ATA', 'MMD_AT_PLUS_A', "
+              "and 'COLAMD'. Reverting to default.",
+              OptimizeWarning, stacklevel=3)
+         permc_spec = 'MMD_AT_PLUS_A'
+ 
+     # This can be an error
+     if not sym_pos and cholesky:
+         raise ValueError(
+             "Invalid option combination 'sym_pos':False "
+             "and 'cholesky':True: Cholesky decomposition is only possible "
+             "for symmetric positive definite matrices.")
+ 
+     cholesky = cholesky or (cholesky is None and sym_pos and not lstsq)
+ 
+     x, status, message, iteration = _ip_hsd(A, b, c, c0, alpha0, beta,
+                                             maxiter, disp, tol, sparse,
+                                             lstsq, sym_pos, cholesky,
+                                             pc, ip, permc_spec, callback,
+                                             postsolve_args)
+ 
+     return x, status, message, iteration
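+ 
+ 
+ # Usage sketch (illustrative, with made-up data): this private solver is
+ # normally reached through the public ``scipy.optimize.linprog`` front end
+ # rather than called directly:
+ #
+ #     from scipy.optimize import linprog
+ #     # minimize -x0 + 4*x1  s.t.  -3*x0 + x1 <= 6,  x0 + 2*x1 <= 4,  x >= 0
+ #     res = linprog(c=[-1, 4], A_ub=[[-3, 1], [1, 2]], b_ub=[6, 4],
+ #                   method='interior-point', options={'tol': 1e-8})
+ #     print(res.status, res.x)  # expect status 0 and x near [4, 0]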
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_linprog_util.py ADDED
@@ -0,0 +1,1522 @@
+ """
+ Method-agnostic utility functions for linear programming
+ """
+ 
+ import numpy as np
+ import scipy.sparse as sps
+ from warnings import warn
+ from ._optimize import OptimizeWarning
+ from scipy.optimize._remove_redundancy import (
+     _remove_redundancy_svd, _remove_redundancy_pivot_sparse,
+     _remove_redundancy_pivot_dense, _remove_redundancy_id
+ )
+ from collections import namedtuple
+ 
+ _LPProblem = namedtuple('_LPProblem',
+                         'c A_ub b_ub A_eq b_eq bounds x0 integrality')
+ _LPProblem.__new__.__defaults__ = (None,) * 7  # make c the only required arg
+ _LPProblem.__doc__ = \
+     """ Represents a linear-programming problem.
+ 
+     Attributes
+     ----------
+     c : 1D array
+         The coefficients of the linear objective function to be minimized.
+     A_ub : 2D array, optional
+         The inequality constraint matrix. Each row of ``A_ub`` specifies the
+         coefficients of a linear inequality constraint on ``x``.
+     b_ub : 1D array, optional
+         The inequality constraint vector. Each element represents an
+         upper bound on the corresponding value of ``A_ub @ x``.
+     A_eq : 2D array, optional
+         The equality constraint matrix. Each row of ``A_eq`` specifies the
+         coefficients of a linear equality constraint on ``x``.
+     b_eq : 1D array, optional
+         The equality constraint vector. Each element of ``A_eq @ x`` must equal
+         the corresponding element of ``b_eq``.
+     bounds : various valid formats, optional
+         The bounds of ``x``, as ``min`` and ``max`` pairs.
+         If bounds are specified for all N variables separately, valid formats
+         are:
+         * a 2D array (N x 2);
+         * a sequence of N sequences, each with 2 values.
+         If all variables have the same bounds, the bounds can be specified as
+         a 1-D or 2-D array or sequence with 2 scalar values.
+         If all variables have a lower bound of 0 and no upper bound, the bounds
+         parameter can be omitted (or given as None).
+         Absent lower and/or upper bounds can be specified as -numpy.inf (no
+         lower bound), numpy.inf (no upper bound) or None (both).
+     x0 : 1D array, optional
+         Guess values of the decision variables, which will be refined by
+         the optimization algorithm. This argument is currently used only by the
+         'revised simplex' method, and can only be used if `x0` represents a
+         basic feasible solution.
+     integrality : 1-D array or int, optional
+         Indicates the type of integrality constraint on each decision variable.
+ 
+         ``0`` : Continuous variable; no integrality constraint.
+ 
+         ``1`` : Integer variable; decision variable must be an integer
+         within `bounds`.
+ 
+         ``2`` : Semi-continuous variable; decision variable must be within
+         `bounds` or take value ``0``.
+ 
+         ``3`` : Semi-integer variable; decision variable must be an integer
+         within `bounds` or take value ``0``.
+ 
+         By default, all variables are continuous.
+ 
+         For mixed integrality constraints, supply an array of shape `c.shape`.
+         To infer a constraint on each decision variable from shorter inputs,
+         the argument will be broadcast to `c.shape` using `np.broadcast_to`.
+ 
+         This argument is currently used only by the ``'highs'`` method and
+         ignored otherwise.
+ 
+     Notes
+     -----
+     This namedtuple supports 2 ways of initialization:
+     >>> lp1 = _LPProblem(c=[-1, 4], A_ub=[[-3, 1], [1, 2]], b_ub=[6, 4])
+     >>> lp2 = _LPProblem([-1, 4], [[-3, 1], [1, 2]], [6, 4])
+ 
+     Note that only ``c`` is a required argument here, whereas all other
+     arguments ``A_ub``, ``b_ub``, ``A_eq``, ``b_eq``, ``bounds``, ``x0`` are
+     optional with default values of None.
+     For example, ``A_eq`` and ``b_eq`` can be set without ``A_ub`` or ``b_ub``:
+     >>> lp3 = _LPProblem(c=[-1, 4], A_eq=[[2, 1]], b_eq=[10])
+     """
+ 
+ 
+ def _check_sparse_inputs(options, meth, A_ub, A_eq):
+     """
+     Check that the provided ``A_ub`` and ``A_eq`` matrices conform to the
+     specified optional sparsity variables.
+ 
+     Parameters
+     ----------
+     A_ub : 2-D array, optional
+         2-D array such that ``A_ub @ x`` gives the values of the upper-bound
+         inequality constraints at ``x``.
+     A_eq : 2-D array, optional
+         2-D array such that ``A_eq @ x`` gives the values of the equality
+         constraints at ``x``.
+     options : dict
+         A dictionary of solver options. All methods accept the following
+         generic options:
+ 
+             maxiter : int
+                 Maximum number of iterations to perform.
+             disp : bool
+                 Set to True to print convergence messages.
+ 
+         For method-specific options, see :func:`show_options('linprog')`.
+     meth : str, optional
+         The algorithm used to solve the standard form problem.
+ 
+     Returns
+     -------
+     A_ub : 2-D array, optional
+         2-D array such that ``A_ub @ x`` gives the values of the upper-bound
+         inequality constraints at ``x``.
+     A_eq : 2-D array, optional
+         2-D array such that ``A_eq @ x`` gives the values of the equality
+         constraints at ``x``.
+     options : dict
+         A dictionary of solver options. All methods accept the following
+         generic options:
+ 
+             maxiter : int
+                 Maximum number of iterations to perform.
+             disp : bool
+                 Set to True to print convergence messages.
+ 
+         For method-specific options, see :func:`show_options('linprog')`.
+     """
+     # This is an undocumented option for unit testing sparse presolve
+     _sparse_presolve = options.pop('_sparse_presolve', False)
+     if _sparse_presolve and A_eq is not None:
+         A_eq = sps.coo_matrix(A_eq)
+     if _sparse_presolve and A_ub is not None:
+         A_ub = sps.coo_matrix(A_ub)
+ 
+     sparse_constraint = sps.issparse(A_eq) or sps.issparse(A_ub)
+ 
+     preferred_methods = {"highs", "highs-ds", "highs-ipm"}
+     dense_methods = {"simplex", "revised simplex"}
+     if meth in dense_methods and sparse_constraint:
+         raise ValueError(f"Method '{meth}' does not support sparse "
+                          "constraint matrices. Please consider using one of "
+                          f"{preferred_methods}.")
+ 
+     sparse = options.get('sparse', False)
+     if not sparse and sparse_constraint and meth == 'interior-point':
+         options['sparse'] = True
+         warn("Sparse constraint matrix detected; setting 'sparse':True.",
+              OptimizeWarning, stacklevel=4)
+     return options, A_ub, A_eq
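+ 
+ # Behavior sketch (hypothetical inputs): a sparse constraint matrix passed to
+ # a dense-only method raises, while 'interior-point' flips its own 'sparse'
+ # option (with an OptimizeWarning) and keeps going:
+ #
+ #     opts, A_ub, A_eq = _check_sparse_inputs({}, 'interior-point',
+ #                                             None, sps.eye(2))
+ #     assert opts['sparse']  # set automatically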
+ 
+ 
+ def _format_A_constraints(A, n_x, sparse_lhs=False):
+     """Format the left-hand side of the constraints to a 2-D array
+ 
+     Parameters
+     ----------
+     A : 2-D array
+         2-D array such that ``A @ x`` gives the values of the upper-bound
+         (in)equality constraints at ``x``.
+     n_x : int
+         The number of variables in the linear programming problem.
+     sparse_lhs : bool
+         Whether either of `A_ub` or `A_eq` are sparse. If True, return a
+         coo_matrix instead of a numpy array.
+ 
+     Returns
+     -------
+     np.ndarray or sparse.coo_matrix
+         2-D array such that ``A @ x`` gives the values of the upper-bound
+         (in)equality constraints at ``x``.
+ 
+     """
+     if sparse_lhs:
+         return sps.coo_matrix(
+             (0, n_x) if A is None else A, dtype=float, copy=True
+         )
+     elif A is None:
+         return np.zeros((0, n_x), dtype=float)
+     else:
+         return np.array(A, dtype=float, copy=True)
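+ 
+ # Usage sketch (made-up values): absent constraints become an empty 0 x n
+ # array, so downstream code can treat "no constraints" uniformly:
+ #
+ #     _format_A_constraints(None, 3).shape          # -> (0, 3)
+ #     _format_A_constraints([[1, 2, 3]], 3).shape   # -> (1, 3)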
+ 
+ 
+ def _format_b_constraints(b):
+     """Format the upper bounds of the constraints to a 1-D array
+ 
+     Parameters
+     ----------
+     b : 1-D array
+         1-D array of values representing the upper-bound of each (in)equality
+         constraint (row) in ``A``.
+ 
+     Returns
+     -------
+     1-D np.array
+         1-D array of values representing the upper-bound of each (in)equality
+         constraint (row) in ``A``.
+ 
+     """
+     if b is None:
+         return np.array([], dtype=float)
+     b = np.array(b, dtype=float, copy=True).squeeze()
+     return b if b.size != 1 else b.reshape(-1)
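+ 
+ # Usage sketch (made-up values) for the helper above:
+ #
+ #     _format_b_constraints(None)        # -> array([], dtype=float64)
+ #     _format_b_constraints(5)           # -> array([5.]); scalars become 1-D
+ #     _format_b_constraints([[1], [2]])  # -> array([1., 2.]); squeezed to 1-D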
+ 
+ 
+ def _clean_inputs(lp):
+     """
+     Given user inputs for a linear programming problem, return the
+     objective vector, upper bound constraints, equality constraints,
+     and simple bounds in a preferred format.
+ 
+     Parameters
+     ----------
+     lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:
+ 
+         c : 1D array
+             The coefficients of the linear objective function to be minimized.
+         A_ub : 2D array, optional
+             The inequality constraint matrix. Each row of ``A_ub`` specifies
+             the coefficients of a linear inequality constraint on ``x``.
+         b_ub : 1D array, optional
+             The inequality constraint vector. Each element represents an
+             upper bound on the corresponding value of ``A_ub @ x``.
+         A_eq : 2D array, optional
+             The equality constraint matrix. Each row of ``A_eq`` specifies
+             the coefficients of a linear equality constraint on ``x``.
+         b_eq : 1D array, optional
+             The equality constraint vector. Each element of ``A_eq @ x`` must
+             equal the corresponding element of ``b_eq``.
+         bounds : various valid formats, optional
+             The bounds of ``x``, as ``min`` and ``max`` pairs.
+             If bounds are specified for all N variables separately, valid
+             formats are:
+             * a 2D array (N x 2);
+             * a sequence of N sequences, each with 2 values.
+             If all variables have the same bounds, a single pair of values can
+             be specified. Valid formats are:
+             * a sequence with 2 scalar values;
+             * a sequence with a single element containing 2 scalar values.
+             If all variables have a lower bound of 0 and no upper bound, the
+             bounds parameter can be omitted (or given as None).
+         x0 : 1D array, optional
+             Guess values of the decision variables, which will be refined by
+             the optimization algorithm. This argument is currently used only
+             by the 'revised simplex' method, and can only be used if `x0`
+             represents a basic feasible solution.
+ 
+     Returns
+     -------
+     lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:
+ 
+         c : 1D array
+             The coefficients of the linear objective function to be minimized.
+         A_ub : 2D array, optional
+             The inequality constraint matrix. Each row of ``A_ub`` specifies
+             the coefficients of a linear inequality constraint on ``x``.
+         b_ub : 1D array, optional
+             The inequality constraint vector. Each element represents an
+             upper bound on the corresponding value of ``A_ub @ x``.
+         A_eq : 2D array, optional
+             The equality constraint matrix. Each row of ``A_eq`` specifies
+             the coefficients of a linear equality constraint on ``x``.
+         b_eq : 1D array, optional
+             The equality constraint vector. Each element of ``A_eq @ x`` must
+             equal the corresponding element of ``b_eq``.
+         bounds : 2D array
+             The bounds of ``x``, as ``min`` and ``max`` pairs, one for each of
+             the N elements of ``x``. The N x 2 array contains lower bounds in
+             the first column and upper bounds in the 2nd. Unbounded variables
+             have lower bound -np.inf and/or upper bound np.inf.
+         x0 : 1D array, optional
+             Guess values of the decision variables, which will be refined by
+             the optimization algorithm. This argument is currently used only
+             by the 'revised simplex' method, and can only be used if `x0`
+             represents a basic feasible solution.
+ 
+     """
+     c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality = lp
+ 
+     if c is None:
+         raise TypeError
+ 
+     try:
+         c = np.array(c, dtype=np.float64, copy=True).squeeze()
+     except ValueError as e:
+         raise TypeError(
+             "Invalid input for linprog: c must be a 1-D array of numerical "
+             "coefficients") from e
+     else:
+         # If c is a single value, convert it to a 1-D array.
+         if c.size == 1:
+             c = c.reshape(-1)
+ 
+         n_x = len(c)
+         if n_x == 0 or len(c.shape) != 1:
+             raise ValueError(
+                 "Invalid input for linprog: c must be a 1-D array and must "
+                 "not have more than one non-singleton dimension")
+         if not np.isfinite(c).all():
+             raise ValueError(
+                 "Invalid input for linprog: c must not contain values "
+                 "inf, nan, or None")
+ 
+     sparse_lhs = sps.issparse(A_eq) or sps.issparse(A_ub)
+     try:
+         A_ub = _format_A_constraints(A_ub, n_x, sparse_lhs=sparse_lhs)
+     except ValueError as e:
+         raise TypeError(
+             "Invalid input for linprog: A_ub must be a 2-D array "
+             "of numerical values") from e
+     else:
+         n_ub = A_ub.shape[0]
+         if len(A_ub.shape) != 2 or A_ub.shape[1] != n_x:
+             raise ValueError(
+                 "Invalid input for linprog: A_ub must have exactly two "
+                 "dimensions, and the number of columns in A_ub must be "
+                 "equal to the size of c")
+         if (sps.issparse(A_ub) and not np.isfinite(A_ub.data).all()
+                 or not sps.issparse(A_ub) and not np.isfinite(A_ub).all()):
+             raise ValueError(
+                 "Invalid input for linprog: A_ub must not contain values "
+                 "inf, nan, or None")
+ 
+     try:
+         b_ub = _format_b_constraints(b_ub)
+     except ValueError as e:
+         raise TypeError(
+             "Invalid input for linprog: b_ub must be a 1-D array of "
+             "numerical values, each representing the upper bound of an "
+             "inequality constraint (row) in A_ub") from e
+     else:
+         if b_ub.shape != (n_ub,):
+             raise ValueError(
+                 "Invalid input for linprog: b_ub must be a 1-D array; b_ub "
+                 "must not have more than one non-singleton dimension and "
+                 "the number of rows in A_ub must equal the number of values "
+                 "in b_ub")
+         if not np.isfinite(b_ub).all():
+             raise ValueError(
+                 "Invalid input for linprog: b_ub must not contain values "
+                 "inf, nan, or None")
+ 
+     try:
+         A_eq = _format_A_constraints(A_eq, n_x, sparse_lhs=sparse_lhs)
+     except ValueError as e:
+         raise TypeError(
+             "Invalid input for linprog: A_eq must be a 2-D array "
+             "of numerical values") from e
+     else:
+         n_eq = A_eq.shape[0]
+         if len(A_eq.shape) != 2 or A_eq.shape[1] != n_x:
+             raise ValueError(
+                 "Invalid input for linprog: A_eq must have exactly two "
+                 "dimensions, and the number of columns in A_eq must be "
+                 "equal to the size of c")
+ 
+         if (sps.issparse(A_eq) and not np.isfinite(A_eq.data).all()
+                 or not sps.issparse(A_eq) and not np.isfinite(A_eq).all()):
+             raise ValueError(
+                 "Invalid input for linprog: A_eq must not contain values "
+                 "inf, nan, or None")
+ 
+     try:
+         b_eq = _format_b_constraints(b_eq)
+     except ValueError as e:
+         raise TypeError(
+             "Invalid input for linprog: b_eq must be a dense, 1-D array of "
+             "numerical values, each representing the right hand side of an "
+             "equality constraint (row) in A_eq") from e
+     else:
+         if b_eq.shape != (n_eq,):
+             raise ValueError(
+                 "Invalid input for linprog: b_eq must be a 1-D array; b_eq "
+                 "must not have more than one non-singleton dimension and "
+                 "the number of rows in A_eq must equal the number of values "
+                 "in b_eq")
+         if not np.isfinite(b_eq).all():
+             raise ValueError(
+                 "Invalid input for linprog: b_eq must not contain values "
+                 "inf, nan, or None")
+ 
+     # x0 gives an (optional) starting solution to the solver. If x0 is None,
+     # skip the checks. An initial solution will be generated automatically.
+     if x0 is not None:
+         try:
+             x0 = np.array(x0, dtype=float, copy=True).squeeze()
+         except ValueError as e:
+             raise TypeError(
+                 "Invalid input for linprog: x0 must be a 1-D array of "
+                 "numerical coefficients") from e
+         if x0.ndim == 0:
+             x0 = x0.reshape(-1)
+         if len(x0) == 0 or x0.ndim != 1:
+             raise ValueError(
+                 "Invalid input for linprog: x0 should be a 1-D array; it "
+                 "must not have more than one non-singleton dimension")
+         if not x0.size == c.size:
+             raise ValueError(
+                 "Invalid input for linprog: x0 and c should contain the "
+                 "same number of elements")
+         if not np.isfinite(x0).all():
+             raise ValueError(
+                 "Invalid input for linprog: x0 must not contain values "
+                 "inf, nan, or None")
+ 
+     # Bounds can be one of these formats:
+     # (1) a 2-D array or sequence, with shape N x 2
+     # (2) a 1-D or 2-D sequence or array with 2 scalars
+     # (3) None (or an empty sequence or array)
+     # Unspecified bounds can be represented by None or (-)np.inf.
+     # All formats are converted into a N x 2 np.array with (-)np.inf where
+     # bounds are unspecified.
+ 
+     # Prepare clean bounds array
+     bounds_clean = np.zeros((n_x, 2), dtype=float)
+ 
+     # Convert to a numpy array.
+     # np.array(..., dtype=float) raises an error if dimensions are
+     # inconsistent or if there are invalid data types in bounds. Just add a
+     # linprog prefix to the error and re-raise.
+     # Creating at least a 2-D array simplifies the cases to distinguish below.
+     if bounds is None or np.array_equal(bounds, []) or np.array_equal(bounds, [[]]):
+         bounds = (0, np.inf)
+     try:
+         bounds_conv = np.atleast_2d(np.array(bounds, dtype=float))
+     except ValueError as e:
+         raise ValueError(
+             "Invalid input for linprog: unable to interpret bounds, "
+             "check values and dimensions: " + e.args[0]) from e
+     except TypeError as e:
+         raise TypeError(
+             "Invalid input for linprog: unable to interpret bounds, "
+             "check values and dimensions: " + e.args[0]) from e
+ 
+     # Check bounds options
+     bsh = bounds_conv.shape
+     if len(bsh) > 2:
+         # Do not try to handle multidimensional bounds input
+         raise ValueError(
+             "Invalid input for linprog: provide a 2-D array for bounds, "
+             f"not a {len(bsh):d}-D array.")
+     elif np.all(bsh == (n_x, 2)):
+         # Regular N x 2 array
+         bounds_clean = bounds_conv
+     elif (np.all(bsh == (2, 1)) or np.all(bsh == (1, 2))):
+         # 2 values: interpret as overall lower and upper bound
+         bounds_flat = bounds_conv.flatten()
+         bounds_clean[:, 0] = bounds_flat[0]
+         bounds_clean[:, 1] = bounds_flat[1]
+     elif np.all(bsh == (2, n_x)):
+         # Reject a 2 x N array
+         raise ValueError(
+             f"Invalid input for linprog: provide a {n_x:d} x 2 array for bounds, "
+             f"not a 2 x {n_x:d} array.")
+     else:
+         raise ValueError(
+             "Invalid input for linprog: unable to interpret bounds with this "
+             f"dimension tuple: {bsh}.")
+ 
+     # The process above creates nan-s where the input specified None.
+     # Convert the nan-s in the 1st column to -np.inf and in the 2nd column
+     # to np.inf.
+     i_none = np.isnan(bounds_clean[:, 0])
+     bounds_clean[i_none, 0] = -np.inf
+     i_none = np.isnan(bounds_clean[:, 1])
+     bounds_clean[i_none, 1] = np.inf
+ 
+     return _LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds_clean, x0, integrality)
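+ 
+ # Bounds-normalization sketch (hypothetical data): a single (min, max) pair
+ # is expanded by _clean_inputs to an N x 2 array, with None mapped to
+ # -inf/inf as described above:
+ #
+ #     lp = _LPProblem(c=[1.0, 1.0], bounds=(0, None))
+ #     _clean_inputs(lp).bounds
+ #     # -> array([[ 0., inf],
+ #     #           [ 0., inf]])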
+
476
+
477
+ def _presolve(lp, rr, rr_method, tol=1e-9):
478
+ """
479
+ Given inputs for a linear programming problem in preferred format,
480
+ presolve the problem: identify trivial infeasibilities, redundancies,
481
+ and unboundedness, tighten bounds where possible, and eliminate fixed
482
+ variables.
483
+
484
+ Parameters
485
+ ----------
486
+ lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:
487
+
488
+ c : 1D array
489
+ The coefficients of the linear objective function to be minimized.
490
+ A_ub : 2D array, optional
491
+ The inequality constraint matrix. Each row of ``A_ub`` specifies the
492
+ coefficients of a linear inequality constraint on ``x``.
493
+ b_ub : 1D array, optional
494
+ The inequality constraint vector. Each element represents an
495
+ upper bound on the corresponding value of ``A_ub @ x``.
496
+ A_eq : 2D array, optional
497
+ The equality constraint matrix. Each row of ``A_eq`` specifies the
498
+ coefficients of a linear equality constraint on ``x``.
499
+ b_eq : 1D array, optional
500
+ The equality constraint vector. Each element of ``A_eq @ x`` must equal
501
+ the corresponding element of ``b_eq``.
502
+ bounds : 2D array
503
+ The bounds of ``x``, as ``min`` and ``max`` pairs, one for each of the N
504
+ elements of ``x``. The N x 2 array contains lower bounds in the first
505
+ column and upper bounds in the 2nd. Unbounded variables have lower
506
+ bound -np.inf and/or upper bound np.inf.
507
+ x0 : 1D array, optional
508
+ Guess values of the decision variables, which will be refined by
509
+ the optimization algorithm. This argument is currently used only by the
510
+ 'revised simplex' method, and can only be used if `x0` represents a
511
+ basic feasible solution.
512
+
513
+ rr : bool
514
+ If ``True`` attempts to eliminate any redundant rows in ``A_eq``.
515
+ Set False if ``A_eq`` is known to be of full row rank, or if you are
516
+ looking for a potential speedup (at the expense of reliability).
517
+ rr_method : string
518
+ Method used to identify and remove redundant rows from the
519
+ equality constraint matrix after presolve.
520
+ tol : float
521
+ The tolerance which determines when a solution is "close enough" to
522
+ zero in Phase 1 to be considered a basic feasible solution or close
523
+ enough to positive to serve as an optimal solution.
524
+
525
+ Returns
526
+ -------
527
+ lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:
528
+
529
+ c : 1D array
530
+ The coefficients of the linear objective function to be minimized.
531
+ A_ub : 2D array, optional
532
+ The inequality constraint matrix. Each row of ``A_ub`` specifies the
533
+ coefficients of a linear inequality constraint on ``x``.
534
+ b_ub : 1D array, optional
535
+ The inequality constraint vector. Each element represents an
536
+ upper bound on the corresponding value of ``A_ub @ x``.
537
+ A_eq : 2D array, optional
538
+ The equality constraint matrix. Each row of ``A_eq`` specifies the
539
+ coefficients of a linear equality constraint on ``x``.
540
+ b_eq : 1D array, optional
541
+ The equality constraint vector. Each element of ``A_eq @ x`` must equal
542
+ the corresponding element of ``b_eq``.
543
+ bounds : 2D array
544
+ The bounds of ``x``, as ``min`` and ``max`` pairs, possibly tightened.
545
+ x0 : 1D array, optional
546
+ Guess values of the decision variables, which will be refined by
547
+ the optimization algorithm. This argument is currently used only by the
548
+ 'revised simplex' method, and can only be used if `x0` represents a
549
+ basic feasible solution.
550
+
551
+ c0 : 1D array
552
+ Constant term in objective function due to fixed (and eliminated)
553
+ variables.
554
+ x : 1D array
555
+ Solution vector (when the solution is trivial and can be determined
556
+ in presolve)
557
+ revstack: list of functions
558
+ the functions in the list reverse the operations of _presolve()
559
+ the function signature is x_org = f(x_mod), where x_mod is the result
560
+ of a presolve step and x_org the value at the start of the step
561
+ (currently, the revstack contains only one function)
562
+ complete: bool
563
+ Whether the solution is complete (solved or determined to be infeasible
564
+ or unbounded in presolve)
565
+ status : int
566
+ An integer representing the exit status of the optimization::
567
+
568
+ 0 : Optimization terminated successfully
569
+ 1 : Iteration limit reached
570
+ 2 : Problem appears to be infeasible
571
+ 3 : Problem appears to be unbounded
572
+ 4 : Serious numerical difficulties encountered
573
+
574
+ message : str
575
+ A string descriptor of the exit status of the optimization.
576
+
577
+ References
578
+ ----------
579
+ .. [5] Andersen, Erling D. "Finding all linearly dependent rows in
580
+ large-scale linear programming." Optimization Methods and Software
581
+ 6.3 (1995): 219-227.
582
+ .. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear
583
+ programming." Mathematical Programming 71.2 (1995): 221-245.
584
+
585
+ """
586
+ # ideas from Reference [5] by Andersen and Andersen
587
+ # however, unlike the reference, this is performed before converting
588
+ # problem to standard form
589
+ # There are a few advantages:
590
+ # * artificial variables have not been added, so matrices are smaller
591
+ # * bounds have not been converted to constraints yet. (It is better to
592
+ # do that after presolve because presolve may adjust the simple bounds.)
593
+ # There are many improvements that can be made, namely:
594
+ # * implement remaining checks from [5]
595
+ # * loop presolve until no additional changes are made
596
+ # * implement additional efficiency improvements in redundancy removal [2]
597
+
598
+ c, A_ub, b_ub, A_eq, b_eq, bounds, x0, _ = lp
599
+
600
+ revstack = [] # record of variables eliminated from problem
601
+ # constant term in cost function may be added if variables are eliminated
602
+ c0 = 0
603
+ complete = False # complete is True if detected infeasible/unbounded
604
+ x = np.zeros(c.shape) # this is solution vector if completed in presolve
605
+
606
+ status = 0 # all OK unless determined otherwise
607
+ message = ""
608
+
609
+ # Lower and upper bounds. Copy to prevent feedback.
610
+ lb = bounds[:, 0].copy()
611
+ ub = bounds[:, 1].copy()
612
+
613
+ m_eq, n = A_eq.shape
614
+ m_ub, n = A_ub.shape
615
+
616
+ if (rr_method is not None
617
+ and rr_method.lower() not in {"svd", "pivot", "id"}):
618
+ message = ("'" + str(rr_method) + "' is not a valid option "
619
+ "for redundancy removal. Valid options are 'SVD', "
620
+ "'pivot', and 'ID'.")
621
+ raise ValueError(message)
622
+
623
+ if sps.issparse(A_eq):
624
+ A_eq = A_eq.tocsr()
625
+ A_ub = A_ub.tocsr()
626
+
627
+ def where(A):
628
+ return A.nonzero()
629
+
630
+ vstack = sps.vstack
631
+ else:
632
+ where = np.where
633
+ vstack = np.vstack
634
+
635
+ # upper bounds > lower bounds
636
+ if np.any(ub < lb) or np.any(lb == np.inf) or np.any(ub == -np.inf):
637
+ status = 2
638
+ message = ("The problem is (trivially) infeasible since one "
639
+ "or more upper bounds are smaller than the corresponding "
640
+ "lower bounds, a lower bound is np.inf or an upper bound "
641
+ "is -np.inf.")
642
+ complete = True
643
+ return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
644
+ c0, x, revstack, complete, status, message)
645
+
646
+ # zero row in equality constraints
647
+ zero_row = np.array(np.sum(A_eq != 0, axis=1) == 0).flatten()
648
+ if np.any(zero_row):
649
+ if np.any(
650
+ np.logical_and(
651
+ zero_row,
652
+ np.abs(b_eq) > tol)): # test_zero_row_1
653
+ # infeasible if RHS is not zero
654
+ status = 2
655
+ message = ("The problem is (trivially) infeasible due to a row "
656
+ "of zeros in the equality constraint matrix with a "
657
+ "nonzero corresponding constraint value.")
658
+ complete = True
659
+ return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
660
+ c0, x, revstack, complete, status, message)
661
+ else: # test_zero_row_2
662
+ # if RHS is zero, we can eliminate this equation entirely
663
+ A_eq = A_eq[np.logical_not(zero_row), :]
664
+ b_eq = b_eq[np.logical_not(zero_row)]
665
+
666
+ # zero row in inequality constraints
667
+ zero_row = np.array(np.sum(A_ub != 0, axis=1) == 0).flatten()
668
+ if np.any(zero_row):
669
+ if np.any(np.logical_and(zero_row, b_ub < -tol)): # test_zero_row_1
670
+ # infeasible if RHS is less than zero (because LHS is zero)
671
+ status = 2
672
+ message = ("The problem is (trivially) infeasible due to a row "
673
+ "of zeros in the equality constraint matrix with a "
674
+ "nonzero corresponding constraint value.")
675
+ complete = True
676
+ return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
677
+ c0, x, revstack, complete, status, message)
678
+ else: # test_zero_row_2
679
+ # if LHS is >= 0, we can eliminate this constraint entirely
680
+ A_ub = A_ub[np.logical_not(zero_row), :]
681
+ b_ub = b_ub[np.logical_not(zero_row)]
682
+
683
+ # zero column in (both) constraints
684
+ # this indicates that a variable isn't constrained and can be removed
685
+ A = vstack((A_eq, A_ub))
686
+ if A.shape[0] > 0:
687
+ zero_col = np.array(np.sum(A != 0, axis=0) == 0).flatten()
688
+ # variable will be at upper or lower bound, depending on objective
689
+ x[np.logical_and(zero_col, c < 0)] = ub[
690
+ np.logical_and(zero_col, c < 0)]
691
+ x[np.logical_and(zero_col, c > 0)] = lb[
692
+ np.logical_and(zero_col, c > 0)]
693
+ if np.any(np.isinf(x)): # if an unconstrained variable has no bound
694
+ status = 3
695
+ message = ("If feasible, the problem is (trivially) unbounded "
696
+ "due to a zero column in the constraint matrices. If "
697
+ "you wish to check whether the problem is infeasible, "
698
+ "turn presolve off.")
699
+ complete = True
700
+ return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
701
+ c0, x, revstack, complete, status, message)
702
+ # variables will equal upper/lower bounds will be removed later
703
+ lb[np.logical_and(zero_col, c < 0)] = ub[
704
+ np.logical_and(zero_col, c < 0)]
705
+ ub[np.logical_and(zero_col, c > 0)] = lb[
706
+ np.logical_and(zero_col, c > 0)]
707
+
708
+ # row singleton in equality constraints
709
+ # this fixes a variable and removes the constraint
710
+ singleton_row = np.array(np.sum(A_eq != 0, axis=1) == 1).flatten()
711
+ rows = where(singleton_row)[0]
712
+ cols = where(A_eq[rows, :])[1]
713
+ if len(rows) > 0:
714
+ for row, col in zip(rows, cols):
715
+ val = b_eq[row] / A_eq[row, col]
716
+ if not lb[col] - tol <= val <= ub[col] + tol:
717
+ # infeasible if fixed value is not within bounds
718
+ status = 2
719
+ message = ("The problem is (trivially) infeasible because a "
720
+ "singleton row in the equality constraints is "
721
+ "inconsistent with the bounds.")
722
+ complete = True
723
+ return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
724
+ c0, x, revstack, complete, status, message)
725
+ else:
726
+ # sets upper and lower bounds at that fixed value - variable
727
+ # will be removed later
728
+ lb[col] = val
729
+ ub[col] = val
730
+ A_eq = A_eq[np.logical_not(singleton_row), :]
731
+ b_eq = b_eq[np.logical_not(singleton_row)]
732
+
733
+ # row singleton in inequality constraints
734
+ # this indicates a simple bound and the constraint can be removed
735
+ # simple bounds may be adjusted here
736
+ # After all of the simple bound information is combined here, get_Abc will
737
+ # turn the simple bounds into constraints
738
+ singleton_row = np.array(np.sum(A_ub != 0, axis=1) == 1).flatten()
739
+ cols = where(A_ub[singleton_row, :])[1]
740
+ rows = where(singleton_row)[0]
741
+ if len(rows) > 0:
742
+ for row, col in zip(rows, cols):
743
+ val = b_ub[row] / A_ub[row, col]
744
+ if A_ub[row, col] > 0: # upper bound
745
+ if val < lb[col] - tol: # infeasible
746
+ complete = True
747
+ elif val < ub[col]: # new upper bound
748
+ ub[col] = val
749
+ else: # lower bound
750
+ if val > ub[col] + tol: # infeasible
751
+ complete = True
752
+ elif val > lb[col]: # new lower bound
753
+ lb[col] = val
754
+ if complete:
755
+ status = 2
756
+ message = ("The problem is (trivially) infeasible because a "
757
+ "singleton row in the upper bound constraints is "
758
+ "inconsistent with the bounds.")
759
+ return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
760
+ c0, x, revstack, complete, status, message)
761
+ A_ub = A_ub[np.logical_not(singleton_row), :]
762
+ b_ub = b_ub[np.logical_not(singleton_row)]
763
+
764
+ # identical bounds indicate that variable can be removed
765
+ i_f = np.abs(lb - ub) < tol # indices of "fixed" variables
766
+ i_nf = np.logical_not(i_f) # indices of "not fixed" variables
767
+
768
+ # test_bounds_equal_but_infeasible
769
+ if np.all(i_f): # if bounds define solution, check for consistency
770
+ residual = b_eq - A_eq.dot(lb)
771
+ slack = b_ub - A_ub.dot(lb)
772
+ if ((A_ub.size > 0 and np.any(slack < 0)) or
773
+ (A_eq.size > 0 and not np.allclose(residual, 0))):
774
+ status = 2
775
+ message = ("The problem is (trivially) infeasible because the "
776
+ "bounds fix all variables to values inconsistent with "
777
+ "the constraints")
778
+ complete = True
779
+ return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
780
+ c0, x, revstack, complete, status, message)
781
+
782
+ ub_mod = ub
783
+ lb_mod = lb
784
+ if np.any(i_f):
785
+ c0 += c[i_f].dot(lb[i_f])
786
+ b_eq = b_eq - A_eq[:, i_f].dot(lb[i_f])
787
+ b_ub = b_ub - A_ub[:, i_f].dot(lb[i_f])
788
+ c = c[i_nf]
789
+ x_undo = lb[i_f] # not x[i_f], x is just zeroes
790
+ x = x[i_nf]
791
+ # user guess x0 stays separate from presolve solution x
792
+ if x0 is not None:
793
+ x0 = x0[i_nf]
794
+ A_eq = A_eq[:, i_nf]
795
+ A_ub = A_ub[:, i_nf]
796
+ # modify bounds
797
+ lb_mod = lb[i_nf]
798
+ ub_mod = ub[i_nf]
799
+
800
+ def rev(x_mod):
801
+ # Function to restore x: insert x_undo into x_mod.
802
+ # When elements have been removed at positions k1, k2, k3, ...
803
+ # then these must be replaced at (after) positions k1-1, k2-2,
804
+ # k3-3, ... in the modified array to recreate the original
805
+ i = np.flatnonzero(i_f)
806
+ # Number of variables to restore
807
+ N = len(i)
808
+ index_offset = np.arange(N)
809
+ # Create insert indices
810
+ insert_indices = i - index_offset
811
+ x_rev = np.insert(x_mod.astype(float), insert_indices, x_undo)
812
+ return x_rev
813
+
814
+ # Use revstack as a list of functions, currently just this one.
815
+ revstack.append(rev)
816
+
817
+ # no constraints indicates that problem is trivial
818
+ if A_eq.size == 0 and A_ub.size == 0:
819
+ b_eq = np.array([])
820
+ b_ub = np.array([])
821
+ # test_empty_constraint_1
822
+ if c.size == 0:
823
+ status = 0
824
+ message = ("The solution was determined in presolve as there are "
825
+ "no non-trivial constraints.")
826
+ elif (np.any(np.logical_and(c < 0, ub_mod == np.inf)) or
827
+ np.any(np.logical_and(c > 0, lb_mod == -np.inf))):
828
+ # test_no_constraints()
829
+ # test_unbounded_no_nontrivial_constraints_1
830
+ # test_unbounded_no_nontrivial_constraints_2
831
+ status = 3
832
+ message = ("The problem is (trivially) unbounded "
833
+ "because there are no non-trivial constraints and "
834
+ "a) at least one decision variable is unbounded "
835
+ "above and its corresponding cost is negative, or "
836
+ "b) at least one decision variable is unbounded below "
837
+ "and its corresponding cost is positive. ")
838
+ else: # test_empty_constraint_2
839
+ status = 0
840
+ message = ("The solution was determined in presolve as there are "
841
+ "no non-trivial constraints.")
842
+ complete = True
843
+ x[c < 0] = ub_mod[c < 0]
844
+ x[c > 0] = lb_mod[c > 0]
845
+ # where c is zero, set x to a finite bound or zero
846
+ x_zero_c = ub_mod[c == 0]
847
+ x_zero_c[np.isinf(x_zero_c)] = lb_mod[c == 0][np.isinf(x_zero_c)]
848
+ x_zero_c[np.isinf(x_zero_c)] = 0
849
+ x[c == 0] = x_zero_c
850
+ # if this is not the last step of presolve, should convert bounds back
851
+ # to array and return here
852
+
853
+ # Convert modified lb and ub back into N x 2 bounds
854
+ bounds = np.hstack((lb_mod[:, np.newaxis], ub_mod[:, np.newaxis]))
855
+
856
+ # remove redundant (linearly dependent) rows from equality constraints
857
+ n_rows_A = A_eq.shape[0]
858
+ redundancy_warning = ("A_eq does not appear to be of full row rank. To "
859
+ "improve performance, check the problem formulation "
860
+ "for redundant equality constraints.")
861
+ if (sps.issparse(A_eq)):
862
+ if rr and A_eq.size > 0: # TODO: Fast sparse rank check?
863
+ rr_res = _remove_redundancy_pivot_sparse(A_eq, b_eq)
864
+ A_eq, b_eq, status, message = rr_res
865
+ if A_eq.shape[0] < n_rows_A:
866
+ warn(redundancy_warning, OptimizeWarning, stacklevel=1)
867
+ if status != 0:
868
+ complete = True
869
+ return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
870
+ c0, x, revstack, complete, status, message)
871
+
872
+ # This is a wild guess for which redundancy removal algorithm will be
873
+ # faster. More testing would be good.
874
+ small_nullspace = 5
875
+ if rr and A_eq.size > 0:
876
+ try: # TODO: use results of first SVD in _remove_redundancy_svd
877
+ rank = np.linalg.matrix_rank(A_eq)
878
+ # oh well, we'll have to go with _remove_redundancy_pivot_dense
879
+ except Exception:
880
+ rank = 0
881
+ if rr and A_eq.size > 0 and rank < A_eq.shape[0]:
882
+ warn(redundancy_warning, OptimizeWarning, stacklevel=3)
883
+ dim_row_nullspace = A_eq.shape[0]-rank
884
+ if rr_method is None:
885
+ if dim_row_nullspace <= small_nullspace:
886
+ rr_res = _remove_redundancy_svd(A_eq, b_eq)
887
+ A_eq, b_eq, status, message = rr_res
888
+ if dim_row_nullspace > small_nullspace or status == 4:
889
+ rr_res = _remove_redundancy_pivot_dense(A_eq, b_eq)
890
+ A_eq, b_eq, status, message = rr_res
891
+
892
+ else:
893
+ rr_method = rr_method.lower()
894
+ if rr_method == "svd":
895
+ rr_res = _remove_redundancy_svd(A_eq, b_eq)
896
+ A_eq, b_eq, status, message = rr_res
897
+ elif rr_method == "pivot":
898
+ rr_res = _remove_redundancy_pivot_dense(A_eq, b_eq)
899
+ A_eq, b_eq, status, message = rr_res
900
+ elif rr_method == "id":
901
+ rr_res = _remove_redundancy_id(A_eq, b_eq, rank)
902
+ A_eq, b_eq, status, message = rr_res
903
+ else: # shouldn't get here; option validity checked above
904
+ pass
905
+ if A_eq.shape[0] < rank:
906
+ message = ("Due to numerical issues, redundant equality "
907
+ "constraints could not be removed automatically. "
908
+ "Try providing your constraint matrices as sparse "
909
+ "matrices to activate sparse presolve, try turning "
910
+ "off redundancy removal, or try turning off presolve "
911
+ "altogether.")
912
+ status = 4
913
+ if status != 0:
914
+ complete = True
915
+ return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
916
+ c0, x, revstack, complete, status, message)
917
+
918
+
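A minimal sketch of the presolve behavior implemented above, using only the public `linprog` interface (the private helper itself is internal; the exact solver path depends on the chosen method). Equal lower and upper bounds fix every variable, so the solution can be determined outright in presolve:

from scipy.optimize import linprog

# Both variables are fixed by their bounds, so the solution is
# x = (2, 3) with fun = 2*2 + 3*3 = 13.
res = linprog(c=[2, 3], bounds=[(2, 2), (3, 3)])
print(res.x, res.fun)  # [2. 3.] 13.0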
919
+ def _parse_linprog(lp, options, meth):
920
+ """
921
+ Parse the provided linear programming problem
922
+
923
+ ``_parse_linprog`` employs two main steps ``_check_sparse_inputs`` and
924
+ ``_clean_inputs``. ``_check_sparse_inputs`` checks for sparsity in the
925
+ provided constraints (``A_ub`` and ``A_eq``) and whether these match the
926
+ provided sparsity option values.
927
+
928
+ ``_clean_inputs`` validates the provided inputs. If no violations are
929
+ identified, the objective vector, upper bound constraints, equality
930
+ constraints, and simple bounds are returned in the expected format.
931
+
932
+ Parameters
933
+ ----------
934
+ lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:
935
+
936
+ c : 1D array
937
+ The coefficients of the linear objective function to be minimized.
938
+ A_ub : 2D array, optional
939
+ The inequality constraint matrix. Each row of ``A_ub`` specifies the
940
+ coefficients of a linear inequality constraint on ``x``.
941
+ b_ub : 1D array, optional
942
+ The inequality constraint vector. Each element represents an
943
+ upper bound on the corresponding value of ``A_ub @ x``.
944
+ A_eq : 2D array, optional
945
+ The equality constraint matrix. Each row of ``A_eq`` specifies the
946
+ coefficients of a linear equality constraint on ``x``.
947
+ b_eq : 1D array, optional
948
+ The equality constraint vector. Each element of ``A_eq @ x`` must equal
949
+ the corresponding element of ``b_eq``.
950
+ bounds : various valid formats, optional
951
+ The bounds of ``x``, as ``min`` and ``max`` pairs.
952
+ If bounds are specified for all N variables separately, valid formats are:
953
+ * a 2D array (2 x N or N x 2);
954
+ * a sequence of N sequences, each with 2 values.
955
+ If all variables have the same bounds, a single pair of values can
956
+ be specified. Valid formats are:
957
+ * a sequence with 2 scalar values;
958
+ * a sequence with a single element containing 2 scalar values.
959
+ If all variables have a lower bound of 0 and no upper bound, the bounds
960
+ parameter can be omitted (or given as None).
961
+ x0 : 1D array, optional
962
+ Guess values of the decision variables, which will be refined by
963
+ the optimization algorithm. This argument is currently used only by the
964
+ 'revised simplex' method, and can only be used if `x0` represents a
965
+ basic feasible solution.
966
+
967
+ options : dict
968
+ A dictionary of solver options. All methods accept the following
969
+ generic options:
970
+
971
+ maxiter : int
972
+ Maximum number of iterations to perform.
973
+ disp : bool
974
+ Set to True to print convergence messages.
975
+
976
+ For method-specific options, see :func:`show_options('linprog')`.
977
+
978
+ Returns
979
+ -------
980
+ lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:
981
+
982
+ c : 1D array
983
+ The coefficients of the linear objective function to be minimized.
984
+ A_ub : 2D array, optional
985
+ The inequality constraint matrix. Each row of ``A_ub`` specifies the
986
+ coefficients of a linear inequality constraint on ``x``.
987
+ b_ub : 1D array, optional
988
+ The inequality constraint vector. Each element represents an
989
+ upper bound on the corresponding value of ``A_ub @ x``.
990
+ A_eq : 2D array, optional
991
+ The equality constraint matrix. Each row of ``A_eq`` specifies the
992
+ coefficients of a linear equality constraint on ``x``.
993
+ b_eq : 1D array, optional
994
+ The equality constraint vector. Each element of ``A_eq @ x`` must equal
995
+ the corresponding element of ``b_eq``.
996
+ bounds : 2D array
997
+ The bounds of ``x``, as ``min`` and ``max`` pairs, one for each of the N
998
+ elements of ``x``. The N x 2 array contains lower bounds in the first
999
+ column and upper bounds in the 2nd. Unbounded variables have lower
1000
+ bound -np.inf and/or upper bound np.inf.
1001
+ x0 : 1D array, optional
1002
+ Guess values of the decision variables, which will be refined by
1003
+ the optimization algorithm. This argument is currently used only by the
1004
+ 'revised simplex' method, and can only be used if `x0` represents a
1005
+ basic feasible solution.
1006
+
1007
+ options : dict, optional
1008
+ A dictionary of solver options. All methods accept the following
1009
+ generic options:
1010
+
1011
+ maxiter : int
1012
+ Maximum number of iterations to perform.
1013
+ disp : bool
1014
+ Set to True to print convergence messages.
1015
+
1016
+ For method-specific options, see :func:`show_options('linprog')`.
1017
+
1018
+ """
1019
+ if options is None:
1020
+ options = {}
1021
+
1022
+ solver_options = {k: v for k, v in options.items()}
1023
+ solver_options, A_ub, A_eq = _check_sparse_inputs(solver_options, meth,
1024
+ lp.A_ub, lp.A_eq)
1025
+ # Convert lists to numpy arrays, etc...
1026
+ lp = _clean_inputs(lp._replace(A_ub=A_ub, A_eq=A_eq))
1027
+ return lp, solver_options
1028
+
1029
+
1030
+ def _get_Abc(lp, c0):
1031
+ """
1032
+ Given a linear programming problem of the form:
1033
+
1034
+ Minimize::
1035
+
1036
+ c @ x
1037
+
1038
+ Subject to::
1039
+
1040
+ A_ub @ x <= b_ub
1041
+ A_eq @ x == b_eq
1042
+ lb <= x <= ub
1043
+
1044
+ where ``lb = 0`` and ``ub = None`` unless set in ``bounds``.
1045
+
1046
+ Return the problem in standard form:
1047
+
1048
+ Minimize::
1049
+
1050
+ c @ x
1051
+
1052
+ Subject to::
1053
+
1054
+ A @ x == b
1055
+ x >= 0
1056
+
1057
+ by adding slack variables and making variable substitutions as necessary.
1058
+
1059
+ Parameters
1060
+ ----------
1061
+ lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:
1062
+
1063
+ c : 1D array
1064
+ The coefficients of the linear objective function to be minimized.
1065
+ A_ub : 2D array, optional
1066
+ The inequality constraint matrix. Each row of ``A_ub`` specifies the
1067
+ coefficients of a linear inequality constraint on ``x``.
1068
+ b_ub : 1D array, optional
1069
+ The inequality constraint vector. Each element represents an
1070
+ upper bound on the corresponding value of ``A_ub @ x``.
1071
+ A_eq : 2D array, optional
1072
+ The equality constraint matrix. Each row of ``A_eq`` specifies the
1073
+ coefficients of a linear equality constraint on ``x``.
1074
+ b_eq : 1D array, optional
1075
+ The equality constraint vector. Each element of ``A_eq @ x`` must equal
1076
+ the corresponding element of ``b_eq``.
1077
+ bounds : 2D array
1078
+ The bounds of ``x``, lower bounds in the 1st column, upper
1079
+ bounds in the 2nd column. The bounds are possibly tightened
1080
+ by the presolve procedure.
1081
+ x0 : 1D array, optional
1082
+ Guess values of the decision variables, which will be refined by
1083
+ the optimization algorithm. This argument is currently used only by the
1084
+ 'revised simplex' method, and can only be used if `x0` represents a
1085
+ basic feasible solution.
1086
+
1087
+ c0 : float
1088
+ Constant term in objective function due to fixed (and eliminated)
1089
+ variables.
1090
+
1091
+ Returns
1092
+ -------
1093
+ A : 2-D array
1094
+ 2-D array such that ``A @ x`` gives the values of the equality
1095
+ constraints at ``x``.
1096
+ b : 1-D array
1097
+ 1-D array of values representing the RHS of each equality constraint
1098
+ (row) in A (for standard form problem).
1099
+ c : 1-D array
1100
+ Coefficients of the linear objective function to be minimized (for
1101
+ standard form problem).
1102
+ c0 : float
1103
+ Constant term in objective function due to fixed (and eliminated)
1104
+ variables.
1105
+ x0 : 1-D array
1106
+ Starting values of the independent variables, which will be refined by
1107
+ the optimization algorithm.
1108
+
1109
+ References
1110
+ ----------
1111
+ .. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear
1112
+ programming." Athena Scientific 1 (1997): 997.
1113
+
1114
+ """
1115
+ c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality = lp
1116
+
1117
+ if sps.issparse(A_eq):
1118
+ sparse = True
1119
+ A_eq = sps.csr_matrix(A_eq)
1120
+ A_ub = sps.csr_matrix(A_ub)
1121
+
1122
+ def hstack(blocks):
1123
+ return sps.hstack(blocks, format="csr")
1124
+
1125
+ def vstack(blocks):
1126
+ return sps.vstack(blocks, format="csr")
1127
+
1128
+ zeros = sps.csr_matrix
1129
+ eye = sps.eye
1130
+ else:
1131
+ sparse = False
1132
+ hstack = np.hstack
1133
+ vstack = np.vstack
1134
+ zeros = np.zeros
1135
+ eye = np.eye
1136
+
1137
+ # Variables lbs and ubs (see below) may be changed, which feeds back into
1138
+ # bounds, so copy.
1139
+ bounds = np.array(bounds, copy=True)
1140
+
1141
+ # modify problem such that all variables have only non-negativity bounds
1142
+ lbs = bounds[:, 0]
1143
+ ubs = bounds[:, 1]
1144
+ m_ub, n_ub = A_ub.shape
1145
+
1146
+ lb_none = np.equal(lbs, -np.inf)
1147
+ ub_none = np.equal(ubs, np.inf)
1148
+ lb_some = np.logical_not(lb_none)
1149
+ ub_some = np.logical_not(ub_none)
1150
+
1151
+ # unbounded below: substitute xi = -xi' (unbounded above)
1152
+ # if -inf <= xi <= ub, then -ub <= -xi <= inf, so swap and invert bounds
1153
+ l_nolb_someub = np.logical_and(lb_none, ub_some)
1154
+ i_nolb = np.nonzero(l_nolb_someub)[0]
1155
+ lbs[l_nolb_someub], ubs[l_nolb_someub] = (
1156
+ -ubs[l_nolb_someub], -lbs[l_nolb_someub])
1157
+ lb_none = np.equal(lbs, -np.inf)
1158
+ ub_none = np.equal(ubs, np.inf)
1159
+ lb_some = np.logical_not(lb_none)
1160
+ ub_some = np.logical_not(ub_none)
1161
+ c[i_nolb] *= -1
1162
+ if x0 is not None:
1163
+ x0[i_nolb] *= -1
1164
+ if len(i_nolb) > 0:
1165
+ if A_ub.shape[0] > 0: # sometimes needed for sparse arrays... weird
1166
+ A_ub[:, i_nolb] *= -1
1167
+ if A_eq.shape[0] > 0:
1168
+ A_eq[:, i_nolb] *= -1
1169
+
1170
+ # upper bound: add inequality constraint
1171
+ i_newub, = ub_some.nonzero()
1172
+ ub_newub = ubs[ub_some]
1173
+ n_bounds = len(i_newub)
1174
+ if n_bounds > 0:
1175
+ shape = (n_bounds, A_ub.shape[1])
1176
+ if sparse:
1177
+ idxs = (np.arange(n_bounds), i_newub)
1178
+ A_ub = vstack((A_ub, sps.csr_matrix((np.ones(n_bounds), idxs),
1179
+ shape=shape)))
1180
+ else:
1181
+ A_ub = vstack((A_ub, np.zeros(shape)))
1182
+ A_ub[np.arange(m_ub, A_ub.shape[0]), i_newub] = 1
1183
+ b_ub = np.concatenate((b_ub, np.zeros(n_bounds)))
1184
+ b_ub[m_ub:] = ub_newub
1185
+
1186
+ A1 = vstack((A_ub, A_eq))
1187
+ b = np.concatenate((b_ub, b_eq))
1188
+ c = np.concatenate((c, np.zeros((A_ub.shape[0],))))
1189
+ if x0 is not None:
1190
+ x0 = np.concatenate((x0, np.zeros((A_ub.shape[0],))))
1191
+ # unbounded: substitute xi = xi+ + xi-
1192
+ l_free = np.logical_and(lb_none, ub_none)
1193
+ i_free = np.nonzero(l_free)[0]
1194
+ n_free = len(i_free)
1195
+ c = np.concatenate((c, np.zeros(n_free)))
1196
+ if x0 is not None:
1197
+ x0 = np.concatenate((x0, np.zeros(n_free)))
1198
+ A1 = hstack((A1[:, :n_ub], -A1[:, i_free]))
1199
+ c[n_ub:n_ub+n_free] = -c[i_free]
1200
+ if x0 is not None:
1201
+ i_free_neg = x0[i_free] < 0
1202
+ x0[np.arange(n_ub, A1.shape[1])[i_free_neg]] = -x0[i_free[i_free_neg]]
1203
+ x0[i_free[i_free_neg]] = 0
1204
+
1205
+ # add slack variables
1206
+ A2 = vstack([eye(A_ub.shape[0]), zeros((A_eq.shape[0], A_ub.shape[0]))])
1207
+
1208
+ A = hstack([A1, A2])
1209
+
1210
+ # lower bound: substitute xi = xi' + lb
1211
+ # now there is a constant term in objective
1212
+ i_shift = np.nonzero(lb_some)[0]
1213
+ lb_shift = lbs[lb_some].astype(float)
1214
+ c0 += np.sum(lb_shift * c[i_shift])
1215
+ if sparse:
1216
+ b = b.reshape(-1, 1)
1217
+ A = A.tocsc()
1218
+ b -= (A[:, i_shift] * sps.diags(lb_shift)).sum(axis=1)
1219
+ b = b.ravel()
1220
+ else:
1221
+ b -= (A[:, i_shift] * lb_shift).sum(axis=1)
1222
+ if x0 is not None:
1223
+ x0[i_shift] -= lb_shift
1224
+
1225
+ return A, b, c, c0, x0
1226
+
1227
+
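A hand-worked instance of the transformation described above (illustrative values): minimizing x0 subject to 1 <= x0 <= 4 only requires shifting by the lower bound and adding a slack variable for the upper bound.

import numpy as np

# minimize x0   subject to   1 <= x0 <= 4
# Substitute x0 = x0' + 1, turn the upper bound into an inequality row,
# then add a slack s >= 0 to make it an equality: x0' + s == 3.
A = np.array([[1.0, 1.0]])   # columns: [x0', slack]
b = np.array([4.0 - 1.0])    # upper bound shifted by the lower bound
c = np.array([1.0, 0.0])     # the slack carries zero cost
c0 = 1.0                     # constant term c @ lb picked up by the shift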
1228
+ def _round_to_power_of_two(x):
1229
+ """
1230
+ Round elements of the array to the nearest power of two.
1231
+ """
1232
+ return 2**np.around(np.log2(x))
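For example (illustrative values):

# _round_to_power_of_two(np.array([3., 5., 0.3])) -> [4., 4., 0.25]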
1233
+
1234
+
1235
+ def _autoscale(A, b, c, x0):
1236
+ """
1237
+ Scales the problem according to equilibration from [12].
1238
+ Also normalizes the right hand side vector by its maximum element.
1239
+ """
1240
+ m, n = A.shape
1241
+
1242
+ C = 1
1243
+ R = 1
1244
+
1245
+ if A.size > 0:
1246
+
1247
+ R = np.max(np.abs(A), axis=1)
1248
+ if sps.issparse(A):
1249
+ R = R.toarray().flatten()
1250
+ R[R == 0] = 1
1251
+ R = 1/_round_to_power_of_two(R)
1252
+ A = sps.diags(R)*A if sps.issparse(A) else A*R.reshape(m, 1)
1253
+ b = b*R
1254
+
1255
+ C = np.max(np.abs(A), axis=0)
1256
+ if sps.issparse(A):
1257
+ C = C.toarray().flatten()
1258
+ C[C == 0] = 1
1259
+ C = 1/_round_to_power_of_two(C)
1260
+ A = A*sps.diags(C) if sps.issparse(A) else A*C
1261
+ c = c*C
1262
+
1263
+ b_scale = np.max(np.abs(b)) if b.size > 0 else 1
1264
+ if b_scale == 0:
1265
+ b_scale = 1.
1266
+ b = b/b_scale
1267
+
1268
+ if x0 is not None:
1269
+ x0 = x0/b_scale*(1/C)
1270
+ return A, b, c, x0, C, b_scale
1271
+
1272
+
1273
+ def _unscale(x, C, b_scale):
1274
+ """
1275
+ Converts solution to _autoscale problem -> solution to original problem.
1276
+ """
1277
+
1278
+ try:
1279
+ n = len(C)
1280
+ # fails if sparse or scalar; that's OK.
1281
+ # this is only needed for original simplex (never sparse)
1282
+ except TypeError:
1283
+ n = len(x)
1284
+
1285
+ return x[:n]*b_scale*C
1286
+
1287
+
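A small sketch of how these two helpers fit together (illustrative values; both functions are private to this module):

import numpy as np

A = np.array([[4.0, 0.5], [1.0, 8.0]])
b = np.array([2.0, 4.0])
c = np.array([1.0, 1.0])

# Equilibrate rows and columns by powers of two, then normalize b.
A_s, b_s, c_s, _, C, b_scale = _autoscale(A, b, c, None)

# A solution of the scaled problem maps back through _unscale.
x_scaled = np.ones(2)               # stand-in for a solver result
x = _unscale(x_scaled, C, b_scale)  # solution in the original variables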
1288
+ def _display_summary(message, status, fun, iteration):
1289
+ """
1290
+ Print the termination summary of the linear program
1291
+
1292
+ Parameters
1293
+ ----------
1294
+ message : str
1295
+ A string descriptor of the exit status of the optimization.
1296
+ status : int
1297
+ An integer representing the exit status of the optimization::
1298
+
1299
+ 0 : Optimization terminated successfully
1300
+ 1 : Iteration limit reached
1301
+ 2 : Problem appears to be infeasible
1302
+ 3 : Problem appears to be unbounded
1303
+ 4 : Serious numerical difficulties encountered
1304
+
1305
+ fun : float
1306
+ Value of the objective function.
1307
+ iteration : int
1308
+ The number of iterations performed.
1309
+ """
1310
+ print(message)
1311
+ if status in (0, 1):
1312
+ print(f" Current function value: {fun: <12.6f}")
1313
+ print(f" Iterations: {iteration:d}")
1314
+
1315
+
1316
+ def _postsolve(x, postsolve_args, complete=False):
1317
+ """
1318
+ Given a solution x to the presolved, standard-form linear program, add
1319
+ fixed variables back into the problem and undo the variable substitutions
1320
+ to get solution to original linear program. Also, calculate the objective
1321
+ function value, slack in original upper bound constraints, and residuals
1322
+ in original equality constraints.
1323
+
1324
+ Parameters
1325
+ ----------
1326
+ x : 1-D array
1327
+ Solution vector to the standard-form problem.
1328
+ postsolve_args : tuple
1329
+ Data needed by _postsolve to convert the solution to the standard-form
1330
+ problem into the solution to the original problem, including:
1331
+
1332
+ lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:
1333
+
1334
+ c : 1D array
1335
+ The coefficients of the linear objective function to be minimized.
1336
+ A_ub : 2D array, optional
1337
+ The inequality constraint matrix. Each row of ``A_ub`` specifies the
1338
+ coefficients of a linear inequality constraint on ``x``.
1339
+ b_ub : 1D array, optional
1340
+ The inequality constraint vector. Each element represents an
1341
+ upper bound on the corresponding value of ``A_ub @ x``.
1342
+ A_eq : 2D array, optional
1343
+ The equality constraint matrix. Each row of ``A_eq`` specifies the
1344
+ coefficients of a linear equality constraint on ``x``.
1345
+ b_eq : 1D array, optional
1346
+ The equality constraint vector. Each element of ``A_eq @ x`` must equal
1347
+ the corresponding element of ``b_eq``.
1348
+ bounds : 2D array
1349
+ The bounds of ``x``, lower bounds in the 1st column, upper
1350
+ bounds in the 2nd column. The bounds are possibly tightened
1351
+ by the presolve procedure.
1352
+ x0 : 1D array, optional
1353
+ Guess values of the decision variables, which will be refined by
1354
+ the optimization algorithm. This argument is currently used only by the
1355
+ 'revised simplex' method, and can only be used if `x0` represents a
1356
+ basic feasible solution.
1357
+
1358
+ revstack: list of functions
1359
+ The functions in the list reverse the operations of _presolve().
1360
+ The function signature is x_org = f(x_mod), where x_mod is the result
1361
+ of a presolve step and x_org the value at the start of the step.
1362
+ complete : bool
1363
+ Whether the solution was determined in presolve (``True`` if so)
1364
+
1365
+ Returns
1366
+ -------
1367
+ x : 1-D array
1368
+ Solution vector to original linear programming problem
1369
+ fun: float
1370
+ optimal objective value for original problem
1371
+ slack : 1-D array
1372
+ The (non-negative) slack in the upper bound constraints, that is,
1373
+ ``b_ub - A_ub @ x``
1374
+ con : 1-D array
1375
+ The (nominally zero) residuals of the equality constraints, that is,
1376
+ ``b - A_eq @ x``
1377
+ """
1378
+ # note that all the inputs are the ORIGINAL, unmodified versions
1379
+ # no rows, columns have been removed
1380
+
1381
+ c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality = postsolve_args[0]
1382
+ revstack, C, b_scale = postsolve_args[1:]
1383
+
1384
+ x = _unscale(x, C, b_scale)
1385
+
1386
+ # Undo variable substitutions of _get_Abc()
1387
+ # if "complete", problem was solved in presolve; don't do anything here
1388
+ n_x = bounds.shape[0]
1389
+ if not complete and bounds is not None: # bounds are never none, probably
1390
+ n_unbounded = 0
1391
+ for i, bi in enumerate(bounds):
1392
+ lbi = bi[0]
1393
+ ubi = bi[1]
1394
+ if lbi == -np.inf and ubi == np.inf:
1395
+ n_unbounded += 1
1396
+ x[i] = x[i] - x[n_x + n_unbounded - 1]
1397
+ else:
1398
+ if lbi == -np.inf:
1399
+ x[i] = ubi - x[i]
1400
+ else:
1401
+ x[i] += lbi
1402
+ # all the rest of the variables were artificial
1403
+ x = x[:n_x]
1404
+
1405
+ # If there were variables removed from the problem, add them back into the
1406
+ # solution vector
1407
+ # Apply the functions in revstack (reverse direction)
1408
+ for rev in reversed(revstack):
1409
+ x = rev(x)
1410
+
1411
+ fun = x.dot(c)
1412
+ slack = b_ub - A_ub.dot(x) # report slack for ORIGINAL UB constraints
1413
+ # report residuals of ORIGINAL EQ constraints
1414
+ con = b_eq - A_eq.dot(x)
1415
+
1416
+ return x, fun, slack, con
1417
+
1418
+
1419
+ def _check_result(x, fun, status, slack, con, bounds, tol, message,
1420
+ integrality):
1421
+ """
1422
+ Check the validity of the provided solution.
1423
+
1424
+ A valid (optimal) solution satisfies all bounds, all slack variables are
1425
+ non-negative, and all equality constraint residuals are (nominally) zero.
1426
+ Further, the lower-bounds, upper-bounds, slack and residuals contain
1427
+ no nan values.
1428
+
1429
+ Parameters
1430
+ ----------
1431
+ x : 1-D array
1432
+ Solution vector to original linear programming problem
1433
+ fun: float
1434
+ optimal objective value for original problem
1435
+ status : int
1436
+ An integer representing the exit status of the optimization::
1437
+
1438
+ 0 : Optimization terminated successfully
1439
+ 1 : Iteration limit reached
1440
+ 2 : Problem appears to be infeasible
1441
+ 3 : Problem appears to be unbounded
1442
+ 4 : Serious numerical difficulties encountered
1443
+
1444
+ slack : 1-D array
1445
+ The (non-negative) slack in the upper bound constraints, that is,
1446
+ ``b_ub - A_ub @ x``
1447
+ con : 1-D array
1448
+ The (nominally zero) residuals of the equality constraints, that is,
1449
+ ``b - A_eq @ x``
1450
+ bounds : 2D array
1451
+ The bounds on the original variables ``x``
1452
+ message : str
1453
+ A string descriptor of the exit status of the optimization.
1454
+ tol : float
1455
+ Termination tolerance; see [1]_ Section 4.5.
1456
+
1457
+ Returns
1458
+ -------
1459
+ status : int
1460
+ An integer representing the exit status of the optimization::
1461
+
1462
+ 0 : Optimization terminated successfully
1463
+ 1 : Iteration limit reached
1464
+ 2 : Problem appears to be infeasible
1465
+ 3 : Problem appears to be unbounded
1466
+ 4 : Serious numerical difficulties encountered
1467
+
1468
+ message : str
1469
+ A string descriptor of the exit status of the optimization.
1470
+ """
1471
+ # Somewhat arbitrary
1472
+ tol = np.sqrt(tol) * 10
1473
+
1474
+ if x is None:
1475
+ # HiGHS does not provide x if infeasible/unbounded
1476
+ if status == 0: # Observed with HiGHS Simplex Primal
1477
+ status = 4
1478
+ message = ("The solver did not provide a solution nor did it "
1479
+ "report a failure. Please submit a bug report.")
1480
+ return status, message
1481
+
1482
+ contains_nans = (
1483
+ np.isnan(x).any()
1484
+ or np.isnan(fun)
1485
+ or np.isnan(slack).any()
1486
+ or np.isnan(con).any()
1487
+ )
1488
+
1489
+ if contains_nans:
1490
+ is_feasible = False
1491
+ else:
1492
+ if integrality is None:
1493
+ integrality = 0
1494
+ valid_bounds = (x >= bounds[:, 0] - tol) & (x <= bounds[:, 1] + tol)
1495
+ # When integrality is 2 or 3, x must be within bounds OR take value 0
1496
+ valid_bounds |= (integrality > 1) & np.isclose(x, 0, atol=tol)
1497
+ invalid_bounds = not np.all(valid_bounds)
1498
+
1499
+ invalid_slack = status != 3 and (slack < -tol).any()
1500
+ invalid_con = status != 3 and (np.abs(con) > tol).any()
1501
+ is_feasible = not (invalid_bounds or invalid_slack or invalid_con)
1502
+
1503
+ if status == 0 and not is_feasible:
1504
+ status = 4
1505
+ message = ("The solution does not satisfy the constraints within the "
1506
+ "required tolerance of " + f"{tol:.2E}" + ", yet "
1507
+ "no errors were raised and there is no certificate of "
1508
+ "infeasibility or unboundedness. Check whether "
1509
+ "the slack and constraint residuals are acceptable; "
1510
+ "if not, consider enabling presolve, adjusting the "
1511
+ "tolerance option(s), and/or using a different method. "
1512
+ "Please consider submitting a bug report.")
1513
+ elif status == 2 and is_feasible:
1514
+ # Occurs if the simplex method exits after phase one with a very
1515
+ # nearly basic feasible solution. Postsolving can make the solution
1516
+ # basic; however, this solution is NOT optimal.
1517
+ status = 4
1518
+ message = ("The solution is feasible, but the solver did not report "
1519
+ "that the solution was optimal. Please try a different "
1520
+ "method.")
1521
+
1522
+ return status, message
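The core of the feasibility test above, in isolation (illustrative values):

import numpy as np

tol = np.sqrt(1e-9) * 10                     # same loosening as above
x = np.array([0.0, 1.0 + 1e-9])              # tiny bound violation
bounds = np.array([[0.0, 1.0], [0.0, 1.0]])
within = (x >= bounds[:, 0] - tol) & (x <= bounds[:, 1] + tol)
print(within.all())                          # True: inside the tolerance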
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsap.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (27.1 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/__init__.py ADDED
@@ -0,0 +1,5 @@
1
+ """This module contains least-squares algorithms."""
2
+ from .least_squares import least_squares
3
+ from .lsq_linear import lsq_linear
4
+
5
+ __all__ = ['least_squares', 'lsq_linear']
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (345 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/bvls.cpython-310.pyc ADDED
Binary file (2.73 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/common.cpython-310.pyc ADDED
Binary file (19.5 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/dogbox.cpython-310.pyc ADDED
Binary file (8.2 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/least_squares.cpython-310.pyc ADDED
Binary file (36.6 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/lsq_linear.cpython-310.pyc ADDED
Binary file (14 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/trf.cpython-310.pyc ADDED
Binary file (12.1 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/trf_linear.cpython-310.pyc ADDED
Binary file (5.65 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/trf.py ADDED
@@ -0,0 +1,560 @@
1
+ """Trust Region Reflective algorithm for least-squares optimization.
2
+
3
+ The algorithm is based on ideas from paper [STIR]_. The main idea is to
4
+ account for the presence of the bounds by appropriate scaling of the variables (or,
5
+ equivalently, changing a trust-region shape). Let's introduce a vector v:
6
+
7
+ | ub[i] - x[i], if g[i] < 0 and ub[i] < np.inf
8
+ v[i] = | x[i] - lb[i], if g[i] > 0 and lb[i] > -np.inf
9
+ | 1, otherwise
10
+
11
+ where g is the gradient of a cost function and lb, ub are the bounds. Its
12
+ components are distances to the bounds at which the anti-gradient points (if
13
+ this distance is finite). Define a scaling matrix D = diag(v**0.5).
14
+ First-order optimality conditions can be stated as
15
+
16
+ D^2 g(x) = 0.
17
+
18
+ This means that components of the gradient should be zero for strictly interior
19
+ variables, and components must point inside the feasible region for variables
20
+ on the bound.
21
+
22
+ Now consider this system of equations as a new optimization problem. If the
23
+ point x is strictly interior (not on the bound), then the left-hand side is
24
+ differentiable and the Newton step for it satisfies
25
+
26
+ (D^2 H + diag(g) Jv) p = -D^2 g
27
+
28
+ where H is the Hessian matrix (or its J^T J approximation in least squares),
29
+ Jv is the Jacobian matrix of v with components -1, 1 or 0, such that all
30
+ elements of matrix C = diag(g) Jv are non-negative. Introduce the change
31
+ of the variables x = D x_h (_h would be "hat" in LaTeX). In the new variables,
32
+ we have a Newton step satisfying
33
+
34
+ B_h p_h = -g_h,
35
+
36
+ where B_h = D H D + C, g_h = D g. In least squares B_h = J_h^T J_h, where
37
+ J_h = J D. Note that J_h and g_h are proper Jacobian and gradient with respect
38
+ to "hat" variables. To guarantee global convergence we formulate a
39
+ trust-region problem based on the Newton step in the new variables:
40
+
41
+ 0.5 * p_h^T B_h p_h + g_h^T p_h -> min, ||p_h|| <= Delta
42
+
43
+ In the original space B = H + D^{-1} C D^{-1}, and the equivalent trust-region
44
+ problem is
45
+
46
+ 0.5 * p^T B p + g^T p -> min, ||D^{-1} p|| <= Delta
47
+
48
+ Here, the meaning of the matrix D becomes clearer: it alters the shape
49
+ of a trust-region, such that large steps towards the bounds are not allowed.
50
+ In the implementation, the trust-region problem is solved in "hat" space,
51
+ but handling of the bounds is done in the original space (see below and read
52
+ the code).
53
+
54
+ The introduction of the matrix D doesn't allow the bounds to be ignored: the
55
+ algorithm must keep iterates strictly feasible (to satisfy the aforementioned
56
+ differentiability); the parameter theta controls the step back from the boundary
57
+ (see the code for details).
58
+
59
+ The algorithm does another important trick. If the trust-region solution
60
+ doesn't fit into the bounds, then a reflected (from the first encountered
61
+ bound) search direction is considered. For motivation and analysis, refer to the
62
+ [STIR]_ paper (and other papers by the authors). In practice, it doesn't need
63
+ much justification: the algorithm simply chooses the best step among
64
+ three: a constrained trust-region step, a reflected step and a constrained
65
+ Cauchy step (a minimizer along -g_h in "hat" space, or -D^2 g in the original
66
+ space).
67
+
68
+ Another feature is that a trust-region radius control strategy is modified to
69
+ account for appearance of the diagonal C matrix (called diag_h in the code).
70
+
71
+ Note that all described peculiarities are completely gone as we consider
72
+ problems without bounds (the algorithm becomes a standard trust-region type
73
+ algorithm very similar to ones implemented in MINPACK).
74
+
75
+ The implementation supports two methods of solving the trust-region problem.
76
+ The first, called 'exact', applies SVD to the Jacobian and then solves the problem
77
+ very accurately using the algorithm described in [JJMore]_. It is not
78
+ applicable to large problems. The second, called 'lsmr', uses the 2-D subspace
79
+ approach (sometimes called "indefinite dogleg"), where the problem is solved
80
+ in a subspace spanned by the gradient and the approximate Gauss-Newton step
81
+ found by ``scipy.sparse.linalg.lsmr``. A 2-D trust-region problem is
82
+ reformulated as a 4th order algebraic equation and solved very accurately by
83
+ ``numpy.roots``. The subspace approach allows solving very large problems
84
+ (up to a couple of million residuals on a regular PC), provided the Jacobian
85
+ matrix is sufficiently sparse.
86
+
87
+ References
88
+ ----------
89
+ .. [STIR] Branch, M.A., T.F. Coleman, and Y. Li, "A Subspace, Interior,
90
+ and Conjugate Gradient Method for Large-Scale Bound-Constrained
91
+ Minimization Problems," SIAM Journal on Scientific Computing,
92
+ Vol. 21, Number 1, pp 1-23, 1999.
93
+ .. [JJMore] More, J. J., "The Levenberg-Marquardt Algorithm: Implementation
94
+ and Theory," Numerical Analysis, ed. G. A. Watson, Lecture
95
+ """
96
+ import numpy as np
97
+ from numpy.linalg import norm
98
+ from scipy.linalg import svd, qr
99
+ from scipy.sparse.linalg import lsmr
100
+ from scipy.optimize import OptimizeResult
101
+
102
+ from .common import (
103
+ step_size_to_bound, find_active_constraints, in_bounds,
104
+ make_strictly_feasible, intersect_trust_region, solve_lsq_trust_region,
105
+ solve_trust_region_2d, minimize_quadratic_1d, build_quadratic_1d,
106
+ evaluate_quadratic, right_multiplied_operator, regularized_lsq_operator,
107
+ CL_scaling_vector, compute_grad, compute_jac_scale, check_termination,
108
+ update_tr_radius, scale_for_robust_loss_function, print_header_nonlinear,
109
+ print_iteration_nonlinear)
110
+
111
+
112
+ def trf(fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale,
113
+ loss_function, tr_solver, tr_options, verbose):
114
+ # For efficiency, it makes sense to run the simplified version of the
115
+ # algorithm when no bounds are imposed. We decided to write two
116
+ # separate functions. This violates the DRY principle, but it keeps
117
+ # each function as readable as possible.
118
+ if np.all(lb == -np.inf) and np.all(ub == np.inf):
119
+ return trf_no_bounds(
120
+ fun, jac, x0, f0, J0, ftol, xtol, gtol, max_nfev, x_scale,
121
+ loss_function, tr_solver, tr_options, verbose)
122
+ else:
123
+ return trf_bounds(
124
+ fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale,
125
+ loss_function, tr_solver, tr_options, verbose)
126
+
127
+
128
+ def select_step(x, J_h, diag_h, g_h, p, p_h, d, Delta, lb, ub, theta):
129
+ """Select the best step according to Trust Region Reflective algorithm."""
130
+ if in_bounds(x + p, lb, ub):
131
+ p_value = evaluate_quadratic(J_h, g_h, p_h, diag=diag_h)
132
+ return p, p_h, -p_value
133
+
134
+ p_stride, hits = step_size_to_bound(x, p, lb, ub)
135
+
136
+ # Compute the reflected direction.
137
+ r_h = np.copy(p_h)
138
+ r_h[hits.astype(bool)] *= -1
139
+ r = d * r_h
140
+
141
+ # Restrict trust-region step, such that it hits the bound.
142
+ p *= p_stride
143
+ p_h *= p_stride
144
+ x_on_bound = x + p
145
+
146
+ # Reflected direction will cross first either feasible region or trust
147
+ # region boundary.
148
+ _, to_tr = intersect_trust_region(p_h, r_h, Delta)
149
+ to_bound, _ = step_size_to_bound(x_on_bound, r, lb, ub)
150
+
151
+ # Find lower and upper bounds on a step size along the reflected
152
+ # direction, considering the strict feasibility requirement. There is no
153
+ # single correct way to do that; the chosen approach seems to work best
154
+ # on test problems.
155
+ r_stride = min(to_bound, to_tr)
156
+ if r_stride > 0:
157
+ r_stride_l = (1 - theta) * p_stride / r_stride
158
+ if r_stride == to_bound:
159
+ r_stride_u = theta * to_bound
160
+ else:
161
+ r_stride_u = to_tr
162
+ else:
163
+ r_stride_l = 0
164
+ r_stride_u = -1
165
+
166
+ # Check if reflection step is available.
167
+ if r_stride_l <= r_stride_u:
168
+ a, b, c = build_quadratic_1d(J_h, g_h, r_h, s0=p_h, diag=diag_h)
169
+ r_stride, r_value = minimize_quadratic_1d(
170
+ a, b, r_stride_l, r_stride_u, c=c)
171
+ r_h *= r_stride
172
+ r_h += p_h
173
+ r = r_h * d
174
+ else:
175
+ r_value = np.inf
176
+
177
+ # Now correct p_h to make it strictly interior.
178
+ p *= theta
179
+ p_h *= theta
180
+ p_value = evaluate_quadratic(J_h, g_h, p_h, diag=diag_h)
181
+
182
+ ag_h = -g_h
183
+ ag = d * ag_h
184
+
185
+ to_tr = Delta / norm(ag_h)
186
+ to_bound, _ = step_size_to_bound(x, ag, lb, ub)
187
+ if to_bound < to_tr:
188
+ ag_stride = theta * to_bound
189
+ else:
190
+ ag_stride = to_tr
191
+
192
+ a, b = build_quadratic_1d(J_h, g_h, ag_h, diag=diag_h)
193
+ ag_stride, ag_value = minimize_quadratic_1d(a, b, 0, ag_stride)
194
+ ag_h *= ag_stride
195
+ ag *= ag_stride
196
+
197
+ if p_value < r_value and p_value < ag_value:
198
+ return p, p_h, -p_value
199
+ elif r_value < p_value and r_value < ag_value:
200
+ return r, r_h, -r_value
201
+ else:
202
+ return ag, ag_h, -ag_value
203
+
204
+
205
+ def trf_bounds(fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev,
206
+ x_scale, loss_function, tr_solver, tr_options, verbose):
207
+ x = x0.copy()
208
+
209
+ f = f0
210
+ f_true = f.copy()
211
+ nfev = 1
212
+
213
+ J = J0
214
+ njev = 1
215
+ m, n = J.shape
216
+
217
+ if loss_function is not None:
218
+ rho = loss_function(f)
219
+ cost = 0.5 * np.sum(rho[0])
220
+ J, f = scale_for_robust_loss_function(J, f, rho)
221
+ else:
222
+ cost = 0.5 * np.dot(f, f)
223
+
224
+ g = compute_grad(J, f)
225
+
226
+ jac_scale = isinstance(x_scale, str) and x_scale == 'jac'
227
+ if jac_scale:
228
+ scale, scale_inv = compute_jac_scale(J)
229
+ else:
230
+ scale, scale_inv = x_scale, 1 / x_scale
231
+
232
+ v, dv = CL_scaling_vector(x, g, lb, ub)
233
+ v[dv != 0] *= scale_inv[dv != 0]
234
+ Delta = norm(x0 * scale_inv / v**0.5)
235
+ if Delta == 0:
236
+ Delta = 1.0
237
+
238
+ g_norm = norm(g * v, ord=np.inf)
239
+
240
+ f_augmented = np.zeros(m + n)
241
+ if tr_solver == 'exact':
242
+ J_augmented = np.empty((m + n, n))
243
+ elif tr_solver == 'lsmr':
244
+ reg_term = 0.0
245
+ regularize = tr_options.pop('regularize', True)
246
+
247
+ if max_nfev is None:
248
+ max_nfev = x0.size * 100
249
+
250
+ alpha = 0.0 # "Levenberg-Marquardt" parameter
251
+
252
+ termination_status = None
253
+ iteration = 0
254
+ step_norm = None
255
+ actual_reduction = None
256
+
257
+ if verbose == 2:
258
+ print_header_nonlinear()
259
+
260
+ while True:
261
+ v, dv = CL_scaling_vector(x, g, lb, ub)
262
+
263
+ g_norm = norm(g * v, ord=np.inf)
264
+ if g_norm < gtol:
265
+ termination_status = 1
266
+
267
+ if verbose == 2:
268
+ print_iteration_nonlinear(iteration, nfev, cost, actual_reduction,
269
+ step_norm, g_norm)
270
+
271
+ if termination_status is not None or nfev == max_nfev:
272
+ break
273
+
274
+ # Now compute variables in "hat" space. Here, we also account for
275
+ # scaling introduced by `x_scale` parameter. This part is a bit tricky,
276
+ # you have to write down the formulas and see how the trust-region
277
+ # problem is formulated when the two types of scaling are applied.
278
+ # The idea is that first we apply `x_scale` and then apply Coleman-Li
279
+ # approach in the new variables.
280
+
281
+ # v is recomputed in the variables after applying `x_scale`, note that
282
+ # components which were identically 1 are not affected.
283
+ v[dv != 0] *= scale_inv[dv != 0]
284
+
285
+ # Here, we apply two types of scaling.
286
+ d = v**0.5 * scale
287
+
288
+ # C = diag(g * scale) Jv
289
+ diag_h = g * dv * scale
290
+
291
+ # After all this has been done, we continue normally.
292
+
293
+ # "hat" gradient.
294
+ g_h = d * g
295
+
296
+ f_augmented[:m] = f
297
+ if tr_solver == 'exact':
298
+ J_augmented[:m] = J * d
299
+ J_h = J_augmented[:m] # Memory view.
300
+ J_augmented[m:] = np.diag(diag_h**0.5)
301
+ U, s, V = svd(J_augmented, full_matrices=False)
302
+ V = V.T
303
+ uf = U.T.dot(f_augmented)
304
+ elif tr_solver == 'lsmr':
305
+ J_h = right_multiplied_operator(J, d)
306
+
307
+ if regularize:
308
+ a, b = build_quadratic_1d(J_h, g_h, -g_h, diag=diag_h)
309
+ to_tr = Delta / norm(g_h)
310
+ ag_value = minimize_quadratic_1d(a, b, 0, to_tr)[1]
311
+ reg_term = -ag_value / Delta**2
312
+
313
+ lsmr_op = regularized_lsq_operator(J_h, (diag_h + reg_term)**0.5)
314
+ gn_h = lsmr(lsmr_op, f_augmented, **tr_options)[0]
315
+ S = np.vstack((g_h, gn_h)).T
316
+ S, _ = qr(S, mode='economic')
317
+ JS = J_h.dot(S) # LinearOperator does dot too.
318
+ B_S = np.dot(JS.T, JS) + np.dot(S.T * diag_h, S)
319
+ g_S = S.T.dot(g_h)
320
+
321
+ # theta controls the step-back ratio from the bounds.
322
+ theta = max(0.995, 1 - g_norm)
323
+
324
+ actual_reduction = -1
325
+ while actual_reduction <= 0 and nfev < max_nfev:
326
+ if tr_solver == 'exact':
327
+ p_h, alpha, n_iter = solve_lsq_trust_region(
328
+ n, m, uf, s, V, Delta, initial_alpha=alpha)
329
+ elif tr_solver == 'lsmr':
330
+ p_S, _ = solve_trust_region_2d(B_S, g_S, Delta)
331
+ p_h = S.dot(p_S)
332
+
333
+ p = d * p_h # Trust-region solution in the original space.
334
+ step, step_h, predicted_reduction = select_step(
335
+ x, J_h, diag_h, g_h, p, p_h, d, Delta, lb, ub, theta)
336
+
337
+ x_new = make_strictly_feasible(x + step, lb, ub, rstep=0)
338
+ f_new = fun(x_new)
339
+ nfev += 1
340
+
341
+ step_h_norm = norm(step_h)
342
+
343
+ if not np.all(np.isfinite(f_new)):
344
+ Delta = 0.25 * step_h_norm
345
+ continue
346
+
347
+ # Usual trust-region step quality estimation.
348
+ if loss_function is not None:
349
+ cost_new = loss_function(f_new, cost_only=True)
350
+ else:
351
+ cost_new = 0.5 * np.dot(f_new, f_new)
352
+ actual_reduction = cost - cost_new
353
+ Delta_new, ratio = update_tr_radius(
354
+ Delta, actual_reduction, predicted_reduction,
355
+ step_h_norm, step_h_norm > 0.95 * Delta)
356
+
357
+ step_norm = norm(step)
358
+ termination_status = check_termination(
359
+ actual_reduction, cost, step_norm, norm(x), ratio, ftol, xtol)
360
+ if termination_status is not None:
361
+ break
362
+
363
+ alpha *= Delta / Delta_new
364
+ Delta = Delta_new
365
+
366
+ if actual_reduction > 0:
367
+ x = x_new
368
+
369
+ f = f_new
370
+ f_true = f.copy()
371
+
372
+ cost = cost_new
373
+
374
+ J = jac(x, f)
375
+ njev += 1
376
+
377
+ if loss_function is not None:
378
+ rho = loss_function(f)
379
+ J, f = scale_for_robust_loss_function(J, f, rho)
380
+
381
+ g = compute_grad(J, f)
382
+
383
+ if jac_scale:
384
+ scale, scale_inv = compute_jac_scale(J, scale_inv)
385
+ else:
386
+ step_norm = 0
387
+ actual_reduction = 0
388
+
389
+ iteration += 1
390
+
391
+ if termination_status is None:
392
+ termination_status = 0
393
+
394
+ active_mask = find_active_constraints(x, lb, ub, rtol=xtol)
395
+ return OptimizeResult(
396
+ x=x, cost=cost, fun=f_true, jac=J, grad=g, optimality=g_norm,
397
+ active_mask=active_mask, nfev=nfev, njev=njev,
398
+ status=termination_status)
399
+
400
+
401
+ def trf_no_bounds(fun, jac, x0, f0, J0, ftol, xtol, gtol, max_nfev,
402
+ x_scale, loss_function, tr_solver, tr_options, verbose):
403
+ x = x0.copy()
404
+
405
+ f = f0
406
+ f_true = f.copy()
407
+ nfev = 1
408
+
409
+ J = J0
410
+ njev = 1
411
+ m, n = J.shape
412
+
413
+ if loss_function is not None:
414
+ rho = loss_function(f)
415
+ cost = 0.5 * np.sum(rho[0])
416
+ J, f = scale_for_robust_loss_function(J, f, rho)
417
+ else:
418
+ cost = 0.5 * np.dot(f, f)
419
+
420
+ g = compute_grad(J, f)
421
+
422
+ jac_scale = isinstance(x_scale, str) and x_scale == 'jac'
423
+ if jac_scale:
424
+ scale, scale_inv = compute_jac_scale(J)
425
+ else:
426
+ scale, scale_inv = x_scale, 1 / x_scale
427
+
428
+ Delta = norm(x0 * scale_inv)
429
+ if Delta == 0:
430
+ Delta = 1.0
431
+
432
+ if tr_solver == 'lsmr':
433
+ reg_term = 0
434
+ damp = tr_options.pop('damp', 0.0)
435
+ regularize = tr_options.pop('regularize', True)
436
+
437
+ if max_nfev is None:
438
+ max_nfev = x0.size * 100
439
+
440
+ alpha = 0.0 # "Levenberg-Marquardt" parameter
441
+
442
+ termination_status = None
443
+ iteration = 0
444
+ step_norm = None
445
+ actual_reduction = None
446
+
447
+ if verbose == 2:
448
+ print_header_nonlinear()
449
+
450
+ while True:
451
+ g_norm = norm(g, ord=np.inf)
452
+ if g_norm < gtol:
453
+ termination_status = 1
454
+
455
+ if verbose == 2:
456
+ print_iteration_nonlinear(iteration, nfev, cost, actual_reduction,
457
+ step_norm, g_norm)
458
+
459
+ if termination_status is not None or nfev == max_nfev:
460
+ break
461
+
462
+ d = scale
463
+ g_h = d * g
464
+
465
+ if tr_solver == 'exact':
466
+ J_h = J * d
467
+ U, s, V = svd(J_h, full_matrices=False)
468
+ V = V.T
469
+ uf = U.T.dot(f)
470
+ elif tr_solver == 'lsmr':
471
+ J_h = right_multiplied_operator(J, d)
472
+
473
+ if regularize:
474
+ a, b = build_quadratic_1d(J_h, g_h, -g_h)
475
+ to_tr = Delta / norm(g_h)
476
+ ag_value = minimize_quadratic_1d(a, b, 0, to_tr)[1]
477
+ reg_term = -ag_value / Delta**2
478
+
479
+ damp_full = (damp**2 + reg_term)**0.5
480
+ gn_h = lsmr(J_h, f, damp=damp_full, **tr_options)[0]
481
+ S = np.vstack((g_h, gn_h)).T
482
+ S, _ = qr(S, mode='economic')
483
+ JS = J_h.dot(S)
484
+ B_S = np.dot(JS.T, JS)
485
+ g_S = S.T.dot(g_h)
486
+
487
+ actual_reduction = -1
488
+ while actual_reduction <= 0 and nfev < max_nfev:
489
+ if tr_solver == 'exact':
490
+ step_h, alpha, n_iter = solve_lsq_trust_region(
491
+ n, m, uf, s, V, Delta, initial_alpha=alpha)
492
+ elif tr_solver == 'lsmr':
493
+ p_S, _ = solve_trust_region_2d(B_S, g_S, Delta)
494
+ step_h = S.dot(p_S)
495
+
496
+ predicted_reduction = -evaluate_quadratic(J_h, g_h, step_h)
497
+ step = d * step_h
498
+ x_new = x + step
499
+ f_new = fun(x_new)
500
+ nfev += 1
501
+
502
+ step_h_norm = norm(step_h)
503
+
504
+ if not np.all(np.isfinite(f_new)):
505
+ Delta = 0.25 * step_h_norm
506
+ continue
507
+
508
+ # Usual trust-region step quality estimation.
509
+ if loss_function is not None:
510
+ cost_new = loss_function(f_new, cost_only=True)
511
+ else:
512
+ cost_new = 0.5 * np.dot(f_new, f_new)
513
+ actual_reduction = cost - cost_new
514
+
515
+ Delta_new, ratio = update_tr_radius(
516
+ Delta, actual_reduction, predicted_reduction,
517
+ step_h_norm, step_h_norm > 0.95 * Delta)
518
+
519
+ step_norm = norm(step)
520
+ termination_status = check_termination(
521
+ actual_reduction, cost, step_norm, norm(x), ratio, ftol, xtol)
522
+ if termination_status is not None:
523
+ break
524
+
525
+ alpha *= Delta / Delta_new
526
+ Delta = Delta_new
527
+
528
+ if actual_reduction > 0:
529
+ x = x_new
530
+
531
+ f = f_new
532
+ f_true = f.copy()
533
+
534
+ cost = cost_new
535
+
536
+ J = jac(x, f)
537
+ njev += 1
538
+
539
+ if loss_function is not None:
540
+ rho = loss_function(f)
541
+ J, f = scale_for_robust_loss_function(J, f, rho)
542
+
543
+ g = compute_grad(J, f)
544
+
545
+ if jac_scale:
546
+ scale, scale_inv = compute_jac_scale(J, scale_inv)
547
+ else:
548
+ step_norm = 0
549
+ actual_reduction = 0
550
+
551
+ iteration += 1
552
+
553
+ if termination_status is None:
554
+ termination_status = 0
555
+
556
+ active_mask = np.zeros_like(x)
557
+ return OptimizeResult(
558
+ x=x, cost=cost, fun=f_true, jac=J, grad=g, optimality=g_norm,
559
+ active_mask=active_mask, nfev=nfev, njev=njev,
560
+ status=termination_status)
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_milp.py ADDED
@@ -0,0 +1,392 @@
1
+ import warnings
2
+ import numpy as np
3
+ from scipy.sparse import csc_array, vstack, issparse
4
+ from scipy._lib._util import VisibleDeprecationWarning
5
+ from ._highs._highs_wrapper import _highs_wrapper # type: ignore[import]
6
+ from ._constraints import LinearConstraint, Bounds
7
+ from ._optimize import OptimizeResult
8
+ from ._linprog_highs import _highs_to_scipy_status_message
9
+
10
+
11
+ def _constraints_to_components(constraints):
12
+ """
13
+ Convert sequence of constraints to a single set of components A, b_l, b_u.
14
+
15
+ `constraints` could be
16
+
17
+ 1. A LinearConstraint
18
+ 2. A tuple representing a LinearConstraint
19
+ 3. An invalid object
20
+ 4. A sequence composed entirely of objects of type 1/2
21
+ 5. A sequence containing at least one object of type 3
22
+
23
+ We want to accept 1, 2, and 4 and reject 3 and 5.
24
+ """
25
+ message = ("`constraints` (or each element within `constraints`) must be "
26
+ "convertible into an instance of "
27
+ "`scipy.optimize.LinearConstraint`.")
28
+ As = []
29
+ b_ls = []
30
+ b_us = []
31
+
32
+ # Accept case 1 by standardizing as case 4
33
+ if isinstance(constraints, LinearConstraint):
34
+ constraints = [constraints]
35
+ else:
36
+ # Reject case 3
37
+ try:
38
+ iter(constraints)
39
+ except TypeError as exc:
40
+ raise ValueError(message) from exc
41
+
42
+ # Accept case 2 by standardizing as case 4
43
+ if len(constraints) == 3:
44
+ # argument could be a single tuple representing a LinearConstraint
45
+ try:
46
+ constraints = [LinearConstraint(*constraints)]
47
+ except (TypeError, ValueError, VisibleDeprecationWarning):
48
+ # argument was not a tuple representing a LinearConstraint
49
+ pass
50
+
51
+ # Address cases 4/5
52
+ for constraint in constraints:
53
+ # if it's not a LinearConstraint or something that represents a
54
+ # LinearConstraint at this point, it's invalid
55
+ if not isinstance(constraint, LinearConstraint):
56
+ try:
57
+ constraint = LinearConstraint(*constraint)
58
+ except TypeError as exc:
59
+ raise ValueError(message) from exc
60
+ As.append(csc_array(constraint.A))
61
+ b_ls.append(np.atleast_1d(constraint.lb).astype(np.float64))
62
+ b_us.append(np.atleast_1d(constraint.ub).astype(np.float64))
63
+
64
+ if len(As) > 1:
65
+ A = vstack(As, format="csc")
66
+ b_l = np.concatenate(b_ls)
67
+ b_u = np.concatenate(b_us)
68
+ else: # avoid unnecessary copying
69
+ A = As[0]
70
+ b_l = b_ls[0]
71
+ b_u = b_us[0]
72
+
73
+ return A, b_l, b_u
74
+
75
+
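A short usage sketch (the helper is private; LinearConstraint is public, and the values are illustrative):

from scipy.optimize import LinearConstraint

con1 = LinearConstraint([[1, 1]], lb=0, ub=10)
con2 = LinearConstraint([[1, -1]], lb=-5, ub=5)
A, b_l, b_u = _constraints_to_components([con1, con2])
print(A.toarray())  # [[ 1.  1.]
                    #  [ 1. -1.]]
print(b_l, b_u)     # [ 0. -5.] [10.  5.]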
76
+ def _milp_iv(c, integrality, bounds, constraints, options):
77
+ # objective IV
78
+ if issparse(c):
79
+ raise ValueError("`c` must be a dense array.")
80
+ c = np.atleast_1d(c).astype(np.float64)
81
+ if c.ndim != 1 or c.size == 0 or not np.all(np.isfinite(c)):
82
+ message = ("`c` must be a one-dimensional array of finite numbers "
83
+ "with at least one element.")
84
+ raise ValueError(message)
85
+
86
+ # integrality IV
87
+ if issparse(integrality):
88
+ raise ValueError("`integrality` must be a dense array.")
89
+ message = ("`integrality` must contain integers 0-3 and be broadcastable "
90
+ "to `c.shape`.")
91
+ if integrality is None:
92
+ integrality = 0
93
+ try:
94
+ integrality = np.broadcast_to(integrality, c.shape).astype(np.uint8)
95
+ except ValueError:
96
+ raise ValueError(message)
97
+ if integrality.min() < 0 or integrality.max() > 3:
98
+ raise ValueError(message)
99
+
100
+ # bounds IV
101
+ if bounds is None:
102
+ bounds = Bounds(0, np.inf)
103
+ elif not isinstance(bounds, Bounds):
104
+ message = ("`bounds` must be convertible into an instance of "
105
+ "`scipy.optimize.Bounds`.")
106
+ try:
107
+ bounds = Bounds(*bounds)
108
+ except TypeError as exc:
109
+ raise ValueError(message) from exc
110
+
111
+ try:
112
+ lb = np.broadcast_to(bounds.lb, c.shape).astype(np.float64)
113
+ ub = np.broadcast_to(bounds.ub, c.shape).astype(np.float64)
114
+ except (ValueError, TypeError) as exc:
115
+ message = ("`bounds.lb` and `bounds.ub` must contain reals and "
116
+ "be broadcastable to `c.shape`.")
117
+ raise ValueError(message) from exc
118
+
119
+ # constraints IV
120
+ if not constraints:
121
+ constraints = [LinearConstraint(np.empty((0, c.size)),
122
+ np.empty((0,)), np.empty((0,)))]
123
+ try:
124
+ A, b_l, b_u = _constraints_to_components(constraints)
125
+ except ValueError as exc:
126
+ message = ("`constraints` (or each element within `constraints`) must "
127
+ "be convertible into an instance of "
128
+ "`scipy.optimize.LinearConstraint`.")
129
+ raise ValueError(message) from exc
130
+
131
+ if A.shape != (b_l.size, c.size):
132
+ message = "The shape of `A` must be (len(b_l), len(c))."
133
+ raise ValueError(message)
134
+ indptr, indices, data = A.indptr, A.indices, A.data.astype(np.float64)
135
+
136
+ # options IV
137
+ options = options or {}
138
+ supported_options = {'disp', 'presolve', 'time_limit', 'node_limit',
139
+ 'mip_rel_gap'}
140
+ unsupported_options = set(options).difference(supported_options)
141
+ if unsupported_options:
142
+ message = (f"Unrecognized options detected: {unsupported_options}. "
143
+ "These will be passed to HiGHS verbatim.")
144
+ warnings.warn(message, RuntimeWarning, stacklevel=3)
145
+ options_iv = {'log_to_console': options.pop("disp", False),
146
+ 'mip_max_nodes': options.pop("node_limit", None)}
147
+ options_iv.update(options)
148
+
149
+ return c, integrality, lb, ub, indptr, indices, data, b_l, b_u, options_iv
150
+
151
+
152
+ def milp(c, *, integrality=None, bounds=None, constraints=None, options=None):
153
+ r"""
154
+ Mixed-integer linear programming
155
+
156
+ Solves problems of the following form:
157
+
158
+ .. math::
159
+
160
+ \min_x \ & c^T x \\
161
+ \mbox{such that} \ & b_l \leq A x \leq b_u,\\
162
+ & l \leq x \leq u, \\
163
+ & x_i \in \mathbb{Z}, i \in X_i
164
+
165
+ where :math:`x` is a vector of decision variables;
166
+ :math:`c`, :math:`b_l`, :math:`b_u`, :math:`l`, and :math:`u` are vectors;
167
+ :math:`A` is a matrix, and :math:`X_i` is the set of indices of
168
+ decision variables that must be integral. (In this context, a
169
+ variable that can assume only integer values is said to be "integral";
170
+ it has an "integrality" constraint.)
171
+
172
+ Alternatively, that's:
173
+
174
+ minimize::
175
+
176
+ c @ x
177
+
178
+ such that::
179
+
180
+ b_l <= A @ x <= b_u
181
+ l <= x <= u
182
+ Specified elements of x must be integers
183
+
184
+ By default, ``l = 0`` and ``u = np.inf`` unless specified with
185
+ ``bounds``.
186
+
187
+ Parameters
188
+ ----------
189
+ c : 1D dense array_like
190
+ The coefficients of the linear objective function to be minimized.
191
+ `c` is converted to a double precision array before the problem is
192
+ solved.
193
+ integrality : 1D dense array_like, optional
194
+ Indicates the type of integrality constraint on each decision variable.
195
+
196
+ ``0`` : Continuous variable; no integrality constraint.
197
+
198
+ ``1`` : Integer variable; decision variable must be an integer
199
+ within `bounds`.
200
+
201
+ ``2`` : Semi-continuous variable; decision variable must be within
202
+ `bounds` or take value ``0``.
203
+
204
+ ``3`` : Semi-integer variable; decision variable must be an integer
205
+ within `bounds` or take value ``0``.
206
+
207
+ By default, all variables are continuous. `integrality` is converted
208
+ to an array of integers before the problem is solved.
209
+
210
+ bounds : scipy.optimize.Bounds, optional
211
+ Bounds on the decision variables. Lower and upper bounds are converted
212
+ to double precision arrays before the problem is solved. The
213
+ ``keep_feasible`` parameter of the `Bounds` object is ignored. If
214
+ not specified, all decision variables are constrained to be
215
+ non-negative.
216
+ constraints : sequence of scipy.optimize.LinearConstraint, optional
217
+ Linear constraints of the optimization problem. Arguments may be
218
+ one of the following:
219
+
220
+ 1. A single `LinearConstraint` object
221
+ 2. A single tuple that can be converted to a `LinearConstraint` object
222
+ as ``LinearConstraint(*constraints)``
223
+ 3. A sequence composed entirely of objects of type 1. and 2.
224
+
225
+ Before the problem is solved, all values are converted to double
226
+ precision, and the matrices of constraint coefficients are converted to
227
+ instances of `scipy.sparse.csc_array`. The ``keep_feasible`` parameter
228
+ of `LinearConstraint` objects is ignored.
229
+ options : dict, optional
230
+ A dictionary of solver options. The following keys are recognized.
231
+
232
+ disp : bool (default: ``False``)
233
+ Set to ``True`` if indicators of optimization status are to be
234
+ printed to the console during optimization.
235
+ node_limit : int, optional
236
+ The maximum number of nodes (linear program relaxations) to solve
237
+ before stopping. Default is no maximum number of nodes.
238
+ presolve : bool (default: ``True``)
239
+ Presolve attempts to identify trivial infeasibilities,
240
+ identify trivial unboundedness, and simplify the problem before
241
+ sending it to the main solver.
242
+ time_limit : float, optional
243
+ The maximum number of seconds allotted to solve the problem.
244
+ Default is no time limit.
245
+ mip_rel_gap : float, optional
246
+ Termination criterion for MIP solver: solver will terminate when
247
+ the gap between the primal objective value and the dual objective
248
+ bound, scaled by the primal objective value, is <= mip_rel_gap.
249
+
250
+ Returns
251
+ -------
252
+ res : OptimizeResult
253
+ An instance of :class:`scipy.optimize.OptimizeResult`. The object
254
+ is guaranteed to have the following attributes.
255
+
256
+ status : int
257
+ An integer representing the exit status of the algorithm.
258
+
259
+ ``0`` : Optimal solution found.
260
+
261
+ ``1`` : Iteration or time limit reached.
262
+
263
+ ``2`` : Problem is infeasible.
264
+
265
+ ``3`` : Problem is unbounded.
266
+
267
+ ``4`` : Other; see message for details.
268
+
269
+ success : bool
270
+ ``True`` when an optimal solution is found and ``False`` otherwise.
271
+
272
+ message : str
273
+ A string descriptor of the exit status of the algorithm.
274
+
275
+ The following attributes will also be present, but the values may be
276
+ ``None``, depending on the solution status.
277
+
278
+ x : ndarray
279
+ The values of the decision variables that minimize the
280
+ objective function while satisfying the constraints.
281
+ fun : float
282
+ The optimal value of the objective function ``c @ x``.
283
+ mip_node_count : int
284
+ The number of subproblems or "nodes" solved by the MILP solver.
285
+ mip_dual_bound : float
286
+ The MILP solver's final estimate of the lower bound on the optimal
287
+ solution.
288
+ mip_gap : float
289
+ The difference between the primal objective value and the dual
290
+ objective bound, scaled by the primal objective value.
291
+
292
+ Notes
293
+ -----
294
+ `milp` is a wrapper of the HiGHS linear optimization software [1]_. The
295
+ algorithm is deterministic, and it typically finds the global optimum of
296
+ moderately challenging mixed-integer linear programs (when it exists).
297
+
298
+ References
299
+ ----------
300
+ .. [1] Huangfu, Q., Galabova, I., Feldmeier, M., and Hall, J. A. J.
301
+ "HiGHS - high performance software for linear optimization."
302
+ https://highs.dev/
303
+ .. [2] Huangfu, Q. and Hall, J. A. J. "Parallelizing the dual revised
304
+ simplex method." Mathematical Programming Computation, 10 (1),
305
+ 119-142, 2018. DOI: 10.1007/s12532-017-0130-5
306
+
307
+ Examples
308
+ --------
309
+ Consider the problem at
310
+ https://en.wikipedia.org/wiki/Integer_programming#Example, which is
311
+ expressed as a maximization problem of two variables. Since `milp` requires
312
+ that the problem be expressed as a minimization problem, the objective
313
+ function coefficients on the decision variables are:
314
+
315
+ >>> import numpy as np
316
+ >>> c = -np.array([0, 1])
317
+
318
+ Note the negative sign: we maximize the original objective function
319
+ by minimizing the negative of the objective function.
320
+
321
+ We collect the coefficients of the constraints into arrays like:
322
+
323
+ >>> A = np.array([[-1, 1], [3, 2], [2, 3]])
324
+ >>> b_u = np.array([1, 12, 12])
325
+ >>> b_l = np.full_like(b_u, -np.inf)
326
+
327
+ Because there is no lower limit on these constraints, we have defined a
328
+ variable ``b_l`` full of values representing negative infinity. This may
329
+ be unfamiliar to users of `scipy.optimize.linprog`, which only accepts
330
+ "less than" (or "upper bound") inequality constraints of the form
331
+ ``A_ub @ x <= b_u``. By accepting both ``b_l`` and ``b_u`` of constraints
332
+ ``b_l <= A_ub @ x <= b_u``, `milp` makes it easy to specify "greater than"
333
+ inequality constraints, "less than" inequality constraints, and equality
334
+ constraints concisely.
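+
+ For instance (an illustrative aside, not part of the original docstring),
+ an equality constraint ``A @ x == b`` is obtained by passing the same
+ array as both bounds, ``LinearConstraint(A, b, b)``.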
335
+
336
+ These arrays are collected into a single `LinearConstraint` object like:
337
+
338
+ >>> from scipy.optimize import LinearConstraint
339
+ >>> constraints = LinearConstraint(A, b_l, b_u)
340
+
341
+ The non-negativity bounds on the decision variables are enforced by
342
+ default, so we do not need to provide an argument for `bounds`.
343
+
344
+ Finally, the problem states that both decision variables must be integers:
345
+
346
+ >>> integrality = np.ones_like(c)
347
+
348
+ We solve the problem like:
349
+
350
+ >>> from scipy.optimize import milp
351
+ >>> res = milp(c=c, constraints=constraints, integrality=integrality)
352
+ >>> res.x
353
+ [1.0, 2.0]
354
+
355
+ Note that had we solved the relaxed problem (without integrality
356
+ constraints):
357
+
358
+ >>> res = milp(c=c, constraints=constraints) # OR:
359
+ >>> # from scipy.optimize import linprog; res = linprog(c, A, b_u)
360
+ >>> res.x
361
+ [1.8, 2.8]
362
+
363
+ we would not have obtained the correct solution by rounding to the nearest
364
+ integers.
365
+
366
+ Other examples are given :ref:`in the tutorial <tutorial-optimize_milp>`.
367
+
368
+ """
369
+ args_iv = _milp_iv(c, integrality, bounds, constraints, options)
370
+ c, integrality, lb, ub, indptr, indices, data, b_l, b_u, options = args_iv
371
+
372
+ highs_res = _highs_wrapper(c, indptr, indices, data, b_l, b_u,
373
+ lb, ub, integrality, options)
374
+
375
+ res = {}
376
+
377
+ # Convert to scipy-style status and message
378
+ highs_status = highs_res.get('status', None)
379
+ highs_message = highs_res.get('message', None)
380
+ status, message = _highs_to_scipy_status_message(highs_status,
381
+ highs_message)
382
+ res['status'] = status
383
+ res['message'] = message
384
+ res['success'] = (status == 0)
385
+ x = highs_res.get('x', None)
386
+ res['x'] = np.array(x) if x is not None else None
387
+ res['fun'] = highs_res.get('fun', None)
388
+ res['mip_node_count'] = highs_res.get('mip_node_count', None)
389
+ res['mip_dual_bound'] = highs_res.get('mip_dual_bound', None)
390
+ res['mip_gap'] = highs_res.get('mip_gap', None)
391
+
392
+ return OptimizeResult(res)
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_minpack2.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (61 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_minpack_py.py ADDED
@@ -0,0 +1,1157 @@
1
+ import warnings
2
+ from . import _minpack
3
+
4
+ import numpy as np
5
+ from numpy import (atleast_1d, triu, shape, transpose, zeros, prod, greater,
6
+ asarray, inf,
7
+ finfo, inexact, issubdtype, dtype)
8
+ from scipy import linalg
9
+ from scipy.linalg import svd, cholesky, solve_triangular, LinAlgError
10
+ from scipy._lib._util import _asarray_validated, _lazywhere, _contains_nan
11
+ from scipy._lib._util import getfullargspec_no_self as _getfullargspec
12
+ from ._optimize import OptimizeResult, _check_unknown_options, OptimizeWarning
13
+ from ._lsq import least_squares
14
+ # from ._lsq.common import make_strictly_feasible
15
+ from ._lsq.least_squares import prepare_bounds
16
+ from scipy.optimize._minimize import Bounds
17
+
18
+ # deprecated imports to be removed in SciPy 1.13.0
19
+ from numpy import dot, eye, take # noqa: F401
20
+ from numpy.linalg import inv # noqa: F401
21
+
22
+ error = _minpack.error
23
+
24
+ __all__ = ['fsolve', 'leastsq', 'fixed_point', 'curve_fit']
25
+
26
+
27
+ def _check_func(checker, argname, thefunc, x0, args, numinputs,
28
+ output_shape=None):
29
+ res = atleast_1d(thefunc(*((x0[:numinputs],) + args)))
30
+ if (output_shape is not None) and (shape(res) != output_shape):
31
+ if (output_shape[0] != 1):
32
+ if len(output_shape) > 1:
33
+ if output_shape[1] == 1:
34
+ return shape(res)
35
+ msg = f"{checker}: there is a mismatch between the input and output " \
36
+ f"shape of the '{argname}' argument"
37
+ func_name = getattr(thefunc, '__name__', None)
38
+ if func_name:
39
+ msg += " '%s'." % func_name
40
+ else:
41
+ msg += "."
42
+ msg += f' Shape should be {output_shape} but it is {shape(res)}.'
43
+ raise TypeError(msg)
44
+ if issubdtype(res.dtype, inexact):
45
+ dt = res.dtype
46
+ else:
47
+ dt = dtype(float)
48
+ return shape(res), dt
49
+
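+
+ # A minimal sketch (editorial addition, not part of SciPy) of what the
+ # validator above returns for a well-shaped callable; `_example_check_func`
+ # is a hypothetical helper, for illustration only.
+ def _example_check_func():
+     shp, dt = _check_func('fsolve', 'func', lambda x: x ** 2,
+                           np.array([1.0, 2.0]), (), 2, (2,))
+     return shp, dt  # ((2,), dtype('float64'))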
50
+
51
+ def fsolve(func, x0, args=(), fprime=None, full_output=0,
52
+ col_deriv=0, xtol=1.49012e-8, maxfev=0, band=None,
53
+ epsfcn=None, factor=100, diag=None):
54
+ """
55
+ Find the roots of a function.
56
+
57
+ Return the roots of the (non-linear) equations defined by
58
+ ``func(x) = 0`` given a starting estimate.
59
+
60
+ Parameters
61
+ ----------
62
+ func : callable ``f(x, *args)``
63
+ A function that takes at least one (possibly vector) argument,
64
+ and returns a value of the same length.
65
+ x0 : ndarray
66
+ The starting estimate for the roots of ``func(x) = 0``.
67
+ args : tuple, optional
68
+ Any extra arguments to `func`.
69
+ fprime : callable ``f(x, *args)``, optional
70
+ A function to compute the Jacobian of `func` with derivatives
71
+ across the rows. By default, the Jacobian will be estimated.
72
+ full_output : bool, optional
73
+ If True, return optional outputs.
74
+ col_deriv : bool, optional
75
+ Specify whether the Jacobian function computes derivatives down
76
+ the columns (faster, because there is no transpose operation).
77
+ xtol : float, optional
78
+ The calculation will terminate if the relative error between two
79
+ consecutive iterates is at most `xtol`.
80
+ maxfev : int, optional
81
+ The maximum number of calls to the function. If zero, then
82
+ ``100*(N+1)`` is the maximum where N is the number of elements
83
+ in `x0`.
84
+ band : tuple, optional
85
+ If set to a two-sequence containing the number of sub- and
86
+ super-diagonals within the band of the Jacobi matrix, the
87
+ Jacobi matrix is considered banded (only for ``fprime=None``).
88
+ epsfcn : float, optional
89
+ A suitable step length for the forward-difference
90
+ approximation of the Jacobian (for ``fprime=None``). If
91
+ `epsfcn` is less than the machine precision, it is assumed
92
+ that the relative errors in the functions are of the order of
93
+ the machine precision.
94
+ factor : float, optional
95
+ A parameter determining the initial step bound
96
+ (``factor * || diag * x||``). Should be in the interval
97
+ ``(0.1, 100)``.
98
+ diag : sequence, optional
99
+ N positive entries that serve as scale factors for the
100
+ variables.
101
+
102
+ Returns
103
+ -------
104
+ x : ndarray
105
+ The solution (or the result of the last iteration for
106
+ an unsuccessful call).
107
+ infodict : dict
108
+ A dictionary of optional outputs with the keys:
109
+
110
+ ``nfev``
111
+ number of function calls
112
+ ``njev``
113
+ number of Jacobian calls
114
+ ``fvec``
115
+ function evaluated at the output
116
+ ``fjac``
117
+ the orthogonal matrix, q, produced by the QR
118
+ factorization of the final approximate Jacobian
119
+ matrix, stored column wise
120
+ ``r``
121
+ upper triangular matrix produced by QR factorization
122
+ of the same matrix
123
+ ``qtf``
124
+ the vector ``(transpose(q) * fvec)``
125
+
126
+ ier : int
127
+ An integer flag. Set to 1 if a solution was found, otherwise refer
128
+ to `mesg` for more information.
129
+ mesg : str
130
+ If no solution is found, `mesg` details the cause of failure.
131
+
132
+ See Also
133
+ --------
134
+ root : Interface to root finding algorithms for multivariate
135
+ functions. See the ``method='hybr'`` in particular.
136
+
137
+ Notes
138
+ -----
139
+ ``fsolve`` is a wrapper around MINPACK's hybrd and hybrj algorithms.
140
+
141
+ Examples
142
+ --------
143
+ Find a solution to the system of equations:
144
+ ``x0*cos(x1) = 4, x1*x0 - x1 = 5``.
145
+
146
+ >>> import numpy as np
147
+ >>> from scipy.optimize import fsolve
148
+ >>> def func(x):
149
+ ... return [x[0] * np.cos(x[1]) - 4,
150
+ ... x[1] * x[0] - x[1] - 5]
151
+ >>> root = fsolve(func, [1, 1])
152
+ >>> root
153
+ array([6.50409711, 0.90841421])
154
+ >>> np.isclose(func(root), [0.0, 0.0]) # func(root) should be almost 0.0.
155
+ array([ True, True])
156
+
157
+ """
158
+ options = {'col_deriv': col_deriv,
159
+ 'xtol': xtol,
160
+ 'maxfev': maxfev,
161
+ 'band': band,
162
+ 'eps': epsfcn,
163
+ 'factor': factor,
164
+ 'diag': diag}
165
+
166
+ res = _root_hybr(func, x0, args, jac=fprime, **options)
167
+ if full_output:
168
+ x = res['x']
169
+ info = {k: res.get(k)
170
+ for k in ('nfev', 'njev', 'fjac', 'r', 'qtf') if k in res}
171
+ info['fvec'] = res['fun']
172
+ return x, info, res['status'], res['message']
173
+ else:
174
+ status = res['status']
175
+ msg = res['message']
176
+ if status == 0:
177
+ raise TypeError(msg)
178
+ elif status == 1:
179
+ pass
180
+ elif status in [2, 3, 4, 5]:
181
+ warnings.warn(msg, RuntimeWarning, stacklevel=2)
182
+ else:
183
+ raise TypeError(msg)
184
+ return res['x']
185
+
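+
+ # A minimal sketch (editorial addition, not part of SciPy) of the
+ # ``full_output`` path documented above; `_example_fsolve_full_output`
+ # is a hypothetical helper, for illustration only.
+ def _example_fsolve_full_output():
+     def func(x):
+         return [x[0] * np.cos(x[1]) - 4, x[1] * x[0] - x[1] - 5]
+     x, infodict, ier, mesg = fsolve(func, [1, 1], full_output=True)
+     # ier == 1 on success; infodict['fvec'] holds func evaluated at x
+     return x, infodict['fvec'], ier, mesg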
186
+
187
+ def _root_hybr(func, x0, args=(), jac=None,
188
+ col_deriv=0, xtol=1.49012e-08, maxfev=0, band=None, eps=None,
189
+ factor=100, diag=None, **unknown_options):
190
+ """
191
+ Find the roots of a multivariate function using MINPACK's hybrd and
192
+ hybrj routines (modified Powell method).
193
+
194
+ Options
195
+ -------
196
+ col_deriv : bool
197
+ Specify whether the Jacobian function computes derivatives down
198
+ the columns (faster, because there is no transpose operation).
199
+ xtol : float
200
+ The calculation will terminate if the relative error between two
201
+ consecutive iterates is at most `xtol`.
202
+ maxfev : int
203
+ The maximum number of calls to the function. If zero, then
204
+ ``100*(N+1)`` is the maximum where N is the number of elements
205
+ in `x0`.
206
+ band : tuple
207
+ If set to a two-sequence containing the number of sub- and
208
+ super-diagonals within the band of the Jacobi matrix, the
209
+ Jacobi matrix is considered banded (only for ``fprime=None``).
210
+ eps : float
211
+ A suitable step length for the forward-difference
212
+ approximation of the Jacobian (for ``fprime=None``). If
213
+ `eps` is less than the machine precision, it is assumed
214
+ that the relative errors in the functions are of the order of
215
+ the machine precision.
216
+ factor : float
217
+ A parameter determining the initial step bound
218
+ (``factor * || diag * x||``). Should be in the interval
219
+ ``(0.1, 100)``.
220
+ diag : sequence
221
+ N positive entries that serve as scale factors for the
222
+ variables.
223
+
224
+ """
225
+ _check_unknown_options(unknown_options)
226
+ epsfcn = eps
227
+
228
+ x0 = asarray(x0).flatten()
229
+ n = len(x0)
230
+ if not isinstance(args, tuple):
231
+ args = (args,)
232
+ shape, dtype = _check_func('fsolve', 'func', func, x0, args, n, (n,))
233
+ if epsfcn is None:
234
+ epsfcn = finfo(dtype).eps
235
+ Dfun = jac
236
+ if Dfun is None:
237
+ if band is None:
238
+ ml, mu = -10, -10
239
+ else:
240
+ ml, mu = band[:2]
241
+ if maxfev == 0:
242
+ maxfev = 200 * (n + 1)
243
+ retval = _minpack._hybrd(func, x0, args, 1, xtol, maxfev,
244
+ ml, mu, epsfcn, factor, diag)
245
+ else:
246
+ _check_func('fsolve', 'fprime', Dfun, x0, args, n, (n, n))
247
+ if (maxfev == 0):
248
+ maxfev = 100 * (n + 1)
249
+ retval = _minpack._hybrj(func, Dfun, x0, args, 1,
250
+ col_deriv, xtol, maxfev, factor, diag)
251
+
252
+ x, status = retval[0], retval[-1]
253
+
254
+ errors = {0: "Improper input parameters were entered.",
255
+ 1: "The solution converged.",
256
+ 2: "The number of calls to function has "
257
+ "reached maxfev = %d." % maxfev,
258
+ 3: "xtol=%f is too small, no further improvement "
259
+ "in the approximate\n solution "
260
+ "is possible." % xtol,
261
+ 4: "The iteration is not making good progress, as measured "
262
+ "by the \n improvement from the last five "
263
+ "Jacobian evaluations.",
264
+ 5: "The iteration is not making good progress, "
265
+ "as measured by the \n improvement from the last "
266
+ "ten iterations.",
267
+ 'unknown': "An error occurred."}
268
+
269
+ info = retval[1]
270
+ info['fun'] = info.pop('fvec')
271
+ sol = OptimizeResult(x=x, success=(status == 1), status=status,
272
+ method="hybr")
273
+ sol.update(info)
274
+ try:
275
+ sol['message'] = errors[status]
276
+ except KeyError:
277
+ sol['message'] = errors['unknown']
278
+
279
+ return sol
280
+
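+
+ # A minimal sketch (editorial addition, not part of SciPy): `_root_hybr`
+ # is normally reached through the public `scipy.optimize.root` interface,
+ # where the options documented above are passed via the `options` dict.
+ # `_example_root_hybr` is a hypothetical helper, for illustration only.
+ def _example_root_hybr():
+     from scipy.optimize import root
+     sol = root(lambda x: x + 2 * np.cos(x), x0=[0.3],
+                method='hybr', options={'xtol': 1e-10})
+     return sol.x  # approximately [-1.0299]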
281
+
282
+ LEASTSQ_SUCCESS = [1, 2, 3, 4]
283
+ LEASTSQ_FAILURE = [5, 6, 7, 8]
284
+
285
+
286
+ def leastsq(func, x0, args=(), Dfun=None, full_output=False,
287
+ col_deriv=False, ftol=1.49012e-8, xtol=1.49012e-8,
288
+ gtol=0.0, maxfev=0, epsfcn=None, factor=100, diag=None):
289
+ """
290
+ Minimize the sum of squares of a set of equations.
291
+
292
+ ::
293
+
294
+ x = arg min(sum(func(y)**2,axis=0))
295
+ y
296
+
297
+ Parameters
298
+ ----------
299
+ func : callable
300
+ Should take at least one (possibly length ``N`` vector) argument and
301
+ return ``M`` floating point numbers. It must not return NaNs or
302
+ fitting might fail. ``M`` must be greater than or equal to ``N``.
303
+ x0 : ndarray
304
+ The starting estimate for the minimization.
305
+ args : tuple, optional
306
+ Any extra arguments to func are placed in this tuple.
307
+ Dfun : callable, optional
308
+ A function or method to compute the Jacobian of func with derivatives
309
+ across the rows. If this is None, the Jacobian will be estimated.
310
+ full_output : bool, optional
311
+ If ``True``, return all optional outputs (not just `x` and `ier`).
312
+ col_deriv : bool, optional
313
+ If ``True``, specify that the Jacobian function computes derivatives
314
+ down the columns (faster, because there is no transpose operation).
315
+ ftol : float, optional
316
+ Relative error desired in the sum of squares.
317
+ xtol : float, optional
318
+ Relative error desired in the approximate solution.
319
+ gtol : float, optional
320
+ Orthogonality desired between the function vector and the columns of
321
+ the Jacobian.
322
+ maxfev : int, optional
323
+ The maximum number of calls to the function. If `Dfun` is provided,
324
+ then the default `maxfev` is 100*(N+1) where N is the number of elements
325
+ in x0, otherwise the default `maxfev` is 200*(N+1).
326
+ epsfcn : float, optional
327
+ A variable used in determining a suitable step length for the forward-
328
+ difference approximation of the Jacobian (for Dfun=None).
329
+ Normally the actual step length will be sqrt(epsfcn)*x.
330
+ If epsfcn is less than the machine precision, it is assumed that the
331
+ relative errors are of the order of the machine precision.
332
+ factor : float, optional
333
+ A parameter determining the initial step bound
334
+ (``factor * || diag * x||``). Should be in interval ``(0.1, 100)``.
335
+ diag : sequence, optional
336
+ N positive entries that serve as scale factors for the variables.
337
+
338
+ Returns
339
+ -------
340
+ x : ndarray
341
+ The solution (or the result of the last iteration for an unsuccessful
342
+ call).
343
+ cov_x : ndarray
344
+ The inverse of the Hessian. `fjac` and `ipvt` are used to construct an
345
+ estimate of the Hessian. A value of None indicates a singular matrix,
346
+ which means the curvature in parameters `x` is numerically flat. To
347
+ obtain the covariance matrix of the parameters `x`, `cov_x` must be
348
+ multiplied by the variance of the residuals -- see curve_fit. Only
349
+ returned if `full_output` is ``True``.
350
+ infodict : dict
351
+ a dictionary of optional outputs with the keys:
352
+
353
+ ``nfev``
354
+ The number of function calls
355
+ ``fvec``
356
+ The function evaluated at the output
357
+ ``fjac``
358
+ A permutation of the R matrix of a QR
359
+ factorization of the final approximate
360
+ Jacobian matrix, stored column wise.
361
+ Together with ipvt, the covariance of the
362
+ estimate can be approximated.
363
+ ``ipvt``
364
+ An integer array of length N which defines
365
+ a permutation matrix, p, such that
366
+ fjac*p = q*r, where r is upper triangular
367
+ with diagonal elements of nonincreasing
368
+ magnitude. Column j of p is column ipvt(j)
369
+ of the identity matrix.
370
+ ``qtf``
371
+ The vector (transpose(q) * fvec).
372
+
373
+ Only returned if `full_output` is ``True``.
374
+ mesg : str
375
+ A string message giving information about the cause of failure.
376
+ Only returned if `full_output` is ``True``.
377
+ ier : int
378
+ An integer flag. If it is equal to 1, 2, 3 or 4, the solution was
379
+ found. Otherwise, the solution was not found. In either case, the
380
+ optional output variable 'mesg' gives more information.
381
+
382
+ See Also
383
+ --------
384
+ least_squares : Newer interface to solve nonlinear least-squares problems
385
+ with bounds on the variables. See ``method='lm'`` in particular.
386
+
387
+ Notes
388
+ -----
389
+ "leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms.
390
+
391
+ cov_x is a Jacobian approximation to the Hessian of the least squares
392
+ objective function.
393
+ This approximation assumes that the objective function is based on the
394
+ difference between some observed target data (ydata) and a (non-linear)
395
+ function of the parameters `f(xdata, params)` ::
396
+
397
+ func(params) = ydata - f(xdata, params)
398
+
399
+ so that the objective function is ::
400
+
401
+ min sum((ydata - f(xdata, params))**2, axis=0)
402
+ params
403
+
404
+ The solution, `x`, is always a 1-D array, regardless of the shape of `x0`,
405
+ or whether `x0` is a scalar.
406
+
407
+ Examples
408
+ --------
409
+ >>> from scipy.optimize import leastsq
410
+ >>> def func(x):
411
+ ... return 2*(x-3)**2+1
412
+ >>> leastsq(func, 0)
413
+ (array([2.99999999]), 1)
414
+
415
+ """
416
+ x0 = asarray(x0).flatten()
417
+ n = len(x0)
418
+ if not isinstance(args, tuple):
419
+ args = (args,)
420
+ shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
421
+ m = shape[0]
422
+
423
+ if n > m:
424
+ raise TypeError(f"Improper input: func input vector length N={n} must"
425
+ f" not exceed func output vector length M={m}")
426
+
427
+ if epsfcn is None:
428
+ epsfcn = finfo(dtype).eps
429
+
430
+ if Dfun is None:
431
+ if maxfev == 0:
432
+ maxfev = 200*(n + 1)
433
+ retval = _minpack._lmdif(func, x0, args, full_output, ftol, xtol,
434
+ gtol, maxfev, epsfcn, factor, diag)
435
+ else:
436
+ if col_deriv:
437
+ _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m))
438
+ else:
439
+ _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n))
440
+ if maxfev == 0:
441
+ maxfev = 100 * (n + 1)
442
+ retval = _minpack._lmder(func, Dfun, x0, args, full_output,
443
+ col_deriv, ftol, xtol, gtol, maxfev,
444
+ factor, diag)
445
+
446
+ errors = {0: ["Improper input parameters.", TypeError],
447
+ 1: ["Both actual and predicted relative reductions "
448
+ "in the sum of squares\n are at most %f" % ftol, None],
449
+ 2: ["The relative error between two consecutive "
450
+ "iterates is at most %f" % xtol, None],
451
+ 3: ["Both actual and predicted relative reductions in "
452
+ f"the sum of squares\n are at most {ftol:f} and the "
453
+ "relative error between two consecutive "
454
+ f"iterates is at \n most {xtol:f}", None],
455
+ 4: ["The cosine of the angle between func(x) and any "
456
+ "column of the\n Jacobian is at most %f in "
457
+ "absolute value" % gtol, None],
458
+ 5: ["Number of calls to function has reached "
459
+ "maxfev = %d." % maxfev, ValueError],
460
+ 6: ["ftol=%f is too small, no further reduction "
461
+ "in the sum of squares\n is possible." % ftol,
462
+ ValueError],
463
+ 7: ["xtol=%f is too small, no further improvement in "
464
+ "the approximate\n solution is possible." % xtol,
465
+ ValueError],
466
+ 8: ["gtol=%f is too small, func(x) is orthogonal to the "
467
+ "columns of\n the Jacobian to machine "
468
+ "precision." % gtol, ValueError]}
469
+
470
+ # The FORTRAN return value (possible return values are >= 0 and <= 8)
471
+ info = retval[-1]
472
+
473
+ if full_output:
474
+ cov_x = None
475
+ if info in LEASTSQ_SUCCESS:
476
+ # This was
477
+ # perm = take(eye(n), retval[1]['ipvt'] - 1, 0)
478
+ # r = triu(transpose(retval[1]['fjac'])[:n, :])
479
+ # R = dot(r, perm)
480
+ # cov_x = inv(dot(transpose(R), R))
481
+ # but the explicit dot product was not necessary and sometimes
482
+ # the result was not symmetric positive definite. See gh-4555.
483
+ perm = retval[1]['ipvt'] - 1
484
+ n = len(perm)
485
+ r = triu(transpose(retval[1]['fjac'])[:n, :])
486
+ inv_triu = linalg.get_lapack_funcs('trtri', (r,))
487
+ try:
488
+ # inverse of permuted matrix is a permutation of matrix inverse
489
+ invR, trtri_info = inv_triu(r) # default: upper, non-unit diag
490
+ if trtri_info != 0: # explicit comparison for readability
491
+ raise LinAlgError(f'trtri returned info {trtri_info}')
492
+ invR[perm] = invR.copy()
493
+ cov_x = invR @ invR.T
494
+ except (LinAlgError, ValueError):
495
+ pass
496
+ return (retval[0], cov_x) + retval[1:-1] + (errors[info][0], info)
497
+ else:
498
+ if info in LEASTSQ_FAILURE:
499
+ warnings.warn(errors[info][0], RuntimeWarning, stacklevel=2)
500
+ elif info == 0:
501
+ raise errors[info][1](errors[info][0])
502
+ return retval[0], info
503
+
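+
+ # A minimal sketch (editorial addition, not part of SciPy) of the Notes
+ # above: scaling ``cov_x`` by the residual variance to approximate the
+ # parameter covariance. `_example_leastsq_cov` is a hypothetical helper,
+ # for illustration only.
+ def _example_leastsq_cov():
+     xdata = np.linspace(0, 1, 20)
+     ydata = 3.0 * xdata + 1.0 + 0.01 * np.sin(50 * xdata)
+     def residuals(params):
+         return ydata - (params[0] * xdata + params[1])
+     popt, cov_x, infodict, mesg, ier = leastsq(residuals, [1.0, 0.0],
+                                                full_output=True)
+     s_sq = (infodict['fvec'] ** 2).sum() / (len(ydata) - len(popt))
+     return popt, cov_x * s_sq  # parameters and their covariance estimate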
504
+
505
+ def _lightweight_memoizer(f):
506
+ # very shallow memoization to address gh-13670: only remember the first set
507
+ # of parameters and corresponding function value, and only attempt to use
508
+ # them twice (the number of times the function is evaluated at x0).
509
+ def _memoized_func(params):
510
+ if _memoized_func.skip_lookup:
511
+ return f(params)
512
+
513
+ if np.all(_memoized_func.last_params == params):
514
+ return _memoized_func.last_val
515
+ elif _memoized_func.last_params is not None:
516
+ _memoized_func.skip_lookup = True
517
+
518
+ val = f(params)
519
+
520
+ if _memoized_func.last_params is None:
521
+ _memoized_func.last_params = np.copy(params)
522
+ _memoized_func.last_val = val
523
+
524
+ return val
525
+
526
+ _memoized_func.last_params = None
527
+ _memoized_func.last_val = None
528
+ _memoized_func.skip_lookup = False
529
+ return _memoized_func
530
+
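+
+ # A minimal sketch (editorial addition, not part of SciPy) showing the
+ # behaviour described above: a repeated call with identical parameters is
+ # served from the one-entry cache. `_example_memoizer` is a hypothetical
+ # helper, for illustration only.
+ def _example_memoizer():
+     calls = []
+     def f(params):
+         calls.append(1)
+         return float(np.sum(params))
+     g = _lightweight_memoizer(f)
+     g(np.array([1.0, 2.0]))
+     g(np.array([1.0, 2.0]))  # cache hit; `f` is not evaluated again
+     return len(calls)  # 1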
531
+
532
+ def _wrap_func(func, xdata, ydata, transform):
533
+ if transform is None:
534
+ def func_wrapped(params):
535
+ return func(xdata, *params) - ydata
536
+ elif transform.size == 1 or transform.ndim == 1:
537
+ def func_wrapped(params):
538
+ return transform * (func(xdata, *params) - ydata)
539
+ else:
540
+ # Chisq = (y - yd)^T C^{-1} (y-yd)
541
+ # transform = L such that C = L L^T
542
+ # C^{-1} = L^{-T} L^{-1}
543
+ # Chisq = (y - yd)^T L^{-T} L^{-1} (y-yd)
544
+ # Define (y-yd)' = L^{-1} (y-yd)
545
+ # by solving
546
+ # L (y-yd)' = (y-yd)
547
+ # and minimize (y-yd)'^T (y-yd)'
548
+ def func_wrapped(params):
549
+ return solve_triangular(transform, func(xdata, *params) - ydata, lower=True)
550
+ return func_wrapped
551
+
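+
+ # A minimal sketch (editorial addition, not part of SciPy) checking the
+ # identity used in the comment above: with ``C = L @ L.T`` and
+ # ``r' = solve(L, r)``, the quadratic forms ``r'.T @ r'`` and
+ # ``r.T @ inv(C) @ r`` agree. `_example_whitening` is a hypothetical
+ # helper, for illustration only.
+ def _example_whitening():
+     rng = np.random.default_rng(0)
+     M = rng.standard_normal((4, 4))
+     C = M @ M.T + 4 * np.eye(4)  # a positive definite covariance
+     L = cholesky(C, lower=True)  # C = L @ L.T
+     r = rng.standard_normal(4)
+     rp = solve_triangular(L, r, lower=True)
+     return np.allclose(rp @ rp, r @ np.linalg.inv(C) @ r)  # True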
552
+
553
+ def _wrap_jac(jac, xdata, transform):
554
+ if transform is None:
555
+ def jac_wrapped(params):
556
+ return jac(xdata, *params)
557
+ elif transform.ndim == 1:
558
+ def jac_wrapped(params):
559
+ return transform[:, np.newaxis] * np.asarray(jac(xdata, *params))
560
+ else:
561
+ def jac_wrapped(params):
562
+ return solve_triangular(transform,
563
+ np.asarray(jac(xdata, *params)),
564
+ lower=True)
565
+ return jac_wrapped
566
+
567
+
568
+ def _initialize_feasible(lb, ub):
569
+ p0 = np.ones_like(lb)
570
+ lb_finite = np.isfinite(lb)
571
+ ub_finite = np.isfinite(ub)
572
+
573
+ mask = lb_finite & ub_finite
574
+ p0[mask] = 0.5 * (lb[mask] + ub[mask])
575
+
576
+ mask = lb_finite & ~ub_finite
577
+ p0[mask] = lb[mask] + 1
578
+
579
+ mask = ~lb_finite & ub_finite
580
+ p0[mask] = ub[mask] - 1
581
+
582
+ return p0
583
+
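+
+ # A minimal sketch (editorial addition, not part of SciPy) of the three
+ # bound configurations handled above. `_example_initialize_feasible` is a
+ # hypothetical helper, for illustration only.
+ def _example_initialize_feasible():
+     lb = np.array([0.0, 2.0, -np.inf])
+     ub = np.array([1.0, np.inf, -3.0])
+     # midpoint, lb + 1, and ub - 1, respectively
+     return _initialize_feasible(lb, ub)  # array([ 0.5,  3. , -4. ])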
584
+
585
+ def curve_fit(f, xdata, ydata, p0=None, sigma=None, absolute_sigma=False,
586
+ check_finite=None, bounds=(-np.inf, np.inf), method=None,
587
+ jac=None, *, full_output=False, nan_policy=None,
588
+ **kwargs):
589
+ """
590
+ Use non-linear least squares to fit a function, f, to data.
591
+
592
+ Assumes ``ydata = f(xdata, *params) + eps``.
593
+
594
+ Parameters
595
+ ----------
596
+ f : callable
597
+ The model function, f(x, ...). It must take the independent
598
+ variable as the first argument and the parameters to fit as
599
+ separate remaining arguments.
600
+ xdata : array_like
601
+ The independent variable where the data is measured.
602
+ Should usually be an M-length sequence or a (k,M)-shaped array for
603
+ functions with k predictors, and each element should be float
604
+ convertible if it is an array like object.
605
+ ydata : array_like
606
+ The dependent data, a length M array - nominally ``f(xdata, ...)``.
607
+ p0 : array_like, optional
608
+ Initial guess for the parameters (length N). If None, then the
609
+ initial values will all be 1 (if the number of parameters for the
610
+ function can be determined using introspection, otherwise a
611
+ ValueError is raised).
612
+ sigma : None or scalar or M-length sequence or MxM array, optional
613
+ Determines the uncertainty in `ydata`. If we define residuals as
614
+ ``r = ydata - f(xdata, *popt)``, then the interpretation of `sigma`
615
+ depends on its number of dimensions:
616
+
617
+ - A scalar or 1-D `sigma` should contain values of standard deviations of
618
+ errors in `ydata`. In this case, the optimized function is
619
+ ``chisq = sum((r / sigma) ** 2)``.
620
+
621
+ - A 2-D `sigma` should contain the covariance matrix of
622
+ errors in `ydata`. In this case, the optimized function is
623
+ ``chisq = r.T @ inv(sigma) @ r``.
624
+
625
+ .. versionadded:: 0.19
626
+
627
+ None (default) is equivalent to 1-D `sigma` filled with ones.
628
+ absolute_sigma : bool, optional
629
+ If True, `sigma` is used in an absolute sense and the estimated parameter
630
+ covariance `pcov` reflects these absolute values.
631
+
632
+ If False (default), only the relative magnitudes of the `sigma` values matter.
633
+ The returned parameter covariance matrix `pcov` is based on scaling
634
+ `sigma` by a constant factor. This constant is set by demanding that the
635
+ reduced `chisq` for the optimal parameters `popt` when using the
636
+ *scaled* `sigma` equals unity. In other words, `sigma` is scaled to
637
+ match the sample variance of the residuals after the fit. Default is False.
638
+ Mathematically,
639
+ ``pcov(absolute_sigma=False) = pcov(absolute_sigma=True) * chisq(popt)/(M-N)``
640
+ check_finite : bool, optional
641
+ If True, check that the input arrays do not contain nans of infs,
642
+ and raise a ValueError if they do. Setting this parameter to
643
+ False may silently produce nonsensical results if the input arrays
644
+ do contain nans. Default is True if `nan_policy` is not specified
645
+ explicitly and False otherwise.
646
+ bounds : 2-tuple of array_like or `Bounds`, optional
647
+ Lower and upper bounds on parameters. Defaults to no bounds.
648
+ There are two ways to specify the bounds:
649
+
650
+ - Instance of `Bounds` class.
651
+
652
+ - 2-tuple of array_like: Each element of the tuple must be either
653
+ an array with the length equal to the number of parameters, or a
654
+ scalar (in which case the bound is taken to be the same for all
655
+ parameters). Use ``np.inf`` with an appropriate sign to disable
656
+ bounds on all or some parameters.
657
+
658
+ method : {'lm', 'trf', 'dogbox'}, optional
659
+ Method to use for optimization. See `least_squares` for more details.
660
+ Default is 'lm' for unconstrained problems and 'trf' if `bounds` are
661
+ provided. The method 'lm' won't work when the number of observations
662
+ is less than the number of variables, use 'trf' or 'dogbox' in this
663
+ case.
664
+
665
+ .. versionadded:: 0.17
666
+ jac : callable, string or None, optional
667
+ Function with signature ``jac(x, ...)`` which computes the Jacobian
668
+ matrix of the model function with respect to parameters as a dense
669
+ array_like structure. It will be scaled according to provided `sigma`.
670
+ If None (default), the Jacobian will be estimated numerically.
671
+ String keywords for 'trf' and 'dogbox' methods can be used to select
672
+ a finite difference scheme, see `least_squares`.
673
+
674
+ .. versionadded:: 0.18
675
+ full_output : boolean, optional
676
+ If True, this function returns additioal information: `infodict`,
677
+ `mesg`, and `ier`.
678
+
679
+ .. versionadded:: 1.9
680
+ nan_policy : {'raise', 'omit', None}, optional
681
+ Defines how to handle when input contains nan.
682
+ The following options are available (default is None):
683
+
684
+ * 'raise': throws an error
685
+ * 'omit': performs the calculations ignoring nan values
686
+ * None: no special handling of NaNs is performed
687
+ (except what is done by check_finite); the behavior when NaNs
688
+ are present is implementation-dependent and may change.
689
+
690
+ Note that if this value is specified explicitly (not None),
691
+ `check_finite` will be set as False.
692
+
693
+ .. versionadded:: 1.11
694
+ **kwargs
695
+ Keyword arguments passed to `leastsq` for ``method='lm'`` or
696
+ `least_squares` otherwise.
697
+
698
+ Returns
699
+ -------
700
+ popt : array
701
+ Optimal values for the parameters so that the sum of the squared
702
+ residuals of ``f(xdata, *popt) - ydata`` is minimized.
703
+ pcov : 2-D array
704
+ The estimated approximate covariance of popt. The diagonals provide
705
+ the variance of the parameter estimate. To compute one standard
706
+ deviation errors on the parameters, use
707
+ ``perr = np.sqrt(np.diag(pcov))``. Note that the relationship between
708
+ `cov` and parameter error estimates is derived based on a linear
709
+ approximation to the model function around the optimum [1].
710
+ When this approximation becomes inaccurate, `cov` may not provide an
711
+ accurate measure of uncertainty.
712
+
713
+ How the `sigma` parameter affects the estimated covariance
714
+ depends on `absolute_sigma` argument, as described above.
715
+
716
+ If the Jacobian matrix at the solution doesn't have a full rank, then
717
+ 'lm' method returns a matrix filled with ``np.inf``, on the other hand
718
+ 'trf' and 'dogbox' methods use Moore-Penrose pseudoinverse to compute
719
+ the covariance matrix. Covariance matrices with large condition numbers
720
+ (e.g. computed with `numpy.linalg.cond`) may indicate that results are
721
+ unreliable.
722
+ infodict : dict (returned only if `full_output` is True)
723
+ a dictionary of optional outputs with the keys:
724
+
725
+ ``nfev``
726
+ The number of function calls. Methods 'trf' and 'dogbox' do not
727
+ count function calls for numerical Jacobian approximation,
728
+ as opposed to 'lm' method.
729
+ ``fvec``
730
+ The residual values evaluated at the solution, for a 1-D `sigma`
731
+ this is ``(f(x, *popt) - ydata)/sigma``.
732
+ ``fjac``
733
+ A permutation of the R matrix of a QR
734
+ factorization of the final approximate
735
+ Jacobian matrix, stored column wise.
736
+ Together with ipvt, the covariance of the
737
+ estimate can be approximated.
738
+ Method 'lm' only provides this information.
739
+ ``ipvt``
740
+ An integer array of length N which defines
741
+ a permutation matrix, p, such that
742
+ fjac*p = q*r, where r is upper triangular
743
+ with diagonal elements of nonincreasing
744
+ magnitude. Column j of p is column ipvt(j)
745
+ of the identity matrix.
746
+ Method 'lm' only provides this information.
747
+ ``qtf``
748
+ The vector (transpose(q) * fvec).
749
+ Method 'lm' only provides this information.
750
+
751
+ .. versionadded:: 1.9
752
+ mesg : str (returned only if `full_output` is True)
753
+ A string message giving information about the solution.
754
+
755
+ .. versionadded:: 1.9
756
+ ier : int (returned only if `full_output` is True)
757
+ An integer flag. If it is equal to 1, 2, 3 or 4, the solution was
758
+ found. Otherwise, the solution was not found. In either case, the
759
+ optional output variable `mesg` gives more information.
760
+
761
+ .. versionadded:: 1.9
762
+
763
+ Raises
764
+ ------
765
+ ValueError
766
+ if either `ydata` or `xdata` contain NaNs, or if incompatible options
767
+ are used.
768
+
769
+ RuntimeError
770
+ if the least-squares minimization fails.
771
+
772
+ OptimizeWarning
773
+ if covariance of the parameters can not be estimated.
774
+
775
+ See Also
776
+ --------
777
+ least_squares : Minimize the sum of squares of nonlinear functions.
778
+ scipy.stats.linregress : Calculate a linear least squares regression for
779
+ two sets of measurements.
780
+
781
+ Notes
782
+ -----
783
+ Users should ensure that inputs `xdata`, `ydata`, and the output of `f`
784
+ are ``float64``, or else the optimization may return incorrect results.
785
+
786
+ With ``method='lm'``, the algorithm uses the Levenberg-Marquardt algorithm
787
+ through `leastsq`. Note that this algorithm can only deal with
788
+ unconstrained problems.
789
+
790
+ Box constraints can be handled by methods 'trf' and 'dogbox'. Refer to
791
+ the docstring of `least_squares` for more information.
792
+
793
+ Parameters to be fitted must have similar scale. Differences of multiple
794
+ orders of magnitude can lead to incorrect results. For the 'trf' and
795
+ 'dogbox' methods, the `x_scale` keyword argument can be used to scale
796
+ the parameters.
797
+
798
+ References
799
+ ----------
800
+ [1] K. Vugrin et al. Confidence region estimation techniques for nonlinear
801
+ regression in groundwater flow: Three case studies. Water Resources
802
+ Research, Vol. 43, W03423, :doi:`10.1029/2005WR004804`
803
+
804
+ Examples
805
+ --------
806
+ >>> import numpy as np
807
+ >>> import matplotlib.pyplot as plt
808
+ >>> from scipy.optimize import curve_fit
809
+
810
+ >>> def func(x, a, b, c):
811
+ ... return a * np.exp(-b * x) + c
812
+
813
+ Define the data to be fit with some noise:
814
+
815
+ >>> xdata = np.linspace(0, 4, 50)
816
+ >>> y = func(xdata, 2.5, 1.3, 0.5)
817
+ >>> rng = np.random.default_rng()
818
+ >>> y_noise = 0.2 * rng.normal(size=xdata.size)
819
+ >>> ydata = y + y_noise
820
+ >>> plt.plot(xdata, ydata, 'b-', label='data')
821
+
822
+ Fit for the parameters a, b, c of the function `func`:
823
+
824
+ >>> popt, pcov = curve_fit(func, xdata, ydata)
825
+ >>> popt
826
+ array([2.56274217, 1.37268521, 0.47427475])
827
+ >>> plt.plot(xdata, func(xdata, *popt), 'r-',
828
+ ... label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt))
829
+
830
+ Constrain the optimization to the region of ``0 <= a <= 3``,
831
+ ``0 <= b <= 1`` and ``0 <= c <= 0.5``:
832
+
833
+ >>> popt, pcov = curve_fit(func, xdata, ydata, bounds=(0, [3., 1., 0.5]))
834
+ >>> popt
835
+ array([2.43736712, 1. , 0.34463856])
836
+ >>> plt.plot(xdata, func(xdata, *popt), 'g--',
837
+ ... label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt))
838
+
839
+ >>> plt.xlabel('x')
840
+ >>> plt.ylabel('y')
841
+ >>> plt.legend()
842
+ >>> plt.show()
843
+
844
+ For reliable results, the model `func` should not be overparametrized;
845
+ redundant parameters can cause unreliable covariance matrices and, in some
846
+ cases, poorer quality fits. As a quick check of whether the model may be
847
+ overparameterized, calculate the condition number of the covariance matrix:
848
+
849
+ >>> np.linalg.cond(pcov)
850
+ 34.571092161547405 # may vary
851
+
852
+ The value is small, so it does not raise much concern. If, however, we were
853
+ to add a fourth parameter ``d`` to `func` with the same effect as ``a``:
854
+
855
+ >>> def func2(x, a, b, c, d):
856
+ ... return a * d * np.exp(-b * x) + c # a and d are redundant
857
+ >>> popt, pcov = curve_fit(func2, xdata, ydata)
858
+ >>> np.linalg.cond(pcov)
859
+ 1.13250718925596e+32 # may vary
860
+
861
+ Such a large value is cause for concern. The diagonal elements of the
862
+ covariance matrix, which are related to the uncertainty of the fit, give more
863
+ information:
864
+
865
+ >>> np.diag(pcov)
866
+ array([1.48814742e+29, 3.78596560e-02, 5.39253738e-03, 2.76417220e+28]) # may vary
867
+
868
+ Note that the first and last terms are much larger than the other elements,
869
+ suggesting that the optimal values of these parameters are ambiguous and
870
+ that only one of these parameters is needed in the model.
871
+
872
+ If the optimal parameters of `f` differ by multiple orders of magnitude, the
873
+ resulting fit can be inaccurate. Sometimes, `curve_fit` can fail to find any
874
+ results:
875
+
876
+ >>> ydata = func(xdata, 500000, 0.01, 15)
877
+ >>> try:
878
+ ... popt, pcov = curve_fit(func, xdata, ydata, method = 'trf')
879
+ ... except RuntimeError as e:
880
+ ... print(e)
881
+ Optimal parameters not found: The maximum number of function evaluations is exceeded.
882
+
883
+ If parameter scale is roughly known beforehand, it can be defined in
884
+ the `x_scale` argument:
885
+
886
+ >>> popt, pcov = curve_fit(func, xdata, ydata, method = 'trf',
887
+ ... x_scale = [1000, 1, 1])
888
+ >>> popt
889
+ array([5.00000000e+05, 1.00000000e-02, 1.49999999e+01])
890
+ """
891
+ if p0 is None:
892
+ # determine number of parameters by inspecting the function
893
+ sig = _getfullargspec(f)
894
+ args = sig.args
895
+ if len(args) < 2:
896
+ raise ValueError("Unable to determine number of fit parameters.")
897
+ n = len(args) - 1
898
+ else:
899
+ p0 = np.atleast_1d(p0)
900
+ n = p0.size
901
+
902
+ if isinstance(bounds, Bounds):
903
+ lb, ub = bounds.lb, bounds.ub
904
+ else:
905
+ lb, ub = prepare_bounds(bounds, n)
906
+ if p0 is None:
907
+ p0 = _initialize_feasible(lb, ub)
908
+
909
+ bounded_problem = np.any((lb > -np.inf) | (ub < np.inf))
910
+ if method is None:
911
+ if bounded_problem:
912
+ method = 'trf'
913
+ else:
914
+ method = 'lm'
915
+
916
+ if method == 'lm' and bounded_problem:
917
+ raise ValueError("Method 'lm' only works for unconstrained problems. "
918
+ "Use 'trf' or 'dogbox' instead.")
919
+
920
+ if check_finite is None:
921
+ check_finite = True if nan_policy is None else False
922
+
923
+ # optimization may produce garbage for float32 inputs, cast them to float64
924
+ if check_finite:
925
+ ydata = np.asarray_chkfinite(ydata, float)
926
+ else:
927
+ ydata = np.asarray(ydata, float)
928
+
929
+ if isinstance(xdata, (list, tuple, np.ndarray)):
930
+ # `xdata` is passed straight to the user-defined `f`, so allow
931
+ # non-array_like `xdata`.
932
+ if check_finite:
933
+ xdata = np.asarray_chkfinite(xdata, float)
934
+ else:
935
+ xdata = np.asarray(xdata, float)
936
+
937
+ if ydata.size == 0:
938
+ raise ValueError("`ydata` must not be empty!")
939
+
940
+ # nan handling is needed only if check_finite is False because if True,
941
+ # the x-y data are already checked, and they don't contain nans.
942
+ if not check_finite and nan_policy is not None:
943
+ if nan_policy == "propagate":
944
+ raise ValueError("`nan_policy='propagate'` is not supported "
945
+ "by this function.")
946
+
947
+ policies = [None, 'raise', 'omit']
948
+ x_contains_nan, nan_policy = _contains_nan(xdata, nan_policy,
949
+ policies=policies)
950
+ y_contains_nan, nan_policy = _contains_nan(ydata, nan_policy,
951
+ policies=policies)
952
+
953
+ if (x_contains_nan or y_contains_nan) and nan_policy == 'omit':
954
+ # ignore NaNs for N dimensional arrays
955
+ has_nan = np.isnan(xdata)
956
+ has_nan = has_nan.any(axis=tuple(range(has_nan.ndim-1)))
957
+ has_nan |= np.isnan(ydata)
958
+
959
+ xdata = xdata[..., ~has_nan]
960
+ ydata = ydata[~has_nan]
961
+
962
+ # Determine type of sigma
963
+ if sigma is not None:
964
+ sigma = np.asarray(sigma)
965
+
966
+ # if 1-D or a scalar, sigma are errors, define transform = 1/sigma
967
+ if sigma.size == 1 or sigma.shape == (ydata.size, ):
968
+ transform = 1.0 / sigma
969
+ # if 2-D, sigma is the covariance matrix,
970
+ # define transform = L such that L L^T = C
971
+ elif sigma.shape == (ydata.size, ydata.size):
972
+ try:
973
+ # scipy.linalg.cholesky requires lower=True to return L L^T = A
974
+ transform = cholesky(sigma, lower=True)
975
+ except LinAlgError as e:
976
+ raise ValueError("`sigma` must be positive definite.") from e
977
+ else:
978
+ raise ValueError("`sigma` has incorrect shape.")
979
+ else:
980
+ transform = None
981
+
982
+ func = _lightweight_memoizer(_wrap_func(f, xdata, ydata, transform))
983
+
984
+ if callable(jac):
985
+ jac = _lightweight_memoizer(_wrap_jac(jac, xdata, transform))
986
+ elif jac is None and method != 'lm':
987
+ jac = '2-point'
988
+
989
+ if 'args' in kwargs:
990
+ # The specification for the model function `f` does not support
991
+ # additional arguments. Refer to the `curve_fit` docstring for
992
+ # acceptable call signatures of `f`.
993
+ raise ValueError("'args' is not a supported keyword argument.")
994
+
995
+ if method == 'lm':
996
+ # if ydata.size == 1, this might be used for broadcast.
997
+ if ydata.size != 1 and n > ydata.size:
998
+ raise TypeError(f"The number of func parameters={n} must not"
999
+ f" exceed the number of data points={ydata.size}")
1000
+ res = leastsq(func, p0, Dfun=jac, full_output=1, **kwargs)
1001
+ popt, pcov, infodict, errmsg, ier = res
1002
+ ysize = len(infodict['fvec'])
1003
+ cost = np.sum(infodict['fvec'] ** 2)
1004
+ if ier not in [1, 2, 3, 4]:
1005
+ raise RuntimeError("Optimal parameters not found: " + errmsg)
1006
+ else:
1007
+ # Rename maxfev (leastsq) to max_nfev (least_squares), if specified.
1008
+ if 'max_nfev' not in kwargs:
1009
+ kwargs['max_nfev'] = kwargs.pop('maxfev', None)
1010
+
1011
+ res = least_squares(func, p0, jac=jac, bounds=bounds, method=method,
1012
+ **kwargs)
1013
+
1014
+ if not res.success:
1015
+ raise RuntimeError("Optimal parameters not found: " + res.message)
1016
+
1017
+ infodict = dict(nfev=res.nfev, fvec=res.fun)
1018
+ ier = res.status
1019
+ errmsg = res.message
1020
+
1021
+ ysize = len(res.fun)
1022
+ cost = 2 * res.cost # res.cost is half sum of squares!
1023
+ popt = res.x
1024
+
1025
+ # Do Moore-Penrose inverse discarding zero singular values.
1026
+ _, s, VT = svd(res.jac, full_matrices=False)
1027
+ threshold = np.finfo(float).eps * max(res.jac.shape) * s[0]
1028
+ s = s[s > threshold]
1029
+ VT = VT[:s.size]
1030
+ pcov = np.dot(VT.T / s**2, VT)
1031
+
1032
+ warn_cov = False
1033
+ if pcov is None or np.isnan(pcov).any():
1034
+ # indeterminate covariance
1035
+ pcov = zeros((len(popt), len(popt)), dtype=float)
1036
+ pcov.fill(inf)
1037
+ warn_cov = True
1038
+ elif not absolute_sigma:
1039
+ if ysize > p0.size:
1040
+ s_sq = cost / (ysize - p0.size)
1041
+ pcov = pcov * s_sq
1042
+ else:
1043
+ pcov.fill(inf)
1044
+ warn_cov = True
1045
+
1046
+ if warn_cov:
1047
+ warnings.warn('Covariance of the parameters could not be estimated',
1048
+ category=OptimizeWarning, stacklevel=2)
1049
+
1050
+ if full_output:
1051
+ return popt, pcov, infodict, errmsg, ier
1052
+ else:
1053
+ return popt, pcov
1054
+
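+
+ # A minimal sketch (editorial addition, not part of SciPy) verifying the
+ # docstring relationship
+ # ``pcov(absolute_sigma=False) == pcov(absolute_sigma=True) * chisq/(M-N)``.
+ # `_example_absolute_sigma` is a hypothetical helper, for illustration only.
+ def _example_absolute_sigma():
+     xdata = np.linspace(0, 1, 30)
+     ydata = 2.0 * xdata + 0.5 + 0.05 * np.cos(40 * xdata)
+     def f(x, a, b):
+         return a * x + b
+     sigma = np.full_like(ydata, 0.05)
+     popt, pcov_abs = curve_fit(f, xdata, ydata, sigma=sigma,
+                                absolute_sigma=True)
+     _, pcov_rel = curve_fit(f, xdata, ydata, sigma=sigma,
+                             absolute_sigma=False)
+     chisq = (((ydata - f(xdata, *popt)) / sigma) ** 2).sum()
+     return np.allclose(pcov_rel, pcov_abs * chisq / (len(ydata) - 2))  # True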
1055
+
1056
+ def check_gradient(fcn, Dfcn, x0, args=(), col_deriv=0):
1057
+ """Perform a simple check on the gradient for correctness.
1058
+
1059
+ """
1060
+
1061
+ x = atleast_1d(x0)
1062
+ n = len(x)
1063
+ x = x.reshape((n,))
1064
+ fvec = atleast_1d(fcn(x, *args))
1065
+ m = len(fvec)
1066
+ fvec = fvec.reshape((m,))
1067
+ ldfjac = m
1068
+ fjac = atleast_1d(Dfcn(x, *args))
1069
+ fjac = fjac.reshape((m, n))
1070
+ if col_deriv == 0:
1071
+ fjac = transpose(fjac)
1072
+
1073
+ xp = zeros((n,), float)
1074
+ err = zeros((m,), float)
1075
+ fvecp = None
1076
+ _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 1, err)
1077
+
1078
+ fvecp = atleast_1d(fcn(xp, *args))
1079
+ fvecp = fvecp.reshape((m,))
1080
+ _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 2, err)
1081
+
1082
+ good = (prod(greater(err, 0.5), axis=0))
1083
+
1084
+ return (good, err)
1085
+
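+
+ # A minimal sketch (editorial addition, not part of SciPy) exercising the
+ # gradient check above on a function with a known-correct Jacobian.
+ # `_example_check_gradient` is a hypothetical helper, for illustration only.
+ def _example_check_gradient():
+     def fcn(x):
+         return np.array([x[0] ** 2 + x[1], np.sin(x[0])])
+     def Dfcn(x):
+         return np.array([[2 * x[0], 1.0], [np.cos(x[0]), 0.0]])
+     good, err = check_gradient(fcn, Dfcn, [1.0, 2.0])
+     return bool(good)  # True when the Jacobian is consistent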
1086
+
1087
+ def _del2(p0, p1, d):
1088
+ return p0 - np.square(p1 - p0) / d
1089
+
1090
+
1091
+ def _relerr(actual, desired):
1092
+ return (actual - desired) / desired
1093
+
1094
+
1095
+ def _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel):
1096
+ p0 = x0
1097
+ for i in range(maxiter):
1098
+ p1 = func(p0, *args)
1099
+ if use_accel:
1100
+ p2 = func(p1, *args)
1101
+ d = p2 - 2.0 * p1 + p0
1102
+ p = _lazywhere(d != 0, (p0, p1, d), f=_del2, fillvalue=p2)
1103
+ else:
1104
+ p = p1
1105
+ relerr = _lazywhere(p0 != 0, (p, p0), f=_relerr, fillvalue=p)
1106
+ if np.all(np.abs(relerr) < xtol):
1107
+ return p
1108
+ p0 = p
1109
+ msg = "Failed to converge after %d iterations, value is %s" % (maxiter, p)
1110
+ raise RuntimeError(msg)
1111
+
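+
+ # A minimal sketch (editorial addition, not part of SciPy) of a single
+ # Steffensen/Aitken ``del2`` step as performed by the helper above, for
+ # the map ``g(p) = cos(p)``. `_example_del2_step` is a hypothetical
+ # helper, for illustration only.
+ def _example_del2_step():
+     p0 = 0.5
+     p1 = np.cos(p0)
+     p2 = np.cos(p1)
+     d = p2 - 2.0 * p1 + p0
+     p_accel = _del2(p0, p1, d)  # about 0.7315, vs the fixed point 0.73909
+     return p_accel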
1112
+
1113
+ def fixed_point(func, x0, args=(), xtol=1e-8, maxiter=500, method='del2'):
1114
+ """
1115
+ Find a fixed point of the function.
1116
+
1117
+ Given a function of one or more variables and a starting point, find a
1118
+ fixed point of the function: i.e., where ``func(x0) == x0``.
1119
+
1120
+ Parameters
1121
+ ----------
1122
+ func : function
1123
+ Function to evaluate.
1124
+ x0 : array_like
1125
+ Initial guess for the fixed point of the function.
1126
+ args : tuple, optional
1127
+ Extra arguments to `func`.
1128
+ xtol : float, optional
1129
+ Convergence tolerance, defaults to 1e-08.
1130
+ maxiter : int, optional
1131
+ Maximum number of iterations, defaults to 500.
1132
+ method : {"del2", "iteration"}, optional
1133
+ Method of finding the fixed-point, defaults to "del2",
1134
+ which uses Steffensen's Method with Aitken's ``Del^2``
1135
+ convergence acceleration [1]_. The "iteration" method simply iterates
1136
+ the function until convergence is detected, without attempting to
1137
+ accelerate the convergence.
1138
+
1139
+ References
1140
+ ----------
1141
+ .. [1] Burden, Faires, "Numerical Analysis", 5th edition, pg. 80
1142
+
1143
+ Examples
1144
+ --------
1145
+ >>> import numpy as np
1146
+ >>> from scipy import optimize
1147
+ >>> def func(x, c1, c2):
1148
+ ... return np.sqrt(c1/(x+c2))
1149
+ >>> c1 = np.array([10,12.])
1150
+ >>> c2 = np.array([3, 5.])
1151
+ >>> optimize.fixed_point(func, [1.2, 1.3], args=(c1,c2))
1152
+ array([ 1.4920333 , 1.37228132])
1153
+
1154
+ """
1155
+ use_accel = {'del2': True, 'iteration': False}[method]
1156
+ x0 = _asarray_validated(x0, as_inexact=True)
1157
+ return _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel)
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_moduleTNC.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (152 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_nnls.py ADDED
@@ -0,0 +1,164 @@
1
+ import numpy as np
2
+ from scipy.linalg import solve, LinAlgWarning
3
+ import warnings
4
+
5
+ __all__ = ['nnls']
6
+
7
+
8
+ def nnls(A, b, maxiter=None, *, atol=None):
9
+ """
10
+ Solve ``argmin_x || Ax - b ||_2`` for ``x>=0``.
11
+
12
+ This problem, often called as NonNegative Least Squares, is a convex
13
+ optimization problem with convex constraints. It typically arises when
14
+ the ``x`` models quantities for which only nonnegative values are
15
+ attainable; weight of ingredients, component costs and so on.
16
+
17
+ Parameters
18
+ ----------
19
+ A : (m, n) ndarray
20
+ Coefficient array
21
+ b : (m,) ndarray, float
22
+ Right-hand side vector.
23
+ maxiter: int, optional
24
+ Maximum number of iterations, optional. Default value is ``3 * n``.
25
+ atol: float
26
+ Tolerance value used in the algorithm to assess closeness to zero in
27
+ the projected residual ``(A.T @ (A x - b)`` entries. Increasing this
28
+ value relaxes the solution constraints. A typical relaxation value can
29
+ be selected as ``max(m, n) * np.linalg.norm(a, 1) * np.spacing(1.)``.
30
+ This value is not set as default since the norm operation becomes
31
+ expensive for large problems hence can be used only when necessary.
32
+
33
+ Returns
34
+ -------
35
+ x : ndarray
36
+ Solution vector.
37
+ rnorm : float
38
+ The 2-norm of the residual, ``|| Ax-b ||_2``.
39
+
40
+ See Also
41
+ --------
42
+ lsq_linear : Linear least squares with bounds on the variables
43
+
44
+ Notes
45
+ -----
46
+ The code is based on [2]_ which is an improved version of the classical
47
+ algorithm of [1]_. It utilizes an active set method and solves the KKT
48
+ (Karush-Kuhn-Tucker) conditions for the non-negative least squares problem.
49
+
50
+ References
51
+ ----------
52
+ .. [1] : Lawson C., Hanson R.J., "Solving Least Squares Problems", SIAM,
53
+ 1995, :doi:`10.1137/1.9781611971217`
54
+ .. [2] : Bro, Rasmus and de Jong, Sijmen, "A Fast Non-Negativity-
55
+ Constrained Least Squares Algorithm", Journal Of Chemometrics, 1997,
56
+ :doi:`10.1002/(SICI)1099-128X(199709/10)11:5<393::AID-CEM483>3.0.CO;2-L`
57
+
58
+ Examples
59
+ --------
60
+ >>> import numpy as np
61
+ >>> from scipy.optimize import nnls
62
+ ...
63
+ >>> A = np.array([[1, 0], [1, 0], [0, 1]])
64
+ >>> b = np.array([2, 1, 1])
65
+ >>> nnls(A, b)
66
+ (array([1.5, 1. ]), 0.7071067811865475)
67
+
68
+ >>> b = np.array([-1, -1, -1])
69
+ >>> nnls(A, b)
70
+ (array([0., 0.]), 1.7320508075688772)
71
+
72
+ """
73
+
74
+ A = np.asarray_chkfinite(A)
75
+ b = np.asarray_chkfinite(b)
76
+
77
+ if len(A.shape) != 2:
78
+ raise ValueError("Expected a two-dimensional array (matrix)" +
79
+ f", but the shape of A is {A.shape}")
80
+ if len(b.shape) != 1:
81
+ raise ValueError("Expected a one-dimensional array (vector)" +
82
+ f", but the shape of b is {b.shape}")
83
+
84
+ m, n = A.shape
85
+
86
+ if m != b.shape[0]:
87
+ raise ValueError(
88
+ "Incompatible dimensions. The first dimension of " +
89
+ f"A is {m}, while the shape of b is {(b.shape[0], )}")
90
+
91
+ x, rnorm, mode = _nnls(A, b, maxiter, tol=atol)
92
+ if mode != 1:
93
+ raise RuntimeError("Maximum number of iterations reached.")
94
+
95
+ return x, rnorm
96
+
97
+
98
+ def _nnls(A, b, maxiter=None, tol=None):
99
+ """
100
+ This is a single RHS algorithm from ref [2] above. For multiple RHS
101
+ support, the algorithm is given in :doi:`10.1002/cem.889`
102
+ """
103
+ m, n = A.shape
104
+
105
+ AtA = A.T @ A
106
+ Atb = b @ A # Result is 1D - let NumPy figure it out
107
+
108
+ if not maxiter:
109
+ maxiter = 3*n
110
+ if tol is None:
111
+ tol = 10 * max(m, n) * np.spacing(1.)
112
+
113
+ # Initialize vars
114
+ x = np.zeros(n, dtype=np.float64)
115
+ s = np.zeros(n, dtype=np.float64)
116
+ # Inactive constraint switches
117
+ P = np.zeros(n, dtype=bool)
118
+
119
+ # Projected residual
120
+ w = Atb.copy().astype(np.float64) # x=0. Skip (-AtA @ x) term
121
+
122
+ # Overall iteration counter
123
+ # Outer loop is not counted, inner iter is counted across outer spins
124
+ iter = 0
125
+
126
+ while (not P.all()) and (w[~P] > tol).any(): # B
127
+ # Get the "most" active coeff index and move to inactive set
128
+ k = np.argmax(w * (~P)) # B.2
129
+ P[k] = True # B.3
130
+
131
+ # Iteration solution
132
+ s[:] = 0.
133
+ # B.4
134
+ with warnings.catch_warnings():
135
+ warnings.filterwarnings('ignore', message='Ill-conditioned matrix',
136
+ category=LinAlgWarning)
137
+ s[P] = solve(AtA[np.ix_(P, P)], Atb[P], assume_a='sym', check_finite=False)
138
+
139
+ # Inner loop
140
+ while (iter < maxiter) and (s[P].min() < 0): # C.1
141
+ iter += 1
142
+ inds = P * (s < 0)
143
+ alpha = (x[inds] / (x[inds] - s[inds])).min() # C.2
144
+ x *= (1 - alpha)
145
+ x += alpha*s
146
+ P[x <= tol] = False
147
+ with warnings.catch_warnings():
148
+ warnings.filterwarnings('ignore', message='Ill-conditioned matrix',
149
+ category=LinAlgWarning)
150
+ s[P] = solve(AtA[np.ix_(P, P)], Atb[P], assume_a='sym',
151
+ check_finite=False)
152
+ s[~P] = 0 # C.6
153
+
154
+ x[:] = s[:]
155
+ w[:] = Atb - AtA @ x
156
+
157
+ if iter == maxiter:
158
+ # Typically the following line should be
159
+ # return x, np.linalg.norm(A@x - b), -1
160
+ # however, at the top level, -1 raises an exception, so computing the
161
+ # norm would be wasted work. Instead return the dummy value 0.
162
+ return x, 0., -1
163
+
164
+ return x, np.linalg.norm(A@x - b), 1
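A short usage sketch of the ``atol`` relaxation suggested in the docstring above. It assumes the keyword-only ``atol`` argument introduced in this file; the data are synthetic and chosen so that an exact nonnegative solution exists:

    import numpy as np
    from scipy.optimize import nnls

    rng = np.random.default_rng(0)
    A = rng.standard_normal((50, 10))
    b = A @ np.abs(rng.standard_normal(10))  # consistent nonnegative system

    # Relaxation value from the docstring: max(m, n) * ||A||_1 * eps.
    # Computed explicitly here, since it is deliberately not the default.
    atol = max(A.shape) * np.linalg.norm(A, 1) * np.spacing(1.)
    x, rnorm = nnls(A, b, atol=atol)
    print(rnorm)  # ~0, since b lies in the nonnegative cone of A's columns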
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_nonlin.py ADDED
@@ -0,0 +1,1584 @@
1
+ # Copyright (C) 2009, Pauli Virtanen <[email protected]>
2
+ # Distributed under the same license as SciPy.
3
+
4
+ import inspect
5
+ import sys
6
+ import warnings
7
+
8
+ import numpy as np
9
+ from numpy import asarray, dot, vdot
10
+
11
+ from scipy.linalg import norm, solve, inv, qr, svd, LinAlgError
12
+ import scipy.sparse.linalg
13
+ import scipy.sparse
14
+ from scipy.linalg import get_blas_funcs
15
+ from scipy._lib._util import getfullargspec_no_self as _getfullargspec
16
+ from ._linesearch import scalar_search_wolfe1, scalar_search_armijo
17
+
18
+
19
+ __all__ = [
20
+ 'broyden1', 'broyden2', 'anderson', 'linearmixing',
21
+ 'diagbroyden', 'excitingmixing', 'newton_krylov',
22
+ 'BroydenFirst', 'KrylovJacobian', 'InverseJacobian', 'NoConvergence']
23
+
24
+ #------------------------------------------------------------------------------
25
+ # Utility functions
26
+ #------------------------------------------------------------------------------
27
+
28
+
29
+ class NoConvergence(Exception):
30
+ """Exception raised when nonlinear solver fails to converge within the specified
31
+ `maxiter`."""
32
+ pass
33
+
34
+
35
+ def maxnorm(x):
36
+ return np.absolute(x).max()
37
+
38
+
39
+ def _as_inexact(x):
40
+ """Return `x` as an array, of either floats or complex floats"""
41
+ x = asarray(x)
42
+ if not np.issubdtype(x.dtype, np.inexact):
43
+ return asarray(x, dtype=np.float64)
44
+ return x
45
+
46
+
47
+ def _array_like(x, x0):
48
+ """Return ndarray `x` as same array subclass and shape as `x0`"""
49
+ x = np.reshape(x, np.shape(x0))
50
+ wrap = getattr(x0, '__array_wrap__', x.__array_wrap__)
51
+ return wrap(x)
52
+
53
+
54
+ def _safe_norm(v):
55
+ if not np.isfinite(v).all():
56
+ return np.array(np.inf)
57
+ return norm(v)
58
+
59
+ #------------------------------------------------------------------------------
60
+ # Generic nonlinear solver machinery
61
+ #------------------------------------------------------------------------------
62
+
63
+
64
+ _doc_parts = dict(
65
+ params_basic="""
66
+ F : function(x) -> f
67
+ Function whose root to find; should take and return an array-like
68
+ object.
69
+ xin : array_like
70
+ Initial guess for the solution
71
+ """.strip(),
72
+ params_extra="""
73
+ iter : int, optional
74
+ Number of iterations to make. If omitted (default), make as many
75
+ as required to meet tolerances.
76
+ verbose : bool, optional
77
+ Print status to stdout on every iteration.
78
+ maxiter : int, optional
79
+ Maximum number of iterations to make. If more are needed to
80
+ meet convergence, `NoConvergence` is raised.
81
+ f_tol : float, optional
82
+ Absolute tolerance (in max-norm) for the residual.
83
+ If omitted, default is 6e-6.
84
+ f_rtol : float, optional
85
+ Relative tolerance for the residual. If omitted, not used.
86
+ x_tol : float, optional
87
+ Absolute minimum step size, as determined from the Jacobian
88
+ approximation. If the step size is smaller than this, optimization
89
+ is terminated as successful. If omitted, not used.
90
+ x_rtol : float, optional
91
+ Relative minimum step size. If omitted, not used.
92
+ tol_norm : function(vector) -> scalar, optional
93
+ Norm to use in convergence check. Default is the maximum norm.
94
+ line_search : {None, 'armijo' (default), 'wolfe'}, optional
95
+ Which type of line search to use to determine the step size in the
96
+ direction given by the Jacobian approximation. Defaults to 'armijo'.
97
+ callback : function, optional
98
+ Optional callback function. It is called on every iteration as
99
+ ``callback(x, f)`` where `x` is the current solution and `f`
100
+ the corresponding residual.
101
+
102
+ Returns
103
+ -------
104
+ sol : ndarray
105
+ An array (of similar array type as `x0`) containing the final solution.
106
+
107
+ Raises
108
+ ------
109
+ NoConvergence
110
+ When a solution was not found.
111
+
112
+ """.strip()
113
+ )
114
+
115
+
116
+ def _set_doc(obj):
117
+ if obj.__doc__:
118
+ obj.__doc__ = obj.__doc__ % _doc_parts
119
+
120
+
121
+ def nonlin_solve(F, x0, jacobian='krylov', iter=None, verbose=False,
122
+ maxiter=None, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
123
+ tol_norm=None, line_search='armijo', callback=None,
124
+ full_output=False, raise_exception=True):
125
+ """
126
+ Find a root of a function, in a way suitable for large-scale problems.
127
+
128
+ Parameters
129
+ ----------
130
+ %(params_basic)s
131
+ jacobian : Jacobian
132
+ A Jacobian approximation: `Jacobian` object or something that
133
+ `asjacobian` can transform to one. Alternatively, a string specifying
134
+ which of the builtin Jacobian approximations to use:
135
+
136
+ krylov, broyden1, broyden2, anderson
137
+ diagbroyden, linearmixing, excitingmixing
138
+
139
+ %(params_extra)s
140
+ full_output : bool
141
+ If true, returns a dictionary `info` containing convergence
142
+ information.
143
+ raise_exception : bool
144
+ If True, a `NoConvergence` exception is raised if no solution is found.
145
+
146
+ See Also
147
+ --------
148
+ asjacobian, Jacobian
149
+
150
+ Notes
151
+ -----
152
+ This algorithm implements the inexact Newton method, with
153
+ backtracking or full line searches. Several Jacobian
154
+ approximations are available, including Krylov and Quasi-Newton
155
+ methods.
156
+
157
+ References
158
+ ----------
159
+ .. [KIM] C. T. Kelley, \"Iterative Methods for Linear and Nonlinear
160
+ Equations\". Society for Industrial and Applied Mathematics. (1995)
161
+ https://archive.siam.org/books/kelley/fr16/
162
+
163
+ """
164
+ # Can't use default parameters because it's being explicitly passed as None
165
+ # from the calling function, so we need to set it here.
166
+ tol_norm = maxnorm if tol_norm is None else tol_norm
167
+ condition = TerminationCondition(f_tol=f_tol, f_rtol=f_rtol,
168
+ x_tol=x_tol, x_rtol=x_rtol,
169
+ iter=iter, norm=tol_norm)
170
+
171
+ x0 = _as_inexact(x0)
172
+ def func(z):
173
+ return _as_inexact(F(_array_like(z, x0))).flatten()
174
+ x = x0.flatten()
175
+
176
+ dx = np.full_like(x, np.inf)
177
+ Fx = func(x)
178
+ Fx_norm = norm(Fx)
179
+
180
+ jacobian = asjacobian(jacobian)
181
+ jacobian.setup(x.copy(), Fx, func)
182
+
183
+ if maxiter is None:
184
+ if iter is not None:
185
+ maxiter = iter + 1
186
+ else:
187
+ maxiter = 100*(x.size+1)
188
+
189
+ if line_search is True:
190
+ line_search = 'armijo'
191
+ elif line_search is False:
192
+ line_search = None
193
+
194
+ if line_search not in (None, 'armijo', 'wolfe'):
195
+ raise ValueError("Invalid line search")
196
+
197
+ # Solver tolerance selection
198
+ gamma = 0.9
199
+ eta_max = 0.9999
200
+ eta_threshold = 0.1
201
+ eta = 1e-3
202
+
203
+ for n in range(maxiter):
204
+ status = condition.check(Fx, x, dx)
205
+ if status:
206
+ break
207
+
208
+ # The tolerance, as computed for scipy.sparse.linalg.* routines
209
+ tol = min(eta, eta*Fx_norm)
210
+ dx = -jacobian.solve(Fx, tol=tol)
211
+
212
+ if norm(dx) == 0:
213
+ raise ValueError("Jacobian inversion yielded zero vector. "
214
+ "This indicates a bug in the Jacobian "
215
+ "approximation.")
216
+
217
+ # Line search, or Newton step
218
+ if line_search:
219
+ s, x, Fx, Fx_norm_new = _nonlin_line_search(func, x, Fx, dx,
220
+ line_search)
221
+ else:
222
+ s = 1.0
223
+ x = x + dx
224
+ Fx = func(x)
225
+ Fx_norm_new = norm(Fx)
226
+
227
+ jacobian.update(x.copy(), Fx)
228
+
229
+ if callback:
230
+ callback(x, Fx)
231
+
232
+ # Adjust forcing parameters for inexact methods
233
+ eta_A = gamma * Fx_norm_new**2 / Fx_norm**2
234
+ if gamma * eta**2 < eta_treshold:
235
+ eta = min(eta_max, eta_A)
236
+ else:
237
+ eta = min(eta_max, max(eta_A, gamma*eta**2))
238
+
239
+ Fx_norm = Fx_norm_new
240
+
241
+ # Print status
242
+ if verbose:
243
+ sys.stdout.write("%d: |F(x)| = %g; step %g\n" % (
244
+ n, tol_norm(Fx), s))
245
+ sys.stdout.flush()
246
+ else:
247
+ if raise_exception:
248
+ raise NoConvergence(_array_like(x, x0))
249
+ else:
250
+ status = 2
251
+
252
+ if full_output:
253
+ info = {'nit': condition.iteration,
254
+ 'fun': Fx,
255
+ 'status': status,
256
+ 'success': status == 1,
257
+ 'message': {1: 'A solution was found at the specified '
258
+ 'tolerance.',
259
+ 2: 'The maximum number of iterations allowed '
260
+ 'has been reached.'
261
+ }[status]
262
+ }
263
+ return _array_like(x, x0), info
264
+ else:
265
+ return _array_like(x, x0)
266
+
267
+
268
+ _set_doc(nonlin_solve)
269
+
270
+
271
+ def _nonlin_line_search(func, x, Fx, dx, search_type='armijo', rdiff=1e-8,
272
+ smin=1e-2):
273
+ tmp_s = [0]
274
+ tmp_Fx = [Fx]
275
+ tmp_phi = [norm(Fx)**2]
276
+ s_norm = norm(x) / norm(dx)
277
+
278
+ def phi(s, store=True):
279
+ if s == tmp_s[0]:
280
+ return tmp_phi[0]
281
+ xt = x + s*dx
282
+ v = func(xt)
283
+ p = _safe_norm(v)**2
284
+ if store:
285
+ tmp_s[0] = s
286
+ tmp_phi[0] = p
287
+ tmp_Fx[0] = v
288
+ return p
289
+
290
+ def derphi(s):
291
+ ds = (abs(s) + s_norm + 1) * rdiff
292
+ return (phi(s+ds, store=False) - phi(s)) / ds
293
+
294
+ if search_type == 'wolfe':
295
+ s, phi1, phi0 = scalar_search_wolfe1(phi, derphi, tmp_phi[0],
296
+ xtol=1e-2, amin=smin)
297
+ elif search_type == 'armijo':
298
+ s, phi1 = scalar_search_armijo(phi, tmp_phi[0], -tmp_phi[0],
299
+ amin=smin)
300
+
301
+ if s is None:
302
+ # XXX: No suitable step length found. Take the full Newton step,
303
+ # and hope for the best.
304
+ s = 1.0
305
+
306
+ x = x + s*dx
307
+ if s == tmp_s[0]:
308
+ Fx = tmp_Fx[0]
309
+ else:
310
+ Fx = func(x)
311
+ Fx_norm = norm(Fx)
312
+
313
+ return s, x, Fx, Fx_norm
314
+
315
+
316
+ class TerminationCondition:
317
+ """
318
+ Termination condition for an iteration. It is terminated if
319
+
320
+ - |F| < f_rtol*|F_0|, AND
321
+ - |F| < f_tol
322
+
323
+ AND
324
+
325
+ - |dx| < x_rtol*|x|, AND
326
+ - |dx| < x_tol
327
+
328
+ """
329
+ def __init__(self, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
330
+ iter=None, norm=maxnorm):
331
+
332
+ if f_tol is None:
333
+ f_tol = np.finfo(np.float64).eps ** (1./3)
334
+ if f_rtol is None:
335
+ f_rtol = np.inf
336
+ if x_tol is None:
337
+ x_tol = np.inf
338
+ if x_rtol is None:
339
+ x_rtol = np.inf
340
+
341
+ self.x_tol = x_tol
342
+ self.x_rtol = x_rtol
343
+ self.f_tol = f_tol
344
+ self.f_rtol = f_rtol
345
+
346
+ self.norm = norm
347
+
348
+ self.iter = iter
349
+
350
+ self.f0_norm = None
351
+ self.iteration = 0
352
+
353
+ def check(self, f, x, dx):
354
+ self.iteration += 1
355
+ f_norm = self.norm(f)
356
+ x_norm = self.norm(x)
357
+ dx_norm = self.norm(dx)
358
+
359
+ if self.f0_norm is None:
360
+ self.f0_norm = f_norm
361
+
362
+ if f_norm == 0:
363
+ return 1
364
+
365
+ if self.iter is not None:
366
+ # backwards compatibility with SciPy 0.6.0
367
+ return 2 * (self.iteration > self.iter)
368
+
369
+ # NB: condition must succeed for rtol=inf even if norm == 0
370
+ return int((f_norm <= self.f_tol
371
+ and f_norm/self.f_rtol <= self.f0_norm)
372
+ and (dx_norm <= self.x_tol
373
+ and dx_norm/self.x_rtol <= x_norm))
374
+
375
+
376
+ #------------------------------------------------------------------------------
377
+ # Generic Jacobian approximation
378
+ #------------------------------------------------------------------------------
379
+
380
+ class Jacobian:
381
+ """
382
+ Common interface for Jacobians or Jacobian approximations.
383
+
384
+ The optional methods come useful when implementing trust region
385
+ etc., algorithms that often require evaluating transposes of the
386
+ Jacobian.
387
+
388
+ Methods
389
+ -------
390
+ solve
391
+ Returns J^-1 * v
392
+ update
393
+ Updates Jacobian to point `x` (where the function has residual `Fx`)
394
+
395
+ matvec : optional
396
+ Returns J * v
397
+ rmatvec : optional
398
+ Returns J^H * v
399
+ rsolve : optional
400
+ Returns J^-H * v
401
+ matmat : optional
402
+ Returns J * V, where V is a dense matrix with dimensions (N,K).
403
+ todense : optional
404
+ Form the dense Jacobian matrix. Necessary for dense trust region
405
+ algorithms, and useful for testing.
406
+
407
+ Attributes
408
+ ----------
409
+ shape
410
+ Matrix dimensions (M, N)
411
+ dtype
412
+ Data type of the matrix.
413
+ func : callable, optional
414
+ Function the Jacobian corresponds to
415
+
416
+ """
417
+
418
+ def __init__(self, **kw):
419
+ names = ["solve", "update", "matvec", "rmatvec", "rsolve",
420
+ "matmat", "todense", "shape", "dtype"]
421
+ for name, value in kw.items():
422
+ if name not in names:
423
+ raise ValueError("Unknown keyword argument %s" % name)
424
+ if value is not None:
425
+ setattr(self, name, kw[name])
426
+
427
+
428
+ if hasattr(self, "todense"):
429
+ def __array__(self, dtype=None, copy=None):
430
+ if dtype is not None:
431
+ raise ValueError(f"`dtype` must be None, was {dtype}")
432
+ return self.todense()
433
+
434
+ def aspreconditioner(self):
435
+ return InverseJacobian(self)
436
+
437
+ def solve(self, v, tol=0):
438
+ raise NotImplementedError
439
+
440
+ def update(self, x, F):
441
+ pass
442
+
443
+ def setup(self, x, F, func):
444
+ self.func = func
445
+ self.shape = (F.size, x.size)
446
+ self.dtype = F.dtype
447
+ if self.__class__.setup is Jacobian.setup:
448
+ # Call on the first point unless overridden
449
+ self.update(x, F)
450
+
451
+
452
+ class InverseJacobian:
453
+ def __init__(self, jacobian):
454
+ self.jacobian = jacobian
455
+ self.matvec = jacobian.solve
456
+ self.update = jacobian.update
457
+ if hasattr(jacobian, 'setup'):
458
+ self.setup = jacobian.setup
459
+ if hasattr(jacobian, 'rsolve'):
460
+ self.rmatvec = jacobian.rsolve
461
+
462
+ @property
463
+ def shape(self):
464
+ return self.jacobian.shape
465
+
466
+ @property
467
+ def dtype(self):
468
+ return self.jacobian.dtype
469
+
470
+
471
+ def asjacobian(J):
472
+ """
473
+ Convert given object to one suitable for use as a Jacobian.
474
+ """
475
+ spsolve = scipy.sparse.linalg.spsolve
476
+ if isinstance(J, Jacobian):
477
+ return J
478
+ elif inspect.isclass(J) and issubclass(J, Jacobian):
479
+ return J()
480
+ elif isinstance(J, np.ndarray):
481
+ if J.ndim > 2:
482
+ raise ValueError('array must have rank <= 2')
483
+ J = np.atleast_2d(np.asarray(J))
484
+ if J.shape[0] != J.shape[1]:
485
+ raise ValueError('array must be square')
486
+
487
+ return Jacobian(matvec=lambda v: dot(J, v),
488
+ rmatvec=lambda v: dot(J.conj().T, v),
489
+ solve=lambda v, tol=0: solve(J, v),
490
+ rsolve=lambda v, tol=0: solve(J.conj().T, v),
491
+ dtype=J.dtype, shape=J.shape)
492
+ elif scipy.sparse.issparse(J):
493
+ if J.shape[0] != J.shape[1]:
494
+ raise ValueError('matrix must be square')
495
+ return Jacobian(matvec=lambda v: J @ v,
496
+ rmatvec=lambda v: J.conj().T @ v,
497
+ solve=lambda v, tol=0: spsolve(J, v),
498
+ rsolve=lambda v, tol=0: spsolve(J.conj().T, v),
499
+ dtype=J.dtype, shape=J.shape)
500
+ elif hasattr(J, 'shape') and hasattr(J, 'dtype') and hasattr(J, 'solve'):
501
+ return Jacobian(matvec=getattr(J, 'matvec'),
502
+ rmatvec=getattr(J, 'rmatvec'),
503
+ solve=J.solve,
504
+ rsolve=getattr(J, 'rsolve'),
505
+ update=getattr(J, 'update'),
506
+ setup=getattr(J, 'setup'),
507
+ dtype=J.dtype,
508
+ shape=J.shape)
509
+ elif callable(J):
510
+ # Assume it's a function J(x) that returns the Jacobian
511
+ class Jac(Jacobian):
512
+ def update(self, x, F):
513
+ self.x = x
514
+
515
+ def solve(self, v, tol=0):
516
+ m = J(self.x)
517
+ if isinstance(m, np.ndarray):
518
+ return solve(m, v)
519
+ elif scipy.sparse.issparse(m):
520
+ return spsolve(m, v)
521
+ else:
522
+ raise ValueError("Unknown matrix type")
523
+
524
+ def matvec(self, v):
525
+ m = J(self.x)
526
+ if isinstance(m, np.ndarray):
527
+ return dot(m, v)
528
+ elif scipy.sparse.issparse(m):
529
+ return m @ v
530
+ else:
531
+ raise ValueError("Unknown matrix type")
532
+
533
+ def rsolve(self, v, tol=0):
534
+ m = J(self.x)
535
+ if isinstance(m, np.ndarray):
536
+ return solve(m.conj().T, v)
537
+ elif scipy.sparse.issparse(m):
538
+ return spsolve(m.conj().T, v)
539
+ else:
540
+ raise ValueError("Unknown matrix type")
541
+
542
+ def rmatvec(self, v):
543
+ m = J(self.x)
544
+ if isinstance(m, np.ndarray):
545
+ return dot(m.conj().T, v)
546
+ elif scipy.sparse.issparse(m):
547
+ return m.conj().T @ v
548
+ else:
549
+ raise ValueError("Unknown matrix type")
550
+ return Jac()
551
+ elif isinstance(J, str):
552
+ return dict(broyden1=BroydenFirst,
553
+ broyden2=BroydenSecond,
554
+ anderson=Anderson,
555
+ diagbroyden=DiagBroyden,
556
+ linearmixing=LinearMixing,
557
+ excitingmixing=ExcitingMixing,
558
+ krylov=KrylovJacobian)[J]()
559
+ else:
560
+ raise TypeError('Cannot convert object to a Jacobian')
561
+
562
+
563
+ #------------------------------------------------------------------------------
564
+ # Broyden
565
+ #------------------------------------------------------------------------------
566
+
567
+ class GenericBroyden(Jacobian):
568
+ def setup(self, x0, f0, func):
569
+ Jacobian.setup(self, x0, f0, func)
570
+ self.last_f = f0
571
+ self.last_x = x0
572
+
573
+ if hasattr(self, 'alpha') and self.alpha is None:
574
+ # Autoscale the initial Jacobian parameter
575
+ # unless we have already guessed the solution.
576
+ normf0 = norm(f0)
577
+ if normf0:
578
+ self.alpha = 0.5*max(norm(x0), 1) / normf0
579
+ else:
580
+ self.alpha = 1.0
581
+
582
+ def _update(self, x, f, dx, df, dx_norm, df_norm):
583
+ raise NotImplementedError
584
+
585
+ def update(self, x, f):
586
+ df = f - self.last_f
587
+ dx = x - self.last_x
588
+ self._update(x, f, dx, df, norm(dx), norm(df))
589
+ self.last_f = f
590
+ self.last_x = x
591
+
592
+
593
+ class LowRankMatrix:
594
+ r"""
595
+ A matrix represented as
596
+
597
+ .. math:: \alpha I + \sum_{n=0}^{n=M} c_n d_n^\dagger
598
+
599
+ However, if the rank of the matrix reaches the dimension of the vectors,
600
+ full matrix representation will be used thereon.
601
+
602
+ """
603
+
604
+ def __init__(self, alpha, n, dtype):
605
+ self.alpha = alpha
606
+ self.cs = []
607
+ self.ds = []
608
+ self.n = n
609
+ self.dtype = dtype
610
+ self.collapsed = None
611
+
612
+ @staticmethod
613
+ def _matvec(v, alpha, cs, ds):
614
+ axpy, scal, dotc = get_blas_funcs(['axpy', 'scal', 'dotc'],
615
+ cs[:1] + [v])
616
+ w = alpha * v
617
+ for c, d in zip(cs, ds):
618
+ a = dotc(d, v)
619
+ w = axpy(c, w, w.size, a)
620
+ return w
621
+
622
+ @staticmethod
623
+ def _solve(v, alpha, cs, ds):
624
+ """Evaluate w = M^-1 v"""
625
+ if len(cs) == 0:
626
+ return v/alpha
627
+
628
+ # (B + C D^H)^-1 = B^-1 - B^-1 C (I + D^H B^-1 C)^-1 D^H B^-1
629
+
630
+ axpy, dotc = get_blas_funcs(['axpy', 'dotc'], cs[:1] + [v])
631
+
632
+ c0 = cs[0]
633
+ A = alpha * np.identity(len(cs), dtype=c0.dtype)
634
+ for i, d in enumerate(ds):
635
+ for j, c in enumerate(cs):
636
+ A[i,j] += dotc(d, c)
637
+
638
+ q = np.zeros(len(cs), dtype=c0.dtype)
639
+ for j, d in enumerate(ds):
640
+ q[j] = dotc(d, v)
641
+ q /= alpha
642
+ q = solve(A, q)
643
+
644
+ w = v/alpha
645
+ for c, qc in zip(cs, q):
646
+ w = axpy(c, w, w.size, -qc)
647
+
648
+ return w
649
+
650
+ def matvec(self, v):
651
+ """Evaluate w = M v"""
652
+ if self.collapsed is not None:
653
+ return np.dot(self.collapsed, v)
654
+ return LowRankMatrix._matvec(v, self.alpha, self.cs, self.ds)
655
+
656
+ def rmatvec(self, v):
657
+ """Evaluate w = M^H v"""
658
+ if self.collapsed is not None:
659
+ return np.dot(self.collapsed.T.conj(), v)
660
+ return LowRankMatrix._matvec(v, np.conj(self.alpha), self.ds, self.cs)
661
+
662
+ def solve(self, v, tol=0):
663
+ """Evaluate w = M^-1 v"""
664
+ if self.collapsed is not None:
665
+ return solve(self.collapsed, v)
666
+ return LowRankMatrix._solve(v, self.alpha, self.cs, self.ds)
667
+
668
+ def rsolve(self, v, tol=0):
669
+ """Evaluate w = M^-H v"""
670
+ if self.collapsed is not None:
671
+ return solve(self.collapsed.T.conj(), v)
672
+ return LowRankMatrix._solve(v, np.conj(self.alpha), self.ds, self.cs)
673
+
674
+ def append(self, c, d):
675
+ if self.collapsed is not None:
676
+ self.collapsed += c[:,None] * d[None,:].conj()
677
+ return
678
+
679
+ self.cs.append(c)
680
+ self.ds.append(d)
681
+
682
+ if len(self.cs) > c.size:
683
+ self.collapse()
684
+
685
+ def __array__(self, dtype=None, copy=None):
686
+ if dtype is not None:
687
+ warnings.warn("LowRankMatrix is scipy-internal code, `dtype` "
688
+ f"should only be None but was {dtype} (not handled)",
689
+ stacklevel=3)
690
+ if copy is not None:
691
+ warnings.warn("LowRankMatrix is scipy-internal code, `copy` "
692
+ f"should only be None but was {copy} (not handled)",
693
+ stacklevel=3)
694
+ if self.collapsed is not None:
695
+ return self.collapsed
696
+
697
+ Gm = self.alpha*np.identity(self.n, dtype=self.dtype)
698
+ for c, d in zip(self.cs, self.ds):
699
+ Gm += c[:,None]*d[None,:].conj()
700
+ return Gm
701
+
702
+ def collapse(self):
703
+ """Collapse the low-rank matrix to a full-rank one."""
704
+ self.collapsed = np.array(self)
705
+ self.cs = None
706
+ self.ds = None
707
+ self.alpha = None
708
+
709
+ def restart_reduce(self, rank):
710
+ """
711
+ Reduce the rank of the matrix by dropping all vectors.
712
+ """
713
+ if self.collapsed is not None:
714
+ return
715
+ assert rank > 0
716
+ if len(self.cs) > rank:
717
+ del self.cs[:]
718
+ del self.ds[:]
719
+
720
+ def simple_reduce(self, rank):
721
+ """
722
+ Reduce the rank of the matrix by dropping oldest vectors.
723
+ """
724
+ if self.collapsed is not None:
725
+ return
726
+ assert rank > 0
727
+ while len(self.cs) > rank:
728
+ del self.cs[0]
729
+ del self.ds[0]
730
+
731
+ def svd_reduce(self, max_rank, to_retain=None):
732
+ """
733
+ Reduce the rank of the matrix by retaining some SVD components.
734
+
735
+ This corresponds to the \"Broyden Rank Reduction Inverse\"
736
+ algorithm described in [1]_.
737
+
738
+ Note that the SVD decomposition can be done by solving only a
739
+ problem whose size is the effective rank of this matrix, which
740
+ is viable even for large problems.
741
+
742
+ Parameters
743
+ ----------
744
+ max_rank : int
745
+ Maximum rank of this matrix after reduction.
746
+ to_retain : int, optional
747
+ Number of SVD components to retain when reduction is done
748
+ (ie. rank > max_rank). Default is ``max_rank - 2``.
749
+
750
+ References
751
+ ----------
752
+ .. [1] B.A. van der Rotten, PhD thesis,
753
+ \"A limited memory Broyden method to solve high-dimensional
754
+ systems of nonlinear equations\". Mathematisch Instituut,
755
+ Universiteit Leiden, The Netherlands (2003).
756
+
757
+ https://web.archive.org/web/20161022015821/http://www.math.leidenuniv.nl/scripties/Rotten.pdf
758
+
759
+ """
760
+ if self.collapsed is not None:
761
+ return
762
+
763
+ p = max_rank
764
+ if to_retain is not None:
765
+ q = to_retain
766
+ else:
767
+ q = p - 2
768
+
769
+ if self.cs:
770
+ p = min(p, len(self.cs[0]))
771
+ q = max(0, min(q, p-1))
772
+
773
+ m = len(self.cs)
774
+ if m < p:
775
+ # nothing to do
776
+ return
777
+
778
+ C = np.array(self.cs).T
779
+ D = np.array(self.ds).T
780
+
781
+ D, R = qr(D, mode='economic')
782
+ C = dot(C, R.T.conj())
783
+
784
+ U, S, WH = svd(C, full_matrices=False)
785
+
786
+ C = dot(C, inv(WH))
787
+ D = dot(D, WH.T.conj())
788
+
789
+ for k in range(q):
790
+ self.cs[k] = C[:,k].copy()
791
+ self.ds[k] = D[:,k].copy()
792
+
793
+ del self.cs[q:]
794
+ del self.ds[q:]
795
+
796
+
797
+ _doc_parts['broyden_params'] = """
798
+ alpha : float, optional
799
+ Initial guess for the Jacobian is ``(-1/alpha)``.
800
+ reduction_method : str or tuple, optional
801
+ Method used in ensuring that the rank of the Broyden matrix
802
+ stays low. Can either be a string giving the name of the method,
803
+ or a tuple of the form ``(method, param1, param2, ...)``
804
+ that gives the name of the method and values for additional parameters.
805
+
806
+ Methods available:
807
+
808
+ - ``restart``: drop all matrix columns. Has no extra parameters.
809
+ - ``simple``: drop oldest matrix column. Has no extra parameters.
810
+ - ``svd``: keep only the most significant SVD components.
811
+ Takes an extra parameter, ``to_retain``, which determines the
812
+ number of SVD components to retain when rank reduction is done.
813
+ Default is ``max_rank - 2``.
814
+
815
+ max_rank : int, optional
816
+ Maximum rank for the Broyden matrix.
817
+ Default is infinity (i.e., no rank reduction).
818
+ """.strip()
819
+
820
+
821
+ class BroydenFirst(GenericBroyden):
822
+ r"""
823
+ Find a root of a function, using Broyden's first Jacobian approximation.
824
+
825
+ This method is also known as \"Broyden's good method\".
826
+
827
+ Parameters
828
+ ----------
829
+ %(params_basic)s
830
+ %(broyden_params)s
831
+ %(params_extra)s
832
+
833
+ See Also
834
+ --------
835
+ root : Interface to root finding algorithms for multivariate
836
+ functions. See ``method='broyden1'`` in particular.
837
+
838
+ Notes
839
+ -----
840
+ This algorithm implements the inverse Jacobian Quasi-Newton update
841
+
842
+ .. math:: H_+ = H + (dx - H df) dx^\dagger H / ( dx^\dagger H df)
843
+
844
+ which corresponds to Broyden's first Jacobian update
845
+
846
+ .. math:: J_+ = J + (df - J dx) dx^\dagger / dx^\dagger dx
847
+
848
+
849
+ References
850
+ ----------
851
+ .. [1] B.A. van der Rotten, PhD thesis,
852
+ \"A limited memory Broyden method to solve high-dimensional
853
+ systems of nonlinear equations\". Mathematisch Instituut,
854
+ Universiteit Leiden, The Netherlands (2003).
855
+
856
+ https://web.archive.org/web/20161022015821/http://www.math.leidenuniv.nl/scripties/Rotten.pdf
857
+
858
+ Examples
859
+ --------
860
+ The following functions define a system of nonlinear equations
861
+
862
+ >>> def fun(x):
863
+ ... return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
864
+ ... 0.5 * (x[1] - x[0])**3 + x[1]]
865
+
866
+ A solution can be obtained as follows.
867
+
868
+ >>> from scipy import optimize
869
+ >>> sol = optimize.broyden1(fun, [0, 0])
870
+ >>> sol
871
+ array([0.84116396, 0.15883641])
872
+
873
+ """
874
+
875
+ def __init__(self, alpha=None, reduction_method='restart', max_rank=None):
876
+ GenericBroyden.__init__(self)
877
+ self.alpha = alpha
878
+ self.Gm = None
879
+
880
+ if max_rank is None:
881
+ max_rank = np.inf
882
+ self.max_rank = max_rank
883
+
884
+ if isinstance(reduction_method, str):
885
+ reduce_params = ()
886
+ else:
887
+ reduce_params = reduction_method[1:]
888
+ reduction_method = reduction_method[0]
889
+ reduce_params = (max_rank - 1,) + reduce_params
890
+
891
+ if reduction_method == 'svd':
892
+ self._reduce = lambda: self.Gm.svd_reduce(*reduce_params)
893
+ elif reduction_method == 'simple':
894
+ self._reduce = lambda: self.Gm.simple_reduce(*reduce_params)
895
+ elif reduction_method == 'restart':
896
+ self._reduce = lambda: self.Gm.restart_reduce(*reduce_params)
897
+ else:
898
+ raise ValueError("Unknown rank reduction method '%s'" %
899
+ reduction_method)
900
+
901
+ def setup(self, x, F, func):
902
+ GenericBroyden.setup(self, x, F, func)
903
+ self.Gm = LowRankMatrix(-self.alpha, self.shape[0], self.dtype)
904
+
905
+ def todense(self):
906
+ return inv(self.Gm)
907
+
908
+ def solve(self, f, tol=0):
909
+ r = self.Gm.matvec(f)
910
+ if not np.isfinite(r).all():
911
+ # singular; reset the Jacobian approximation
912
+ self.setup(self.last_x, self.last_f, self.func)
913
+ return self.Gm.matvec(f)
914
+ return r
915
+
916
+ def matvec(self, f):
917
+ return self.Gm.solve(f)
918
+
919
+ def rsolve(self, f, tol=0):
920
+ return self.Gm.rmatvec(f)
921
+
922
+ def rmatvec(self, f):
923
+ return self.Gm.rsolve(f)
924
+
925
+ def _update(self, x, f, dx, df, dx_norm, df_norm):
926
+ self._reduce() # reduce first to preserve secant condition
927
+
928
+ v = self.Gm.rmatvec(dx)
929
+ c = dx - self.Gm.matvec(df)
930
+ d = v / vdot(df, v)
931
+
932
+ self.Gm.append(c, d)
933
+
934
+
935
+ class BroydenSecond(BroydenFirst):
936
+ """
937
+ Find a root of a function, using Broyden\'s second Jacobian approximation.
938
+
939
+ This method is also known as \"Broyden's bad method\".
940
+
941
+ Parameters
942
+ ----------
943
+ %(params_basic)s
944
+ %(broyden_params)s
945
+ %(params_extra)s
946
+
947
+ See Also
948
+ --------
949
+ root : Interface to root finding algorithms for multivariate
950
+ functions. See ``method='broyden2'`` in particular.
951
+
952
+ Notes
953
+ -----
954
+ This algorithm implements the inverse Jacobian Quasi-Newton update
955
+
956
+ .. math:: H_+ = H + (dx - H df) df^\\dagger / ( df^\\dagger df)
957
+
958
+ corresponding to Broyden's second method.
959
+
960
+ References
961
+ ----------
962
+ .. [1] B.A. van der Rotten, PhD thesis,
963
+ \"A limited memory Broyden method to solve high-dimensional
964
+ systems of nonlinear equations\". Mathematisch Instituut,
965
+ Universiteit Leiden, The Netherlands (2003).
966
+
967
+ https://web.archive.org/web/20161022015821/http://www.math.leidenuniv.nl/scripties/Rotten.pdf
968
+
969
+ Examples
970
+ --------
971
+ The following functions define a system of nonlinear equations
972
+
973
+ >>> def fun(x):
974
+ ... return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
975
+ ... 0.5 * (x[1] - x[0])**3 + x[1]]
976
+
977
+ A solution can be obtained as follows.
978
+
979
+ >>> from scipy import optimize
980
+ >>> sol = optimize.broyden2(fun, [0, 0])
981
+ >>> sol
982
+ array([0.84116365, 0.15883529])
983
+
984
+ """
985
+
986
+ def _update(self, x, f, dx, df, dx_norm, df_norm):
987
+ self._reduce() # reduce first to preserve secant condition
988
+
989
+ v = df
990
+ c = dx - self.Gm.matvec(df)
991
+ d = v / df_norm**2
992
+ self.Gm.append(c, d)
993
+
994
+
995
+ #------------------------------------------------------------------------------
996
+ # Broyden-like (restricted memory)
997
+ #------------------------------------------------------------------------------
998
+
999
+ class Anderson(GenericBroyden):
1000
+ """
1001
+ Find a root of a function, using (extended) Anderson mixing.
1002
+
1003
+ The Jacobian is formed by seeking the 'best' solution in the space
1004
+ spanned by the last `M` vectors. As a result, only an MxM matrix
1005
+ inversion and MxN multiplications are required. [Ey]_
1006
+
1007
+ Parameters
1008
+ ----------
1009
+ %(params_basic)s
1010
+ alpha : float, optional
1011
+ Initial guess for the Jacobian is (-1/alpha).
1012
+ M : float, optional
1013
+ Number of previous vectors to retain. Defaults to 5.
1014
+ w0 : float, optional
1015
+ Regularization parameter for numerical stability.
1016
+ Good values are of the order of 0.01, compared to unity.
1017
+ %(params_extra)s
1018
+
1019
+ See Also
1020
+ --------
1021
+ root : Interface to root finding algorithms for multivariate
1022
+ functions. See ``method='anderson'`` in particular.
1023
+
1024
+ References
1025
+ ----------
1026
+ .. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996).
1027
+
1028
+ Examples
1029
+ --------
1030
+ The following functions define a system of nonlinear equations
1031
+
1032
+ >>> def fun(x):
1033
+ ... return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
1034
+ ... 0.5 * (x[1] - x[0])**3 + x[1]]
1035
+
1036
+ A solution can be obtained as follows.
1037
+
1038
+ >>> from scipy import optimize
1039
+ >>> sol = optimize.anderson(fun, [0, 0])
1040
+ >>> sol
1041
+ array([0.84116588, 0.15883789])
1042
+
1043
+ """
1044
+
1045
+ # Note:
1046
+ #
1047
+ # Anderson method maintains a rank M approximation of the inverse Jacobian,
1048
+ #
1049
+ # J^-1 v ~ -v*alpha + (dX + alpha dF) A^-1 dF^H v
1050
+ # A = W + dF^H dF
1051
+ # W = w0^2 diag(dF^H dF)
1052
+ #
1053
+ # so that for w0 = 0 the secant condition applies for last M iterates, i.e.,
1054
+ #
1055
+ # J^-1 df_j = dx_j
1056
+ #
1057
+ # for all j = 0 ... M-1.
1058
+ #
1059
+ # Moreover, (from Sherman-Morrison-Woodbury formula)
1060
+ #
1061
+ # J v ~ [ b I - b^2 C (I + b dF^H A^-1 C)^-1 dF^H ] v
1062
+ # C = (dX + alpha dF) A^-1
1063
+ # b = -1/alpha
1064
+ #
1065
+ # and after simplification
1066
+ #
1067
+ # J v ~ -v/alpha + (dX/alpha + dF) (dF^H dX - alpha W)^-1 dF^H v
1068
+ #
1069
+
1070
+ def __init__(self, alpha=None, w0=0.01, M=5):
1071
+ GenericBroyden.__init__(self)
1072
+ self.alpha = alpha
1073
+ self.M = M
1074
+ self.dx = []
1075
+ self.df = []
1076
+ self.gamma = None
1077
+ self.w0 = w0
1078
+
1079
+ def solve(self, f, tol=0):
1080
+ dx = -self.alpha*f
1081
+
1082
+ n = len(self.dx)
1083
+ if n == 0:
1084
+ return dx
1085
+
1086
+ df_f = np.empty(n, dtype=f.dtype)
1087
+ for k in range(n):
1088
+ df_f[k] = vdot(self.df[k], f)
1089
+
1090
+ try:
1091
+ gamma = solve(self.a, df_f)
1092
+ except LinAlgError:
1093
+ # singular; reset the Jacobian approximation
1094
+ del self.dx[:]
1095
+ del self.df[:]
1096
+ return dx
1097
+
1098
+ for m in range(n):
1099
+ dx += gamma[m]*(self.dx[m] + self.alpha*self.df[m])
1100
+ return dx
1101
+
1102
+ def matvec(self, f):
1103
+ dx = -f/self.alpha
1104
+
1105
+ n = len(self.dx)
1106
+ if n == 0:
1107
+ return dx
1108
+
1109
+ df_f = np.empty(n, dtype=f.dtype)
1110
+ for k in range(n):
1111
+ df_f[k] = vdot(self.df[k], f)
1112
+
1113
+ b = np.empty((n, n), dtype=f.dtype)
1114
+ for i in range(n):
1115
+ for j in range(n):
1116
+ b[i,j] = vdot(self.df[i], self.dx[j])
1117
+ if i == j and self.w0 != 0:
1118
+ b[i,j] -= vdot(self.df[i], self.df[i])*self.w0**2*self.alpha
1119
+ gamma = solve(b, df_f)
1120
+
1121
+ for m in range(n):
1122
+ dx += gamma[m]*(self.df[m] + self.dx[m]/self.alpha)
1123
+ return dx
1124
+
1125
+ def _update(self, x, f, dx, df, dx_norm, df_norm):
1126
+ if self.M == 0:
1127
+ return
1128
+
1129
+ self.dx.append(dx)
1130
+ self.df.append(df)
1131
+
1132
+ while len(self.dx) > self.M:
1133
+ self.dx.pop(0)
1134
+ self.df.pop(0)
1135
+
1136
+ n = len(self.dx)
1137
+ a = np.zeros((n, n), dtype=f.dtype)
1138
+
1139
+ for i in range(n):
1140
+ for j in range(i, n):
1141
+ if i == j:
1142
+ wd = self.w0**2
1143
+ else:
1144
+ wd = 0
1145
+ a[i,j] = (1+wd)*vdot(self.df[i], self.df[j])
1146
+
1147
+ a += np.triu(a, 1).T.conj()
1148
+ self.a = a
1149
+
1150
+ #------------------------------------------------------------------------------
1151
+ # Simple iterations
1152
+ #------------------------------------------------------------------------------
1153
+
1154
+
1155
+ class DiagBroyden(GenericBroyden):
1156
+ """
1157
+ Find a root of a function, using diagonal Broyden Jacobian approximation.
1158
+
1159
+ The Jacobian approximation is derived from previous iterations, by
1160
+ retaining only the diagonal of Broyden matrices.
1161
+
1162
+ .. warning::
1163
+
1164
+ This algorithm may be useful for specific problems, but whether
1165
+ it will work may depend strongly on the problem.
1166
+
1167
+ Parameters
1168
+ ----------
1169
+ %(params_basic)s
1170
+ alpha : float, optional
1171
+ Initial guess for the Jacobian is (-1/alpha).
1172
+ %(params_extra)s
1173
+
1174
+ See Also
1175
+ --------
1176
+ root : Interface to root finding algorithms for multivariate
1177
+ functions. See ``method='diagbroyden'`` in particular.
1178
+
1179
+ Examples
1180
+ --------
1181
+ The following functions define a system of nonlinear equations
1182
+
1183
+ >>> def fun(x):
1184
+ ... return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
1185
+ ... 0.5 * (x[1] - x[0])**3 + x[1]]
1186
+
1187
+ A solution can be obtained as follows.
1188
+
1189
+ >>> from scipy import optimize
1190
+ >>> sol = optimize.diagbroyden(fun, [0, 0])
1191
+ >>> sol
1192
+ array([0.84116403, 0.15883384])
1193
+
1194
+ """
1195
+
1196
+ def __init__(self, alpha=None):
1197
+ GenericBroyden.__init__(self)
1198
+ self.alpha = alpha
1199
+
1200
+ def setup(self, x, F, func):
1201
+ GenericBroyden.setup(self, x, F, func)
1202
+ self.d = np.full((self.shape[0],), 1 / self.alpha, dtype=self.dtype)
1203
+
1204
+ def solve(self, f, tol=0):
1205
+ return -f / self.d
1206
+
1207
+ def matvec(self, f):
1208
+ return -f * self.d
1209
+
1210
+ def rsolve(self, f, tol=0):
1211
+ return -f / self.d.conj()
1212
+
1213
+ def rmatvec(self, f):
1214
+ return -f * self.d.conj()
1215
+
1216
+ def todense(self):
1217
+ return np.diag(-self.d)
1218
+
1219
+ def _update(self, x, f, dx, df, dx_norm, df_norm):
1220
+ self.d -= (df + self.d*dx)*dx/dx_norm**2
1221
+
1222
+
1223
+ class LinearMixing(GenericBroyden):
1224
+ """
1225
+ Find a root of a function, using a scalar Jacobian approximation.
1226
+
1227
+ .. warning::
1228
+
1229
+ This algorithm may be useful for specific problems, but whether
1230
+ it will work may depend strongly on the problem.
1231
+
1232
+ Parameters
1233
+ ----------
1234
+ %(params_basic)s
1235
+ alpha : float, optional
1236
+ The Jacobian approximation is (-1/alpha).
1237
+ %(params_extra)s
1238
+
1239
+ See Also
1240
+ --------
1241
+ root : Interface to root finding algorithms for multivariate
1242
+ functions. See ``method='linearmixing'`` in particular.
1243
+
1244
+ """
1245
+
1246
+ def __init__(self, alpha=None):
1247
+ GenericBroyden.__init__(self)
1248
+ self.alpha = alpha
1249
+
1250
+ def solve(self, f, tol=0):
1251
+ return -f*self.alpha
1252
+
1253
+ def matvec(self, f):
1254
+ return -f/self.alpha
1255
+
1256
+ def rsolve(self, f, tol=0):
1257
+ return -f*np.conj(self.alpha)
1258
+
1259
+ def rmatvec(self, f):
1260
+ return -f/np.conj(self.alpha)
1261
+
1262
+ def todense(self):
1263
+ return np.diag(np.full(self.shape[0], -1/self.alpha))
1264
+
1265
+ def _update(self, x, f, dx, df, dx_norm, df_norm):
1266
+ pass
1267
+
1268
+
1269
+ class ExcitingMixing(GenericBroyden):
1270
+ """
1271
+ Find a root of a function, using a tuned diagonal Jacobian approximation.
1272
+
1273
+ The Jacobian matrix is diagonal and is tuned on each iteration.
1274
+
1275
+ .. warning::
1276
+
1277
+ This algorithm may be useful for specific problems, but whether
1278
+ it will work may depend strongly on the problem.
1279
+
1280
+ See Also
1281
+ --------
1282
+ root : Interface to root finding algorithms for multivariate
1283
+ functions. See ``method='excitingmixing'`` in particular.
1284
+
1285
+ Parameters
1286
+ ----------
1287
+ %(params_basic)s
1288
+ alpha : float, optional
1289
+ Initial Jacobian approximation is (-1/alpha).
1290
+ alphamax : float, optional
1291
+ The entries of the diagonal Jacobian are kept in the range
1292
+ ``[alpha, alphamax]``.
1293
+ %(params_extra)s
1294
+ """
1295
+
1296
+ def __init__(self, alpha=None, alphamax=1.0):
1297
+ GenericBroyden.__init__(self)
1298
+ self.alpha = alpha
1299
+ self.alphamax = alphamax
1300
+ self.beta = None
1301
+
1302
+ def setup(self, x, F, func):
1303
+ GenericBroyden.setup(self, x, F, func)
1304
+ self.beta = np.full((self.shape[0],), self.alpha, dtype=self.dtype)
1305
+
1306
+ def solve(self, f, tol=0):
1307
+ return -f*self.beta
1308
+
1309
+ def matvec(self, f):
1310
+ return -f/self.beta
1311
+
1312
+ def rsolve(self, f, tol=0):
1313
+ return -f*self.beta.conj()
1314
+
1315
+ def rmatvec(self, f):
1316
+ return -f/self.beta.conj()
1317
+
1318
+ def todense(self):
1319
+ return np.diag(-1/self.beta)
1320
+
1321
+ def _update(self, x, f, dx, df, dx_norm, df_norm):
1322
+ incr = f*self.last_f > 0
1323
+ self.beta[incr] += self.alpha
1324
+ self.beta[~incr] = self.alpha
1325
+ np.clip(self.beta, 0, self.alphamax, out=self.beta)
1326
+
1327
+
1328
+ #------------------------------------------------------------------------------
1329
+ # Iterative/Krylov approximated Jacobians
1330
+ #------------------------------------------------------------------------------
1331
+
1332
+ class KrylovJacobian(Jacobian):
1333
+ r"""
1334
+ Find a root of a function, using Krylov approximation for inverse Jacobian.
1335
+
1336
+ This method is suitable for solving large-scale problems.
1337
+
1338
+ Parameters
1339
+ ----------
1340
+ %(params_basic)s
1341
+ rdiff : float, optional
1342
+ Relative step size to use in numerical differentiation.
1343
+ method : str or callable, optional
1344
+ Krylov method to use to approximate the Jacobian. Can be a string,
1345
+ or a function implementing the same interface as the iterative
1346
+ solvers in `scipy.sparse.linalg`. If a string, needs to be one of:
1347
+ ``'lgmres'``, ``'gmres'``, ``'bicgstab'``, ``'cgs'``, ``'minres'``,
1348
+ ``'tfqmr'``.
1349
+
1350
+ The default is `scipy.sparse.linalg.lgmres`.
1351
+ inner_maxiter : int, optional
1352
+ Parameter to pass to the "inner" Krylov solver: maximum number of
1353
+ iterations. Iteration will stop after maxiter steps even if the
1354
+ specified tolerance has not been achieved.
1355
+ inner_M : LinearOperator or InverseJacobian
1356
+ Preconditioner for the inner Krylov iteration.
1357
+ Note that you can use also inverse Jacobians as (adaptive)
1358
+ preconditioners. For example,
1359
+
1360
+ >>> from scipy.optimize import BroydenFirst, KrylovJacobian
1361
+ >>> from scipy.optimize import InverseJacobian
1362
+ >>> jac = BroydenFirst()
1363
+ >>> kjac = KrylovJacobian(inner_M=InverseJacobian(jac))
1364
+
1365
+ If the preconditioner has a method named 'update', it will be called
1366
+ as ``update(x, f)`` after each nonlinear step, with ``x`` giving
1367
+ the current point, and ``f`` the current function value.
1368
+ outer_k : int, optional
1369
+ Size of the subspace kept across LGMRES nonlinear iterations.
1370
+ See `scipy.sparse.linalg.lgmres` for details.
1371
+ inner_kwargs : kwargs
1372
+ Keyword parameters for the "inner" Krylov solver
1373
+ (defined with `method`). Parameter names must start with
1374
+ the `inner_` prefix which will be stripped before passing on
1375
+ the inner method. See, e.g., `scipy.sparse.linalg.gmres` for details.
1376
+ %(params_extra)s
1377
+
1378
+ See Also
1379
+ --------
1380
+ root : Interface to root finding algorithms for multivariate
1381
+ functions. See ``method='krylov'`` in particular.
1382
+ scipy.sparse.linalg.gmres
1383
+ scipy.sparse.linalg.lgmres
1384
+
1385
+ Notes
1386
+ -----
1387
+ This function implements a Newton-Krylov solver. The basic idea is
1388
+ to compute the inverse of the Jacobian with an iterative Krylov
1389
+ method. These methods require only evaluating the Jacobian-vector
1390
+ products, which are conveniently approximated by a finite difference:
1391
+
1392
+ .. math:: J v \approx (f(x + \omega*v/|v|) - f(x)) / \omega
1393
+
1394
+ Due to the use of iterative matrix inverses, these methods can
1395
+ deal with large nonlinear problems.
1396
+
1397
+ SciPy's `scipy.sparse.linalg` module offers a selection of Krylov
1398
+ solvers to choose from. The default here is `lgmres`, which is a
1399
+ variant of restarted GMRES iteration that reuses some of the
1400
+ information obtained in the previous Newton steps to invert
1401
+ Jacobians in subsequent steps.
1402
+
1403
+ For a review on Newton-Krylov methods, see for example [1]_,
1404
+ and for the LGMRES sparse inverse method, see [2]_.
1405
+
1406
+ References
1407
+ ----------
1408
+ .. [1] C. T. Kelley, Solving Nonlinear Equations with Newton's Method,
1409
+ SIAM, pp.57-83, 2003.
1410
+ :doi:`10.1137/1.9780898718898.ch3`
1411
+ .. [2] D.A. Knoll and D.E. Keyes, J. Comp. Phys. 193, 357 (2004).
1412
+ :doi:`10.1016/j.jcp.2003.08.010`
1413
+ .. [3] A.H. Baker and E.R. Jessup and T. Manteuffel,
1414
+ SIAM J. Matrix Anal. Appl. 26, 962 (2005).
1415
+ :doi:`10.1137/S0895479803422014`
1416
+
1417
+ Examples
1418
+ --------
1419
+ The following functions define a system of nonlinear equations
1420
+
1421
+ >>> def fun(x):
1422
+ ... return [x[0] + 0.5 * x[1] - 1.0,
1423
+ ... 0.5 * (x[1] - x[0]) ** 2]
1424
+
1425
+ A solution can be obtained as follows.
1426
+
1427
+ >>> from scipy import optimize
1428
+ >>> sol = optimize.newton_krylov(fun, [0, 0])
1429
+ >>> sol
1430
+ array([0.66731771, 0.66536458])
1431
+
1432
+ """
1433
+
1434
+ def __init__(self, rdiff=None, method='lgmres', inner_maxiter=20,
1435
+ inner_M=None, outer_k=10, **kw):
1436
+ self.preconditioner = inner_M
1437
+ self.rdiff = rdiff
1438
+ # Note that this retrieves one of the named functions, or otherwise
1439
+ # uses `method` as is (i.e., for a user-provided callable).
1440
+ self.method = dict(
1441
+ bicgstab=scipy.sparse.linalg.bicgstab,
1442
+ gmres=scipy.sparse.linalg.gmres,
1443
+ lgmres=scipy.sparse.linalg.lgmres,
1444
+ cgs=scipy.sparse.linalg.cgs,
1445
+ minres=scipy.sparse.linalg.minres,
1446
+ tfqmr=scipy.sparse.linalg.tfqmr,
1447
+ ).get(method, method)
1448
+
1449
+ self.method_kw = dict(maxiter=inner_maxiter, M=self.preconditioner)
1450
+
1451
+ if self.method is scipy.sparse.linalg.gmres:
1452
+ # Replace GMRES's outer iteration with Newton steps
1453
+ self.method_kw['restart'] = inner_maxiter
1454
+ self.method_kw['maxiter'] = 1
1455
+ self.method_kw.setdefault('atol', 0)
1456
+ elif self.method in (scipy.sparse.linalg.gcrotmk,
1457
+ scipy.sparse.linalg.bicgstab,
1458
+ scipy.sparse.linalg.cgs):
1459
+ self.method_kw.setdefault('atol', 0)
1460
+ elif self.method is scipy.sparse.linalg.lgmres:
1461
+ self.method_kw['outer_k'] = outer_k
1462
+ # Replace LGMRES's outer iteration with Newton steps
1463
+ self.method_kw['maxiter'] = 1
1464
+ # Carry LGMRES's `outer_v` vectors across nonlinear iterations
1465
+ self.method_kw.setdefault('outer_v', [])
1466
+ self.method_kw.setdefault('prepend_outer_v', True)
1467
+ # But don't carry the corresponding Jacobian*v products, in case
1468
+ # the Jacobian changes a lot in the nonlinear step
1469
+ #
1470
+ # XXX: some trust-region inspired ideas might be more efficient...
1471
+ # See e.g., Brown & Saad. But needs to be implemented separately
1472
+ # since it's not an inexact Newton method.
1473
+ self.method_kw.setdefault('store_outer_Av', False)
1474
+ self.method_kw.setdefault('atol', 0)
1475
+
1476
+ for key, value in kw.items():
1477
+ if not key.startswith('inner_'):
1478
+ raise ValueError("Unknown parameter %s" % key)
1479
+ self.method_kw[key[6:]] = value
1480
+
1481
+ def _update_diff_step(self):
1482
+ mx = abs(self.x0).max()
1483
+ mf = abs(self.f0).max()
1484
+ self.omega = self.rdiff * max(1, mx) / max(1, mf)
1485
+
1486
+ def matvec(self, v):
1487
+ nv = norm(v)
1488
+ if nv == 0:
1489
+ return 0*v
1490
+ sc = self.omega / nv
1491
+ r = (self.func(self.x0 + sc*v) - self.f0) / sc
1492
+ if not np.all(np.isfinite(r)) and np.all(np.isfinite(v)):
1493
+ raise ValueError('Function returned non-finite results')
1494
+ return r
1495
+
1496
+ def solve(self, rhs, tol=0):
1497
+ if 'rtol' in self.method_kw:
1498
+ sol, info = self.method(self.op, rhs, **self.method_kw)
1499
+ else:
1500
+ sol, info = self.method(self.op, rhs, rtol=tol, **self.method_kw)
1501
+ return sol
1502
+
1503
+ def update(self, x, f):
1504
+ self.x0 = x
1505
+ self.f0 = f
1506
+ self._update_diff_step()
1507
+
1508
+ # Update also the preconditioner, if possible
1509
+ if self.preconditioner is not None:
1510
+ if hasattr(self.preconditioner, 'update'):
1511
+ self.preconditioner.update(x, f)
1512
+
1513
+ def setup(self, x, f, func):
1514
+ Jacobian.setup(self, x, f, func)
1515
+ self.x0 = x
1516
+ self.f0 = f
1517
+ self.op = scipy.sparse.linalg.aslinearoperator(self)
1518
+
1519
+ if self.rdiff is None:
1520
+ self.rdiff = np.finfo(x.dtype).eps ** (1./2)
1521
+
1522
+ self._update_diff_step()
1523
+
1524
+ # Setup also the preconditioner, if possible
1525
+ if self.preconditioner is not None:
1526
+ if hasattr(self.preconditioner, 'setup'):
1527
+ self.preconditioner.setup(x, f, func)
1528
+
1529
+
1530
+ #------------------------------------------------------------------------------
1531
+ # Wrapper functions
1532
+ #------------------------------------------------------------------------------
1533
+
1534
+ def _nonlin_wrapper(name, jac):
1535
+ """
1536
+ Construct a solver wrapper with given name and Jacobian approx.
1537
+
1538
+ It inspects the keyword arguments of ``jac.__init__``, and allows to
1539
+ use the same arguments in the wrapper function, in addition to the
1540
+ keyword arguments of `nonlin_solve`
1541
+
1542
+ """
1543
+ signature = _getfullargspec(jac.__init__)
1544
+ args, varargs, varkw, defaults, kwonlyargs, kwdefaults, _ = signature
1545
+ kwargs = list(zip(args[-len(defaults):], defaults))
1546
+ kw_str = ", ".join([f"{k}={v!r}" for k, v in kwargs])
1547
+ if kw_str:
1548
+ kw_str = ", " + kw_str
1549
+ kwkw_str = ", ".join([f"{k}={k}" for k, v in kwargs])
1550
+ if kwkw_str:
1551
+ kwkw_str = kwkw_str + ", "
1552
+ if kwonlyargs:
1553
+ raise ValueError('Unexpected signature %s' % signature)
1554
+
1555
+ # Construct the wrapper function so that its keyword arguments
1556
+ # are visible in pydoc.help etc.
1557
+ wrapper = """
1558
+ def %(name)s(F, xin, iter=None %(kw)s, verbose=False, maxiter=None,
1559
+ f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
1560
+ tol_norm=None, line_search='armijo', callback=None, **kw):
1561
+ jac = %(jac)s(%(kwkw)s **kw)
1562
+ return nonlin_solve(F, xin, jac, iter, verbose, maxiter,
1563
+ f_tol, f_rtol, x_tol, x_rtol, tol_norm, line_search,
1564
+ callback)
1565
+ """
1566
+
1567
+ wrapper = wrapper % dict(name=name, kw=kw_str, jac=jac.__name__,
1568
+ kwkw=kwkw_str)
1569
+ ns = {}
1570
+ ns.update(globals())
1571
+ exec(wrapper, ns)
1572
+ func = ns[name]
1573
+ func.__doc__ = jac.__doc__
1574
+ _set_doc(func)
1575
+ return func
1576
+
1577
+
1578
+ broyden1 = _nonlin_wrapper('broyden1', BroydenFirst)
1579
+ broyden2 = _nonlin_wrapper('broyden2', BroydenSecond)
1580
+ anderson = _nonlin_wrapper('anderson', Anderson)
1581
+ linearmixing = _nonlin_wrapper('linearmixing', LinearMixing)
1582
+ diagbroyden = _nonlin_wrapper('diagbroyden', DiagBroyden)
1583
+ excitingmixing = _nonlin_wrapper('excitingmixing', ExcitingMixing)
1584
+ newton_krylov = _nonlin_wrapper('newton_krylov', KrylovJacobian)
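The finite-difference Jacobian-vector product that ``KrylovJacobian.matvec`` relies on can be illustrated directly. This is a minimal sketch with a hypothetical helper ``jac_vec`` and a step-size rule simplified relative to ``_update_diff_step``:

    import numpy as np

    def fun(x):
        return np.array([x[0] + 0.5 * x[1] - 1.0,
                         0.5 * (x[1] - x[0]) ** 2])

    def jac_vec(f, x, v, rdiff=1e-8):
        # J v ~ (f(x + omega*v/|v|) - f(x)) / omega, as in the class notes
        nv = np.linalg.norm(v)
        if nv == 0:
            return 0 * v
        omega = rdiff * max(1.0, np.abs(x).max())
        sc = omega / nv
        return (f(x + sc * v) - f(x)) / sc

    x = np.array([0.5, 0.5])
    print(jac_vec(fun, x, np.array([1.0, 0.0])))  # ~[1., 0.], i.e. J(x) @ [1, 0]

Matrix-free products like this are what let Newton-Krylov solvers scale to large problems: the Krylov solver only ever asks for J @ v, never for J itself.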
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_numdiff.py ADDED
@@ -0,0 +1,775 @@
1
+ """Routines for numerical differentiation."""
2
+ import functools
3
+ import numpy as np
4
+ from numpy.linalg import norm
5
+
6
+ from scipy.sparse.linalg import LinearOperator
7
+ from ..sparse import issparse, csc_matrix, csr_matrix, coo_matrix, find
8
+ from ._group_columns import group_dense, group_sparse
9
+ from scipy._lib._array_api import atleast_nd, array_namespace
10
+
11
+
12
+ def _adjust_scheme_to_bounds(x0, h, num_steps, scheme, lb, ub):
13
+ """Adjust final difference scheme to the presence of bounds.
14
+
15
+ Parameters
16
+ ----------
17
+ x0 : ndarray, shape (n,)
18
+ Point at which we wish to estimate derivative.
19
+ h : ndarray, shape (n,)
20
+ Desired absolute finite difference steps.
21
+ num_steps : int
22
+ Number of `h` steps in one direction required to implement finite
23
+ difference scheme. For example, 2 means that we need to evaluate
24
+ f(x0 + 2 * h) or f(x0 - 2 * h)
25
+ scheme : {'1-sided', '2-sided'}
26
+ Whether steps in one or both directions are required. In other
27
+ words '1-sided' applies to forward and backward schemes, '2-sided'
28
+ applies to center schemes.
29
+ lb : ndarray, shape (n,)
30
+ Lower bounds on independent variables.
31
+ ub : ndarray, shape (n,)
32
+ Upper bounds on independent variables.
33
+
34
+ Returns
35
+ -------
36
+ h_adjusted : ndarray, shape (n,)
37
+ Adjusted absolute step sizes. Step size decreases only if a sign flip
38
+ or switching to one-sided scheme doesn't allow to take a full step.
39
+ use_one_sided : ndarray of bool, shape (n,)
40
+ Whether to switch to one-sided scheme. Informative only for
41
+ ``scheme='2-sided'``.
42
+ """
43
+ if scheme == '1-sided':
44
+ use_one_sided = np.ones_like(h, dtype=bool)
45
+ elif scheme == '2-sided':
46
+ h = np.abs(h)
47
+ use_one_sided = np.zeros_like(h, dtype=bool)
48
+ else:
49
+ raise ValueError("`scheme` must be '1-sided' or '2-sided'.")
50
+
51
+ if np.all((lb == -np.inf) & (ub == np.inf)):
52
+ return h, use_one_sided
53
+
54
+ h_total = h * num_steps
55
+ h_adjusted = h.copy()
56
+
57
+ lower_dist = x0 - lb
58
+ upper_dist = ub - x0
59
+
60
+ if scheme == '1-sided':
61
+ x = x0 + h_total
62
+ violated = (x < lb) | (x > ub)
63
+ fitting = np.abs(h_total) <= np.maximum(lower_dist, upper_dist)
64
+ h_adjusted[violated & fitting] *= -1
65
+
66
+ forward = (upper_dist >= lower_dist) & ~fitting
67
+ h_adjusted[forward] = upper_dist[forward] / num_steps
68
+ backward = (upper_dist < lower_dist) & ~fitting
69
+ h_adjusted[backward] = -lower_dist[backward] / num_steps
70
+ elif scheme == '2-sided':
71
+ central = (lower_dist >= h_total) & (upper_dist >= h_total)
72
+
73
+ forward = (upper_dist >= lower_dist) & ~central
74
+ h_adjusted[forward] = np.minimum(
75
+ h[forward], 0.5 * upper_dist[forward] / num_steps)
76
+ use_one_sided[forward] = True
77
+
78
+ backward = (upper_dist < lower_dist) & ~central
79
+ h_adjusted[backward] = -np.minimum(
80
+ h[backward], 0.5 * lower_dist[backward] / num_steps)
81
+ use_one_sided[backward] = True
82
+
83
+ min_dist = np.minimum(upper_dist, lower_dist) / num_steps
84
+ adjusted_central = (~central & (np.abs(h_adjusted) <= min_dist))
85
+ h_adjusted[adjusted_central] = min_dist[adjusted_central]
86
+ use_one_sided[adjusted_central] = False
87
+
88
+ return h_adjusted, use_one_sided
89
+
90
+
91
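A quick sketch of what `_adjust_scheme_to_bounds` does near a boundary (a private helper, so the import path is internal and may change): a forward step that would cross the upper bound is flipped to a backward step of the same size:

>>> import numpy as np
>>> from scipy.optimize._numdiff import _adjust_scheme_to_bounds
>>> x0, h = np.array([0.95]), np.array([0.1])     # step would exit [0, 1]
>>> lb, ub = np.array([0.0]), np.array([1.0])
>>> h_adj, one_sided = _adjust_scheme_to_bounds(x0, h, 1, '1-sided', lb, ub)
>>> h_adj                                         # sign flipped, full step kept
array([-0.1])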
+ @functools.lru_cache
+ def _eps_for_method(x0_dtype, f0_dtype, method):
+     """
+     Calculates relative EPS step to use for a given data type
+     and numdiff step method.
+
+     Progressively smaller steps are used for larger floating point types.
+
+     Parameters
+     ----------
+     f0_dtype: np.dtype
+         dtype of function evaluation
+
+     x0_dtype: np.dtype
+         dtype of parameter vector
+
+     method: {'2-point', '3-point', 'cs'}
+
+     Returns
+     -------
+     EPS: float
+         relative step size. May be np.float16, np.float32, np.float64
+
+     Notes
+     -----
+     The default relative step will be np.float64. However, if x0 or f0 are
+     smaller floating point types (np.float16, np.float32), then the smallest
+     floating point type is chosen.
+     """
+     # the default EPS value
+     EPS = np.finfo(np.float64).eps
+
+     x0_is_fp = False
+     if np.issubdtype(x0_dtype, np.inexact):
+         # if you're a floating point type then over-ride the default EPS
+         EPS = np.finfo(x0_dtype).eps
+         x0_itemsize = np.dtype(x0_dtype).itemsize
+         x0_is_fp = True
+
+     if np.issubdtype(f0_dtype, np.inexact):
+         f0_itemsize = np.dtype(f0_dtype).itemsize
+         # choose the smallest itemsize between x0 and f0
+         if x0_is_fp and f0_itemsize < x0_itemsize:
+             EPS = np.finfo(f0_dtype).eps
+
+     if method in ["2-point", "cs"]:
+         return EPS**0.5
+     elif method in ["3-point"]:
+         return EPS**(1/3)
+     else:
+         raise RuntimeError("Unknown step method, should be one of "
+                            "{'2-point', '3-point', 'cs'}")
+
+
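The returned relative step is just machine epsilon raised to a method-dependent power; a small check, assuming float64 inputs:

>>> import numpy as np
>>> from scipy.optimize._numdiff import _eps_for_method
>>> eps = np.finfo(np.float64).eps
>>> _eps_for_method(np.float64, np.float64, '2-point') == eps ** 0.5
True
>>> _eps_for_method(np.float64, np.float64, '3-point') == eps ** (1 / 3)
True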
+ def _compute_absolute_step(rel_step, x0, f0, method):
+     """
+     Computes an absolute step from a relative step for finite difference
+     calculation.
+
+     Parameters
+     ----------
+     rel_step: None or array-like
+         Relative step for the finite difference calculation
+     x0 : np.ndarray
+         Parameter vector
+     f0 : np.ndarray or scalar
+     method : {'2-point', '3-point', 'cs'}
+
+     Returns
+     -------
+     h : float
+         The absolute step size
+
+     Notes
+     -----
+     `h` will always be np.float64. However, if `x0` or `f0` are
+     smaller floating point dtypes (e.g. np.float32), then the absolute
+     step size will be calculated from the smallest floating point size.
+     """
+     # this is used instead of np.sign(x0) because we need
+     # sign_x0 to be 1 when x0 == 0.
+     sign_x0 = (x0 >= 0).astype(float) * 2 - 1
+
+     rstep = _eps_for_method(x0.dtype, f0.dtype, method)
+
+     if rel_step is None:
+         abs_step = rstep * sign_x0 * np.maximum(1.0, np.abs(x0))
+     else:
+         # User has requested specific relative steps.
+         # Don't multiply by max(1, abs(x0)) because if x0 < 1 then the
+         # requested step is not used.
+         abs_step = rel_step * sign_x0 * np.abs(x0)
+
+         # however we don't want an abs_step of 0, which can happen if
+         # rel_step is 0, or x0 is 0. Instead, substitute a realistic step
+         dx = ((x0 + abs_step) - x0)
+         abs_step = np.where(dx == 0,
+                             rstep * sign_x0 * np.maximum(1.0, np.abs(x0)),
+                             abs_step)
+
+     return abs_step
+
+
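Concretely, with the default (relative) stepping the absolute step scales with ``max(1, |x0|)``, so very large and zero entries both get usable steps; a sketch:

>>> import numpy as np
>>> from scipy.optimize._numdiff import _compute_absolute_step
>>> x0 = np.array([1e4, 0.0])
>>> f0 = np.array([0.0])
>>> _compute_absolute_step(None, x0, f0, '2-point')
array([1.49011612e-04, 1.49011612e-08])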
+ def _prepare_bounds(bounds, x0):
+     """
+     Prepares new-style bounds from a two-tuple specifying the lower and upper
+     limits for values in x0. If a value is not bound then the lower/upper bound
+     will be expected to be -np.inf/np.inf.
+
+     Examples
+     --------
+     >>> _prepare_bounds([(0, 1, 2), (1, 2, np.inf)], [0.5, 1.5, 2.5])
+     (array([0., 1., 2.]), array([ 1.,  2., inf]))
+     """
+     lb, ub = (np.asarray(b, dtype=float) for b in bounds)
+     if lb.ndim == 0:
+         lb = np.resize(lb, x0.shape)
+
+     if ub.ndim == 0:
+         ub = np.resize(ub, x0.shape)
+
+     return lb, ub
+
+
+ def group_columns(A, order=0):
+     """Group columns of a 2-D matrix for sparse finite differencing [1]_.
+
+     Two columns are in the same group if in each row at least one of them
+     has zero. A greedy sequential algorithm is used to construct groups.
+
+     Parameters
+     ----------
+     A : array_like or sparse matrix, shape (m, n)
+         Matrix of which to group columns.
+     order : int, iterable of int with shape (n,) or None
+         Permutation array which defines the order of columns enumeration.
+         If int or None, a random permutation is used with `order` used as
+         a random seed. Default is 0, that is, use a random permutation but
+         guarantee repeatability.
+
+     Returns
+     -------
+     groups : ndarray of int, shape (n,)
+         Contains values from 0 to n_groups-1, where n_groups is the number
+         of found groups. Each value ``groups[i]`` is an index of a group to
+         which the ith column is assigned. The procedure is helpful only if
+         n_groups is significantly less than n.
+
+     References
+     ----------
+     .. [1] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
+            sparse Jacobian matrices", Journal of the Institute of Mathematics
+            and its Applications, 13 (1974), pp. 117-120.
+     """
+     if issparse(A):
+         A = csc_matrix(A)
+     else:
+         A = np.atleast_2d(A)
+         A = (A != 0).astype(np.int32)
+
+     if A.ndim != 2:
+         raise ValueError("`A` must be 2-dimensional.")
+
+     m, n = A.shape
+
+     if order is None or np.isscalar(order):
+         rng = np.random.RandomState(order)
+         order = rng.permutation(n)
+     else:
+         order = np.asarray(order)
+         if order.shape != (n,):
+             raise ValueError("`order` has incorrect shape.")
+
+     A = A[:, order]
+
+     if issparse(A):
+         groups = group_sparse(m, n, A.indices, A.indptr)
+     else:
+         groups = group_dense(m, n, A)
+
+     groups[order] = groups.copy()
+
+     return groups
+
+
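For a tridiagonal-like structure the greedy grouping packs non-conflicting columns together; a sketch with an explicit enumeration order so the result is easy to trace by hand:

>>> import numpy as np
>>> from scipy.optimize._numdiff import group_columns
>>> structure = np.array([[1, 1, 0, 0],
...                       [1, 1, 1, 0],
...                       [0, 1, 1, 1],
...                       [0, 0, 1, 1]])
>>> group_columns(structure, order=np.arange(4)).tolist()  # cols 0 and 3 share a group
[0, 1, 2, 0]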
+ def approx_derivative(fun, x0, method='3-point', rel_step=None, abs_step=None,
+                       f0=None, bounds=(-np.inf, np.inf), sparsity=None,
+                       as_linear_operator=False, args=(), kwargs={}):
+     """Compute finite difference approximation of the derivatives of a
+     vector-valued function.
+
+     If a function maps from R^n to R^m, its derivatives form an m-by-n matrix
+     called the Jacobian, where an element (i, j) is a partial derivative of
+     f[i] with respect to x[j].
+
+     Parameters
+     ----------
+     fun : callable
+         Function of which to estimate the derivatives. The argument x
+         passed to this function is an ndarray of shape (n,) (never a scalar
+         even if n=1). It must return a 1-D array_like of shape (m,) or a
+         scalar.
+     x0 : array_like of shape (n,) or float
+         Point at which to estimate the derivatives. Float will be converted
+         to a 1-D array.
+     method : {'3-point', '2-point', 'cs'}, optional
+         Finite difference method to use:
+             - '2-point' - use the first order accuracy forward or backward
+                           difference.
+             - '3-point' - use central difference in interior points and the
+                           second order accuracy forward or backward difference
+                           near the boundary.
+             - 'cs' - use a complex-step finite difference scheme. This assumes
+                      that the user function is real-valued and can be
+                      analytically continued to the complex plane. Otherwise,
+                      produces bogus results.
+     rel_step : None or array_like, optional
+         Relative step size to use. If None (default) the absolute step size is
+         computed as ``h = rel_step * sign(x0) * max(1, abs(x0))``, with
+         `rel_step` being selected automatically, see Notes. Otherwise
+         ``h = rel_step * sign(x0) * abs(x0)``. For ``method='3-point'`` the
+         sign of `h` is ignored. The calculated step size is possibly adjusted
+         to fit into the bounds.
+     abs_step : array_like, optional
+         Absolute step size to use, possibly adjusted to fit into the bounds.
+         For ``method='3-point'`` the sign of `abs_step` is ignored. By default
+         relative steps are used; absolute steps are used only if
+         ``abs_step is not None``.
+     f0 : None or array_like, optional
+         If not None it is assumed to be equal to ``fun(x0)``, in this case
+         ``fun(x0)`` is not called. Default is None.
+     bounds : tuple of array_like, optional
+         Lower and upper bounds on independent variables. Defaults to no bounds.
+         Each bound must match the size of `x0` or be a scalar, in the latter
+         case the bound will be the same for all variables. Use it to limit the
+         range of function evaluation. Bounds checking is not implemented
+         when `as_linear_operator` is True.
+     sparsity : {None, array_like, sparse matrix, 2-tuple}, optional
+         Defines a sparsity structure of the Jacobian matrix. If the Jacobian
+         matrix is known to have only few non-zero elements in each row, then
+         it's possible to estimate its several columns by a single function
+         evaluation [3]_. To perform such economic computations two ingredients
+         are required:
+
+         * structure : array_like or sparse matrix of shape (m, n). A zero
+           element means that a corresponding element of the Jacobian
+           identically equals zero.
+         * groups : array_like of shape (n,). A column grouping for a given
+           sparsity structure, use `group_columns` to obtain it.
+
+         A single array or a sparse matrix is interpreted as a sparsity
+         structure, and groups are computed inside the function. A tuple is
+         interpreted as (structure, groups). If None (default), a standard
+         dense differencing will be used.
+
+         Note that sparse differencing makes sense only for large Jacobian
+         matrices where each row contains few non-zero elements.
+     as_linear_operator : bool, optional
+         When True the function returns a `scipy.sparse.linalg.LinearOperator`.
+         Otherwise it returns a dense array or a sparse matrix depending on
+         `sparsity`. The linear operator provides an efficient way of computing
+         ``J.dot(p)`` for any vector ``p`` of shape (n,), but does not allow
+         direct access to individual elements of the matrix. By default
+         `as_linear_operator` is False.
+     args, kwargs : tuple and dict, optional
+         Additional arguments passed to `fun`. Both empty by default.
+         The calling signature is ``fun(x, *args, **kwargs)``.
+
+     Returns
+     -------
+     J : {ndarray, sparse matrix, LinearOperator}
+         Finite difference approximation of the Jacobian matrix.
+         If `as_linear_operator` is True returns a LinearOperator
+         with shape (m, n). Otherwise it returns a dense array or sparse
+         matrix depending on how `sparsity` is defined. If `sparsity`
+         is None then an ndarray with shape (m, n) is returned. If
+         `sparsity` is not None returns a csr_matrix with shape (m, n).
+         For sparse matrices and linear operators it is always returned as
+         a 2-D structure; for ndarrays, if m=1 it is returned
+         as a 1-D gradient array with shape (n,).
+
+     See Also
+     --------
+     check_derivative : Check correctness of a function computing derivatives.
+
+     Notes
+     -----
+     If `rel_step` is not provided, it is assigned as ``EPS**(1/s)``, where EPS
+     is determined from the smallest floating point dtype of `x0` or `fun(x0)`,
+     ``np.finfo(x0.dtype).eps``, s=2 for '2-point' method and
+     s=3 for '3-point' method. Such relative step approximately minimizes a sum
+     of truncation and round-off errors, see [1]_. Relative steps are used by
+     default. However, absolute steps are used when ``abs_step is not None``.
+     If any of the absolute or relative steps produces an indistinguishable
+     difference from the original `x0`, ``(x0 + dx) - x0 == 0``, then an
+     automatic step size is substituted for that particular entry.
+
+     A finite difference scheme for '3-point' method is selected automatically.
+     The well-known central difference scheme is used for points sufficiently
+     far from the boundary, and 3-point forward or backward scheme is used for
+     points near the boundary. Both schemes have the second-order accuracy in
+     terms of Taylor expansion. Refer to [2]_ for the formulas of 3-point
+     forward and backward difference schemes.
+
+     For dense differencing when m=1 the Jacobian is returned with a shape
+     (n,); on the other hand when n=1 the Jacobian is returned with a shape
+     (m, 1). Our motivation is the following: a) It handles a case of gradient
+     computation (m=1) in a conventional way. b) It clearly separates these two
+     different cases. c) In all cases np.atleast_2d can be called to get a 2-D
+     Jacobian with correct dimensions.
+
+     References
+     ----------
+     .. [1] W. H. Press et. al. "Numerical Recipes. The Art of Scientific
+            Computing. 3rd edition", sec. 5.7.
+
+     .. [2] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
+            sparse Jacobian matrices", Journal of the Institute of Mathematics
+            and its Applications, 13 (1974), pp. 117-120.
+
+     .. [3] B. Fornberg, "Generation of Finite Difference Formulas on
+            Arbitrarily Spaced Grids", Mathematics of Computation 51, 1988.
+
+     Examples
+     --------
+     >>> import numpy as np
+     >>> from scipy.optimize._numdiff import approx_derivative
+     >>>
+     >>> def f(x, c1, c2):
+     ...     return np.array([x[0] * np.sin(c1 * x[1]),
+     ...                      x[0] * np.cos(c2 * x[1])])
+     ...
+     >>> x0 = np.array([1.0, 0.5 * np.pi])
+     >>> approx_derivative(f, x0, args=(1, 2))
+     array([[ 1.,  0.],
+            [-1.,  0.]])
+
+     Bounds can be used to limit the region of function evaluation.
+     In the example below we compute left and right derivative at point 1.0.
+
+     >>> def g(x):
+     ...     return x**2 if x >= 1 else x
+     ...
+     >>> x0 = 1.0
+     >>> approx_derivative(g, x0, bounds=(-np.inf, 1.0))
+     array([ 1.])
+     >>> approx_derivative(g, x0, bounds=(1.0, np.inf))
+     array([ 2.])
+     """
+     if method not in ['2-point', '3-point', 'cs']:
+         raise ValueError("Unknown method '%s'. " % method)
+
+     xp = array_namespace(x0)
+     _x = atleast_nd(x0, ndim=1, xp=xp)
+     _dtype = xp.float64
+     if xp.isdtype(_x.dtype, "real floating"):
+         _dtype = _x.dtype
+
+     # promotes to floating
+     x0 = xp.astype(_x, _dtype)
+
+     if x0.ndim > 1:
+         raise ValueError("`x0` must have at most 1 dimension.")
+
+     lb, ub = _prepare_bounds(bounds, x0)
+
+     if lb.shape != x0.shape or ub.shape != x0.shape:
+         raise ValueError("Inconsistent shapes between bounds and `x0`.")
+
+     if as_linear_operator and not (np.all(np.isinf(lb))
+                                    and np.all(np.isinf(ub))):
+         raise ValueError("Bounds not supported when "
+                          "`as_linear_operator` is True.")
+
+     def fun_wrapped(x):
+         # send user function same fp type as x0 (but only if cs is not
+         # being used)
+         if xp.isdtype(x.dtype, "real floating"):
+             x = xp.astype(x, x0.dtype)
+
+         f = np.atleast_1d(fun(x, *args, **kwargs))
+         if f.ndim > 1:
+             raise RuntimeError("`fun` return value has "
+                                "more than 1 dimension.")
+         return f
+
+     if f0 is None:
+         f0 = fun_wrapped(x0)
+     else:
+         f0 = np.atleast_1d(f0)
+         if f0.ndim > 1:
+             raise ValueError("`f0` passed has more than 1 dimension.")
+
+     if np.any((x0 < lb) | (x0 > ub)):
+         raise ValueError("`x0` violates bound constraints.")
+
+     if as_linear_operator:
+         if rel_step is None:
+             rel_step = _eps_for_method(x0.dtype, f0.dtype, method)
+
+         return _linear_operator_difference(fun_wrapped, x0,
+                                            f0, rel_step, method)
+     else:
+         # by default we use rel_step
+         if abs_step is None:
+             h = _compute_absolute_step(rel_step, x0, f0, method)
+         else:
+             # user specifies an absolute step
+             sign_x0 = (x0 >= 0).astype(float) * 2 - 1
+             h = abs_step
+
+             # cannot have a zero step. This might happen if x0 is very large
+             # or small. In which case fall back to relative step.
+             dx = ((x0 + h) - x0)
+             h = np.where(dx == 0,
+                          _eps_for_method(x0.dtype, f0.dtype, method) *
+                          sign_x0 * np.maximum(1.0, np.abs(x0)),
+                          h)
+
+         if method == '2-point':
+             h, use_one_sided = _adjust_scheme_to_bounds(
+                 x0, h, 1, '1-sided', lb, ub)
+         elif method == '3-point':
+             h, use_one_sided = _adjust_scheme_to_bounds(
+                 x0, h, 1, '2-sided', lb, ub)
+         elif method == 'cs':
+             use_one_sided = False
+
+         if sparsity is None:
+             return _dense_difference(fun_wrapped, x0, f0, h,
+                                      use_one_sided, method)
+         else:
+             if not issparse(sparsity) and len(sparsity) == 2:
+                 structure, groups = sparsity
+             else:
+                 structure = sparsity
+                 groups = group_columns(sparsity)
+
+             if issparse(structure):
+                 structure = csc_matrix(structure)
+             else:
+                 structure = np.atleast_2d(structure)
+
+             groups = np.atleast_1d(groups)
+             return _sparse_difference(fun_wrapped, x0, f0, h,
+                                       use_one_sided, structure,
+                                       groups, method)
+
+
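Putting the pieces together: a sketch of sparse differencing for a function with diagonal-plus-subdiagonal coupling; the sparse estimate should agree with dense differencing (the function `f` and structure are illustrative):

>>> import numpy as np
>>> from scipy.optimize._numdiff import approx_derivative, group_columns
>>> def f(x):                                  # f[i] = x[i]**2 + x[i-1]
...     return x ** 2 + np.concatenate(([0.0], x[:-1]))
>>> structure = np.eye(4) + np.eye(4, k=-1)    # diagonal + subdiagonal
>>> groups = group_columns(structure)
>>> J_sparse = approx_derivative(f, np.arange(4.0), sparsity=(structure, groups))
>>> np.allclose(J_sparse.toarray(), approx_derivative(f, np.arange(4.0)))
True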
+ def _linear_operator_difference(fun, x0, f0, h, method):
+     m = f0.size
+     n = x0.size
+
+     if method == '2-point':
+         def matvec(p):
+             if np.array_equal(p, np.zeros_like(p)):
+                 return np.zeros(m)
+             dx = h / norm(p)
+             x = x0 + dx*p
+             df = fun(x) - f0
+             return df / dx
+
+     elif method == '3-point':
+         def matvec(p):
+             if np.array_equal(p, np.zeros_like(p)):
+                 return np.zeros(m)
+             dx = 2*h / norm(p)
+             x1 = x0 - (dx/2)*p
+             x2 = x0 + (dx/2)*p
+             f1 = fun(x1)
+             f2 = fun(x2)
+             df = f2 - f1
+             return df / dx
+
+     elif method == 'cs':
+         def matvec(p):
+             if np.array_equal(p, np.zeros_like(p)):
+                 return np.zeros(m)
+             dx = h / norm(p)
+             x = x0 + dx*p*1.j
+             f1 = fun(x)
+             df = f1.imag
+             return df / dx
+
+     else:
+         raise RuntimeError("Never be here.")
+
+     return LinearOperator((m, n), matvec)
+
+
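A sketch of this matvec-only mode: with ``as_linear_operator=True`` each product ``J.dot(p)`` costs a couple of function evaluations along the direction ``p`` instead of building the full matrix (the function `f` is illustrative):

>>> import numpy as np
>>> from scipy.optimize._numdiff import approx_derivative
>>> def f(x):
...     return np.array([x[0] ** 2, x[0] * x[1]])
>>> J_op = approx_derivative(f, np.array([3.0, 2.0]), as_linear_operator=True)
>>> np.allclose(J_op.dot(np.array([1.0, 0.0])), [6.0, 2.0])  # first column of J
True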
+ def _dense_difference(fun, x0, f0, h, use_one_sided, method):
+     m = f0.size
+     n = x0.size
+     J_transposed = np.empty((n, m))
+     h_vecs = np.diag(h)
+
+     for i in range(h.size):
+         if method == '2-point':
+             x = x0 + h_vecs[i]
+             dx = x[i] - x0[i]  # Recompute dx as exactly representable number.
+             df = fun(x) - f0
+         elif method == '3-point' and use_one_sided[i]:
+             x1 = x0 + h_vecs[i]
+             x2 = x0 + 2 * h_vecs[i]
+             dx = x2[i] - x0[i]
+             f1 = fun(x1)
+             f2 = fun(x2)
+             df = -3.0 * f0 + 4 * f1 - f2
+         elif method == '3-point' and not use_one_sided[i]:
+             x1 = x0 - h_vecs[i]
+             x2 = x0 + h_vecs[i]
+             dx = x2[i] - x1[i]
+             f1 = fun(x1)
+             f2 = fun(x2)
+             df = f2 - f1
+         elif method == 'cs':
+             f1 = fun(x0 + h_vecs[i]*1.j)
+             df = f1.imag
+             dx = h_vecs[i, i]
+         else:
+             raise RuntimeError("Never be here.")
+
+         J_transposed[i] = df / dx
+
+     if m == 1:
+         J_transposed = np.ravel(J_transposed)
+
+     return J_transposed.T
+
+
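The one-sided branch above is the standard second-order forward stencil, ``f'(x) ≈ (-3 f(x) + 4 f(x+h) - f(x+2h)) / (2h)``; note that ``dx = x2 - x0`` equals ``2h``. A quick check on ``f(x) = x**2``, whose derivative at 1 is exactly 2:

>>> f = lambda x: x ** 2
>>> x0, h = 1.0, 1e-4
>>> round((-3 * f(x0) + 4 * f(x0 + h) - f(x0 + 2 * h)) / (2 * h), 6)
2.0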
+ def _sparse_difference(fun, x0, f0, h, use_one_sided,
+                        structure, groups, method):
+     m = f0.size
+     n = x0.size
+     row_indices = []
+     col_indices = []
+     fractions = []
+
+     n_groups = np.max(groups) + 1
+     for group in range(n_groups):
+         # Perturb variables which are in the same group simultaneously.
+         e = np.equal(group, groups)
+         h_vec = h * e
+         if method == '2-point':
+             x = x0 + h_vec
+             dx = x - x0
+             df = fun(x) - f0
+             # The result is written to columns which correspond to perturbed
+             # variables.
+             cols, = np.nonzero(e)
+             # Find all non-zero elements in selected columns of Jacobian.
+             i, j, _ = find(structure[:, cols])
+             # Restore column indices in the full array.
+             j = cols[j]
+         elif method == '3-point':
+             # Here we do conceptually the same but separate one-sided
+             # and two-sided schemes.
+             x1 = x0.copy()
+             x2 = x0.copy()
+
+             mask_1 = use_one_sided & e
+             x1[mask_1] += h_vec[mask_1]
+             x2[mask_1] += 2 * h_vec[mask_1]
+
+             mask_2 = ~use_one_sided & e
+             x1[mask_2] -= h_vec[mask_2]
+             x2[mask_2] += h_vec[mask_2]
+
+             dx = np.zeros(n)
+             dx[mask_1] = x2[mask_1] - x0[mask_1]
+             dx[mask_2] = x2[mask_2] - x1[mask_2]
+
+             f1 = fun(x1)
+             f2 = fun(x2)
+
+             cols, = np.nonzero(e)
+             i, j, _ = find(structure[:, cols])
+             j = cols[j]
+
+             mask = use_one_sided[j]
+             df = np.empty(m)
+
+             rows = i[mask]
+             df[rows] = -3 * f0[rows] + 4 * f1[rows] - f2[rows]
+
+             rows = i[~mask]
+             df[rows] = f2[rows] - f1[rows]
+         elif method == 'cs':
+             f1 = fun(x0 + h_vec*1.j)
+             df = f1.imag
+             dx = h_vec
+             cols, = np.nonzero(e)
+             i, j, _ = find(structure[:, cols])
+             j = cols[j]
+         else:
+             raise ValueError("Never be here.")
+
+         # All that's left is to compute the fraction. We store i, j and
+         # fractions as separate arrays and later construct coo_matrix.
+         row_indices.append(i)
+         col_indices.append(j)
+         fractions.append(df[i] / dx[j])
+
+     row_indices = np.hstack(row_indices)
+     col_indices = np.hstack(col_indices)
+     fractions = np.hstack(fractions)
+     J = coo_matrix((fractions, (row_indices, col_indices)), shape=(m, n))
+     return csr_matrix(J)
+
+
+ def check_derivative(fun, jac, x0, bounds=(-np.inf, np.inf), args=(),
+                      kwargs={}):
+     """Check correctness of a function computing derivatives (Jacobian or
+     gradient) by comparison with a finite difference approximation.
+
+     Parameters
+     ----------
+     fun : callable
+         Function of which to estimate the derivatives. The argument x
+         passed to this function is an ndarray of shape (n,) (never a scalar
+         even if n=1). It must return a 1-D array_like of shape (m,) or a
+         scalar.
+     jac : callable
+         Function which computes Jacobian matrix of `fun`. It must work with
+         argument x the same way as `fun`. The return value must be array_like
+         or sparse matrix with an appropriate shape.
+     x0 : array_like of shape (n,) or float
+         Point at which to estimate the derivatives. Float will be converted
+         to 1-D array.
+     bounds : 2-tuple of array_like, optional
+         Lower and upper bounds on independent variables. Defaults to no bounds.
+         Each bound must match the size of `x0` or be a scalar, in the latter
+         case the bound will be the same for all variables. Use it to limit the
+         range of function evaluation.
+     args, kwargs : tuple and dict, optional
+         Additional arguments passed to `fun` and `jac`. Both empty by default.
+         The calling signature is ``fun(x, *args, **kwargs)`` and the same
+         for `jac`.
+
+     Returns
+     -------
+     accuracy : float
+         The maximum among all relative errors for elements with absolute
+         values higher than 1 and absolute errors for elements with absolute
+         values less than or equal to 1. If `accuracy` is on the order of 1e-6
+         or lower, then it is likely that your `jac` implementation is correct.
+
+     See Also
+     --------
+     approx_derivative : Compute finite difference approximation of derivative.
+
+     Examples
+     --------
+     >>> import numpy as np
+     >>> from scipy.optimize._numdiff import check_derivative
+     >>>
+     >>>
+     >>> def f(x, c1, c2):
+     ...     return np.array([x[0] * np.sin(c1 * x[1]),
+     ...                      x[0] * np.cos(c2 * x[1])])
+     ...
+     >>> def jac(x, c1, c2):
+     ...     return np.array([
+     ...         [np.sin(c1 * x[1]), c1 * x[0] * np.cos(c1 * x[1])],
+     ...         [np.cos(c2 * x[1]), -c2 * x[0] * np.sin(c2 * x[1])]
+     ...     ])
+     ...
+     >>>
+     >>> x0 = np.array([1.0, 0.5 * np.pi])
+     >>> check_derivative(f, jac, x0, args=(1, 2))
+     2.4492935982947064e-16
+     """
+     J_to_test = jac(x0, *args, **kwargs)
+     if issparse(J_to_test):
+         J_diff = approx_derivative(fun, x0, bounds=bounds, sparsity=J_to_test,
+                                    args=args, kwargs=kwargs)
+         J_to_test = csr_matrix(J_to_test)
+         abs_err = J_to_test - J_diff
+         i, j, abs_err_data = find(abs_err)
+         J_diff_data = np.asarray(J_diff[i, j]).ravel()
+         return np.max(np.abs(abs_err_data) /
+                       np.maximum(1, np.abs(J_diff_data)))
+     else:
+         J_diff = approx_derivative(fun, x0, bounds=bounds,
+                                    args=args, kwargs=kwargs)
+         abs_err = np.abs(J_to_test - J_diff)
+         return np.max(abs_err / np.maximum(1, np.abs(J_diff)))
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_optimize.py ADDED
The diff for this file is too large to render. See raw diff
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_pava_pybind.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (224 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_remove_redundancy.py ADDED
@@ -0,0 +1,522 @@
+ """
2
+ Routines for removing redundant (linearly dependent) equations from linear
3
+ programming equality constraints.
4
+ """
5
+ # Author: Matt Haberland
6
+
7
+ import numpy as np
8
+ from scipy.linalg import svd
9
+ from scipy.linalg.interpolative import interp_decomp
10
+ import scipy
11
+ from scipy.linalg.blas import dtrsm
12
+
13
+
14
+ def _row_count(A):
15
+ """
16
+ Counts the number of nonzeros in each row of input array A.
17
+ Nonzeros are defined as any element with absolute value greater than
18
+ tol = 1e-13. This value should probably be an input to the function.
19
+
20
+ Parameters
21
+ ----------
22
+ A : 2-D array
23
+ An array representing a matrix
24
+
25
+ Returns
26
+ -------
27
+ rowcount : 1-D array
28
+ Number of nonzeros in each row of A
29
+
30
+ """
31
+ tol = 1e-13
32
+ return np.array((abs(A) > tol).sum(axis=1)).flatten()
33
+
34
+
35
+ def _get_densest(A, eligibleRows):
36
+ """
37
+ Returns the index of the densest row of A. Ignores rows that are not
38
+ eligible for consideration.
39
+
40
+ Parameters
41
+ ----------
42
+ A : 2-D array
43
+ An array representing a matrix
44
+ eligibleRows : 1-D logical array
45
+ Values indicate whether the corresponding row of A is eligible
46
+ to be considered
47
+
48
+ Returns
49
+ -------
50
+ i_densest : int
51
+ Index of the densest row in A eligible for consideration
52
+
53
+ """
54
+ rowCounts = _row_count(A)
55
+ return np.argmax(rowCounts * eligibleRows)
56
+
57
+
58
+ def _remove_zero_rows(A, b):
+     """
+     Eliminates trivial equations from system of equations defined by Ax = b
+     and identifies trivial infeasibilities
+
+     Parameters
+     ----------
+     A : 2-D array
+         An array representing the left-hand side of a system of equations
+     b : 1-D array
+         An array representing the right-hand side of a system of equations
+
+     Returns
+     -------
+     A : 2-D array
+         An array representing the left-hand side of a system of equations
+     b : 1-D array
+         An array representing the right-hand side of a system of equations
+     status: int
+         An integer indicating the status of the removal operation
+         0: No infeasibility identified
+         2: Trivially infeasible
+     message : str
+         A string descriptor of the exit status of the optimization.
+
+     """
+     status = 0
+     message = ""
+     i_zero = _row_count(A) == 0
+     A = A[np.logical_not(i_zero), :]
+     if not np.allclose(b[i_zero], 0):
+         status = 2
+         message = "There is a zero row in A_eq with a nonzero corresponding " \
+                   "entry in b_eq. The problem is infeasible."
+     b = b[np.logical_not(i_zero)]
+     return A, b, status, message
+
+
+ def bg_update_dense(plu, perm_r, v, j):
+     LU, p = plu
+
+     vperm = v[perm_r]
+     u = dtrsm(1, LU, vperm, lower=1, diag=1)
+     LU[:j+1, j] = u[:j+1]
+     l = u[j+1:]
+     piv = LU[j, j]
+     LU[j+1:, j] += (l/piv)
+     return LU, p
+
+
+ def _remove_redundancy_pivot_dense(A, rhs, true_rank=None):
+     """
+     Eliminates redundant equations from system of equations defined by Ax = b
+     and identifies infeasibilities.
+
+     Parameters
+     ----------
+     A : 2-D array
+         A matrix representing the left-hand side of a system of equations
+     rhs : 1-D array
+         An array representing the right-hand side of a system of equations
+     true_rank : int, optional
+         The rank of A, if known in advance; used to stop the search for
+         dependent rows early.
+
+     Returns
+     -------
+     A : 2-D array
+         A matrix representing the left-hand side of a system of equations
+     rhs : 1-D array
+         An array representing the right-hand side of a system of equations
+     status: int
+         An integer indicating the status of the system
+         0: No infeasibility identified
+         2: Trivially infeasible
+     message : str
+         A string descriptor of the exit status of the optimization.
+
+     References
+     ----------
+     .. [2] Andersen, Erling D. "Finding all linearly dependent rows in
+            large-scale linear programming." Optimization Methods and Software
+            6.3 (1995): 219-227.
+
+     """
+     tolapiv = 1e-8
+     tolprimal = 1e-8
+     status = 0
+     message = ""
+     inconsistent = ("There is a linear combination of rows of A_eq that "
+                     "results in zero, suggesting a redundant constraint. "
+                     "However the same linear combination of b_eq is "
+                     "nonzero, suggesting that the constraints conflict "
+                     "and the problem is infeasible.")
+     A, rhs, status, message = _remove_zero_rows(A, rhs)
+
+     if status != 0:
+         return A, rhs, status, message
+
+     m, n = A.shape
+
+     v = list(range(m))  # Artificial column indices.
+     b = list(v)  # Basis column indices.
+     # This is better as a list than a set because column order of basis matrix
+     # needs to be consistent.
+     d = []  # Indices of dependent rows
+     perm_r = None
+
+     A_orig = A
+     A = np.zeros((m, m + n), order='F')
+     np.fill_diagonal(A, 1)
+     A[:, m:] = A_orig
+     e = np.zeros(m)
+
+     js_candidates = np.arange(m, m+n, dtype=int)  # candidate columns for basis
+     # manual masking was faster than masked array
+     js_mask = np.ones(js_candidates.shape, dtype=bool)
+
+     # Implements basic algorithm from [2]
+     # Uses some of the suggested improvements (removing zero rows and
+     # Bartels-Golub update idea).
+     # Removing column singletons would be easy, but it is not as important
+     # because the procedure is performed only on the equality constraint
+     # matrix from the original problem - not on the canonical form matrix,
+     # which would have many more column singletons due to slack variables
+     # from the inequality constraints.
+     # The thoughts on "crashing" the initial basis are only really useful if
+     # the matrix is sparse.
+
+     lu = np.eye(m, order='F'), np.arange(m)  # initial LU is trivial
+     perm_r = lu[1]
+     for i in v:
+
+         e[i] = 1
+         if i > 0:
+             e[i-1] = 0
+
+         try:  # fails for i==0 and any time it gets ill-conditioned
+             j = b[i-1]
+             lu = bg_update_dense(lu, perm_r, A[:, j], i-1)
+         except Exception:
+             lu = scipy.linalg.lu_factor(A[:, b])
+             LU, p = lu
+             perm_r = list(range(m))
+             for i1, i2 in enumerate(p):
+                 perm_r[i1], perm_r[i2] = perm_r[i2], perm_r[i1]
+
+         pi = scipy.linalg.lu_solve(lu, e, trans=1)
+
+         js = js_candidates[js_mask]
+         batch = 50
+
+         # This is a tiny bit faster than looping over columns individually,
+         # like for j in js: if abs(A[:,j].transpose().dot(pi)) > tolapiv:
+         for j_index in range(0, len(js), batch):
+             j_indices = js[j_index: min(j_index+batch, len(js))]
+
+             c = abs(A[:, j_indices].transpose().dot(pi))
+             if (c > tolapiv).any():
+                 j = js[j_index + np.argmax(c)]  # very independent column
+                 b[i] = j
+                 js_mask[j-m] = False
+                 break
+         else:
+             bibar = pi.T.dot(rhs.reshape(-1, 1))
+             bnorm = np.linalg.norm(rhs)
+             if abs(bibar)/(1+bnorm) > tolprimal:  # inconsistent
+                 status = 2
+                 message = inconsistent
+                 return A_orig, rhs, status, message
+             else:  # dependent
+                 d.append(i)
+                 if true_rank is not None and len(d) == m - true_rank:
+                     break  # found all redundancies
+
+     keep = set(range(m))
+     keep = list(keep - set(d))
+     return A_orig[keep, :], rhs[keep], status, message
+
+
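A minimal sketch of the pivoting routine in action (a private helper, so the import path is internal and may change): the third row below is the sum of the first two, and with a consistent right-hand side exactly one row is removed:

>>> import numpy as np
>>> from scipy.optimize._remove_redundancy import _remove_redundancy_pivot_dense
>>> A = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])  # row 2 = row 0 + row 1
>>> b = np.array([1.0, 2.0, 3.0])                       # consistent: 3 == 1 + 2
>>> A2, b2, status, msg = _remove_redundancy_pivot_dense(A, b)
>>> A2.shape, status
((2, 2), 0)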
+ def _remove_redundancy_pivot_sparse(A, rhs):
+     """
+     Eliminates redundant equations from system of equations defined by Ax = b
+     and identifies infeasibilities.
+
+     Parameters
+     ----------
+     A : 2-D sparse matrix
+         A matrix representing the left-hand side of a system of equations
+     rhs : 1-D array
+         An array representing the right-hand side of a system of equations
+
+     Returns
+     -------
+     A : 2-D sparse matrix
+         A matrix representing the left-hand side of a system of equations
+     rhs : 1-D array
+         An array representing the right-hand side of a system of equations
+     status: int
+         An integer indicating the status of the system
+         0: No infeasibility identified
+         2: Trivially infeasible
+     message : str
+         A string descriptor of the exit status of the optimization.
+
+     References
+     ----------
+     .. [2] Andersen, Erling D. "Finding all linearly dependent rows in
+            large-scale linear programming." Optimization Methods and Software
+            6.3 (1995): 219-227.
+
+     """
+
+     tolapiv = 1e-8
+     tolprimal = 1e-8
+     status = 0
+     message = ""
+     inconsistent = ("There is a linear combination of rows of A_eq that "
+                     "results in zero, suggesting a redundant constraint. "
+                     "However the same linear combination of b_eq is "
+                     "nonzero, suggesting that the constraints conflict "
+                     "and the problem is infeasible.")
+     A, rhs, status, message = _remove_zero_rows(A, rhs)
+
+     if status != 0:
+         return A, rhs, status, message
+
+     m, n = A.shape
+
+     v = list(range(m))  # Artificial column indices.
+     b = list(v)  # Basis column indices.
+     # This is better as a list than a set because column order of basis matrix
+     # needs to be consistent.
+     k = set(range(m, m+n))  # Structural column indices.
+     d = []  # Indices of dependent rows
+
+     A_orig = A
+     A = scipy.sparse.hstack((scipy.sparse.eye(m), A)).tocsc()
+     e = np.zeros(m)
+
+     # Implements basic algorithm from [2]
+     # Uses only one of the suggested improvements (removing zero rows).
+     # Removing column singletons would be easy, but it is not as important
+     # because the procedure is performed only on the equality constraint
+     # matrix from the original problem - not on the canonical form matrix,
+     # which would have many more column singletons due to slack variables
+     # from the inequality constraints.
+     # The thoughts on "crashing" the initial basis sound useful, but the
+     # description of the procedure seems to assume a lot of familiarity with
+     # the subject; it is not very explicit. I already went through enough
+     # trouble getting the basic algorithm working, so I was not interested in
+     # trying to decipher this, too. (Overall, the paper is fraught with
+     # mistakes and ambiguities - which is strange, because the rest of
+     # Andersen's papers are quite good.)
+     # I tried and tried and tried to improve performance using the
+     # Bartels-Golub update. It works, but it's only practical if the LU
+     # factorization can be specialized as described, and that is not possible
+     # until the SciPy SuperLU interface permits control over column
+     # permutation - see issue #7700.
+
+     for i in v:
+         B = A[:, b]
+
+         e[i] = 1
+         if i > 0:
+             e[i-1] = 0
+
+         pi = scipy.sparse.linalg.spsolve(B.transpose(), e).reshape(-1, 1)
+
+         js = list(k-set(b))  # not efficient, but this is not the time sink...
+
+         # Due to overhead, it tends to be faster (for problems tested) to
+         # compute the full matrix-vector product rather than individual
+         # vector-vector products (with the chance of terminating as soon
+         # as any are nonzero). For very large matrices, it might be worth
+         # it to compute, say, 100 or 1000 at a time and stop when a nonzero
+         # is found.
+
+         c = (np.abs(A[:, js].transpose().dot(pi)) > tolapiv).nonzero()[0]
+         if len(c) > 0:  # independent
+             j = js[c[0]]
+             # in a previous commit, the previous line was changed to choose
+             # index j corresponding with the maximum dot product.
+             # While this avoided issues with almost
+             # singular matrices, it slowed the routine in most NETLIB tests.
+             # I think this is because these columns were denser than the
+             # first column with nonzero dot product (c[0]).
+             # It would be nice to have a heuristic that balances sparsity with
+             # high dot product, but I don't think it's worth the time to
+             # develop one right now. Bartels-Golub update is a much higher
+             # priority.
+             b[i] = j  # replace artificial column
+         else:
+             bibar = pi.T.dot(rhs.reshape(-1, 1))
+             bnorm = np.linalg.norm(rhs)
+             if abs(bibar)/(1 + bnorm) > tolprimal:
+                 status = 2
+                 message = inconsistent
+                 return A_orig, rhs, status, message
+             else:  # dependent
+                 d.append(i)
+
+     keep = set(range(m))
+     keep = list(keep - set(d))
+     return A_orig[keep, :], rhs[keep], status, message
+
+
+ def _remove_redundancy_svd(A, b):
+     """
+     Eliminates redundant equations from system of equations defined by Ax = b
+     and identifies infeasibilities.
+
+     Parameters
+     ----------
+     A : 2-D array
+         An array representing the left-hand side of a system of equations
+     b : 1-D array
+         An array representing the right-hand side of a system of equations
+
+     Returns
+     -------
+     A : 2-D array
+         An array representing the left-hand side of a system of equations
+     b : 1-D array
+         An array representing the right-hand side of a system of equations
+     status: int
+         An integer indicating the status of the system
+         0: No infeasibility identified
+         2: Trivially infeasible
+     message : str
+         A string descriptor of the exit status of the optimization.
+
+     References
+     ----------
+     .. [2] Andersen, Erling D. "Finding all linearly dependent rows in
+            large-scale linear programming." Optimization Methods and Software
+            6.3 (1995): 219-227.
+
+     """
+
+     A, b, status, message = _remove_zero_rows(A, b)
+
+     if status != 0:
+         return A, b, status, message
+
+     U, s, Vh = svd(A)
+     eps = np.finfo(float).eps
+     tol = s.max() * max(A.shape) * eps
+
+     m, n = A.shape
+     s_min = s[-1] if m <= n else 0
+
+     # this algorithm is faster than that of [2] when the nullspace is small
+     # but it could probably be improved by randomized algorithms and with
+     # a sparse implementation.
+     # it relies on repeated singular value decomposition to find linearly
+     # dependent rows (as identified by columns of U that correspond with zero
+     # singular values). Unfortunately, only one row can be removed per
+     # decomposition (I tried otherwise; doing so can cause problems.)
+     # It would be nice if we could do truncated SVD like sp.sparse.linalg.svds
+     # but that function is unreliable at finding singular values near zero.
+     # Finding max eigenvalue L of A A^T, then largest eigenvalue (and
+     # associated eigenvector) of -A A^T + L I (I is identity) via power
+     # iteration would also work in theory, but is only efficient if the
+     # smallest nonzero eigenvalue of A A^T is close to the largest nonzero
+     # eigenvalue.
+
+     while abs(s_min) < tol:
+         v = U[:, -1]  # TODO: return these so user can eliminate from problem?
+         # rows need to be represented in significant amount
+         eligibleRows = np.abs(v) > tol * 10e6
+         if not np.any(eligibleRows) or np.any(np.abs(v.dot(A)) > tol):
+             status = 4
+             message = ("Due to numerical issues, redundant equality "
+                        "constraints could not be removed automatically. "
+                        "Try providing your constraint matrices as sparse "
+                        "matrices to activate sparse presolve, try turning "
+                        "off redundancy removal, or try turning off presolve "
+                        "altogether.")
+             break
+         if np.any(np.abs(v.dot(b)) > tol * 100):  # factor of 100 to fix 10038 and 10349
+             status = 2
+             message = ("There is a linear combination of rows of A_eq that "
+                        "results in zero, suggesting a redundant constraint. "
+                        "However the same linear combination of b_eq is "
+                        "nonzero, suggesting that the constraints conflict "
+                        "and the problem is infeasible.")
+             break
+
+         i_remove = _get_densest(A, eligibleRows)
+         A = np.delete(A, i_remove, axis=0)
+         b = np.delete(b, i_remove)
+         U, s, Vh = svd(A)
+         m, n = A.shape
+         s_min = s[-1] if m <= n else 0
+
+     return A, b, status, message
+
+
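And the infeasible branch: if the same dependent row is given a conflicting right-hand side, the SVD variant reports status 2 rather than removing anything (again a private helper; sketch only):

>>> import numpy as np
>>> from scipy.optimize._remove_redundancy import _remove_redundancy_svd
>>> A = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
>>> b = np.array([1.0, 2.0, 4.0])     # 4 != 1 + 2, so the rows conflict
>>> _, _, status, msg = _remove_redundancy_svd(A, b)
>>> status
2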
+ def _remove_redundancy_id(A, rhs, rank=None, randomized=True):
+     """Eliminates redundant equations from a system of equations.
+
+     Eliminates redundant equations from system of equations defined by Ax = b
+     and identifies infeasibilities.
+
+     Parameters
+     ----------
+     A : 2-D array
+         An array representing the left-hand side of a system of equations
+     rhs : 1-D array
+         An array representing the right-hand side of a system of equations
+     rank : int, optional
+         The rank of A
+     randomized: bool, optional
+         True for randomized interpolative decomposition
+
+     Returns
+     -------
+     A : 2-D array
+         An array representing the left-hand side of a system of equations
+     rhs : 1-D array
+         An array representing the right-hand side of a system of equations
+     status: int
+         An integer indicating the status of the system
+         0: No infeasibility identified
+         2: Trivially infeasible
+     message : str
+         A string descriptor of the exit status of the optimization.
+
+     """
+
+     status = 0
+     message = ""
+     inconsistent = ("There is a linear combination of rows of A_eq that "
+                     "results in zero, suggesting a redundant constraint. "
+                     "However the same linear combination of b_eq is "
+                     "nonzero, suggesting that the constraints conflict "
+                     "and the problem is infeasible.")
+
+     A, rhs, status, message = _remove_zero_rows(A, rhs)
+
+     if status != 0:
+         return A, rhs, status, message
+
+     m, n = A.shape
+
+     k = rank
+     if rank is None:
+         k = np.linalg.matrix_rank(A)
+
+     idx, proj = interp_decomp(A.T, k, rand=randomized)
+
+     # first k entries in idx are indices of the independent rows
+     # remaining entries are the indices of the m-k dependent rows
+     # proj provides linear combinations of rows of A2 that form the
+     # remaining m-k (dependent) rows. The same linear combination of entries
+     # in rhs2 must give the remaining m-k entries. If not, the system is
+     # inconsistent, and the problem is infeasible.
+     if not np.allclose(rhs[idx[:k]] @ proj, rhs[idx[k:]]):
+         status = 2
+         message = inconsistent
+
+     # sort indices because the other redundancy removal routines leave rows
+     # in original order and tests were written with that in mind
+     idx = sorted(idx[:k])
+     A2 = A[idx, :]
+     rhs2 = rhs[idx]
+     return A2, rhs2, status, message
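For completeness, a sketch of the interpolative-decomposition variant on the same consistent system; passing ``rank`` skips the ``matrix_rank`` call and ``randomized=False`` makes the row selection deterministic (private helper, sketch only):

>>> import numpy as np
>>> from scipy.optimize._remove_redundancy import _remove_redundancy_id
>>> A = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
>>> b = np.array([1.0, 2.0, 3.0])
>>> A2, b2, status, msg = _remove_redundancy_id(A, b, rank=2, randomized=False)
>>> A2.shape, status
((2, 2), 0)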