diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8a4c0aa4aad6055ba25a30d950ff6541e86790a5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/__pycache__/_distributor_init.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/__pycache__/_distributor_init.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ca92c03dbbc7f6070d4500c3f7872eb77c596a92 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/__pycache__/_distributor_init.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/__pycache__/conftest.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/__pycache__/conftest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bb3de13d9f5cda2a4daf6f960d28e8845ad2bb99 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/__pycache__/conftest.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/fft/tests/test_fftlog.py b/env-llmeval/lib/python3.10/site-packages/scipy/fft/tests/test_fftlog.py new file mode 100644 index 0000000000000000000000000000000000000000..d9652facb776e8c791f2b209c1aec69d24ffe6c2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/fft/tests/test_fftlog.py @@ -0,0 +1,169 @@ +import warnings +import numpy as np +import pytest + +from scipy.fft._fftlog import fht, ifht, fhtoffset +from scipy.special import poch + +from scipy.conftest import array_api_compatible +from scipy._lib._array_api import xp_assert_close + +pytestmark = array_api_compatible + + +def test_fht_agrees_with_fftlog(xp): + # check that fht numerically agrees with the output from Fortran FFTLog, + # the results were generated with the provided `fftlogtest` program, + # after fixing how the k array is generated (divide range by n-1, not n) + + # test function, analytical Hankel transform is of the same form + def f(r, mu): + return r**(mu+1)*np.exp(-r**2/2) + + r = np.logspace(-4, 4, 16) + + dln = np.log(r[1]/r[0]) + mu = 0.3 + offset = 0.0 + bias = 0.0 + + a = xp.asarray(f(r, mu)) + + # test 1: compute as given + ours = fht(a, dln, mu, offset=offset, bias=bias) + theirs = [-0.1159922613593045E-02, +0.1625822618458832E-02, + -0.1949518286432330E-02, +0.3789220182554077E-02, + +0.5093959119952945E-03, +0.2785387803618774E-01, + +0.9944952700848897E-01, +0.4599202164586588E+00, + +0.3157462160881342E+00, -0.8201236844404755E-03, + -0.7834031308271878E-03, +0.3931444945110708E-03, + -0.2697710625194777E-03, +0.3568398050238820E-03, + -0.5554454827797206E-03, +0.8286331026468585E-03] + theirs = xp.asarray(theirs, dtype=xp.float64) + xp_assert_close(ours, theirs) + + # test 2: change to optimal offset + offset = fhtoffset(dln, mu, bias=bias) + ours = fht(a, dln, mu, offset=offset, bias=bias) + theirs = [+0.4353768523152057E-04, -0.9197045663594285E-05, + +0.3150140927838524E-03, +0.9149121960963704E-03, + +0.5808089753959363E-02, +0.2548065256377240E-01, + +0.1339477692089897E+00, +0.4821530509479356E+00, + +0.2659899781579785E+00, -0.1116475278448113E-01, + +0.1791441617592385E-02, -0.4181810476548056E-03, + +0.1314963536765343E-03, -0.5422057743066297E-04, + +0.3208681804170443E-04, 
-0.2696849476008234E-04] + theirs = xp.asarray(theirs, dtype=xp.float64) + xp_assert_close(ours, theirs) + + # test 3: positive bias + bias = 0.8 + offset = fhtoffset(dln, mu, bias=bias) + ours = fht(a, dln, mu, offset=offset, bias=bias) + theirs = [-7.3436673558316850E+00, +0.1710271207817100E+00, + +0.1065374386206564E+00, -0.5121739602708132E-01, + +0.2636649319269470E-01, +0.1697209218849693E-01, + +0.1250215614723183E+00, +0.4739583261486729E+00, + +0.2841149874912028E+00, -0.8312764741645729E-02, + +0.1024233505508988E-02, -0.1644902767389120E-03, + +0.3305775476926270E-04, -0.7786993194882709E-05, + +0.1962258449520547E-05, -0.8977895734909250E-06] + theirs = xp.asarray(theirs, dtype=xp.float64) + xp_assert_close(ours, theirs) + + # test 4: negative bias + bias = -0.8 + offset = fhtoffset(dln, mu, bias=bias) + ours = fht(a, dln, mu, offset=offset, bias=bias) + theirs = [+0.8985777068568745E-05, +0.4074898209936099E-04, + +0.2123969254700955E-03, +0.1009558244834628E-02, + +0.5131386375222176E-02, +0.2461678673516286E-01, + +0.1235812845384476E+00, +0.4719570096404403E+00, + +0.2893487490631317E+00, -0.1686570611318716E-01, + +0.2231398155172505E-01, -0.1480742256379873E-01, + +0.1692387813500801E+00, +0.3097490354365797E+00, + +2.7593607182401860E+00, 10.5251075070045800E+00] + theirs = xp.asarray(theirs, dtype=xp.float64) + xp_assert_close(ours, theirs) + + +@pytest.mark.parametrize('optimal', [True, False]) +@pytest.mark.parametrize('offset', [0.0, 1.0, -1.0]) +@pytest.mark.parametrize('bias', [0, 0.1, -0.1]) +@pytest.mark.parametrize('n', [64, 63]) +def test_fht_identity(n, bias, offset, optimal, xp): + rng = np.random.RandomState(3491349965) + + a = xp.asarray(rng.standard_normal(n)) + dln = rng.uniform(-1, 1) + mu = rng.uniform(-2, 2) + + if optimal: + offset = fhtoffset(dln, mu, initial=offset, bias=bias) + + A = fht(a, dln, mu, offset=offset, bias=bias) + a_ = ifht(A, dln, mu, offset=offset, bias=bias) + + xp_assert_close(a_, a) + + +def test_fht_special_cases(xp): + rng = np.random.RandomState(3491349965) + + a = xp.asarray(rng.standard_normal(64)) + dln = rng.uniform(-1, 1) + + # let x = (mu+1+q)/2, y = (mu+1-q)/2, M = {0, -1, -2, ...} + + # case 1: x in M, y in M => well-defined transform + mu, bias = -4.0, 1.0 + with warnings.catch_warnings(record=True) as record: + fht(a, dln, mu, bias=bias) + assert not record, 'fht warned about a well-defined transform' + + # case 2: x not in M, y in M => well-defined transform + mu, bias = -2.5, 0.5 + with warnings.catch_warnings(record=True) as record: + fht(a, dln, mu, bias=bias) + assert not record, 'fht warned about a well-defined transform' + + # case 3: x in M, y not in M => singular transform + mu, bias = -3.5, 0.5 + with pytest.warns(Warning) as record: + fht(a, dln, mu, bias=bias) + assert record, 'fht did not warn about a singular transform' + + # case 4: x not in M, y in M => singular inverse transform + mu, bias = -2.5, 0.5 + with pytest.warns(Warning) as record: + ifht(a, dln, mu, bias=bias) + assert record, 'ifht did not warn about a singular transform' + + +@pytest.mark.parametrize('n', [64, 63]) +def test_fht_exact(n, xp): + rng = np.random.RandomState(3491349965) + + # for a(r) a power law r^\gamma, the fast Hankel transform produces the + # exact continuous Hankel transform if biased with q = \gamma + + mu = rng.uniform(0, 3) + + # convergence of HT: -1-mu < gamma < 1/2 + gamma = rng.uniform(-1-mu, 1/2) + + r = np.logspace(-2, 2, n) + a = xp.asarray(r**gamma) + + dln = np.log(r[1]/r[0]) + + offset = fhtoffset(dln, 
mu, initial=0.0, bias=gamma) + + A = fht(a, dln, mu, offset=offset, bias=gamma) + + k = np.exp(offset)/r[::-1] + + # analytical result + At = xp.asarray((2/k)**gamma * poch((mu+1-gamma)/2, gamma)) + + xp_assert_close(A, At) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/fft/tests/test_helper.py b/env-llmeval/lib/python3.10/site-packages/scipy/fft/tests/test_helper.py new file mode 100644 index 0000000000000000000000000000000000000000..c99fea40f7f58480ba3532cb490c78cde48ce4a5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/fft/tests/test_helper.py @@ -0,0 +1,445 @@ +"""Includes test functions for fftpack.helper module + +Copied from fftpack.helper by Pearu Peterson, October 2005 +Modified for Array API, 2023 + +""" +from scipy.fft._helper import next_fast_len, _init_nd_shape_and_axes +from numpy.testing import assert_equal +from pytest import raises as assert_raises +import pytest +import numpy as np +import sys +from scipy.conftest import array_api_compatible +from scipy._lib._array_api import xp_assert_close, SCIPY_DEVICE +from scipy import fft + +pytestmark = [array_api_compatible, pytest.mark.usefixtures("skip_if_array_api")] +skip_if_array_api = pytest.mark.skip_if_array_api + +_5_smooth_numbers = [ + 2, 3, 4, 5, 6, 8, 9, 10, + 2 * 3 * 5, + 2**3 * 3**5, + 2**3 * 3**3 * 5**2, +] + +def test_next_fast_len(): + for n in _5_smooth_numbers: + assert_equal(next_fast_len(n), n) + + +def _assert_n_smooth(x, n): + x_orig = x + if n < 2: + assert False + + while True: + q, r = divmod(x, 2) + if r != 0: + break + x = q + + for d in range(3, n+1, 2): + while True: + q, r = divmod(x, d) + if r != 0: + break + x = q + + assert x == 1, \ + f'x={x_orig} is not {n}-smooth, remainder={x}' + + +@skip_if_array_api(np_only=True) +class TestNextFastLen: + + def test_next_fast_len(self): + np.random.seed(1234) + + def nums(): + yield from range(1, 1000) + yield 2**5 * 3**5 * 4**5 + 1 + + for n in nums(): + m = next_fast_len(n) + _assert_n_smooth(m, 11) + assert m == next_fast_len(n, False) + + m = next_fast_len(n, True) + _assert_n_smooth(m, 5) + + def test_np_integers(self): + ITYPES = [np.int16, np.int32, np.int64, np.uint16, np.uint32, np.uint64] + for ityp in ITYPES: + x = ityp(12345) + testN = next_fast_len(x) + assert_equal(testN, next_fast_len(int(x))) + + def testnext_fast_len_small(self): + hams = { + 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 8, 8: 8, 14: 15, 15: 15, + 16: 16, 17: 18, 1021: 1024, 1536: 1536, 51200000: 51200000 + } + for x, y in hams.items(): + assert_equal(next_fast_len(x, True), y) + + @pytest.mark.xfail(sys.maxsize < 2**32, + reason="Hamming Numbers too large for 32-bit", + raises=ValueError, strict=True) + def testnext_fast_len_big(self): + hams = { + 510183360: 510183360, 510183360 + 1: 512000000, + 511000000: 512000000, + 854296875: 854296875, 854296875 + 1: 859963392, + 196608000000: 196608000000, 196608000000 + 1: 196830000000, + 8789062500000: 8789062500000, 8789062500000 + 1: 8796093022208, + 206391214080000: 206391214080000, + 206391214080000 + 1: 206624260800000, + 470184984576000: 470184984576000, + 470184984576000 + 1: 470715894135000, + 7222041363087360: 7222041363087360, + 7222041363087360 + 1: 7230196133913600, + # power of 5 5**23 + 11920928955078125: 11920928955078125, + 11920928955078125 - 1: 11920928955078125, + # power of 3 3**34 + 16677181699666569: 16677181699666569, + 16677181699666569 - 1: 16677181699666569, + # power of 2 2**54 + 18014398509481984: 18014398509481984, + 18014398509481984 - 1: 18014398509481984, + # above this, 
int(ceil(n)) == int(ceil(n+1)) + 19200000000000000: 19200000000000000, + 19200000000000000 + 1: 19221679687500000, + 288230376151711744: 288230376151711744, + 288230376151711744 + 1: 288325195312500000, + 288325195312500000 - 1: 288325195312500000, + 288325195312500000: 288325195312500000, + 288325195312500000 + 1: 288555831593533440, + } + for x, y in hams.items(): + assert_equal(next_fast_len(x, True), y) + + def test_keyword_args(self): + assert next_fast_len(11, real=True) == 12 + assert next_fast_len(target=7, real=False) == 7 + + +@skip_if_array_api(cpu_only=True) +class Test_init_nd_shape_and_axes: + + def test_py_0d_defaults(self, xp): + x = xp.asarray(4) + shape = None + axes = None + + shape_expected = () + axes_expected = [] + + shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes) + + assert shape_res == shape_expected + assert axes_res == axes_expected + + def test_xp_0d_defaults(self, xp): + x = xp.asarray(7.) + shape = None + axes = None + + shape_expected = () + axes_expected = [] + + shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes) + + assert shape_res == shape_expected + assert axes_res == axes_expected + + def test_py_1d_defaults(self, xp): + x = xp.asarray([1, 2, 3]) + shape = None + axes = None + + shape_expected = (3,) + axes_expected = [0] + + shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes) + + assert shape_res == shape_expected + assert axes_res == axes_expected + + def test_xp_1d_defaults(self, xp): + x = xp.arange(0, 1, .1) + shape = None + axes = None + + shape_expected = (10,) + axes_expected = [0] + + shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes) + + assert shape_res == shape_expected + assert axes_res == axes_expected + + def test_py_2d_defaults(self, xp): + x = xp.asarray([[1, 2, 3, 4], + [5, 6, 7, 8]]) + shape = None + axes = None + + shape_expected = (2, 4) + axes_expected = [0, 1] + + shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes) + + assert shape_res == shape_expected + assert axes_res == axes_expected + + def test_xp_2d_defaults(self, xp): + x = xp.arange(0, 1, .1) + x = xp.reshape(x, (5, 2)) + shape = None + axes = None + + shape_expected = (5, 2) + axes_expected = [0, 1] + + shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes) + + assert shape_res == shape_expected + assert axes_res == axes_expected + + def test_xp_5d_defaults(self, xp): + x = xp.zeros([6, 2, 5, 3, 4]) + shape = None + axes = None + + shape_expected = (6, 2, 5, 3, 4) + axes_expected = [0, 1, 2, 3, 4] + + shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes) + + assert shape_res == shape_expected + assert axes_res == axes_expected + + def test_xp_5d_set_shape(self, xp): + x = xp.zeros([6, 2, 5, 3, 4]) + shape = [10, -1, -1, 1, 4] + axes = None + + shape_expected = (10, 2, 5, 1, 4) + axes_expected = [0, 1, 2, 3, 4] + + shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes) + + assert shape_res == shape_expected + assert axes_res == axes_expected + + def test_xp_5d_set_axes(self, xp): + x = xp.zeros([6, 2, 5, 3, 4]) + shape = None + axes = [4, 1, 2] + + shape_expected = (4, 2, 5) + axes_expected = [4, 1, 2] + + shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes) + + assert shape_res == shape_expected + assert axes_res == axes_expected + + def test_xp_5d_set_shape_axes(self, xp): + x = xp.zeros([6, 2, 5, 3, 4]) + shape = [10, -1, 2] + axes = [1, 0, 3] + + shape_expected = (10, 6, 2) + axes_expected = [1, 0, 3] + + shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes) + + assert 
shape_res == shape_expected + assert axes_res == axes_expected + + def test_shape_axes_subset(self, xp): + x = xp.zeros((2, 3, 4, 5)) + shape, axes = _init_nd_shape_and_axes(x, shape=(5, 5, 5), axes=None) + + assert shape == (5, 5, 5) + assert axes == [1, 2, 3] + + def test_errors(self, xp): + x = xp.zeros(1) + with assert_raises(ValueError, match="axes must be a scalar or " + "iterable of integers"): + _init_nd_shape_and_axes(x, shape=None, axes=[[1, 2], [3, 4]]) + + with assert_raises(ValueError, match="axes must be a scalar or " + "iterable of integers"): + _init_nd_shape_and_axes(x, shape=None, axes=[1., 2., 3., 4.]) + + with assert_raises(ValueError, + match="axes exceeds dimensionality of input"): + _init_nd_shape_and_axes(x, shape=None, axes=[1]) + + with assert_raises(ValueError, + match="axes exceeds dimensionality of input"): + _init_nd_shape_and_axes(x, shape=None, axes=[-2]) + + with assert_raises(ValueError, + match="all axes must be unique"): + _init_nd_shape_and_axes(x, shape=None, axes=[0, 0]) + + with assert_raises(ValueError, match="shape must be a scalar or " + "iterable of integers"): + _init_nd_shape_and_axes(x, shape=[[1, 2], [3, 4]], axes=None) + + with assert_raises(ValueError, match="shape must be a scalar or " + "iterable of integers"): + _init_nd_shape_and_axes(x, shape=[1., 2., 3., 4.], axes=None) + + with assert_raises(ValueError, + match="when given, axes and shape arguments" + " have to be of the same length"): + _init_nd_shape_and_axes(xp.zeros([1, 1, 1, 1]), + shape=[1, 2, 3], axes=[1]) + + with assert_raises(ValueError, + match="invalid number of data points" + r" \(\[0\]\) specified"): + _init_nd_shape_and_axes(x, shape=[0], axes=None) + + with assert_raises(ValueError, + match="invalid number of data points" + r" \(\[-2\]\) specified"): + _init_nd_shape_and_axes(x, shape=-2, axes=None) + + +@skip_if_array_api('torch', + reasons=['torch.fft not yet implemented by array-api-compat']) +class TestFFTShift: + + def test_definition(self, xp): + x = xp.asarray([0., 1, 2, 3, 4, -4, -3, -2, -1]) + y = xp.asarray([-4., -3, -2, -1, 0, 1, 2, 3, 4]) + xp_assert_close(fft.fftshift(x), y) + xp_assert_close(fft.ifftshift(y), x) + x = xp.asarray([0., 1, 2, 3, 4, -5, -4, -3, -2, -1]) + y = xp.asarray([-5., -4, -3, -2, -1, 0, 1, 2, 3, 4]) + xp_assert_close(fft.fftshift(x), y) + xp_assert_close(fft.ifftshift(y), x) + + def test_inverse(self, xp): + for n in [1, 4, 9, 100, 211]: + x = xp.asarray(np.random.random((n,))) + xp_assert_close(fft.ifftshift(fft.fftshift(x)), x) + + def test_axes_keyword(self, xp): + freqs = xp.asarray([[0., 1, 2], [3, 4, -4], [-3, -2, -1]]) + shifted = xp.asarray([[-1., -3, -2], [2, 0, 1], [-4, 3, 4]]) + xp_assert_close(fft.fftshift(freqs, axes=(0, 1)), shifted) + xp_assert_close(fft.fftshift(freqs, axes=0), fft.fftshift(freqs, axes=(0,))) + xp_assert_close(fft.ifftshift(shifted, axes=(0, 1)), freqs) + xp_assert_close(fft.ifftshift(shifted, axes=0), + fft.ifftshift(shifted, axes=(0,))) + xp_assert_close(fft.fftshift(freqs), shifted) + xp_assert_close(fft.ifftshift(shifted), freqs) + + def test_uneven_dims(self, xp): + """ Test 2D input, which has uneven dimension sizes """ + freqs = xp.asarray([ + [0, 1], + [2, 3], + [4, 5] + ], dtype=xp.float64) + + # shift in dimension 0 + shift_dim0 = xp.asarray([ + [4, 5], + [0, 1], + [2, 3] + ], dtype=xp.float64) + xp_assert_close(fft.fftshift(freqs, axes=0), shift_dim0) + xp_assert_close(fft.ifftshift(shift_dim0, axes=0), freqs) + xp_assert_close(fft.fftshift(freqs, axes=(0,)), shift_dim0) + 
xp_assert_close(fft.ifftshift(shift_dim0, axes=[0]), freqs) + + # shift in dimension 1 + shift_dim1 = xp.asarray([ + [1, 0], + [3, 2], + [5, 4] + ], dtype=xp.float64) + xp_assert_close(fft.fftshift(freqs, axes=1), shift_dim1) + xp_assert_close(fft.ifftshift(shift_dim1, axes=1), freqs) + + # shift in both dimensions + shift_dim_both = xp.asarray([ + [5, 4], + [1, 0], + [3, 2] + ], dtype=xp.float64) + xp_assert_close(fft.fftshift(freqs, axes=(0, 1)), shift_dim_both) + xp_assert_close(fft.ifftshift(shift_dim_both, axes=(0, 1)), freqs) + xp_assert_close(fft.fftshift(freqs, axes=[0, 1]), shift_dim_both) + xp_assert_close(fft.ifftshift(shift_dim_both, axes=[0, 1]), freqs) + + # axes=None (default) shift in all dimensions + xp_assert_close(fft.fftshift(freqs, axes=None), shift_dim_both) + xp_assert_close(fft.ifftshift(shift_dim_both, axes=None), freqs) + xp_assert_close(fft.fftshift(freqs), shift_dim_both) + xp_assert_close(fft.ifftshift(shift_dim_both), freqs) + + +@skip_if_array_api('array_api_strict', 'cupy', + reasons=['fft not yet implemented by array-api-strict', + 'cupy.fft not yet implemented by array-api-compat']) +class TestFFTFreq: + + def test_definition(self, xp): + device = SCIPY_DEVICE + try: + x = xp.asarray([0, 1, 2, 3, 4, -4, -3, -2, -1], + dtype=xp.float64, device=device) + x2 = xp.asarray([0, 1, 2, 3, 4, -5, -4, -3, -2, -1], + dtype=xp.float64, device=device) + except TypeError: + x = xp.asarray([0, 1, 2, 3, 4, -4, -3, -2, -1], dtype=xp.float64) + x2 = xp.asarray([0, 1, 2, 3, 4, -5, -4, -3, -2, -1], + dtype=xp.float64) + + y = xp.asarray(9 * fft.fftfreq(9, xp=xp), dtype=xp.float64) + xp_assert_close(y, x) + y = xp.asarray(9 * xp.pi * fft.fftfreq(9, xp.pi, xp=xp), dtype=xp.float64) + xp_assert_close(y, x) + + y = xp.asarray(10 * fft.fftfreq(10, xp=xp), dtype=xp.float64) + xp_assert_close(y, x2) + y = xp.asarray(10 * xp.pi * fft.fftfreq(10, xp.pi, xp=xp), dtype=xp.float64) + xp_assert_close(y, x2) + + +@skip_if_array_api('array_api_strict', 'cupy', + reasons=['fft not yet implemented by array-api-strict', + 'cupy.fft not yet implemented by array-api-compat']) +class TestRFFTFreq: + + def test_definition(self, xp): + device = SCIPY_DEVICE + try: + x = xp.asarray([0, 1, 2, 3, 4], dtype=xp.float64, device=device) + x2 = xp.asarray([0, 1, 2, 3, 4, 5], dtype=xp.float64, device=device) + except TypeError: + # work around the `device` keyword not being implemented in numpy yet + x = xp.asarray([0, 1, 2, 3, 4], dtype=xp.float64) + x2 = xp.asarray([0, 1, 2, 3, 4, 5], dtype=xp.float64) + + y = xp.asarray(9 * fft.rfftfreq(9, xp=xp), dtype=xp.float64) + xp_assert_close(y, x) + y = xp.asarray(9 * xp.pi * fft.rfftfreq(9, xp.pi, xp=xp), dtype=xp.float64) + xp_assert_close(y, x) + + y = xp.asarray(10 * fft.rfftfreq(10, xp=xp), dtype=xp.float64) + xp_assert_close(y, x2) + y = xp.asarray(10 * xp.pi * fft.rfftfreq(10, xp.pi, xp=xp), dtype=xp.float64) + xp_assert_close(y, x2) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/fft/tests/test_multithreading.py b/env-llmeval/lib/python3.10/site-packages/scipy/fft/tests/test_multithreading.py new file mode 100644 index 0000000000000000000000000000000000000000..e771aff63b173d2e939913ccc26467e80ba670c8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/fft/tests/test_multithreading.py @@ -0,0 +1,83 @@ +from scipy import fft +import numpy as np +import pytest +from numpy.testing import assert_allclose +import multiprocessing +import os + + +@pytest.fixture(scope='module') +def x(): + return np.random.randn(512, 128) # Must be 
large enough to qualify for mt + + +@pytest.mark.parametrize("func", [ + fft.fft, fft.ifft, fft.fft2, fft.ifft2, fft.fftn, fft.ifftn, + fft.rfft, fft.irfft, fft.rfft2, fft.irfft2, fft.rfftn, fft.irfftn, + fft.hfft, fft.ihfft, fft.hfft2, fft.ihfft2, fft.hfftn, fft.ihfftn, + fft.dct, fft.idct, fft.dctn, fft.idctn, + fft.dst, fft.idst, fft.dstn, fft.idstn, +]) +@pytest.mark.parametrize("workers", [2, -1]) +def test_threaded_same(x, func, workers): + expected = func(x, workers=1) + actual = func(x, workers=workers) + assert_allclose(actual, expected) + + +def _mt_fft(x): + return fft.fft(x, workers=2) + + +def test_mixed_threads_processes(x): + # Test that the fft threadpool is safe to use before & after fork + + expect = fft.fft(x, workers=2) + + with multiprocessing.Pool(2) as p: + res = p.map(_mt_fft, [x for _ in range(4)]) + + for r in res: + assert_allclose(r, expect) + + fft.fft(x, workers=2) + + +def test_invalid_workers(x): + cpus = os.cpu_count() + + fft.ifft([1], workers=-cpus) + + with pytest.raises(ValueError, match='workers must not be zero'): + fft.fft(x, workers=0) + + with pytest.raises(ValueError, match='workers value out of range'): + fft.ifft(x, workers=-cpus-1) + + +def test_set_get_workers(): + cpus = os.cpu_count() + assert fft.get_workers() == 1 + with fft.set_workers(4): + assert fft.get_workers() == 4 + + with fft.set_workers(-1): + assert fft.get_workers() == cpus + + assert fft.get_workers() == 4 + + assert fft.get_workers() == 1 + + with fft.set_workers(-cpus): + assert fft.get_workers() == 1 + + +def test_set_workers_invalid(): + + with pytest.raises(ValueError, match='workers must not be zero'): + with fft.set_workers(0): + pass + + with pytest.raises(ValueError, match='workers value out of range'): + with fft.set_workers(-os.cpu_count()-1): + pass diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_basinhopping.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_basinhopping.py new file mode 100644 index 0000000000000000000000000000000000000000..d874a708b9a22ba72be1e63a18a082298e84bbe8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_basinhopping.py @@ -0,0 +1,753 @@ +""" +basinhopping: The basinhopping global optimization algorithm +""" +import numpy as np +import math +import inspect +import scipy.optimize +from scipy._lib._util import check_random_state + +__all__ = ['basinhopping'] + + +_params = (inspect.Parameter('res_new', kind=inspect.Parameter.KEYWORD_ONLY), + inspect.Parameter('res_old', kind=inspect.Parameter.KEYWORD_ONLY)) +_new_accept_test_signature = inspect.Signature(parameters=_params) + + +class Storage: + """ + Class used to store the lowest energy structure + """ + def __init__(self, minres): + self._add(minres) + + def _add(self, minres): + self.minres = minres + self.minres.x = np.copy(minres.x) + + def update(self, minres): + if minres.success and (minres.fun < self.minres.fun + or not self.minres.success): + self._add(minres) + return True + else: + return False + + def get_lowest(self): + return self.minres + + +class BasinHoppingRunner: + """This class implements the core of the basinhopping algorithm. + + x0 : ndarray + The starting coordinates. + minimizer : callable + The local minimizer, with signature ``result = minimizer(x)``. + The return value is an `optimize.OptimizeResult` object. + step_taking : callable + This function displaces the coordinates randomly. Signature should + be ``x_new = step_taking(x)``. Note that `x` may be modified in-place. 
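+        (In `basinhopping` itself, the default step taker is a
+        `RandomDisplacement` instance wrapped in `AdaptiveStepsize`.)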
+ accept_tests : list of callables + Each test is passed the kwargs `f_new`, `x_new`, `f_old` and + `x_old`. These tests will be used to judge whether or not to accept + the step. The acceptable return values are True, False, or ``"force + accept"``. If any of the tests return False then the step is rejected. + If ``"force accept"``, then this will override any other tests in + order to accept the step. This can be used, for example, to forcefully + escape from a local minimum that ``basinhopping`` is trapped in. + disp : bool, optional + Display status messages. + + """ + def __init__(self, x0, minimizer, step_taking, accept_tests, disp=False): + self.x = np.copy(x0) + self.minimizer = minimizer + self.step_taking = step_taking + self.accept_tests = accept_tests + self.disp = disp + + self.nstep = 0 + + # initialize return object + self.res = scipy.optimize.OptimizeResult() + self.res.minimization_failures = 0 + + # do initial minimization + minres = minimizer(self.x) + if not minres.success: + self.res.minimization_failures += 1 + if self.disp: + print("warning: basinhopping: local minimization failure") + self.x = np.copy(minres.x) + self.energy = minres.fun + self.incumbent_minres = minres # best minimize result found so far + if self.disp: + print("basinhopping step %d: f %g" % (self.nstep, self.energy)) + + # initialize storage class + self.storage = Storage(minres) + + if hasattr(minres, "nfev"): + self.res.nfev = minres.nfev + if hasattr(minres, "njev"): + self.res.njev = minres.njev + if hasattr(minres, "nhev"): + self.res.nhev = minres.nhev + + def _monte_carlo_step(self): + """Do one Monte Carlo iteration + + Randomly displace the coordinates, minimize, and decide whether + or not to accept the new coordinates. + """ + # Take a random step. Make a copy of x because the step_taking + # algorithm might change x in place + x_after_step = np.copy(self.x) + x_after_step = self.step_taking(x_after_step) + + # do a local minimization + minres = self.minimizer(x_after_step) + x_after_quench = minres.x + energy_after_quench = minres.fun + if not minres.success: + self.res.minimization_failures += 1 + if self.disp: + print("warning: basinhopping: local minimization failure") + if hasattr(minres, "nfev"): + self.res.nfev += minres.nfev + if hasattr(minres, "njev"): + self.res.njev += minres.njev + if hasattr(minres, "nhev"): + self.res.nhev += minres.nhev + + # accept the move based on self.accept_tests. If any test is False, + # then reject the step. If any test returns the special string + # 'force accept', then accept the step regardless. This can be used + # to forcefully escape from a local minimum if normal basin hopping + # steps are not sufficient. + accept = True + for test in self.accept_tests: + if inspect.signature(test) == _new_accept_test_signature: + testres = test(res_new=minres, res_old=self.incumbent_minres) + else: + testres = test(f_new=energy_after_quench, x_new=x_after_quench, + f_old=self.energy, x_old=self.x) + + if testres == 'force accept': + accept = True + break + elif testres is None: + raise ValueError("accept_tests must return True, False, or " + "'force accept'") + elif not testres: + accept = False + + # Report the result of the acceptance test to the take step class. 
+ # This is for adaptive step taking + if hasattr(self.step_taking, "report"): + self.step_taking.report(accept, f_new=energy_after_quench, + x_new=x_after_quench, f_old=self.energy, + x_old=self.x) + + return accept, minres + + def one_cycle(self): + """Do one cycle of the basinhopping algorithm + """ + self.nstep += 1 + new_global_min = False + + accept, minres = self._monte_carlo_step() + + if accept: + self.energy = minres.fun + self.x = np.copy(minres.x) + self.incumbent_minres = minres # best minimize result found so far + new_global_min = self.storage.update(minres) + + # print some information + if self.disp: + self.print_report(minres.fun, accept) + if new_global_min: + print("found new global minimum on step %d with function" + " value %g" % (self.nstep, self.energy)) + + # save some variables as BasinHoppingRunner attributes + self.xtrial = minres.x + self.energy_trial = minres.fun + self.accept = accept + + return new_global_min + + def print_report(self, energy_trial, accept): + """print a status update""" + minres = self.storage.get_lowest() + print("basinhopping step %d: f %g trial_f %g accepted %d " + " lowest_f %g" % (self.nstep, self.energy, energy_trial, + accept, minres.fun)) + + +class AdaptiveStepsize: + """ + Class to implement adaptive stepsize. + + This class wraps the step taking class and modifies the stepsize to + ensure the true acceptance rate is as close as possible to the target. + + Parameters + ---------- + takestep : callable + The step taking routine. Must contain modifiable attribute + takestep.stepsize + accept_rate : float, optional + The target step acceptance rate + interval : int, optional + Interval for how often to update the stepsize + factor : float, optional + The step size is multiplied or divided by this factor upon each + update. + verbose : bool, optional + Print information about each update + + """ + def __init__(self, takestep, accept_rate=0.5, interval=50, factor=0.9, + verbose=True): + self.takestep = takestep + self.target_accept_rate = accept_rate + self.interval = interval + self.factor = factor + self.verbose = verbose + + self.nstep = 0 + self.nstep_tot = 0 + self.naccept = 0 + + def __call__(self, x): + return self.take_step(x) + + def _adjust_step_size(self): + old_stepsize = self.takestep.stepsize + accept_rate = float(self.naccept) / self.nstep + if accept_rate > self.target_accept_rate: + # We're accepting too many steps. This generally means we're + # trapped in a basin. Take bigger steps. + self.takestep.stepsize /= self.factor + else: + # We're not accepting enough steps. Take smaller steps. + self.takestep.stepsize *= self.factor + if self.verbose: + print("adaptive stepsize: acceptance rate {:f} target {:f} new " + "stepsize {:g} old stepsize {:g}".format(accept_rate, + self.target_accept_rate, self.takestep.stepsize, + old_stepsize)) + + def take_step(self, x): + self.nstep += 1 + self.nstep_tot += 1 + if self.nstep % self.interval == 0: + self._adjust_step_size() + return self.takestep(x) + + def report(self, accept, **kwargs): + "called by basinhopping to report the result of the step" + if accept: + self.naccept += 1 + + +class RandomDisplacement: + """Add a random displacement of maximum size `stepsize` to each coordinate. + + Calling this updates `x` in-place. 
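+    (`BasinHoppingRunner` therefore passes a copy of the current
+    coordinates when taking a step.)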
+ + Parameters + ---------- + stepsize : float, optional + Maximum stepsize in any dimension + random_gen : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance then + that instance is used. + + """ + + def __init__(self, stepsize=0.5, random_gen=None): + self.stepsize = stepsize + self.random_gen = check_random_state(random_gen) + + def __call__(self, x): + x += self.random_gen.uniform(-self.stepsize, self.stepsize, + np.shape(x)) + return x + + +class MinimizerWrapper: + """ + wrap a minimizer function as a minimizer class + """ + def __init__(self, minimizer, func=None, **kwargs): + self.minimizer = minimizer + self.func = func + self.kwargs = kwargs + + def __call__(self, x0): + if self.func is None: + return self.minimizer(x0, **self.kwargs) + else: + return self.minimizer(self.func, x0, **self.kwargs) + + +class Metropolis: + """Metropolis acceptance criterion. + + Parameters + ---------- + T : float + The "temperature" parameter for the accept or reject criterion. + random_gen : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance then + that instance is used. + Random number generator used for acceptance test. + + """ + + def __init__(self, T, random_gen=None): + # Avoid ZeroDivisionError since "MBH can be regarded as a special case + # of the BH framework with the Metropolis criterion, where temperature + # T = 0." (Reject all steps that increase energy.) + self.beta = 1.0 / T if T != 0 else float('inf') + self.random_gen = check_random_state(random_gen) + + def accept_reject(self, res_new, res_old): + """ + Assuming the local search underlying res_new was successful: + If new energy is lower than old, it will always be accepted. + If new is higher than old, there is a chance it will be accepted, + less likely for larger differences. + """ + with np.errstate(invalid='ignore'): + # The energy values being fed to Metropolis are 1-length arrays, and if + # they are equal, their difference is 0, which gets multiplied by beta, + # which is inf, and array([0]) * float('inf') causes + # + # RuntimeWarning: invalid value encountered in multiply + # + # Ignore this warning so when the algorithm is on a flat plane, it always + # accepts the step, to try to move off the plane. + prod = -(res_new.fun - res_old.fun) * self.beta + w = math.exp(min(0, prod)) + + rand = self.random_gen.uniform() + return w >= rand and (res_new.success or not res_old.success) + + def __call__(self, *, res_new, res_old): + """ + f_new and f_old are mandatory in kwargs + """ + return bool(self.accept_reject(res_new, res_old)) + + +def basinhopping(func, x0, niter=100, T=1.0, stepsize=0.5, + minimizer_kwargs=None, take_step=None, accept_test=None, + callback=None, interval=50, disp=False, niter_success=None, + seed=None, *, target_accept_rate=0.5, stepwise_factor=0.9): + """Find the global minimum of a function using the basin-hopping algorithm. + + Basin-hopping is a two-phase method that combines a global stepping + algorithm with local minimization at each step. 
Designed to mimic + the natural process of energy minimization of clusters of atoms, it works + well for similar problems with "funnel-like, but rugged" energy landscapes + [5]_. + + As the step-taking, step acceptance, and minimization methods are all + customizable, this function can also be used to implement other two-phase + methods. + + Parameters + ---------- + func : callable ``f(x, *args)`` + Function to be optimized. ``args`` can be passed as an optional item + in the dict `minimizer_kwargs` + x0 : array_like + Initial guess. + niter : integer, optional + The number of basin-hopping iterations. There will be a total of + ``niter + 1`` runs of the local minimizer. + T : float, optional + The "temperature" parameter for the acceptance or rejection criterion. + Higher "temperatures" mean that larger jumps in function value will be + accepted. For best results `T` should be comparable to the + separation (in function value) between local minima. + stepsize : float, optional + Maximum step size for use in the random displacement. + minimizer_kwargs : dict, optional + Extra keyword arguments to be passed to the local minimizer + `scipy.optimize.minimize` Some important options could be: + + method : str + The minimization method (e.g. ``"L-BFGS-B"``) + args : tuple + Extra arguments passed to the objective function (`func`) and + its derivatives (Jacobian, Hessian). + + take_step : callable ``take_step(x)``, optional + Replace the default step-taking routine with this routine. The default + step-taking routine is a random displacement of the coordinates, but + other step-taking algorithms may be better for some systems. + `take_step` can optionally have the attribute ``take_step.stepsize``. + If this attribute exists, then `basinhopping` will adjust + ``take_step.stepsize`` in order to try to optimize the global minimum + search. + accept_test : callable, ``accept_test(f_new=f_new, x_new=x_new, f_old=fold, x_old=x_old)``, optional + Define a test which will be used to judge whether to accept the + step. This will be used in addition to the Metropolis test based on + "temperature" `T`. The acceptable return values are True, + False, or ``"force accept"``. If any of the tests return False + then the step is rejected. If the latter, then this will override any + other tests in order to accept the step. This can be used, for example, + to forcefully escape from a local minimum that `basinhopping` is + trapped in. + callback : callable, ``callback(x, f, accept)``, optional + A callback function which will be called for all minima found. ``x`` + and ``f`` are the coordinates and function value of the trial minimum, + and ``accept`` is whether that minimum was accepted. This can + be used, for example, to save the lowest N minima found. Also, + `callback` can be used to specify a user defined stop criterion by + optionally returning True to stop the `basinhopping` routine. + interval : integer, optional + interval for how often to update the `stepsize` + disp : bool, optional + Set to True to print status messages + niter_success : integer, optional + Stop the run if the global minimum candidate remains the same for this + number of iterations. + seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional + + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. 
+ If `seed` is already a ``Generator`` or ``RandomState`` instance then + that instance is used. + Specify `seed` for repeatable minimizations. The random numbers + generated with this seed only affect the default Metropolis + `accept_test` and the default `take_step`. If you supply your own + `take_step` and `accept_test`, and these functions use random + number generation, then those functions are responsible for the state + of their random number generator. + target_accept_rate : float, optional + The target acceptance rate that is used to adjust the `stepsize`. + If the current acceptance rate is greater than the target, + then the `stepsize` is increased. Otherwise, it is decreased. + Range is (0, 1). Default is 0.5. + + .. versionadded:: 1.8.0 + + stepwise_factor : float, optional + The `stepsize` is multiplied or divided by this stepwise factor upon + each update. Range is (0, 1). Default is 0.9. + + .. versionadded:: 1.8.0 + + Returns + ------- + res : OptimizeResult + The optimization result represented as a `OptimizeResult` object. + Important attributes are: ``x`` the solution array, ``fun`` the value + of the function at the solution, and ``message`` which describes the + cause of the termination. The ``OptimizeResult`` object returned by the + selected minimizer at the lowest minimum is also contained within this + object and can be accessed through the ``lowest_optimization_result`` + attribute. See `OptimizeResult` for a description of other attributes. + + See Also + -------- + minimize : + The local minimization function called once for each basinhopping step. + `minimizer_kwargs` is passed to this routine. + + Notes + ----- + Basin-hopping is a stochastic algorithm which attempts to find the global + minimum of a smooth scalar function of one or more variables [1]_ [2]_ [3]_ + [4]_. The algorithm in its current form was described by David Wales and + Jonathan Doye [2]_ http://www-wales.ch.cam.ac.uk/. + + The algorithm is iterative with each cycle composed of the following + features + + 1) random perturbation of the coordinates + + 2) local minimization + + 3) accept or reject the new coordinates based on the minimized function + value + + The acceptance test used here is the Metropolis criterion of standard Monte + Carlo algorithms, although there are many other possibilities [3]_. + + This global minimization method has been shown to be extremely efficient + for a wide variety of problems in physics and chemistry. It is + particularly useful when the function has many minima separated by large + barriers. See the `Cambridge Cluster Database + `_ for databases of molecular + systems that have been optimized primarily using basin-hopping. This + database includes minimization problems exceeding 300 degrees of freedom. + + See the free software program `GMIN `_ + for a Fortran implementation of basin-hopping. This implementation has many + variations of the procedure described above, including more + advanced step taking algorithms and alternate acceptance criterion. + + For stochastic global optimization there is no way to determine if the true + global minimum has actually been found. Instead, as a consistency check, + the algorithm can be run from a number of different random starting points + to ensure the lowest minimum found in each example has converged to the + global minimum. For this reason, `basinhopping` will by default simply + run for the number of iterations `niter` and return the lowest minimum + found. 
It is left to the user to ensure that this is in fact the global + minimum. + + Choosing `stepsize`: This is a crucial parameter in `basinhopping` and + depends on the problem being solved. The step is chosen uniformly in the + region from x0-stepsize to x0+stepsize, in each dimension. Ideally, it + should be comparable to the typical separation (in argument values) between + local minima of the function being optimized. `basinhopping` will, by + default, adjust `stepsize` to find an optimal value, but this may take + many iterations. You will get quicker results if you set a sensible + initial value for ``stepsize``. + + Choosing `T`: The parameter `T` is the "temperature" used in the + Metropolis criterion. Basinhopping steps are always accepted if + ``func(xnew) < func(xold)``. Otherwise, they are accepted with + probability:: + + exp( -(func(xnew) - func(xold)) / T ) + + So, for best results, `T` should to be comparable to the typical + difference (in function values) between local minima. (The height of + "walls" between local minima is irrelevant.) + + If `T` is 0, the algorithm becomes Monotonic Basin-Hopping, in which all + steps that increase energy are rejected. + + .. versionadded:: 0.12.0 + + References + ---------- + .. [1] Wales, David J. 2003, Energy Landscapes, Cambridge University Press, + Cambridge, UK. + .. [2] Wales, D J, and Doye J P K, Global Optimization by Basin-Hopping and + the Lowest Energy Structures of Lennard-Jones Clusters Containing up to + 110 Atoms. Journal of Physical Chemistry A, 1997, 101, 5111. + .. [3] Li, Z. and Scheraga, H. A., Monte Carlo-minimization approach to the + multiple-minima problem in protein folding, Proc. Natl. Acad. Sci. USA, + 1987, 84, 6611. + .. [4] Wales, D. J. and Scheraga, H. A., Global optimization of clusters, + crystals, and biomolecules, Science, 1999, 285, 1368. + .. [5] Olson, B., Hashmi, I., Molloy, K., and Shehu1, A., Basin Hopping as + a General and Versatile Optimization Framework for the Characterization + of Biological Macromolecules, Advances in Artificial Intelligence, + Volume 2012 (2012), Article ID 674832, :doi:`10.1155/2012/674832` + + Examples + -------- + The following example is a 1-D minimization problem, with many + local minima superimposed on a parabola. + + >>> import numpy as np + >>> from scipy.optimize import basinhopping + >>> func = lambda x: np.cos(14.5 * x - 0.3) + (x + 0.2) * x + >>> x0 = [1.] + + Basinhopping, internally, uses a local minimization algorithm. We will use + the parameter `minimizer_kwargs` to tell basinhopping which algorithm to + use and how to set up that minimizer. This parameter will be passed to + `scipy.optimize.minimize`. + + >>> minimizer_kwargs = {"method": "BFGS"} + >>> ret = basinhopping(func, x0, minimizer_kwargs=minimizer_kwargs, + ... niter=200) + >>> print("global minimum: x = %.4f, f(x) = %.4f" % (ret.x, ret.fun)) + global minimum: x = -0.1951, f(x) = -1.0009 + + Next consider a 2-D minimization problem. Also, this time, we + will use gradient information to significantly speed up the search. + + >>> def func2d(x): + ... f = np.cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] + (x[0] + + ... 0.2) * x[0] + ... df = np.zeros(2) + ... df[0] = -14.5 * np.sin(14.5 * x[0] - 0.3) + 2. * x[0] + 0.2 + ... df[1] = 2. * x[1] + 0.2 + ... return f, df + + We'll also use a different local minimization algorithm. Also, we must tell + the minimizer that our function returns both energy and gradient (Jacobian). 
+ + >>> minimizer_kwargs = {"method":"L-BFGS-B", "jac":True} + >>> x0 = [1.0, 1.0] + >>> ret = basinhopping(func2d, x0, minimizer_kwargs=minimizer_kwargs, + ... niter=200) + >>> print("global minimum: x = [%.4f, %.4f], f(x) = %.4f" % (ret.x[0], + ... ret.x[1], + ... ret.fun)) + global minimum: x = [-0.1951, -0.1000], f(x) = -1.0109 + + Here is an example using a custom step-taking routine. Imagine you want + the first coordinate to take larger steps than the rest of the coordinates. + This can be implemented like so: + + >>> class MyTakeStep: + ... def __init__(self, stepsize=0.5): + ... self.stepsize = stepsize + ... self.rng = np.random.default_rng() + ... def __call__(self, x): + ... s = self.stepsize + ... x[0] += self.rng.uniform(-2.*s, 2.*s) + ... x[1:] += self.rng.uniform(-s, s, x[1:].shape) + ... return x + + Since ``MyTakeStep.stepsize`` exists basinhopping will adjust the magnitude + of `stepsize` to optimize the search. We'll use the same 2-D function as + before + + >>> mytakestep = MyTakeStep() + >>> ret = basinhopping(func2d, x0, minimizer_kwargs=minimizer_kwargs, + ... niter=200, take_step=mytakestep) + >>> print("global minimum: x = [%.4f, %.4f], f(x) = %.4f" % (ret.x[0], + ... ret.x[1], + ... ret.fun)) + global minimum: x = [-0.1951, -0.1000], f(x) = -1.0109 + + Now, let's do an example using a custom callback function which prints the + value of every minimum found + + >>> def print_fun(x, f, accepted): + ... print("at minimum %.4f accepted %d" % (f, int(accepted))) + + We'll run it for only 10 basinhopping steps this time. + + >>> rng = np.random.default_rng() + >>> ret = basinhopping(func2d, x0, minimizer_kwargs=minimizer_kwargs, + ... niter=10, callback=print_fun, seed=rng) + at minimum 0.4159 accepted 1 + at minimum -0.4317 accepted 1 + at minimum -1.0109 accepted 1 + at minimum -0.9073 accepted 1 + at minimum -0.4317 accepted 0 + at minimum -0.1021 accepted 1 + at minimum -0.7425 accepted 1 + at minimum -0.9073 accepted 1 + at minimum -0.4317 accepted 0 + at minimum -0.7425 accepted 1 + at minimum -0.9073 accepted 1 + + The minimum at -1.0109 is actually the global minimum, found already on the + 8th iteration. + + """ # numpy/numpydoc#87 # noqa: E501 + if target_accept_rate <= 0. or target_accept_rate >= 1.: + raise ValueError('target_accept_rate has to be in range (0, 1)') + if stepwise_factor <= 0. 
or stepwise_factor >= 1.: + raise ValueError('stepwise_factor has to be in range (0, 1)') + + x0 = np.array(x0) + + # set up the np.random generator + rng = check_random_state(seed) + + # set up minimizer + if minimizer_kwargs is None: + minimizer_kwargs = dict() + wrapped_minimizer = MinimizerWrapper(scipy.optimize.minimize, func, + **minimizer_kwargs) + + # set up step-taking algorithm + if take_step is not None: + if not callable(take_step): + raise TypeError("take_step must be callable") + # if take_step.stepsize exists then use AdaptiveStepsize to control + # take_step.stepsize + if hasattr(take_step, "stepsize"): + take_step_wrapped = AdaptiveStepsize( + take_step, interval=interval, + accept_rate=target_accept_rate, + factor=stepwise_factor, + verbose=disp) + else: + take_step_wrapped = take_step + else: + # use default + displace = RandomDisplacement(stepsize=stepsize, random_gen=rng) + take_step_wrapped = AdaptiveStepsize(displace, interval=interval, + accept_rate=target_accept_rate, + factor=stepwise_factor, + verbose=disp) + + # set up accept tests + accept_tests = [] + if accept_test is not None: + if not callable(accept_test): + raise TypeError("accept_test must be callable") + accept_tests = [accept_test] + + # use default + metropolis = Metropolis(T, random_gen=rng) + accept_tests.append(metropolis) + + if niter_success is None: + niter_success = niter + 2 + + bh = BasinHoppingRunner(x0, wrapped_minimizer, take_step_wrapped, + accept_tests, disp=disp) + + # The wrapped minimizer is called once during construction of + # BasinHoppingRunner, so run the callback + if callable(callback): + callback(bh.storage.minres.x, bh.storage.minres.fun, True) + + # start main iteration loop + count, i = 0, 0 + message = ["requested number of basinhopping iterations completed" + " successfully"] + for i in range(niter): + new_global_min = bh.one_cycle() + + if callable(callback): + # should we pass a copy of x? 
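+            # The callback receives the trial coordinates, the trial function
+            # value, and whether the step was accepted; returning True stops
+            # the basinhopping loop early (see the `callback` docstring above).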
+ val = callback(bh.xtrial, bh.energy_trial, bh.accept) + if val is not None: + if val: + message = ["callback function requested stop early by" + "returning True"] + break + + count += 1 + if new_global_min: + count = 0 + elif count > niter_success: + message = ["success condition satisfied"] + break + + # prepare return object + res = bh.res + res.lowest_optimization_result = bh.storage.get_lowest() + res.x = np.copy(res.lowest_optimization_result.x) + res.fun = res.lowest_optimization_result.fun + res.message = message + res.nit = i + 1 + res.success = res.lowest_optimization_result.success + return res diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_bglu_dense.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_bglu_dense.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..3a22e9f404e7537b549f3c3a85a4f4529f8c0e6f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_bglu_dense.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_bracket.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_bracket.py new file mode 100644 index 0000000000000000000000000000000000000000..bb7726c234a0d50237e3e3a4f5e1c7f0681dc601 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_bracket.py @@ -0,0 +1,663 @@ +import numpy as np +import scipy._lib._elementwise_iterative_method as eim +from scipy._lib._util import _RichResult + +_ELIMITS = -1 # used in _bracket_root +_ESTOPONESIDE = 2 # used in _bracket_root + +def _bracket_root_iv(func, xl0, xr0, xmin, xmax, factor, args, maxiter): + + if not callable(func): + raise ValueError('`func` must be callable.') + + if not np.iterable(args): + args = (args,) + + xl0 = np.asarray(xl0)[()] + if not np.issubdtype(xl0.dtype, np.number) or np.iscomplex(xl0).any(): + raise ValueError('`xl0` must be numeric and real.') + + xr0 = xl0 + 1 if xr0 is None else xr0 + xmin = -np.inf if xmin is None else xmin + xmax = np.inf if xmax is None else xmax + factor = 2. if factor is None else factor + xl0, xr0, xmin, xmax, factor = np.broadcast_arrays(xl0, xr0, xmin, xmax, factor) + + if not np.issubdtype(xr0.dtype, np.number) or np.iscomplex(xr0).any(): + raise ValueError('`xr0` must be numeric and real.') + + if not np.issubdtype(xmin.dtype, np.number) or np.iscomplex(xmin).any(): + raise ValueError('`xmin` must be numeric and real.') + + if not np.issubdtype(xmax.dtype, np.number) or np.iscomplex(xmax).any(): + raise ValueError('`xmax` must be numeric and real.') + + if not np.issubdtype(factor.dtype, np.number) or np.iscomplex(factor).any(): + raise ValueError('`factor` must be numeric and real.') + if not np.all(factor > 1): + raise ValueError('All elements of `factor` must be greater than 1.') + + maxiter = np.asarray(maxiter) + message = '`maxiter` must be a non-negative integer.' 
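+    # `maxiter` may arrive as a 0-d array; require a real scalar that
+    # round-trips through `int` and is non-negative.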
+ if (not np.issubdtype(maxiter.dtype, np.number) or maxiter.shape != tuple() + or np.iscomplex(maxiter)): + raise ValueError(message) + maxiter_int = int(maxiter[()]) + if not maxiter == maxiter_int or maxiter < 0: + raise ValueError(message) + + if not np.all((xmin <= xl0) & (xl0 < xr0) & (xr0 <= xmax)): + raise ValueError('`xmin <= xl0 < xr0 <= xmax` must be True (elementwise).') + + return func, xl0, xr0, xmin, xmax, factor, args, maxiter + + +def _bracket_root(func, xl0, xr0=None, *, xmin=None, xmax=None, factor=None, + args=(), maxiter=1000): + """Bracket the root of a monotonic scalar function of one variable + + This function works elementwise when `xl0`, `xr0`, `xmin`, `xmax`, `factor`, and + the elements of `args` are broadcastable arrays. + + Parameters + ---------- + func : callable + The function for which the root is to be bracketed. + The signature must be:: + + func(x: ndarray, *args) -> ndarray + + where each element of ``x`` is a finite real and ``args`` is a tuple, + which may contain an arbitrary number of arrays that are broadcastable + with `x`. ``func`` must be an elementwise function: each element + ``func(x)[i]`` must equal ``func(x[i])`` for all indices ``i``. + xl0, xr0: float array_like + Starting guess of bracket, which need not contain a root. If `xr0` is + not provided, ``xr0 = xl0 + 1``. Must be broadcastable with one another. + xmin, xmax : float array_like, optional + Minimum and maximum allowable endpoints of the bracket, inclusive. Must + be broadcastable with `xl0` and `xr0`. + factor : float array_like, default: 2 + The factor used to grow the bracket. See notes for details. + args : tuple, optional + Additional positional arguments to be passed to `func`. Must be arrays + broadcastable with `xl0`, `xr0`, `xmin`, and `xmax`. If the callable to be + bracketed requires arguments that are not broadcastable with these + arrays, wrap that callable with `func` such that `func` accepts + only `x` and broadcastable arrays. + maxiter : int, optional + The maximum number of iterations of the algorithm to perform. + + Returns + ------- + res : _RichResult + An instance of `scipy._lib._util._RichResult` with the following + attributes. The descriptions are written as though the values will be + scalars; however, if `func` returns an array, the outputs will be + arrays of the same shape. + + xl, xr : float + The lower and upper ends of the bracket, if the algorithm + terminated successfully. + fl, fr : float + The function value at the lower and upper ends of the bracket. + nfev : int + The number of function evaluations required to find the bracket. + This is distinct from the number of times `func` is *called* + because the function may evaluated at multiple points in a single + call. + nit : int + The number of iterations of the algorithm that were performed. + status : int + An integer representing the exit status of the algorithm. + + - ``0`` : The algorithm produced a valid bracket. + - ``-1`` : The bracket expanded to the allowable limits without finding a bracket. + - ``-2`` : The maximum number of iterations was reached. + - ``-3`` : A non-finite value was encountered. + - ``-4`` : Iteration was terminated by `callback`. + - ``1`` : The algorithm is proceeding normally (in `callback` only). + - ``2`` : A bracket was found in the opposite search direction (in `callback` only). + + success : bool + ``True`` when the algorithm terminated successfully (status ``0``). 
+ + Notes + ----- + This function generalizes an algorithm found in pieces throughout + `scipy.stats`. The strategy is to iteratively grow the bracket `(l, r)` + until ``func(l) < 0 < func(r)``. The bracket grows to the left as follows. + + - If `xmin` is not provided, the distance between `xl0` and `l` is iteratively + increased by `factor`. + - If `xmin` is provided, the distance between `xmin` and `l` is iteratively + decreased by `factor`. Note that this also *increases* the bracket size. + + Growth of the bracket to the right is analogous. + + Growth of the bracket in one direction stops when the endpoint is no longer + finite, the function value at the endpoint is no longer finite, or the + endpoint reaches its limiting value (`xmin` or `xmax`). Iteration terminates + when the bracket stops growing in both directions, the bracket surrounds + the root, or a root is found (accidentally). + + If two brackets are found - that is, a bracket is found on both sides in + the same iteration, the smaller of the two is returned. + If roots of the function are found, both `l` and `r` are set to the + leftmost root. + + """ # noqa: E501 + # Todo: + # - find bracket with sign change in specified direction + # - Add tolerance + # - allow factor < 1? + + callback = None # works; I just don't want to test it + temp = _bracket_root_iv(func, xl0, xr0, xmin, xmax, factor, args, maxiter) + func, xl0, xr0, xmin, xmax, factor, args, maxiter = temp + + xs = (xl0, xr0) + temp = eim._initialize(func, xs, args) + func, xs, fs, args, shape, dtype = temp # line split for PEP8 + + # The approach is to treat the left and right searches as though they were + # (almost) totally independent one-sided bracket searches. (The interaction + # is considered when checking for termination and preparing the result + # object.) + # `x` is the "moving" end of the bracket + x = np.concatenate(xs) + f = np.concatenate(fs) + n = len(x) // 2 + + # `x_last` is the previous location of the moving end of the bracket. If + # the signs of `f` and `f_last` are different, `x` and `x_last` form a + # bracket. + x_last = np.concatenate((x[n:], x[:n])) + f_last = np.concatenate((f[n:], f[:n])) + # `x0` is the "fixed" end of the bracket. + x0 = x_last + # We don't need to retain the corresponding function value, since the + # fixed end of the bracket is only needed to compute the new value of the + # moving end; it is never returned. + + xmin = np.broadcast_to(xmin, shape).astype(dtype, copy=False).ravel() + xmax = np.broadcast_to(xmax, shape).astype(dtype, copy=False).ravel() + limit = np.concatenate((xmin, xmax)) + + factor = np.broadcast_to(factor, shape).astype(dtype, copy=False).ravel() + factor = np.concatenate((factor, factor)) + + active = np.arange(2*n) + args = [np.concatenate((arg, arg)) for arg in args] + + # This is needed due to inner workings of `eim._loop`. + # We're abusing it a tiny bit. + shape = shape + (2,) + + # `d` is for "distance". + # For searches without a limit, the distance between the fixed end of the + # bracket `x0` and the moving end `x` will grow by `factor` each iteration. + # For searches with a limit, the distance between the `limit` and moving + # end of the bracket `x` will shrink by `factor` each iteration. 
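+    # Illustration with hypothetical values: for the leftward search with
+    # xl0=1, xr0=2, factor=2 and no xmin, the moving end visits 0, -2, -6, ...;
+    # with xmin=0 it instead halves its distance to the limit each iteration,
+    # visiting 0.5, 0.25, 0.125, ...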
+ i = np.isinf(limit) + ni = ~i + d = np.zeros_like(x) + d[i] = x[i] - x0[i] + d[ni] = limit[ni] - x[ni] + + status = np.full_like(x, eim._EINPROGRESS, dtype=int) # in progress + nit, nfev = 0, 1 # one function evaluation per side performed above + + work = _RichResult(x=x, x0=x0, f=f, limit=limit, factor=factor, + active=active, d=d, x_last=x_last, f_last=f_last, + nit=nit, nfev=nfev, status=status, args=args, + xl=None, xr=None, fl=None, fr=None, n=n) + res_work_pairs = [('status', 'status'), ('xl', 'xl'), ('xr', 'xr'), + ('nit', 'nit'), ('nfev', 'nfev'), ('fl', 'fl'), + ('fr', 'fr'), ('x', 'x'), ('f', 'f'), + ('x_last', 'x_last'), ('f_last', 'f_last')] + + def pre_func_eval(work): + # Initialize moving end of bracket + x = np.zeros_like(work.x) + + # Unlimited brackets grow by `factor` by increasing distance from fixed + # end to moving end. + i = np.isinf(work.limit) # indices of unlimited brackets + work.d[i] *= work.factor[i] + x[i] = work.x0[i] + work.d[i] + + # Limited brackets grow by decreasing the distance from the limit to + # the moving end. + ni = ~i # indices of limited brackets + work.d[ni] /= work.factor[ni] + x[ni] = work.limit[ni] - work.d[ni] + + return x + + def post_func_eval(x, f, work): + # Keep track of the previous location of the moving end so that we can + # return a narrower bracket. (The alternative is to remember the + # original fixed end, but then the bracket would be wider than needed.) + work.x_last = work.x + work.f_last = work.f + work.x = x + work.f = f + + def check_termination(work): + stop = np.zeros_like(work.x, dtype=bool) + + # Condition 1: a valid bracket (or the root itself) has been found + sf = np.sign(work.f) + sf_last = np.sign(work.f_last) + i = (sf_last == -sf) | (sf_last == 0) | (sf == 0) + work.status[i] = eim._ECONVERGED + stop[i] = True + + # Condition 2: the other side's search found a valid bracket. + # (If we just found a bracket with the rightward search, we can stop + # the leftward search, and vice-versa.) + # To do this, we need to set the status of the other side's search; + # this is tricky because `work.status` contains only the *active* + # elements, so we don't immediately know the index of the element we + # need to set - or even if it's still there. (That search may have + # terminated already, e.g. by reaching its `limit`.) + # To facilitate this, `work.active` contains a unit integer index of + # each search. Index `k` (`k < n)` and `k + n` correspond with a + # leftward and rightward search, respectively. Elements are removed + # from `work.active` just as they are removed from `work.status`, so + # we use `work.active` to help find the right location in + # `work.status`. + # Get the integer indices of the elements that can also stop + also_stop = (work.active[i] + work.n) % (2*work.n) + # Check whether they are still active. + # To start, we need to find out where in `work.active` they would + # appear if they are indeed there. + j = np.searchsorted(work.active, also_stop) + # If the location exceeds the length of the `work.active`, they are + # not there. + j = j[j < len(work.active)] + # Check whether they are still there. + j = j[also_stop == work.active[j]] + # Now convert these to boolean indices to use with `work.status`. 
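+        # (Illustration, assuming n = 3: if the leftward search with active
+        # index 1 terminates here, its rightward counterpart has index
+        # 1 + 3 = 4; `searchsorted` locates 4 within the sorted `work.active`,
+        # and only if it is still present is that search stopped as well.)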
+ i = np.zeros_like(stop) + i[j] = True # boolean indices of elements that can also stop + i = i & ~stop + work.status[i] = _ESTOPONESIDE + stop[i] = True + + # Condition 3: moving end of bracket reaches limit + i = (work.x == work.limit) & ~stop + work.status[i] = _ELIMITS + stop[i] = True + + # Condition 4: non-finite value encountered + i = ~(np.isfinite(work.x) & np.isfinite(work.f)) & ~stop + work.status[i] = eim._EVALUEERR + stop[i] = True + + return stop + + def post_termination_check(work): + pass + + def customize_result(res, shape): + n = len(res['x']) // 2 + + # To avoid ambiguity, below we refer to `xl0`, the initial left endpoint + # as `a` and `xr0`, the initial right endpoint, as `b`. + # Because we treat the two one-sided searches as though they were + # independent, what we keep track of in `work` and what we want to + # return in `res` look quite different. Combine the results from the + # two one-sided searches before reporting the results to the user. + # - "a" refers to the leftward search (the moving end started at `a`) + # - "b" refers to the rightward search (the moving end started at `b`) + # - "l" refers to the left end of the bracket (closer to -oo) + # - "r" refers to the right end of the bracket (closer to +oo) + xal = res['x'][:n] + xar = res['x_last'][:n] + xbl = res['x_last'][n:] + xbr = res['x'][n:] + + fal = res['f'][:n] + far = res['f_last'][:n] + fbl = res['f_last'][n:] + fbr = res['f'][n:] + + # Initialize the brackets and corresponding function values to return + # to the user. Brackets may not be valid (e.g. there is no root, + # there weren't enough iterations, NaN encountered), but we still need + # to return something. One option would be all NaNs, but what I've + # chosen here is the left- and right-most points at which the function + # has been evaluated. This gives the user some information about what + # interval of the real line has been searched and shows that there is + # no sign change between the two ends. + xl = xal.copy() + fl = fal.copy() + xr = xbr.copy() + fr = fbr.copy() + + # `status` indicates whether the bracket is valid or not. If so, + # we want to adjust the bracket we return to be the narrowest possible + # given the points at which we evaluated the function. + # For example if bracket "a" is valid and smaller than bracket "b" OR + # if bracket "a" is valid and bracket "b" is not valid, we want to + # return bracket "a" (and vice versa). + sa = res['status'][:n] + sb = res['status'][n:] + + da = xar - xal + db = xbr - xbl + + i1 = ((da <= db) & (sa == 0)) | ((sa == 0) & (sb != 0)) + i2 = ((db <= da) & (sb == 0)) | ((sb == 0) & (sa != 0)) + + xr[i1] = xar[i1] + fr[i1] = far[i1] + xl[i2] = xbl[i2] + fl[i2] = fbl[i2] + + # Finish assembling the result object + res['xl'] = xl + res['xr'] = xr + res['fl'] = fl + res['fr'] = fr + + res['nit'] = np.maximum(res['nit'][:n], res['nit'][n:]) + res['nfev'] = res['nfev'][:n] + res['nfev'][n:] + # If the status on one side is zero, the status is zero. In any case, + # report the status from one side only. 
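+        # `np.choose` uses the boolean mask as an index into ``(sb, sa)``:
+        # where ``sa == 0`` (True -> index 1) the leftward status `sa` is
+        # reported; otherwise the rightward status `sb` is reported.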
+ res['status'] = np.choose(sa == 0, (sb, sa)) + res['success'] = (res['status'] == 0) + + del res['x'] + del res['f'] + del res['x_last'] + del res['f_last'] + + return shape[:-1] + + return eim._loop(work, callback, shape, maxiter, func, args, dtype, + pre_func_eval, post_func_eval, check_termination, + post_termination_check, customize_result, res_work_pairs) + + +def _bracket_minimum_iv(func, xm0, xl0, xr0, xmin, xmax, factor, args, maxiter): + + if not callable(func): + raise ValueError('`func` must be callable.') + + if not np.iterable(args): + args = (args,) + + xm0 = np.asarray(xm0)[()] + if not np.issubdtype(xm0.dtype, np.number) or np.iscomplex(xm0).any(): + raise ValueError('`xm0` must be numeric and real.') + + xmin = -np.inf if xmin is None else xmin + xmax = np.inf if xmax is None else xmax + + xl0_not_supplied = False + if xl0 is None: + xl0 = xm0 - 0.5 + xl0_not_supplied = True + + xr0_not_supplied = False + if xr0 is None: + xr0 = xm0 + 0.5 + xr0_not_supplied = True + + factor = 2.0 if factor is None else factor + xl0, xm0, xr0, xmin, xmax, factor = np.broadcast_arrays( + xl0, xm0, xr0, xmin, xmax, factor + ) + + if not np.issubdtype(xl0.dtype, np.number) or np.iscomplex(xl0).any(): + raise ValueError('`xl0` must be numeric and real.') + + if not np.issubdtype(xr0.dtype, np.number) or np.iscomplex(xr0).any(): + raise ValueError('`xr0` must be numeric and real.') + + if not np.issubdtype(xmin.dtype, np.number) or np.iscomplex(xmin).any(): + raise ValueError('`xmin` must be numeric and real.') + + if not np.issubdtype(xmax.dtype, np.number) or np.iscomplex(xmax).any(): + raise ValueError('`xmax` must be numeric and real.') + + if not np.issubdtype(factor.dtype, np.number) or np.iscomplex(factor).any(): + raise ValueError('`factor` must be numeric and real.') + if not np.all(factor > 1): + raise ValueError('All elements of `factor` must be greater than 1.') + + # Default choices for xl or xr might have exceeded xmin or xmax. Adjust + # to make sure this doesn't happen. We replace with copies because xl, and xr + # are read-only views produced by broadcast_arrays. + if xl0_not_supplied: + xl0 = xl0.copy() + cond = ~np.isinf(xmin) & (xl0 < xmin) + xl0[cond] = ( + xm0[cond] - xmin[cond] + ) / np.array(16, dtype=xl0.dtype) + if xr0_not_supplied: + xr0 = xr0.copy() + cond = ~np.isinf(xmax) & (xmax < xr0) + xr0[cond] = ( + xmax[cond] - xm0[cond] + ) / np.array(16, dtype=xr0.dtype) + + maxiter = np.asarray(maxiter) + message = '`maxiter` must be a non-negative integer.' + if (not np.issubdtype(maxiter.dtype, np.number) or maxiter.shape != tuple() + or np.iscomplex(maxiter)): + raise ValueError(message) + maxiter_int = int(maxiter[()]) + if not maxiter == maxiter_int or maxiter < 0: + raise ValueError(message) + + if not np.all((xmin <= xl0) & (xl0 < xm0) & (xm0 < xr0) & (xr0 <= xmax)): + raise ValueError( + '`xmin <= xl0 < xm0 < xr0 <= xmax` must be True (elementwise).' + ) + + return func, xm0, xl0, xr0, xmin, xmax, factor, args, maxiter + + +def _bracket_minimum(func, xm0, *, xl0=None, xr0=None, xmin=None, xmax=None, + factor=None, args=(), maxiter=1000): + """Bracket the minimum of a unimodal scalar function of one variable + + This function works elementwise when `xm0`, `xl0`, `xr0`, `xmin`, `xmax`, + and the elements of `args` are broadcastable arrays. + + Parameters + ---------- + func : callable + The function for which the minimum is to be bracketed. 
+ The signature must be:: + + func(x: ndarray, *args) -> ndarray + + where each element of ``x`` is a finite real and ``args`` is a tuple, + which may contain an arbitrary number of arrays that are broadcastable + with ``x``. `func` must be an elementwise function: each element + ``func(x)[i]`` must equal ``func(x[i])`` for all indices `i`. + xm0: float array_like + Starting guess for middle point of bracket. + xl0, xr0: float array_like, optional + Starting guesses for left and right endpoints of the bracket. Must be + broadcastable with one another and with `xm0`. + xmin, xmax : float array_like, optional + Minimum and maximum allowable endpoints of the bracket, inclusive. Must + be broadcastable with `xl0`, `xm0`, and `xr0`. + factor : float array_like, optional + Controls expansion of bracket endpoint in downhill direction. Works + differently in the cases where a limit is set in the downhill direction + with `xmax` or `xmin`. See Notes. + args : tuple, optional + Additional positional arguments to be passed to `func`. Must be arrays + broadcastable with `xl0`, `xm0`, `xr0`, `xmin`, and `xmax`. If the + callable to be bracketed requires arguments that are not broadcastable + with these arrays, wrap that callable with `func` such that `func` + accepts only ``x`` and broadcastable arrays. + maxiter : int, optional + The maximum number of iterations of the algorithm to perform. The number + of function evaluations is three greater than the number of iterations. + + Returns + ------- + res : _RichResult + An instance of `scipy._lib._util._RichResult` with the following + attributes. The descriptions are written as though the values will be + scalars; however, if `func` returns an array, the outputs will be + arrays of the same shape. + + xl, xm, xr : float + The left, middle, and right points of the bracket, if the algorithm + terminated successfully. + fl, fm, fr : float + The function value at the left, middle, and right points of the bracket. + nfev : int + The number of function evaluations required to find the bracket. + nit : int + The number of iterations of the algorithm that were performed. + status : int + An integer representing the exit status of the algorithm. + + - ``0`` : The algorithm produced a valid bracket. + - ``-1`` : The bracket expanded to the allowable limits. Assuming + unimodality, this implies the endpoint at the limit is a + minimizer. + - ``-2`` : The maximum number of iterations was reached. + - ``-3`` : A non-finite value was encountered. + + success : bool + ``True`` when the algorithm terminated successfully (status ``0``). + + Notes + ----- + Similar to `scipy.optimize.bracket`, this function seeks to find real + points ``xl < xm < xr`` such that ``f(xl) >= f(xm)`` and ``f(xr) >= f(xm)``, + where at least one of the inequalities is strict. Unlike `scipy.optimize.bracket`, + this function can operate in a vectorized manner on array input, so long as + the input arrays are broadcastable with each other. Also unlike + `scipy.optimize.bracket`, users may specify minimum and maximum endpoints + for the desired bracket. + + Given an initial trio of points ``xl = xl0``, ``xm = xm0``, ``xr = xr0``, + the algorithm checks if these points already give a valid bracket. If not, + a new endpoint, ``w`` is chosen in the "downhill" direction, ``xm`` becomes the new + opposite endpoint, and either `xl` or `xr` becomes the new middle point, + depending on which direction is downhill. The algorithm repeats from here. 
+ + The new endpoint `w` is chosen differently depending on whether or not a + boundary `xmin` or `xmax` has been set in the downhill direction. Without + loss of generality, suppose the downhill direction is to the right, so that + ``f(xl) > f(xm) > f(xr)``. If there is no boundary to the right, then `w` + is chosen to be ``xr + factor * (xr - xm)`` where `factor` is controlled by + the user (defaults to 2.0) so that step sizes increase in geometric proportion. + If there is a boundary, `xmax` in this case, then `w` is chosen to be + ``xmax - (xmax - xr)/factor``, with steps slowing to a stop at + `xmax`. This cautious approach ensures that a minimum near but distinct from + the boundary isn't missed while also detecting whether or not the `xmax` is + a minimizer when `xmax` is reached after a finite number of steps. + """ # noqa: E501 + callback = None # works; I just don't want to test it + + temp = _bracket_minimum_iv(func, xm0, xl0, xr0, xmin, xmax, factor, args, maxiter) + func, xm0, xl0, xr0, xmin, xmax, factor, args, maxiter = temp + + xs = (xl0, xm0, xr0) + func, xs, fs, args, shape, dtype = eim._initialize(func, xs, args) + + xl0, xm0, xr0 = xs + fl0, fm0, fr0 = fs + xmin = np.broadcast_to(xmin, shape).astype(dtype, copy=False).ravel() + xmax = np.broadcast_to(xmax, shape).astype(dtype, copy=False).ravel() + # We will modify factor later on so make a copy. np.broadcast_to returns + # a read-only view. + factor = np.broadcast_to(factor, shape).astype(dtype, copy=True).ravel() + + # To simplify the logic, swap xl and xr if f(xl) < f(xr). We should always be + # marching downhill in the direction from xl to xr. + comp = fl0 < fr0 + xl0[comp], xr0[comp] = xr0[comp], xl0[comp] + fl0[comp], fr0[comp] = fr0[comp], fl0[comp] + # We only need the boundary in the direction we're traveling. + limit = np.where(comp, xmin, xmax) + + unlimited = np.isinf(limit) + limited = ~unlimited + step = np.empty_like(xl0) + + step[unlimited] = (xr0[unlimited] - xm0[unlimited]) + step[limited] = (limit[limited] - xr0[limited]) + + # Step size is divided by factor for case where there is a limit. + factor[limited] = 1 / factor[limited] + + status = np.full_like(xl0, eim._EINPROGRESS, dtype=int) + nit, nfev = 0, 3 + + work = _RichResult(xl=xl0, xm=xm0, xr=xr0, xr0=xr0, fl=fl0, fm=fm0, fr=fr0, + step=step, limit=limit, limited=limited, factor=factor, nit=nit, + nfev=nfev, status=status, args=args) + + res_work_pairs = [('status', 'status'), ('xl', 'xl'), ('xm', 'xm'), ('xr', 'xr'), + ('nit', 'nit'), ('nfev', 'nfev'), ('fl', 'fl'), ('fm', 'fm'), + ('fr', 'fr')] + + def pre_func_eval(work): + work.step *= work.factor + x = np.empty_like(work.xr) + x[~work.limited] = work.xr0[~work.limited] + work.step[~work.limited] + x[work.limited] = work.limit[work.limited] - work.step[work.limited] + # Since the new bracket endpoint is calculated from an offset with the + # limit, it may be the case that the new endpoint equals the old endpoint, + # when the old endpoint is sufficiently close to the limit. We use the + # limit itself as the new endpoint in these cases. + x[work.limited] = np.where( + x[work.limited] == work.xr[work.limited], + work.limit[work.limited], + x[work.limited], + ) + return x + + def post_func_eval(x, f, work): + work.xl, work.xm, work.xr = work.xm, work.xr, x + work.fl, work.fm, work.fr = work.fm, work.fr, f + + def check_termination(work): + # Condition 1: A valid bracket has been found. 
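+        # i.e. f(xl) >= f(xm) <= f(xr) with at least one strict inequality,
+        # so a completely flat triple does not yet qualify as a bracket.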
+ stop = ( + (work.fl >= work.fm) & (work.fr > work.fm) + | (work.fl > work.fm) & (work.fr >= work.fm) + ) + work.status[stop] = eim._ECONVERGED + + # Condition 2: Moving end of bracket reaches limit. + i = (work.xr == work.limit) & ~stop + work.status[i] = _ELIMITS + stop[i] = True + + # Condition 3: non-finite value encountered + i = ~(np.isfinite(work.xr) & np.isfinite(work.fr)) & ~stop + work.status[i] = eim._EVALUEERR + stop[i] = True + + return stop + + def post_termination_check(work): + pass + + def customize_result(res, shape): + # Reorder entries of xl and xr if they were swapped due to f(xl0) < f(xr0). + comp = res['xl'] > res['xr'] + res['xl'][comp], res['xr'][comp] = res['xr'][comp], res['xl'][comp] + res['fl'][comp], res['fr'][comp] = res['fr'][comp], res['fl'][comp] + return shape + + return eim._loop(work, callback, shape, + maxiter, func, args, dtype, + pre_func_eval, post_func_eval, + check_termination, post_termination_check, + customize_result, res_work_pairs) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_chandrupatla.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_chandrupatla.py new file mode 100644 index 0000000000000000000000000000000000000000..02cc746b1a825bb4c419e11d54da5bbc9d43cc1a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_chandrupatla.py @@ -0,0 +1,524 @@ +import numpy as np +from ._zeros_py import _xtol, _rtol, _iter +import scipy._lib._elementwise_iterative_method as eim +from scipy._lib._util import _RichResult + +def _chandrupatla(func, a, b, *, args=(), xatol=_xtol, xrtol=_rtol, + fatol=None, frtol=0, maxiter=_iter, callback=None): + """Find the root of an elementwise function using Chandrupatla's algorithm. + + For each element of the output of `func`, `chandrupatla` seeks the scalar + root that makes the element 0. This function allows for `a`, `b`, and the + output of `func` to be of any broadcastable shapes. + + Parameters + ---------- + func : callable + The function whose root is desired. The signature must be:: + + func(x: ndarray, *args) -> ndarray + + where each element of ``x`` is a finite real and ``args`` is a tuple, + which may contain an arbitrary number of components of any type(s). + ``func`` must be an elementwise function: each element ``func(x)[i]`` + must equal ``func(x[i])`` for all indices ``i``. `_chandrupatla` + seeks an array ``x`` such that ``func(x)`` is an array of zeros. + a, b : array_like + The lower and upper bounds of the root of the function. Must be + broadcastable with one another. + args : tuple, optional + Additional positional arguments to be passed to `func`. + xatol, xrtol, fatol, frtol : float, optional + Absolute and relative tolerances on the root and function value. + See Notes for details. + maxiter : int, optional + The maximum number of iterations of the algorithm to perform. + callback : callable, optional + An optional user-supplied function to be called before the first + iteration and after each iteration. + Called as ``callback(res)``, where ``res`` is a ``_RichResult`` + similar to that returned by `_chandrupatla` (but containing the current + iterate's values of all variables). If `callback` raises a + ``StopIteration``, the algorithm will terminate immediately and + `_chandrupatla` will return a result. + + Returns + ------- + res : _RichResult + An instance of `scipy._lib._util._RichResult` with the following + attributes. 
The descriptions are written as though the values will be + scalars; however, if `func` returns an array, the outputs will be + arrays of the same shape. + + x : float + The root of the function, if the algorithm terminated successfully. + nfev : int + The number of times the function was called to find the root. + nit : int + The number of iterations of Chandrupatla's algorithm performed. + status : int + An integer representing the exit status of the algorithm. + ``0`` : The algorithm converged to the specified tolerances. + ``-1`` : The algorithm encountered an invalid bracket. + ``-2`` : The maximum number of iterations was reached. + ``-3`` : A non-finite value was encountered. + ``-4`` : Iteration was terminated by `callback`. + ``1`` : The algorithm is proceeding normally (in `callback` only). + success : bool + ``True`` when the algorithm terminated successfully (status ``0``). + fun : float + The value of `func` evaluated at `x`. + xl, xr : float + The lower and upper ends of the bracket. + fl, fr : float + The function value at the lower and upper ends of the bracket. + + Notes + ----- + Implemented based on Chandrupatla's original paper [1]_. + + If ``xl`` and ``xr`` are the left and right ends of the bracket, + ``xmin = xl if abs(func(xl)) <= abs(func(xr)) else xr``, + and ``fmin0 = min(func(a), func(b))``, then the algorithm is considered to + have converged when ``abs(xr - xl) < xatol + abs(xmin) * xrtol`` or + ``fun(xmin) <= fatol + abs(fmin0) * frtol``. This is equivalent to the + termination condition described in [1]_ with ``xrtol = 4e-10``, + ``xatol = 1e-5``, and ``fatol = frtol = 0``. The default values are + ``xatol = 2e-12``, ``xrtol = 4 * np.finfo(float).eps``, ``frtol = 0``, + and ``fatol`` is the smallest normal number of the ``dtype`` returned + by ``func``. + + References + ---------- + + .. [1] Chandrupatla, Tirupathi R. + "A new hybrid quadratic/bisection algorithm for finding the zero of a + nonlinear function without using derivatives". + Advances in Engineering Software, 28(3), 145-149. + https://doi.org/10.1016/s0965-9978(96)00051-8 + + See Also + -------- + brentq, brenth, ridder, bisect, newton + + Examples + -------- + >>> from scipy import optimize + >>> def f(x, c): + ... return x**3 - 2*x - c + >>> c = 5 + >>> res = optimize._chandrupatla._chandrupatla(f, 0, 3, args=(c,)) + >>> res.x + 2.0945514818937463 + + >>> c = [3, 4, 5] + >>> res = optimize._chandrupatla._chandrupatla(f, 0, 3, args=(c,)) + >>> res.x + array([1.8932892 , 2. 
, 2.09455148]) + + """ + res = _chandrupatla_iv(func, args, xatol, xrtol, + fatol, frtol, maxiter, callback) + func, args, xatol, xrtol, fatol, frtol, maxiter, callback = res + + # Initialization + temp = eim._initialize(func, (a, b), args) + func, xs, fs, args, shape, dtype = temp + x1, x2 = xs + f1, f2 = fs + status = np.full_like(x1, eim._EINPROGRESS, dtype=int) # in progress + nit, nfev = 0, 2 # two function evaluations performed above + xatol = _xtol if xatol is None else xatol + xrtol = _rtol if xrtol is None else xrtol + fatol = np.finfo(dtype).tiny if fatol is None else fatol + frtol = frtol * np.minimum(np.abs(f1), np.abs(f2)) + work = _RichResult(x1=x1, f1=f1, x2=x2, f2=f2, x3=None, f3=None, t=0.5, + xatol=xatol, xrtol=xrtol, fatol=fatol, frtol=frtol, + nit=nit, nfev=nfev, status=status) + res_work_pairs = [('status', 'status'), ('x', 'xmin'), ('fun', 'fmin'), + ('nit', 'nit'), ('nfev', 'nfev'), ('xl', 'x1'), + ('fl', 'f1'), ('xr', 'x2'), ('fr', 'f2')] + + def pre_func_eval(work): + # [1] Figure 1 (first box) + x = work.x1 + work.t * (work.x2 - work.x1) + return x + + def post_func_eval(x, f, work): + # [1] Figure 1 (first diamond and boxes) + # Note: y/n are reversed in figure; compare to BASIC in appendix + work.x3, work.f3 = work.x2.copy(), work.f2.copy() + j = np.sign(f) == np.sign(work.f1) + nj = ~j + work.x3[j], work.f3[j] = work.x1[j], work.f1[j] + work.x2[nj], work.f2[nj] = work.x1[nj], work.f1[nj] + work.x1, work.f1 = x, f + + def check_termination(work): + # [1] Figure 1 (second diamond) + # Check for all terminal conditions and record statuses. + + # See [1] Section 4 (first two sentences) + i = np.abs(work.f1) < np.abs(work.f2) + work.xmin = np.choose(i, (work.x2, work.x1)) + work.fmin = np.choose(i, (work.f2, work.f1)) + stop = np.zeros_like(work.x1, dtype=bool) # termination condition met + + # This is the convergence criterion used in bisect. Chandrupatla's + # criterion is equivalent to this except with a factor of 4 on `xrtol`. + work.dx = abs(work.x2 - work.x1) + work.tol = abs(work.xmin) * work.xrtol + work.xatol + i = work.dx < work.tol + # Modify in place to incorporate tolerance on function value. Note that + # `frtol` has been redefined as `frtol = frtol * np.minimum(f1, f2)`, + # where `f1` and `f2` are the function evaluated at the original ends of + # the bracket. 
+ i |= np.abs(work.fmin) <= work.fatol + work.frtol + work.status[i] = eim._ECONVERGED + stop[i] = True + + i = (np.sign(work.f1) == np.sign(work.f2)) & ~stop + work.xmin[i], work.fmin[i], work.status[i] = np.nan, np.nan, eim._ESIGNERR + stop[i] = True + + i = ~((np.isfinite(work.x1) & np.isfinite(work.x2) + & np.isfinite(work.f1) & np.isfinite(work.f2)) | stop) + work.xmin[i], work.fmin[i], work.status[i] = np.nan, np.nan, eim._EVALUEERR + stop[i] = True + + return stop + + def post_termination_check(work): + # [1] Figure 1 (third diamond and boxes / Equation 1) + xi1 = (work.x1 - work.x2) / (work.x3 - work.x2) + phi1 = (work.f1 - work.f2) / (work.f3 - work.f2) + alpha = (work.x3 - work.x1) / (work.x2 - work.x1) + j = ((1 - np.sqrt(1 - xi1)) < phi1) & (phi1 < np.sqrt(xi1)) + + f1j, f2j, f3j, alphaj = work.f1[j], work.f2[j], work.f3[j], alpha[j] + t = np.full_like(alpha, 0.5) + t[j] = (f1j / (f1j - f2j) * f3j / (f3j - f2j) + - alphaj * f1j / (f3j - f1j) * f2j / (f2j - f3j)) + + # [1] Figure 1 (last box; see also BASIC in appendix with comment + # "Adjust T Away from the Interval Boundary") + tl = 0.5 * work.tol / work.dx + work.t = np.clip(t, tl, 1 - tl) + + def customize_result(res, shape): + xl, xr, fl, fr = res['xl'], res['xr'], res['fl'], res['fr'] + i = res['xl'] < res['xr'] + res['xl'] = np.choose(i, (xr, xl)) + res['xr'] = np.choose(i, (xl, xr)) + res['fl'] = np.choose(i, (fr, fl)) + res['fr'] = np.choose(i, (fl, fr)) + return shape + + return eim._loop(work, callback, shape, maxiter, func, args, dtype, + pre_func_eval, post_func_eval, check_termination, + post_termination_check, customize_result, res_work_pairs) + + +def _chandrupatla_iv(func, args, xatol, xrtol, + fatol, frtol, maxiter, callback): + # Input validation for `_chandrupatla` + + if not callable(func): + raise ValueError('`func` must be callable.') + + if not np.iterable(args): + args = (args,) + + tols = np.asarray([xatol if xatol is not None else 1, + xrtol if xrtol is not None else 1, + fatol if fatol is not None else 1, + frtol if frtol is not None else 1]) + if (not np.issubdtype(tols.dtype, np.number) or np.any(tols < 0) + or np.any(np.isnan(tols)) or tols.shape != (4,)): + raise ValueError('Tolerances must be non-negative scalars.') + + maxiter_int = int(maxiter) + if maxiter != maxiter_int or maxiter < 0: + raise ValueError('`maxiter` must be a non-negative integer.') + + if callback is not None and not callable(callback): + raise ValueError('`callback` must be callable.') + + return func, args, xatol, xrtol, fatol, frtol, maxiter, callback + + +def _chandrupatla_minimize(func, x1, x2, x3, *, args=(), xatol=None, + xrtol=None, fatol=None, frtol=None, maxiter=100, + callback=None): + """Find the minimizer of an elementwise function. + + For each element of the output of `func`, `_chandrupatla_minimize` seeks + the scalar minimizer that minimizes the element. This function allows for + `x1`, `x2`, `x3`, and the elements of `args` to be arrays of any + broadcastable shapes. + + Parameters + ---------- + func : callable + The function whose minimizer is desired. The signature must be:: + + func(x: ndarray, *args) -> ndarray + + where each element of ``x`` is a finite real and ``args`` is a tuple, + which may contain an arbitrary number of arrays that are broadcastable + with `x`. ``func`` must be an elementwise function: each element + ``func(x)[i]`` must equal ``func(x[i])`` for all indices ``i``. + `_chandrupatla` seeks an array ``x`` such that ``func(x)`` is an array + of minima. 
+ x1, x2, x3 : array_like + The abscissae of a standard scalar minimization bracket. A bracket is + valid if ``x1 < x2 < x3`` and ``func(x1) > func(x2) <= func(x3)``. + Must be broadcastable with one another and `args`. + args : tuple, optional + Additional positional arguments to be passed to `func`. Must be arrays + broadcastable with `x1`, `x2`, and `x3`. If the callable to be + differentiated requires arguments that are not broadcastable with `x`, + wrap that callable with `func` such that `func` accepts only `x` and + broadcastable arrays. + xatol, xrtol, fatol, frtol : float, optional + Absolute and relative tolerances on the minimizer and function value. + See Notes for details. + maxiter : int, optional + The maximum number of iterations of the algorithm to perform. + callback : callable, optional + An optional user-supplied function to be called before the first + iteration and after each iteration. + Called as ``callback(res)``, where ``res`` is a ``_RichResult`` + similar to that returned by `_chandrupatla_minimize` (but containing + the current iterate's values of all variables). If `callback` raises a + ``StopIteration``, the algorithm will terminate immediately and + `_chandrupatla_minimize` will return a result. + + Returns + ------- + res : _RichResult + An instance of `scipy._lib._util._RichResult` with the following + attributes. (The descriptions are written as though the values will be + scalars; however, if `func` returns an array, the outputs will be + arrays of the same shape.) + + success : bool + ``True`` when the algorithm terminated successfully (status ``0``). + status : int + An integer representing the exit status of the algorithm. + ``0`` : The algorithm converged to the specified tolerances. + ``-1`` : The algorithm encountered an invalid bracket. + ``-2`` : The maximum number of iterations was reached. + ``-3`` : A non-finite value was encountered. + ``-4`` : Iteration was terminated by `callback`. + ``1`` : The algorithm is proceeding normally (in `callback` only). + x : float + The minimizer of the function, if the algorithm terminated + successfully. + fun : float + The value of `func` evaluated at `x`. + nfev : int + The number of points at which `func` was evaluated. + nit : int + The number of iterations of the algorithm that were performed. + xl, xm, xr : float + The final three-point bracket. + fl, fm, fr : float + The function value at the bracket points. + + Notes + ----- + Implemented based on Chandrupatla's original paper [1]_. + + If ``x1 < x2 < x3`` are the points of the bracket and ``f1 > f2 <= f3`` + are the values of ``func`` at those points, then the algorithm is + considered to have converged when ``x3 - x1 <= abs(x2)*xrtol + xatol`` + or ``(f1 - 2*f2 + f3)/2 <= abs(f2)*frtol + fatol``. Note that first of + these differs from the termination conditions described in [1]_. The + default values of `xrtol` is the square root of the precision of the + appropriate dtype, and ``xatol=fatol = frtol`` is the smallest normal + number of the appropriate dtype. + + References + ---------- + .. [1] Chandrupatla, Tirupathi R. (1998). + "An efficient quadratic fit-sectioning algorithm for minimization + without derivatives". + Computer Methods in Applied Mechanics and Engineering, 152 (1-2), + 211-217. https://doi.org/10.1016/S0045-7825(97)00190-4 + + See Also + -------- + golden, brent, bounded + + Examples + -------- + >>> from scipy.optimize._chandrupatla import _chandrupatla_minimize + >>> def f(x, args=1): + ... 
return (x - args)**2 + >>> res = _chandrupatla_minimize(f, -5, 0, 5) + >>> res.x + 1.0 + >>> c = [1, 1.5, 2] + >>> res = _chandrupatla_minimize(f, -5, 0, 5, args=(c,)) + >>> res.x + array([1. , 1.5, 2. ]) + """ + res = _chandrupatla_iv(func, args, xatol, xrtol, + fatol, frtol, maxiter, callback) + func, args, xatol, xrtol, fatol, frtol, maxiter, callback = res + + # Initialization + xs = (x1, x2, x3) + temp = eim._initialize(func, xs, args) + func, xs, fs, args, shape, dtype = temp # line split for PEP8 + x1, x2, x3 = xs + f1, f2, f3 = fs + phi = dtype.type(0.5 + 0.5*5**0.5) # golden ratio + status = np.full_like(x1, eim._EINPROGRESS, dtype=int) # in progress + nit, nfev = 0, 3 # three function evaluations performed above + fatol = np.finfo(dtype).tiny if fatol is None else fatol + frtol = np.finfo(dtype).tiny if frtol is None else frtol + xatol = np.finfo(dtype).tiny if xatol is None else xatol + xrtol = np.sqrt(np.finfo(dtype).eps) if xrtol is None else xrtol + + # Ensure that x1 < x2 < x3 initially. + xs, fs = np.vstack((x1, x2, x3)), np.vstack((f1, f2, f3)) + i = np.argsort(xs, axis=0) + x1, x2, x3 = np.take_along_axis(xs, i, axis=0) + f1, f2, f3 = np.take_along_axis(fs, i, axis=0) + q0 = x3.copy() # "At the start, q0 is set at x3..." ([1] after (7)) + + work = _RichResult(x1=x1, f1=f1, x2=x2, f2=f2, x3=x3, f3=f3, phi=phi, + xatol=xatol, xrtol=xrtol, fatol=fatol, frtol=frtol, + nit=nit, nfev=nfev, status=status, q0=q0, args=args) + res_work_pairs = [('status', 'status'), + ('x', 'x2'), ('fun', 'f2'), + ('nit', 'nit'), ('nfev', 'nfev'), + ('xl', 'x1'), ('xm', 'x2'), ('xr', 'x3'), + ('fl', 'f1'), ('fm', 'f2'), ('fr', 'f3')] + + def pre_func_eval(work): + # `_check_termination` is called first -> `x3 - x2 > x2 - x1` + # But let's calculate a few terms that we'll reuse + x21 = work.x2 - work.x1 + x32 = work.x3 - work.x2 + + # [1] Section 3. "The quadratic minimum point Q1 is calculated using + # the relations developed in the previous section." [1] Section 2 (5/6) + A = x21 * (work.f3 - work.f2) + B = x32 * (work.f1 - work.f2) + C = A / (A + B) + # q1 = C * (work.x1 + work.x2) / 2 + (1 - C) * (work.x2 + work.x3) / 2 + q1 = 0.5 * (C*(work.x1 - work.x3) + work.x2 + work.x3) # much faster + # this is an array, so multiplying by 0.5 does not change dtype + + # "If Q1 and Q0 are sufficiently close... Q1 is accepted if it is + # sufficiently away from the inside point x2" + i = abs(q1 - work.q0) < 0.5 * abs(x21) # [1] (7) + xi = q1[i] + # Later, after (9), "If the point Q1 is in a +/- xtol neighborhood of + # x2, the new point is chosen in the larger interval at a distance + # tol away from x2." + # See also QBASIC code after "Accept Ql adjust if close to X2". + j = abs(q1[i] - work.x2[i]) <= work.xtol[i] + xi[j] = work.x2[i][j] + np.sign(x32[i][j]) * work.xtol[i][j] + + # "If condition (7) is not satisfied, golden sectioning of the larger + # interval is carried out to introduce the new point." + # (For simplicity, we go ahead and calculate it for all points, but we + # change the elements for which the condition was satisfied.) + x = work.x2 + (2 - work.phi) * x32 + x[i] = xi + + # "We define Q0 as the value of Q1 at the previous iteration." + work.q0 = q1 + return x + + def post_func_eval(x, f, work): + # Standard logic for updating a three-point bracket based on a new + # point. In QBASIC code, see "IF SGN(X-X2) = SGN(X3-X2) THEN...". + # There is an awful lot of data copying going on here; this would + # probably benefit from code optimization or implementation in Pythran. 
+ i = np.sign(x - work.x2) == np.sign(work.x3 - work.x2) + xi, x1i, x2i, x3i = x[i], work.x1[i], work.x2[i], work.x3[i], + fi, f1i, f2i, f3i = f[i], work.f1[i], work.f2[i], work.f3[i] + j = fi > f2i + x3i[j], f3i[j] = xi[j], fi[j] + j = ~j + x1i[j], f1i[j], x2i[j], f2i[j] = x2i[j], f2i[j], xi[j], fi[j] + + ni = ~i + xni, x1ni, x2ni, x3ni = x[ni], work.x1[ni], work.x2[ni], work.x3[ni], + fni, f1ni, f2ni, f3ni = f[ni], work.f1[ni], work.f2[ni], work.f3[ni] + j = fni > f2ni + x1ni[j], f1ni[j] = xni[j], fni[j] + j = ~j + x3ni[j], f3ni[j], x2ni[j], f2ni[j] = x2ni[j], f2ni[j], xni[j], fni[j] + + work.x1[i], work.x2[i], work.x3[i] = x1i, x2i, x3i + work.f1[i], work.f2[i], work.f3[i] = f1i, f2i, f3i + work.x1[ni], work.x2[ni], work.x3[ni] = x1ni, x2ni, x3ni, + work.f1[ni], work.f2[ni], work.f3[ni] = f1ni, f2ni, f3ni + + def check_termination(work): + # Check for all terminal conditions and record statuses. + stop = np.zeros_like(work.x1, dtype=bool) # termination condition met + + # Bracket is invalid; stop and don't return minimizer/minimum + i = ((work.f2 > work.f1) | (work.f2 > work.f3)) + work.x2[i], work.f2[i] = np.nan, np.nan + stop[i], work.status[i] = True, eim._ESIGNERR + + # Non-finite values; stop and don't return minimizer/minimum + finite = np.isfinite(work.x1+work.x2+work.x3+work.f1+work.f2+work.f3) + i = ~(finite | stop) + work.x2[i], work.f2[i] = np.nan, np.nan + stop[i], work.status[i] = True, eim._EVALUEERR + + # [1] Section 3 "Points 1 and 3 are interchanged if necessary to make + # the (x2, x3) the larger interval." + # Note: I had used np.choose; this is much faster. This would be a good + # place to save e.g. `work.x3 - work.x2` for reuse, but I tried and + # didn't notice a speed boost, so let's keep it simple. + i = abs(work.x3 - work.x2) < abs(work.x2 - work.x1) + temp = work.x1[i] + work.x1[i] = work.x3[i] + work.x3[i] = temp + temp = work.f1[i] + work.f1[i] = work.f3[i] + work.f3[i] = temp + + # [1] Section 3 (bottom of page 212) + # "We set a tolerance value xtol..." + work.xtol = abs(work.x2) * work.xrtol + work.xatol # [1] (8) + # "The convergence based on interval is achieved when..." + # Note: Equality allowed in case of `xtol=0` + i = abs(work.x3 - work.x2) <= 2 * work.xtol # [1] (9) + + # "We define ftol using..." + ftol = abs(work.f2) * work.frtol + work.fatol # [1] (10) + # "The convergence based on function values is achieved when..." + # Note 1: modify in place to incorporate tolerance on function value. 
+ # Note 2: factor of 2 is not in the text; see QBASIC start of DO loop + i |= (work.f1 - 2 * work.f2 + work.f3) <= 2*ftol # [1] (11) + i &= ~stop + stop[i], work.status[i] = True, eim._ECONVERGED + + return stop + + def post_termination_check(work): + pass + + def customize_result(res, shape): + xl, xr, fl, fr = res['xl'], res['xr'], res['fl'], res['fr'] + i = res['xl'] < res['xr'] + res['xl'] = np.choose(i, (xr, xl)) + res['xr'] = np.choose(i, (xl, xr)) + res['fl'] = np.choose(i, (fr, fl)) + res['fr'] = np.choose(i, (fl, fr)) + return shape + + return eim._loop(work, callback, shape, maxiter, func, args, dtype, + pre_func_eval, post_func_eval, check_termination, + post_termination_check, customize_result, res_work_pairs) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_cobyla.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_cobyla.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..c770500851262233d46715a6a6f9f630b24e4b87 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_cobyla.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_cobyla_py.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_cobyla_py.py new file mode 100644 index 0000000000000000000000000000000000000000..9007fe38a06a91fe456e64d74f4c0e37800f0607 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_cobyla_py.py @@ -0,0 +1,316 @@ +""" +Interface to Constrained Optimization By Linear Approximation + +Functions +--------- +.. autosummary:: + :toctree: generated/ + + fmin_cobyla + +""" + +import functools +from threading import RLock + +import numpy as np +from scipy.optimize import _cobyla as cobyla +from ._optimize import (OptimizeResult, _check_unknown_options, + _prepare_scalar_function) +try: + from itertools import izip +except ImportError: + izip = zip + +__all__ = ['fmin_cobyla'] + +# Workaround as _cobyla.minimize is not threadsafe +# due to an unknown f2py bug and can segfault, +# see gh-9658. +_module_lock = RLock() +def synchronized(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + with _module_lock: + return func(*args, **kwargs) + return wrapper + +@synchronized +def fmin_cobyla(func, x0, cons, args=(), consargs=None, rhobeg=1.0, + rhoend=1e-4, maxfun=1000, disp=None, catol=2e-4, + *, callback=None): + """ + Minimize a function using the Constrained Optimization By Linear + Approximation (COBYLA) method. This method wraps a FORTRAN + implementation of the algorithm. + + Parameters + ---------- + func : callable + Function to minimize. In the form func(x, \\*args). + x0 : ndarray + Initial guess. + cons : sequence + Constraint functions; must all be ``>=0`` (a single function + if only 1 constraint). Each function takes the parameters `x` + as its first argument, and it can return either a single number or + an array or list of numbers. + args : tuple, optional + Extra arguments to pass to function. + consargs : tuple, optional + Extra arguments to pass to constraint functions (default of None means + use same extra arguments as those passed to func). + Use ``()`` for no extra arguments. + rhobeg : float, optional + Reasonable initial changes to the variables. + rhoend : float, optional + Final accuracy in the optimization (not precisely guaranteed). This + is a lower bound on the size of the trust region. 
+ disp : {0, 1, 2, 3}, optional + Controls the frequency of output; 0 implies no output. + maxfun : int, optional + Maximum number of function evaluations. + catol : float, optional + Absolute tolerance for constraint violations. + callback : callable, optional + Called after each iteration, as ``callback(x)``, where ``x`` is the + current parameter vector. + + Returns + ------- + x : ndarray + The argument that minimises `f`. + + See also + -------- + minimize: Interface to minimization algorithms for multivariate + functions. See the 'COBYLA' `method` in particular. + + Notes + ----- + This algorithm is based on linear approximations to the objective + function and each constraint. We briefly describe the algorithm. + + Suppose the function is being minimized over k variables. At the + jth iteration the algorithm has k+1 points v_1, ..., v_(k+1), + an approximate solution x_j, and a radius RHO_j. + (i.e., linear plus a constant) approximations to the objective + function and constraint functions such that their function values + agree with the linear approximation on the k+1 points v_1,.., v_(k+1). + This gives a linear program to solve (where the linear approximations + of the constraint functions are constrained to be non-negative). + + However, the linear approximations are likely only good + approximations near the current simplex, so the linear program is + given the further requirement that the solution, which + will become x_(j+1), must be within RHO_j from x_j. RHO_j only + decreases, never increases. The initial RHO_j is rhobeg and the + final RHO_j is rhoend. In this way COBYLA's iterations behave + like a trust region algorithm. + + Additionally, the linear program may be inconsistent, or the + approximation may give poor improvement. For details about + how these issues are resolved, as well as how the points v_i are + updated, refer to the source code or the references below. + + + References + ---------- + Powell M.J.D. (1994), "A direct search optimization method that models + the objective and constraint functions by linear interpolation.", in + Advances in Optimization and Numerical Analysis, eds. S. Gomez and + J-P Hennart, Kluwer Academic (Dordrecht), pp. 51-67 + + Powell M.J.D. (1998), "Direct search algorithms for optimization + calculations", Acta Numerica 7, 287-336 + + Powell M.J.D. (2007), "A view of algorithms for optimization without + derivatives", Cambridge University Technical Report DAMTP 2007/NA03 + + + Examples + -------- + Minimize the objective function f(x,y) = x*y subject + to the constraints x**2 + y**2 < 1 and y > 0:: + + >>> def objective(x): + ... return x[0]*x[1] + ... + >>> def constr1(x): + ... return 1 - (x[0]**2 + x[1]**2) + ... + >>> def constr2(x): + ... return x[1] + ... + >>> from scipy.optimize import fmin_cobyla + >>> fmin_cobyla(objective, [0.0, 0.1], [constr1, constr2], rhoend=1e-7) + array([-0.70710685, 0.70710671]) + + The exact solution is (-sqrt(2)/2, sqrt(2)/2). + + + + """ + err = "cons must be a sequence of callable functions or a single"\ + " callable function." 
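+    # A single callable has no len(); the TypeError branch below wraps it in
+    # a list, while a sequence is checked element-by-element for callability.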
+ try: + len(cons) + except TypeError as e: + if callable(cons): + cons = [cons] + else: + raise TypeError(err) from e + else: + for thisfunc in cons: + if not callable(thisfunc): + raise TypeError(err) + + if consargs is None: + consargs = args + + # build constraints + con = tuple({'type': 'ineq', 'fun': c, 'args': consargs} for c in cons) + + # options + opts = {'rhobeg': rhobeg, + 'tol': rhoend, + 'disp': disp, + 'maxiter': maxfun, + 'catol': catol, + 'callback': callback} + + sol = _minimize_cobyla(func, x0, args, constraints=con, + **opts) + if disp and not sol['success']: + print(f"COBYLA failed to find a solution: {sol.message}") + return sol['x'] + + +@synchronized +def _minimize_cobyla(fun, x0, args=(), constraints=(), + rhobeg=1.0, tol=1e-4, maxiter=1000, + disp=False, catol=2e-4, callback=None, bounds=None, + **unknown_options): + """ + Minimize a scalar function of one or more variables using the + Constrained Optimization BY Linear Approximation (COBYLA) algorithm. + + Options + ------- + rhobeg : float + Reasonable initial changes to the variables. + tol : float + Final accuracy in the optimization (not precisely guaranteed). + This is a lower bound on the size of the trust region. + disp : bool + Set to True to print convergence messages. If False, + `verbosity` is ignored as set to 0. + maxiter : int + Maximum number of function evaluations. + catol : float + Tolerance (absolute) for constraint violations + + """ + _check_unknown_options(unknown_options) + maxfun = maxiter + rhoend = tol + iprint = int(bool(disp)) + + # check constraints + if isinstance(constraints, dict): + constraints = (constraints, ) + + if bounds: + i_lb = np.isfinite(bounds.lb) + if np.any(i_lb): + def lb_constraint(x, *args, **kwargs): + return x[i_lb] - bounds.lb[i_lb] + + constraints.append({'type': 'ineq', 'fun': lb_constraint}) + + i_ub = np.isfinite(bounds.ub) + if np.any(i_ub): + def ub_constraint(x): + return bounds.ub[i_ub] - x[i_ub] + + constraints.append({'type': 'ineq', 'fun': ub_constraint}) + + for ic, con in enumerate(constraints): + # check type + try: + ctype = con['type'].lower() + except KeyError as e: + raise KeyError('Constraint %d has no type defined.' % ic) from e + except TypeError as e: + raise TypeError('Constraints must be defined using a ' + 'dictionary.') from e + except AttributeError as e: + raise TypeError("Constraint's type must be a string.") from e + else: + if ctype != 'ineq': + raise ValueError("Constraints of type '%s' not handled by " + "COBYLA." % con['type']) + + # check function + if 'fun' not in con: + raise KeyError('Constraint %d has no function defined.' 
% ic) + + # check extra arguments + if 'args' not in con: + con['args'] = () + + # m is the total number of constraint values + # it takes into account that some constraints may be vector-valued + cons_lengths = [] + for c in constraints: + f = c['fun'](x0, *c['args']) + try: + cons_length = len(f) + except TypeError: + cons_length = 1 + cons_lengths.append(cons_length) + m = sum(cons_lengths) + + # create the ScalarFunction, cobyla doesn't require derivative function + def _jac(x, *args): + return None + + sf = _prepare_scalar_function(fun, x0, args=args, jac=_jac) + + def calcfc(x, con): + f = sf.fun(x) + i = 0 + for size, c in izip(cons_lengths, constraints): + con[i: i + size] = c['fun'](x, *c['args']) + i += size + return f + + def wrapped_callback(x): + if callback is not None: + callback(np.copy(x)) + + info = np.zeros(4, np.float64) + xopt, info = cobyla.minimize(calcfc, m=m, x=np.copy(x0), rhobeg=rhobeg, + rhoend=rhoend, iprint=iprint, maxfun=maxfun, + dinfo=info, callback=wrapped_callback) + + if info[3] > catol: + # Check constraint violation + info[0] = 4 + + return OptimizeResult(x=xopt, + status=int(info[0]), + success=info[0] == 1, + message={1: 'Optimization terminated successfully.', + 2: 'Maximum number of function evaluations ' + 'has been exceeded.', + 3: 'Rounding errors are becoming damaging ' + 'in COBYLA subroutine.', + 4: 'Did not converge to a solution ' + 'satisfying the constraints. See ' + '`maxcv` for magnitude of violation.', + 5: 'NaN result encountered.' + }.get(info[0], 'Unknown exit status.'), + nfev=int(info[1]), + fun=info[2], + maxcv=info[3]) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_constraints.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_constraints.py new file mode 100644 index 0000000000000000000000000000000000000000..1c7ff5e170b2eb518bc6be0c667ac9f89a073dcf --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_constraints.py @@ -0,0 +1,590 @@ +"""Constraints definition for minimize.""" +import numpy as np +from ._hessian_update_strategy import BFGS +from ._differentiable_functions import ( + VectorFunction, LinearVectorFunction, IdentityVectorFunction) +from ._optimize import OptimizeWarning +from warnings import warn, catch_warnings, simplefilter, filterwarnings +from scipy.sparse import issparse + + +def _arr_to_scalar(x): + # If x is a numpy array, return x.item(). This will + # fail if the array has more than one element. + return x.item() if isinstance(x, np.ndarray) else x + + +class NonlinearConstraint: + """Nonlinear constraint on the variables. + + The constraint has the general inequality form:: + + lb <= fun(x) <= ub + + Here the vector of independent variables x is passed as ndarray of shape + (n,) and ``fun`` returns a vector with m components. + + It is possible to use equal bounds to represent an equality constraint or + infinite bounds to represent a one-sided constraint. + + Parameters + ---------- + fun : callable + The function defining the constraint. + The signature is ``fun(x) -> array_like, shape (m,)``. + lb, ub : array_like + Lower and upper bounds on the constraint. Each array must have the + shape (m,) or be a scalar, in the latter case a bound will be the same + for all components of the constraint. Use ``np.inf`` with an + appropriate sign to specify a one-sided constraint. + Set components of `lb` and `ub` equal to represent an equality + constraint. 
Note that you can mix constraints of different types: + interval, one-sided or equality, by setting different components of + `lb` and `ub` as necessary. + jac : {callable, '2-point', '3-point', 'cs'}, optional + Method of computing the Jacobian matrix (an m-by-n matrix, + where element (i, j) is the partial derivative of f[i] with + respect to x[j]). The keywords {'2-point', '3-point', + 'cs'} select a finite difference scheme for the numerical estimation. + A callable must have the following signature: + ``jac(x) -> {ndarray, sparse matrix}, shape (m, n)``. + Default is '2-point'. + hess : {callable, '2-point', '3-point', 'cs', HessianUpdateStrategy, None}, optional + Method for computing the Hessian matrix. The keywords + {'2-point', '3-point', 'cs'} select a finite difference scheme for + numerical estimation. Alternatively, objects implementing + `HessianUpdateStrategy` interface can be used to approximate the + Hessian. Currently available implementations are: + + - `BFGS` (default option) + - `SR1` + + A callable must return the Hessian matrix of ``dot(fun, v)`` and + must have the following signature: + ``hess(x, v) -> {LinearOperator, sparse matrix, array_like}, shape (n, n)``. + Here ``v`` is ndarray with shape (m,) containing Lagrange multipliers. + keep_feasible : array_like of bool, optional + Whether to keep the constraint components feasible throughout + iterations. A single value set this property for all components. + Default is False. Has no effect for equality constraints. + finite_diff_rel_step: None or array_like, optional + Relative step size for the finite difference approximation. Default is + None, which will select a reasonable value automatically depending + on a finite difference scheme. + finite_diff_jac_sparsity: {None, array_like, sparse matrix}, optional + Defines the sparsity structure of the Jacobian matrix for finite + difference estimation, its shape must be (m, n). If the Jacobian has + only few non-zero elements in *each* row, providing the sparsity + structure will greatly speed up the computations. A zero entry means + that a corresponding element in the Jacobian is identically zero. + If provided, forces the use of 'lsmr' trust-region solver. + If None (default) then dense differencing will be used. + + Notes + ----- + Finite difference schemes {'2-point', '3-point', 'cs'} may be used for + approximating either the Jacobian or the Hessian. We, however, do not allow + its use for approximating both simultaneously. Hence whenever the Jacobian + is estimated via finite-differences, we require the Hessian to be estimated + using one of the quasi-Newton strategies. + + The scheme 'cs' is potentially the most accurate, but requires the function + to correctly handles complex inputs and be analytically continuable to the + complex plane. The scheme '3-point' is more accurate than '2-point' but + requires twice as many operations. 
+ + Examples + -------- + Constrain ``x[0] < sin(x[1]) + 1.9`` + + >>> from scipy.optimize import NonlinearConstraint + >>> import numpy as np + >>> con = lambda x: x[0] - np.sin(x[1]) + >>> nlc = NonlinearConstraint(con, -np.inf, 1.9) + + """ + def __init__(self, fun, lb, ub, jac='2-point', hess=BFGS(), + keep_feasible=False, finite_diff_rel_step=None, + finite_diff_jac_sparsity=None): + self.fun = fun + self.lb = lb + self.ub = ub + self.finite_diff_rel_step = finite_diff_rel_step + self.finite_diff_jac_sparsity = finite_diff_jac_sparsity + self.jac = jac + self.hess = hess + self.keep_feasible = keep_feasible + + +class LinearConstraint: + """Linear constraint on the variables. + + The constraint has the general inequality form:: + + lb <= A.dot(x) <= ub + + Here the vector of independent variables x is passed as ndarray of shape + (n,) and the matrix A has shape (m, n). + + It is possible to use equal bounds to represent an equality constraint or + infinite bounds to represent a one-sided constraint. + + Parameters + ---------- + A : {array_like, sparse matrix}, shape (m, n) + Matrix defining the constraint. + lb, ub : dense array_like, optional + Lower and upper limits on the constraint. Each array must have the + shape (m,) or be a scalar, in the latter case a bound will be the same + for all components of the constraint. Use ``np.inf`` with an + appropriate sign to specify a one-sided constraint. + Set components of `lb` and `ub` equal to represent an equality + constraint. Note that you can mix constraints of different types: + interval, one-sided or equality, by setting different components of + `lb` and `ub` as necessary. Defaults to ``lb = -np.inf`` + and ``ub = np.inf`` (no limits). + keep_feasible : dense array_like of bool, optional + Whether to keep the constraint components feasible throughout + iterations. A single value set this property for all components. + Default is False. Has no effect for equality constraints. + """ + def _input_validation(self): + if self.A.ndim != 2: + message = "`A` must have exactly two dimensions." + raise ValueError(message) + + try: + shape = self.A.shape[0:1] + self.lb = np.broadcast_to(self.lb, shape) + self.ub = np.broadcast_to(self.ub, shape) + self.keep_feasible = np.broadcast_to(self.keep_feasible, shape) + except ValueError: + message = ("`lb`, `ub`, and `keep_feasible` must be broadcastable " + "to shape `A.shape[0:1]`") + raise ValueError(message) + + def __init__(self, A, lb=-np.inf, ub=np.inf, keep_feasible=False): + if not issparse(A): + # In some cases, if the constraint is not valid, this emits a + # VisibleDeprecationWarning about ragged nested sequences + # before eventually causing an error. `scipy.optimize.milp` would + # prefer that this just error out immediately so it can handle it + # rather than concerning the user. 
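+            # `simplefilter("error")` promotes that warning to an exception,
+            # so a ragged `A` fails here rather than warning and failing
+            # later.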
+ with catch_warnings(): + simplefilter("error") + self.A = np.atleast_2d(A).astype(np.float64) + else: + self.A = A + if issparse(lb) or issparse(ub): + raise ValueError("Constraint limits must be dense arrays.") + self.lb = np.atleast_1d(lb).astype(np.float64) + self.ub = np.atleast_1d(ub).astype(np.float64) + + if issparse(keep_feasible): + raise ValueError("`keep_feasible` must be a dense array.") + self.keep_feasible = np.atleast_1d(keep_feasible).astype(bool) + self._input_validation() + + def residual(self, x): + """ + Calculate the residual between the constraint function and the limits + + For a linear constraint of the form:: + + lb <= A@x <= ub + + the lower and upper residuals between ``A@x`` and the limits are values + ``sl`` and ``sb`` such that:: + + lb + sl == A@x == ub - sb + + When all elements of ``sl`` and ``sb`` are positive, all elements of + the constraint are satisfied; a negative element in ``sl`` or ``sb`` + indicates that the corresponding element of the constraint is not + satisfied. + + Parameters + ---------- + x: array_like + Vector of independent variables + + Returns + ------- + sl, sb : array-like + The lower and upper residuals + """ + return self.A@x - self.lb, self.ub - self.A@x + + +class Bounds: + """Bounds constraint on the variables. + + The constraint has the general inequality form:: + + lb <= x <= ub + + It is possible to use equal bounds to represent an equality constraint or + infinite bounds to represent a one-sided constraint. + + Parameters + ---------- + lb, ub : dense array_like, optional + Lower and upper bounds on independent variables. `lb`, `ub`, and + `keep_feasible` must be the same shape or broadcastable. + Set components of `lb` and `ub` equal + to fix a variable. Use ``np.inf`` with an appropriate sign to disable + bounds on all or some variables. Note that you can mix constraints of + different types: interval, one-sided or equality, by setting different + components of `lb` and `ub` as necessary. Defaults to ``lb = -np.inf`` + and ``ub = np.inf`` (no bounds). + keep_feasible : dense array_like of bool, optional + Whether to keep the constraint components feasible throughout + iterations. Must be broadcastable with `lb` and `ub`. + Default is False. Has no effect for equality constraints. + """ + def _input_validation(self): + try: + res = np.broadcast_arrays(self.lb, self.ub, self.keep_feasible) + self.lb, self.ub, self.keep_feasible = res + except ValueError: + message = "`lb`, `ub`, and `keep_feasible` must be broadcastable." 
+ raise ValueError(message) + + def __init__(self, lb=-np.inf, ub=np.inf, keep_feasible=False): + if issparse(lb) or issparse(ub): + raise ValueError("Lower and upper bounds must be dense arrays.") + self.lb = np.atleast_1d(lb) + self.ub = np.atleast_1d(ub) + + if issparse(keep_feasible): + raise ValueError("`keep_feasible` must be a dense array.") + self.keep_feasible = np.atleast_1d(keep_feasible).astype(bool) + self._input_validation() + + def __repr__(self): + start = f"{type(self).__name__}({self.lb!r}, {self.ub!r}" + if np.any(self.keep_feasible): + end = f", keep_feasible={self.keep_feasible!r})" + else: + end = ")" + return start + end + + def residual(self, x): + """Calculate the residual (slack) between the input and the bounds + + For a bound constraint of the form:: + + lb <= x <= ub + + the lower and upper residuals between `x` and the bounds are values + ``sl`` and ``sb`` such that:: + + lb + sl == x == ub - sb + + When all elements of ``sl`` and ``sb`` are positive, all elements of + ``x`` lie within the bounds; a negative element in ``sl`` or ``sb`` + indicates that the corresponding element of ``x`` is out of bounds. + + Parameters + ---------- + x: array_like + Vector of independent variables + + Returns + ------- + sl, sb : array-like + The lower and upper residuals + """ + return x - self.lb, self.ub - x + + +class PreparedConstraint: + """Constraint prepared from a user defined constraint. + + On creation it will check whether a constraint definition is valid and + the initial point is feasible. If created successfully, it will contain + the attributes listed below. + + Parameters + ---------- + constraint : {NonlinearConstraint, LinearConstraint`, Bounds} + Constraint to check and prepare. + x0 : array_like + Initial vector of independent variables. + sparse_jacobian : bool or None, optional + If bool, then the Jacobian of the constraint will be converted + to the corresponded format if necessary. If None (default), such + conversion is not made. + finite_diff_bounds : 2-tuple, optional + Lower and upper bounds on the independent variables for the finite + difference approximation, if applicable. Defaults to no bounds. + + Attributes + ---------- + fun : {VectorFunction, LinearVectorFunction, IdentityVectorFunction} + Function defining the constraint wrapped by one of the convenience + classes. + bounds : 2-tuple + Contains lower and upper bounds for the constraints --- lb and ub. + These are converted to ndarray and have a size equal to the number of + the constraints. + keep_feasible : ndarray + Array indicating which components must be kept feasible with a size + equal to the number of the constraints. 
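+
+    Examples
+    --------
+    A minimal sketch (this assumes the module is importable as
+    ``scipy.optimize._constraints``; `LinearConstraint` is also available
+    directly from `scipy.optimize`):
+
+    >>> import numpy as np
+    >>> from scipy.optimize._constraints import (LinearConstraint,
+    ...                                           PreparedConstraint)
+    >>> con = LinearConstraint([[1.0, 1.0]], lb=0.0, ub=1.0)
+    >>> pcon = PreparedConstraint(con, np.array([0.25, 0.25]))
+    >>> pcon.violation([2.0, 2.0])  # A @ x = 4 exceeds ub = 1 by 3
+    array([3.])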
+ """ + def __init__(self, constraint, x0, sparse_jacobian=None, + finite_diff_bounds=(-np.inf, np.inf)): + if isinstance(constraint, NonlinearConstraint): + fun = VectorFunction(constraint.fun, x0, + constraint.jac, constraint.hess, + constraint.finite_diff_rel_step, + constraint.finite_diff_jac_sparsity, + finite_diff_bounds, sparse_jacobian) + elif isinstance(constraint, LinearConstraint): + fun = LinearVectorFunction(constraint.A, x0, sparse_jacobian) + elif isinstance(constraint, Bounds): + fun = IdentityVectorFunction(x0, sparse_jacobian) + else: + raise ValueError("`constraint` of an unknown type is passed.") + + m = fun.m + + lb = np.asarray(constraint.lb, dtype=float) + ub = np.asarray(constraint.ub, dtype=float) + keep_feasible = np.asarray(constraint.keep_feasible, dtype=bool) + + lb = np.broadcast_to(lb, m) + ub = np.broadcast_to(ub, m) + keep_feasible = np.broadcast_to(keep_feasible, m) + + if keep_feasible.shape != (m,): + raise ValueError("`keep_feasible` has a wrong shape.") + + mask = keep_feasible & (lb != ub) + f0 = fun.f + if np.any(f0[mask] < lb[mask]) or np.any(f0[mask] > ub[mask]): + raise ValueError("`x0` is infeasible with respect to some " + "inequality constraint with `keep_feasible` " + "set to True.") + + self.fun = fun + self.bounds = (lb, ub) + self.keep_feasible = keep_feasible + + def violation(self, x): + """How much the constraint is exceeded by. + + Parameters + ---------- + x : array-like + Vector of independent variables + + Returns + ------- + excess : array-like + How much the constraint is exceeded by, for each of the + constraints specified by `PreparedConstraint.fun`. + """ + with catch_warnings(): + # Ignore the following warning, it's not important when + # figuring out total violation + # UserWarning: delta_grad == 0.0. Check if the approximated + # function is linear + filterwarnings("ignore", "delta_grad", UserWarning) + ev = self.fun.fun(np.asarray(x)) + + excess_lb = np.maximum(self.bounds[0] - ev, 0) + excess_ub = np.maximum(ev - self.bounds[1], 0) + + return excess_lb + excess_ub + + +def new_bounds_to_old(lb, ub, n): + """Convert the new bounds representation to the old one. + + The new representation is a tuple (lb, ub) and the old one is a list + containing n tuples, ith containing lower and upper bound on a ith + variable. + If any of the entries in lb/ub are -np.inf/np.inf they are replaced by + None. + """ + lb = np.broadcast_to(lb, n) + ub = np.broadcast_to(ub, n) + + lb = [float(x) if x > -np.inf else None for x in lb] + ub = [float(x) if x < np.inf else None for x in ub] + + return list(zip(lb, ub)) + + +def old_bound_to_new(bounds): + """Convert the old bounds representation to the new one. + + The new representation is a tuple (lb, ub) and the old one is a list + containing n tuples, ith containing lower and upper bound on a ith + variable. + If any of the entries in lb/ub are None they are replaced by + -np.inf/np.inf. + """ + lb, ub = zip(*bounds) + + # Convert occurrences of None to -inf or inf, and replace occurrences of + # any numpy array x with x.item(). Then wrap the results in numpy arrays. 
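+    # For example, the old-style ``[(0, None), (None, 10)]`` becomes the
+    # new-style pair ``(array([0., -inf]), array([inf, 10.]))``.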
+ lb = np.array([float(_arr_to_scalar(x)) if x is not None else -np.inf + for x in lb]) + ub = np.array([float(_arr_to_scalar(x)) if x is not None else np.inf + for x in ub]) + + return lb, ub + + +def strict_bounds(lb, ub, keep_feasible, n_vars): + """Remove bounds which are not asked to be kept feasible.""" + strict_lb = np.resize(lb, n_vars).astype(float) + strict_ub = np.resize(ub, n_vars).astype(float) + keep_feasible = np.resize(keep_feasible, n_vars) + strict_lb[~keep_feasible] = -np.inf + strict_ub[~keep_feasible] = np.inf + return strict_lb, strict_ub + + +def new_constraint_to_old(con, x0): + """ + Converts new-style constraint objects to old-style constraint dictionaries. + """ + if isinstance(con, NonlinearConstraint): + if (con.finite_diff_jac_sparsity is not None or + con.finite_diff_rel_step is not None or + not isinstance(con.hess, BFGS) or # misses user specified BFGS + con.keep_feasible): + warn("Constraint options `finite_diff_jac_sparsity`, " + "`finite_diff_rel_step`, `keep_feasible`, and `hess`" + "are ignored by this method.", + OptimizeWarning, stacklevel=3) + + fun = con.fun + if callable(con.jac): + jac = con.jac + else: + jac = None + + else: # LinearConstraint + if np.any(con.keep_feasible): + warn("Constraint option `keep_feasible` is ignored by this method.", + OptimizeWarning, stacklevel=3) + + A = con.A + if issparse(A): + A = A.toarray() + def fun(x): + return np.dot(A, x) + def jac(x): + return A + + # FIXME: when bugs in VectorFunction/LinearVectorFunction are worked out, + # use pcon.fun.fun and pcon.fun.jac. Until then, get fun/jac above. + pcon = PreparedConstraint(con, x0) + lb, ub = pcon.bounds + + i_eq = lb == ub + i_bound_below = np.logical_xor(lb != -np.inf, i_eq) + i_bound_above = np.logical_xor(ub != np.inf, i_eq) + i_unbounded = np.logical_and(lb == -np.inf, ub == np.inf) + + if np.any(i_unbounded): + warn("At least one constraint is unbounded above and below. Such " + "constraints are ignored.", + OptimizeWarning, stacklevel=3) + + ceq = [] + if np.any(i_eq): + def f_eq(x): + y = np.array(fun(x)).flatten() + return y[i_eq] - lb[i_eq] + ceq = [{"type": "eq", "fun": f_eq}] + + if jac is not None: + def j_eq(x): + dy = jac(x) + if issparse(dy): + dy = dy.toarray() + dy = np.atleast_2d(dy) + return dy[i_eq, :] + ceq[0]["jac"] = j_eq + + cineq = [] + n_bound_below = np.sum(i_bound_below) + n_bound_above = np.sum(i_bound_above) + if n_bound_below + n_bound_above: + def f_ineq(x): + y = np.zeros(n_bound_below + n_bound_above) + y_all = np.array(fun(x)).flatten() + y[:n_bound_below] = y_all[i_bound_below] - lb[i_bound_below] + y[n_bound_below:] = -(y_all[i_bound_above] - ub[i_bound_above]) + return y + cineq = [{"type": "ineq", "fun": f_ineq}] + + if jac is not None: + def j_ineq(x): + dy = np.zeros((n_bound_below + n_bound_above, len(x0))) + dy_all = jac(x) + if issparse(dy_all): + dy_all = dy_all.toarray() + dy_all = np.atleast_2d(dy_all) + dy[:n_bound_below, :] = dy_all[i_bound_below] + dy[n_bound_below:, :] = -dy_all[i_bound_above] + return dy + cineq[0]["jac"] = j_ineq + + old_constraints = ceq + cineq + + if len(old_constraints) > 1: + warn("Equality and inequality constraints are specified in the same " + "element of the constraint list. For efficient use with this " + "method, equality and inequality constraints should be specified " + "in separate elements of the constraint list. 
", + OptimizeWarning, stacklevel=3) + return old_constraints + + +def old_constraint_to_new(ic, con): + """ + Converts old-style constraint dictionaries to new-style constraint objects. + """ + # check type + try: + ctype = con['type'].lower() + except KeyError as e: + raise KeyError('Constraint %d has no type defined.' % ic) from e + except TypeError as e: + raise TypeError( + 'Constraints must be a sequence of dictionaries.' + ) from e + except AttributeError as e: + raise TypeError("Constraint's type must be a string.") from e + else: + if ctype not in ['eq', 'ineq']: + raise ValueError("Unknown constraint type '%s'." % con['type']) + if 'fun' not in con: + raise ValueError('Constraint %d has no function defined.' % ic) + + lb = 0 + if ctype == 'eq': + ub = 0 + else: + ub = np.inf + + jac = '2-point' + if 'args' in con: + args = con['args'] + def fun(x): + return con["fun"](x, *args) + if 'jac' in con: + def jac(x): + return con["jac"](x, *args) + else: + fun = con['fun'] + if 'jac' in con: + jac = con['jac'] + + return NonlinearConstraint(fun, lb, ub, jac) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_dcsrch.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_dcsrch.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b4df4763ba4f699869431a0b6528383c2f0328 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_dcsrch.py @@ -0,0 +1,728 @@ +import numpy as np + +""" +# 2023 - ported from minpack2.dcsrch, dcstep (Fortran) to Python +c MINPACK-1 Project. June 1983. +c Argonne National Laboratory. +c Jorge J. More' and David J. Thuente. +c +c MINPACK-2 Project. November 1993. +c Argonne National Laboratory and University of Minnesota. +c Brett M. Averick, Richard G. Carter, and Jorge J. More'. +""" + +# NOTE this file was linted by black on first commit, and can be kept that way. + + +class DCSRCH: + """ + Parameters + ---------- + phi : callable phi(alpha) + Function at point `alpha` + derphi : callable phi'(alpha) + Objective function derivative. Returns a scalar. + ftol : float + A nonnegative tolerance for the sufficient decrease condition. + gtol : float + A nonnegative tolerance for the curvature condition. + xtol : float + A nonnegative relative tolerance for an acceptable step. The + subroutine exits with a warning if the relative difference between + sty and stx is less than xtol. + stpmin : float + A nonnegative lower bound for the step. + stpmax : + A nonnegative upper bound for the step. + + Notes + ----- + + This subroutine finds a step that satisfies a sufficient + decrease condition and a curvature condition. + + Each call of the subroutine updates an interval with + endpoints stx and sty. The interval is initially chosen + so that it contains a minimizer of the modified function + + psi(stp) = f(stp) - f(0) - ftol*stp*f'(0). + + If psi(stp) <= 0 and f'(stp) >= 0 for some step, then the + interval is chosen so that it contains a minimizer of f. + + The algorithm is designed to find a step that satisfies + the sufficient decrease condition + + f(stp) <= f(0) + ftol*stp*f'(0), + + and the curvature condition + + abs(f'(stp)) <= gtol*abs(f'(0)). + + If ftol is less than gtol and if, for example, the function + is bounded below, then there is always a step which satisfies + both conditions. + + If no step can be found that satisfies both conditions, then + the algorithm stops with a warning. In this case stp only + satisfies the sufficient decrease condition. 
+ + A typical invocation of dcsrch has the following outline: + + Evaluate the function at stp = 0.0d0; store in f. + Evaluate the gradient at stp = 0.0d0; store in g. + Choose a starting step stp. + + task = 'START' + 10 continue + call dcsrch(stp,f,g,ftol,gtol,xtol,task,stpmin,stpmax, + isave,dsave) + if (task .eq. 'FG') then + Evaluate the function and the gradient at stp + go to 10 + end if + + NOTE: The user must not alter work arrays between calls. + + The subroutine statement is + + subroutine dcsrch(f,g,stp,ftol,gtol,xtol,stpmin,stpmax, + task,isave,dsave) + where + + stp is a double precision variable. + On entry stp is the current estimate of a satisfactory + step. On initial entry, a positive initial estimate + must be provided. + On exit stp is the current estimate of a satisfactory step + if task = 'FG'. If task = 'CONV' then stp satisfies + the sufficient decrease and curvature condition. + + f is a double precision variable. + On initial entry f is the value of the function at 0. + On subsequent entries f is the value of the + function at stp. + On exit f is the value of the function at stp. + + g is a double precision variable. + On initial entry g is the derivative of the function at 0. + On subsequent entries g is the derivative of the + function at stp. + On exit g is the derivative of the function at stp. + + ftol is a double precision variable. + On entry ftol specifies a nonnegative tolerance for the + sufficient decrease condition. + On exit ftol is unchanged. + + gtol is a double precision variable. + On entry gtol specifies a nonnegative tolerance for the + curvature condition. + On exit gtol is unchanged. + + xtol is a double precision variable. + On entry xtol specifies a nonnegative relative tolerance + for an acceptable step. The subroutine exits with a + warning if the relative difference between sty and stx + is less than xtol. + + On exit xtol is unchanged. + + task is a character variable of length at least 60. + On initial entry task must be set to 'START'. + On exit task indicates the required action: + + If task(1:2) = 'FG' then evaluate the function and + derivative at stp and call dcsrch again. + + If task(1:4) = 'CONV' then the search is successful. + + If task(1:4) = 'WARN' then the subroutine is not able + to satisfy the convergence conditions. The exit value of + stp contains the best point found during the search. + + If task(1:5) = 'ERROR' then there is an error in the + input arguments. + + On exit with convergence, a warning or an error, the + variable task contains additional information. + + stpmin is a double precision variable. + On entry stpmin is a nonnegative lower bound for the step. + On exit stpmin is unchanged. + + stpmax is a double precision variable. + On entry stpmax is a nonnegative upper bound for the step. + On exit stpmax is unchanged. + + isave is an integer work array of dimension 2. + + dsave is a double precision work array of dimension 13. + + Subprograms called + + MINPACK-2 ... dcstep + MINPACK-1 Project. June 1983. + Argonne National Laboratory. + Jorge J. More' and David J. Thuente. + + MINPACK-2 Project. November 1993. + Argonne National Laboratory and University of Minnesota. + Brett M. Averick, Richard G. Carter, and Jorge J. More'. 
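+
+    Examples
+    --------
+    A minimal usage sketch (this assumes the module is importable as
+    ``scipy.optimize._dcsrch``; the tolerances below are illustrative only):
+
+    >>> from scipy.optimize._dcsrch import DCSRCH
+    >>> phi = lambda s: (s - 2.0)**2         # function of the step size
+    >>> derphi = lambda s: 2.0*(s - 2.0)     # its derivative
+    >>> search = DCSRCH(phi, derphi, ftol=1e-4, gtol=0.9, xtol=1e-14,
+    ...                 stpmin=1e-12, stpmax=10.0)
+    >>> stp, phi1, phi0, task = search(alpha1=1.0)
+    >>> task[:4], stp
+    (b'CONV', 1.0)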
+ """ + + def __init__(self, phi, derphi, ftol, gtol, xtol, stpmin, stpmax): + self.stage = None + self.ginit = None + self.gtest = None + self.gx = None + self.gy = None + self.finit = None + self.fx = None + self.fy = None + self.stx = None + self.sty = None + self.stmin = None + self.stmax = None + self.width = None + self.width1 = None + + # leave all assessment of tolerances/limits to the first call of + # this object + self.ftol = ftol + self.gtol = gtol + self.xtol = xtol + self.stpmin = stpmin + self.stpmax = stpmax + + self.phi = phi + self.derphi = derphi + + def __call__(self, alpha1, phi0=None, derphi0=None, maxiter=100): + """ + Parameters + ---------- + alpha1 : float + alpha1 is the current estimate of a satisfactory + step. A positive initial estimate must be provided. + phi0 : float + the value of `phi` at 0 (if known). + derphi0 : float + the derivative of `derphi` at 0 (if known). + maxiter : int + + Returns + ------- + alpha : float + Step size, or None if no suitable step was found. + phi : float + Value of `phi` at the new point `alpha`. + phi0 : float + Value of `phi` at `alpha=0`. + task : bytes + On exit task indicates status information. + + If task[:4] == b'CONV' then the search is successful. + + If task[:4] == b'WARN' then the subroutine is not able + to satisfy the convergence conditions. The exit value of + stp contains the best point found during the search. + + If task[:5] == b'ERROR' then there is an error in the + input arguments. + """ + if phi0 is None: + phi0 = self.phi(0.0) + if derphi0 is None: + derphi0 = self.derphi(0.0) + + phi1 = phi0 + derphi1 = derphi0 + + task = b"START" + for i in range(maxiter): + stp, phi1, derphi1, task = self._iterate( + alpha1, phi1, derphi1, task + ) + + if not np.isfinite(stp): + task = b"WARN" + stp = None + break + + if task[:2] == b"FG": + alpha1 = stp + phi1 = self.phi(stp) + derphi1 = self.derphi(stp) + else: + break + else: + # maxiter reached, the line search did not converge + stp = None + task = b"WARNING: dcsrch did not converge within max iterations" + + if task[:5] == b"ERROR" or task[:4] == b"WARN": + stp = None # failed + + return stp, phi1, phi0, task + + def _iterate(self, stp, f, g, task): + """ + Parameters + ---------- + stp : float + The current estimate of a satisfactory step. On initial entry, a + positive initial estimate must be provided. + f : float + On first call f is the value of the function at 0. On subsequent + entries f should be the value of the function at stp. + g : float + On initial entry g is the derivative of the function at 0. On + subsequent entries g is the derivative of the function at stp. + task : bytes + On initial entry task must be set to 'START'. + + On exit with convergence, a warning or an error, the + variable task contains additional information. + + + Returns + ------- + stp, f, g, task: tuple + + stp : float + the current estimate of a satisfactory step if task = 'FG'. If + task = 'CONV' then stp satisfies the sufficient decrease and + curvature condition. + f : float + the value of the function at stp. + g : float + the derivative of the function at stp. + task : bytes + On exit task indicates the required action: + + If task(1:2) == b'FG' then evaluate the function and + derivative at stp and call dcsrch again. + + If task(1:4) == b'CONV' then the search is successful. + + If task(1:4) == b'WARN' then the subroutine is not able + to satisfy the convergence conditions. The exit value of + stp contains the best point found during the search. 
+ + If task(1:5) == b'ERROR' then there is an error in the + input arguments. + """ + p5 = 0.5 + p66 = 0.66 + xtrapl = 1.1 + xtrapu = 4.0 + + if task[:5] == b"START": + if stp < self.stpmin: + task = b"ERROR: STP .LT. STPMIN" + if stp > self.stpmax: + task = b"ERROR: STP .GT. STPMAX" + if g >= 0: + task = b"ERROR: INITIAL G .GE. ZERO" + if self.ftol < 0: + task = b"ERROR: FTOL .LT. ZERO" + if self.gtol < 0: + task = b"ERROR: GTOL .LT. ZERO" + if self.xtol < 0: + task = b"ERROR: XTOL .LT. ZERO" + if self.stpmin < 0: + task = b"ERROR: STPMIN .LT. ZERO" + if self.stpmax < self.stpmin: + task = b"ERROR: STPMAX .LT. STPMIN" + + if task[:5] == b"ERROR": + return stp, f, g, task + + # Initialize local variables. + + self.brackt = False + self.stage = 1 + self.finit = f + self.ginit = g + self.gtest = self.ftol * self.ginit + self.width = self.stpmax - self.stpmin + self.width1 = self.width / p5 + + # The variables stx, fx, gx contain the values of the step, + # function, and derivative at the best step. + # The variables sty, fy, gy contain the value of the step, + # function, and derivative at sty. + # The variables stp, f, g contain the values of the step, + # function, and derivative at stp. + + self.stx = 0.0 + self.fx = self.finit + self.gx = self.ginit + self.sty = 0.0 + self.fy = self.finit + self.gy = self.ginit + self.stmin = 0 + self.stmax = stp + xtrapu * stp + task = b"FG" + return stp, f, g, task + + # in the original Fortran this was a location to restore variables + # we don't need to do that because they're attributes. + + # If psi(stp) <= 0 and f'(stp) >= 0 for some step, then the + # algorithm enters the second stage. + ftest = self.finit + stp * self.gtest + + if self.stage == 1 and f <= ftest and g >= 0: + self.stage = 2 + + # test for warnings + if self.brackt and (stp <= self.stmin or stp >= self.stmax): + task = b"WARNING: ROUNDING ERRORS PREVENT PROGRESS" + if self.brackt and self.stmax - self.stmin <= self.xtol * self.stmax: + task = b"WARNING: XTOL TEST SATISFIED" + if stp == self.stpmax and f <= ftest and g <= self.gtest: + task = b"WARNING: STP = STPMAX" + if stp == self.stpmin and (f > ftest or g >= self.gtest): + task = b"WARNING: STP = STPMIN" + + # test for convergence + if f <= ftest and abs(g) <= self.gtol * -self.ginit: + task = b"CONVERGENCE" + + # test for termination + if task[:4] == b"WARN" or task[:4] == b"CONV": + return stp, f, g, task + + # A modified function is used to predict the step during the + # first stage if a lower function value has been obtained but + # the decrease is not sufficient. + if self.stage == 1 and f <= self.fx and f > ftest: + # Define the modified function and derivative values. + fm = f - stp * self.gtest + fxm = self.fx - self.stx * self.gtest + fym = self.fy - self.sty * self.gtest + gm = g - self.gtest + gxm = self.gx - self.gtest + gym = self.gy - self.gtest + + # Call dcstep to update stx, sty, and to compute the new step. + # dcstep can have several operations which can produce NaN + # e.g. inf/inf. Filter these out. 
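+            # Note that dcstep is applied to the *modified* values defined
+            # above, i.e. f and the derivatives shifted by the
+            # sufficient-decrease line ``stp*ftol*f'(0)``; the results are
+            # shifted back to ordinary function values right after the call.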
+ with np.errstate(invalid="ignore", over="ignore"): + tup = dcstep( + self.stx, + fxm, + gxm, + self.sty, + fym, + gym, + stp, + fm, + gm, + self.brackt, + self.stmin, + self.stmax, + ) + self.stx, fxm, gxm, self.sty, fym, gym, stp, self.brackt = tup + + # Reset the function and derivative values for f + self.fx = fxm + self.stx * self.gtest + self.fy = fym + self.sty * self.gtest + self.gx = gxm + self.gtest + self.gy = gym + self.gtest + + else: + # Call dcstep to update stx, sty, and to compute the new step. + # dcstep can have several operations which can produce NaN + # e.g. inf/inf. Filter these out. + + with np.errstate(invalid="ignore", over="ignore"): + tup = dcstep( + self.stx, + self.fx, + self.gx, + self.sty, + self.fy, + self.gy, + stp, + f, + g, + self.brackt, + self.stmin, + self.stmax, + ) + ( + self.stx, + self.fx, + self.gx, + self.sty, + self.fy, + self.gy, + stp, + self.brackt, + ) = tup + + # Decide if a bisection step is needed + if self.brackt: + if abs(self.sty - self.stx) >= p66 * self.width1: + stp = self.stx + p5 * (self.sty - self.stx) + self.width1 = self.width + self.width = abs(self.sty - self.stx) + + # Set the minimum and maximum steps allowed for stp. + if self.brackt: + self.stmin = min(self.stx, self.sty) + self.stmax = max(self.stx, self.sty) + else: + self.stmin = stp + xtrapl * (stp - self.stx) + self.stmax = stp + xtrapu * (stp - self.stx) + + # Force the step to be within the bounds stpmax and stpmin. + stp = np.clip(stp, self.stpmin, self.stpmax) + + # If further progress is not possible, let stp be the best + # point obtained during the search. + if ( + self.brackt + and (stp <= self.stmin or stp >= self.stmax) + or ( + self.brackt + and self.stmax - self.stmin <= self.xtol * self.stmax + ) + ): + stp = self.stx + + # Obtain another function and derivative + task = b"FG" + return stp, f, g, task + + +def dcstep(stx, fx, dx, sty, fy, dy, stp, fp, dp, brackt, stpmin, stpmax): + """ + Subroutine dcstep + + This subroutine computes a safeguarded step for a search + procedure and updates an interval that contains a step that + satisfies a sufficient decrease and a curvature condition. + + The parameter stx contains the step with the least function + value. If brackt is set to .true. then a minimizer has + been bracketed in an interval with endpoints stx and sty. + The parameter stp contains the current step. + The subroutine assumes that if brackt is set to .true. then + + min(stx,sty) < stp < max(stx,sty), + + and that the derivative at stx is negative in the direction + of the step. + + The subroutine statement is + + subroutine dcstep(stx,fx,dx,sty,fy,dy,stp,fp,dp,brackt, + stpmin,stpmax) + + where + + stx is a double precision variable. + On entry stx is the best step obtained so far and is an + endpoint of the interval that contains the minimizer. + On exit stx is the updated best step. + + fx is a double precision variable. + On entry fx is the function at stx. + On exit fx is the function at stx. + + dx is a double precision variable. + On entry dx is the derivative of the function at + stx. The derivative must be negative in the direction of + the step, that is, dx and stp - stx must have opposite + signs. + On exit dx is the derivative of the function at stx. + + sty is a double precision variable. + On entry sty is the second endpoint of the interval that + contains the minimizer. + On exit sty is the updated endpoint of the interval that + contains the minimizer. + + fy is a double precision variable. + On entry fy is the function at sty. 
+ On exit fy is the function at sty. + + dy is a double precision variable. + On entry dy is the derivative of the function at sty. + On exit dy is the derivative of the function at the exit sty. + + stp is a double precision variable. + On entry stp is the current step. If brackt is set to .true. + then on input stp must be between stx and sty. + On exit stp is a new trial step. + + fp is a double precision variable. + On entry fp is the function at stp + On exit fp is unchanged. + + dp is a double precision variable. + On entry dp is the derivative of the function at stp. + On exit dp is unchanged. + + brackt is an logical variable. + On entry brackt specifies if a minimizer has been bracketed. + Initially brackt must be set to .false. + On exit brackt specifies if a minimizer has been bracketed. + When a minimizer is bracketed brackt is set to .true. + + stpmin is a double precision variable. + On entry stpmin is a lower bound for the step. + On exit stpmin is unchanged. + + stpmax is a double precision variable. + On entry stpmax is an upper bound for the step. + On exit stpmax is unchanged. + + MINPACK-1 Project. June 1983 + Argonne National Laboratory. + Jorge J. More' and David J. Thuente. + + MINPACK-2 Project. November 1993. + Argonne National Laboratory and University of Minnesota. + Brett M. Averick and Jorge J. More'. + + """ + sgn_dp = np.sign(dp) + sgn_dx = np.sign(dx) + + # sgnd = dp * (dx / abs(dx)) + sgnd = sgn_dp * sgn_dx + + # First case: A higher function value. The minimum is bracketed. + # If the cubic step is closer to stx than the quadratic step, the + # cubic step is taken, otherwise the average of the cubic and + # quadratic steps is taken. + if fp > fx: + theta = 3.0 * (fx - fp) / (stp - stx) + dx + dp + s = max(abs(theta), abs(dx), abs(dp)) + gamma = s * np.sqrt((theta / s) ** 2 - (dx / s) * (dp / s)) + if stp < stx: + gamma *= -1 + p = (gamma - dx) + theta + q = ((gamma - dx) + gamma) + dp + r = p / q + stpc = stx + r * (stp - stx) + stpq = stx + ((dx / ((fx - fp) / (stp - stx) + dx)) / 2.0) * (stp - stx) + if abs(stpc - stx) <= abs(stpq - stx): + stpf = stpc + else: + stpf = stpc + (stpq - stpc) / 2.0 + brackt = True + elif sgnd < 0.0: + # Second case: A lower function value and derivatives of opposite + # sign. The minimum is bracketed. If the cubic step is farther from + # stp than the secant step, the cubic step is taken, otherwise the + # secant step is taken. + theta = 3 * (fx - fp) / (stp - stx) + dx + dp + s = max(abs(theta), abs(dx), abs(dp)) + gamma = s * np.sqrt((theta / s) ** 2 - (dx / s) * (dp / s)) + if stp > stx: + gamma *= -1 + p = (gamma - dp) + theta + q = ((gamma - dp) + gamma) + dx + r = p / q + stpc = stp + r * (stx - stp) + stpq = stp + (dp / (dp - dx)) * (stx - stp) + if abs(stpc - stp) > abs(stpq - stp): + stpf = stpc + else: + stpf = stpq + brackt = True + elif abs(dp) < abs(dx): + # Third case: A lower function value, derivatives of the same sign, + # and the magnitude of the derivative decreases. + + # The cubic step is computed only if the cubic tends to infinity + # in the direction of the step or if the minimum of the cubic + # is beyond stp. Otherwise the cubic step is defined to be the + # secant step. + theta = 3 * (fx - fp) / (stp - stx) + dx + dp + s = max(abs(theta), abs(dx), abs(dp)) + + # The case gamma = 0 only arises if the cubic does not tend + # to infinity in the direction of the step. 
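+        # The ``max(0, ...)`` below keeps the radicand non-negative so that
+        # ``np.sqrt`` cannot return NaN here; a radicand that would be
+        # negative collapses gamma to 0 (the case described above).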
+ gamma = s * np.sqrt(max(0, (theta / s) ** 2 - (dx / s) * (dp / s))) + if stp > stx: + gamma = -gamma + p = (gamma - dp) + theta + q = (gamma + (dx - dp)) + gamma + r = p / q + if r < 0 and gamma != 0: + stpc = stp + r * (stx - stp) + elif stp > stx: + stpc = stpmax + else: + stpc = stpmin + stpq = stp + (dp / (dp - dx)) * (stx - stp) + + if brackt: + # A minimizer has been bracketed. If the cubic step is + # closer to stp than the secant step, the cubic step is + # taken, otherwise the secant step is taken. + if abs(stpc - stp) < abs(stpq - stp): + stpf = stpc + else: + stpf = stpq + + if stp > stx: + stpf = min(stp + 0.66 * (sty - stp), stpf) + else: + stpf = max(stp + 0.66 * (sty - stp), stpf) + else: + # A minimizer has not been bracketed. If the cubic step is + # farther from stp than the secant step, the cubic step is + # taken, otherwise the secant step is taken. + if abs(stpc - stp) > abs(stpq - stp): + stpf = stpc + else: + stpf = stpq + stpf = np.clip(stpf, stpmin, stpmax) + + else: + # Fourth case: A lower function value, derivatives of the same sign, + # and the magnitude of the derivative does not decrease. If the + # minimum is not bracketed, the step is either stpmin or stpmax, + # otherwise the cubic step is taken. + if brackt: + theta = 3.0 * (fp - fy) / (sty - stp) + dy + dp + s = max(abs(theta), abs(dy), abs(dp)) + gamma = s * np.sqrt((theta / s) ** 2 - (dy / s) * (dp / s)) + if stp > sty: + gamma = -gamma + p = (gamma - dp) + theta + q = ((gamma - dp) + gamma) + dy + r = p / q + stpc = stp + r * (sty - stp) + stpf = stpc + elif stp > stx: + stpf = stpmax + else: + stpf = stpmin + + # Update the interval which contains a minimizer. + if fp > fx: + sty = stp + fy = fp + dy = dp + else: + if sgnd < 0: + sty = stx + fy = fx + dy = dx + stx = stp + fx = fp + dx = dp + + # Compute the new step. + stp = stpf + + return stx, fx, dx, sty, fy, dy, stp, brackt diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_differentiable_functions.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_differentiable_functions.py new file mode 100644 index 0000000000000000000000000000000000000000..5df1196abf56e4b3e2dbc9cf3c879f8631c6d083 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_differentiable_functions.py @@ -0,0 +1,646 @@ +import numpy as np +import scipy.sparse as sps +from ._numdiff import approx_derivative, group_columns +from ._hessian_update_strategy import HessianUpdateStrategy +from scipy.sparse.linalg import LinearOperator +from scipy._lib._array_api import atleast_nd, array_namespace + + +FD_METHODS = ('2-point', '3-point', 'cs') + + +class ScalarFunction: + """Scalar function and its derivatives. + + This class defines a scalar function F: R^n->R and methods for + computing or approximating its first and second derivatives. + + Parameters + ---------- + fun : callable + evaluates the scalar function. Must be of the form ``fun(x, *args)``, + where ``x`` is the argument in the form of a 1-D array and ``args`` is + a tuple of any additional fixed parameters needed to completely specify + the function. Should return a scalar. + x0 : array-like + Provides an initial set of variables for evaluating fun. Array of real + elements of size (n,), where 'n' is the number of independent + variables. + args : tuple, optional + Any additional fixed parameters needed to completely specify the scalar + function. + grad : {callable, '2-point', '3-point', 'cs'} + Method for computing the gradient vector. 
+ If it is a callable, it should be a function that returns the gradient + vector: + + ``grad(x, *args) -> array_like, shape (n,)`` + + where ``x`` is an array with shape (n,) and ``args`` is a tuple with + the fixed parameters. + Alternatively, the keywords {'2-point', '3-point', 'cs'} can be used + to select a finite difference scheme for numerical estimation of the + gradient with a relative step size. These finite difference schemes + obey any specified `bounds`. + hess : {callable, '2-point', '3-point', 'cs', HessianUpdateStrategy} + Method for computing the Hessian matrix. If it is callable, it should + return the Hessian matrix: + + ``hess(x, *args) -> {LinearOperator, spmatrix, array}, (n, n)`` + + where x is a (n,) ndarray and `args` is a tuple with the fixed + parameters. Alternatively, the keywords {'2-point', '3-point', 'cs'} + select a finite difference scheme for numerical estimation. Or, objects + implementing `HessianUpdateStrategy` interface can be used to + approximate the Hessian. + Whenever the gradient is estimated via finite-differences, the Hessian + cannot be estimated with options {'2-point', '3-point', 'cs'} and needs + to be estimated using one of the quasi-Newton strategies. + finite_diff_rel_step : None or array_like + Relative step size to use. The absolute step size is computed as + ``h = finite_diff_rel_step * sign(x0) * max(1, abs(x0))``, possibly + adjusted to fit into the bounds. For ``method='3-point'`` the sign + of `h` is ignored. If None then finite_diff_rel_step is selected + automatically, + finite_diff_bounds : tuple of array_like + Lower and upper bounds on independent variables. Defaults to no bounds, + (-np.inf, np.inf). Each bound must match the size of `x0` or be a + scalar, in the latter case the bound will be the same for all + variables. Use it to limit the range of function evaluation. + epsilon : None or array_like, optional + Absolute step size to use, possibly adjusted to fit into the bounds. + For ``method='3-point'`` the sign of `epsilon` is ignored. By default + relative steps are used, only if ``epsilon is not None`` are absolute + steps used. + + Notes + ----- + This class implements a memoization logic. There are methods `fun`, + `grad`, hess` and corresponding attributes `f`, `g` and `H`. The following + things should be considered: + + 1. Use only public methods `fun`, `grad` and `hess`. + 2. After one of the methods is called, the corresponding attribute + will be set. However, a subsequent call with a different argument + of *any* of the methods may overwrite the attribute. + """ + def __init__(self, fun, x0, args, grad, hess, finite_diff_rel_step, + finite_diff_bounds, epsilon=None): + if not callable(grad) and grad not in FD_METHODS: + raise ValueError( + f"`grad` must be either callable or one of {FD_METHODS}." + ) + + if not (callable(hess) or hess in FD_METHODS + or isinstance(hess, HessianUpdateStrategy)): + raise ValueError( + f"`hess` must be either callable, HessianUpdateStrategy" + f" or one of {FD_METHODS}." 
+ ) + + if grad in FD_METHODS and hess in FD_METHODS: + raise ValueError("Whenever the gradient is estimated via " + "finite-differences, we require the Hessian " + "to be estimated using one of the " + "quasi-Newton strategies.") + + self.xp = xp = array_namespace(x0) + _x = atleast_nd(x0, ndim=1, xp=xp) + _dtype = xp.float64 + if xp.isdtype(_x.dtype, "real floating"): + _dtype = _x.dtype + + # promotes to floating + self.x = xp.astype(_x, _dtype) + self.x_dtype = _dtype + self.n = self.x.size + self.nfev = 0 + self.ngev = 0 + self.nhev = 0 + self.f_updated = False + self.g_updated = False + self.H_updated = False + + self._lowest_x = None + self._lowest_f = np.inf + + finite_diff_options = {} + if grad in FD_METHODS: + finite_diff_options["method"] = grad + finite_diff_options["rel_step"] = finite_diff_rel_step + finite_diff_options["abs_step"] = epsilon + finite_diff_options["bounds"] = finite_diff_bounds + if hess in FD_METHODS: + finite_diff_options["method"] = hess + finite_diff_options["rel_step"] = finite_diff_rel_step + finite_diff_options["abs_step"] = epsilon + finite_diff_options["as_linear_operator"] = True + + # Function evaluation + def fun_wrapped(x): + self.nfev += 1 + # Send a copy because the user may overwrite it. + # Overwriting results in undefined behaviour because + # fun(self.x) will change self.x, with the two no longer linked. + fx = fun(np.copy(x), *args) + # Make sure the function returns a true scalar + if not np.isscalar(fx): + try: + fx = np.asarray(fx).item() + except (TypeError, ValueError) as e: + raise ValueError( + "The user-provided objective function " + "must return a scalar value." + ) from e + + if fx < self._lowest_f: + self._lowest_x = x + self._lowest_f = fx + + return fx + + def update_fun(): + self.f = fun_wrapped(self.x) + + self._update_fun_impl = update_fun + self._update_fun() + + # Gradient evaluation + if callable(grad): + def grad_wrapped(x): + self.ngev += 1 + return np.atleast_1d(grad(np.copy(x), *args)) + + def update_grad(): + self.g = grad_wrapped(self.x) + + elif grad in FD_METHODS: + def update_grad(): + self._update_fun() + self.ngev += 1 + self.g = approx_derivative(fun_wrapped, self.x, f0=self.f, + **finite_diff_options) + + self._update_grad_impl = update_grad + self._update_grad() + + # Hessian Evaluation + if callable(hess): + self.H = hess(np.copy(x0), *args) + self.H_updated = True + self.nhev += 1 + + if sps.issparse(self.H): + def hess_wrapped(x): + self.nhev += 1 + return sps.csr_matrix(hess(np.copy(x), *args)) + self.H = sps.csr_matrix(self.H) + + elif isinstance(self.H, LinearOperator): + def hess_wrapped(x): + self.nhev += 1 + return hess(np.copy(x), *args) + + else: + def hess_wrapped(x): + self.nhev += 1 + return np.atleast_2d(np.asarray(hess(np.copy(x), *args))) + self.H = np.atleast_2d(np.asarray(self.H)) + + def update_hess(): + self.H = hess_wrapped(self.x) + + elif hess in FD_METHODS: + def update_hess(): + self._update_grad() + self.H = approx_derivative(grad_wrapped, self.x, f0=self.g, + **finite_diff_options) + return self.H + + update_hess() + self.H_updated = True + elif isinstance(hess, HessianUpdateStrategy): + self.H = hess + self.H.initialize(self.n, 'hess') + self.H_updated = True + self.x_prev = None + self.g_prev = None + + def update_hess(): + self._update_grad() + self.H.update(self.x - self.x_prev, self.g - self.g_prev) + + self._update_hess_impl = update_hess + + if isinstance(hess, HessianUpdateStrategy): + def update_x(x): + self._update_grad() + self.x_prev = self.x + self.g_prev = self.g + 
# ensure that self.x is a copy of x. Don't store a reference + # otherwise the memoization doesn't work properly. + + _x = atleast_nd(x, ndim=1, xp=self.xp) + self.x = self.xp.astype(_x, self.x_dtype) + self.f_updated = False + self.g_updated = False + self.H_updated = False + self._update_hess() + else: + def update_x(x): + # ensure that self.x is a copy of x. Don't store a reference + # otherwise the memoization doesn't work properly. + _x = atleast_nd(x, ndim=1, xp=self.xp) + self.x = self.xp.astype(_x, self.x_dtype) + self.f_updated = False + self.g_updated = False + self.H_updated = False + self._update_x_impl = update_x + + def _update_fun(self): + if not self.f_updated: + self._update_fun_impl() + self.f_updated = True + + def _update_grad(self): + if not self.g_updated: + self._update_grad_impl() + self.g_updated = True + + def _update_hess(self): + if not self.H_updated: + self._update_hess_impl() + self.H_updated = True + + def fun(self, x): + if not np.array_equal(x, self.x): + self._update_x_impl(x) + self._update_fun() + return self.f + + def grad(self, x): + if not np.array_equal(x, self.x): + self._update_x_impl(x) + self._update_grad() + return self.g + + def hess(self, x): + if not np.array_equal(x, self.x): + self._update_x_impl(x) + self._update_hess() + return self.H + + def fun_and_grad(self, x): + if not np.array_equal(x, self.x): + self._update_x_impl(x) + self._update_fun() + self._update_grad() + return self.f, self.g + + +class VectorFunction: + """Vector function and its derivatives. + + This class defines a vector function F: R^n->R^m and methods for + computing or approximating its first and second derivatives. + + Notes + ----- + This class implements a memoization logic. There are methods `fun`, + `jac`, hess` and corresponding attributes `f`, `J` and `H`. The following + things should be considered: + + 1. Use only public methods `fun`, `jac` and `hess`. + 2. After one of the methods is called, the corresponding attribute + will be set. However, a subsequent call with a different argument + of *any* of the methods may overwrite the attribute. 
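+
+    Examples
+    --------
+    A minimal sketch (this assumes the module is importable as
+    ``scipy.optimize._differentiable_functions`` and that `BFGS` is the
+    public quasi-Newton strategy from `scipy.optimize`):
+
+    >>> import numpy as np
+    >>> from scipy.optimize import BFGS
+    >>> from scipy.optimize._differentiable_functions import VectorFunction
+    >>> def fun(x):
+    ...     return np.array([x[0]**2 + x[1], x[0] - x[1]**2])
+    >>> vf = VectorFunction(fun, np.array([1.0, 2.0]), jac='2-point',
+    ...                     hess=BFGS(), finite_diff_rel_step=None,
+    ...                     finite_diff_jac_sparsity=None,
+    ...                     finite_diff_bounds=(-np.inf, np.inf),
+    ...                     sparse_jacobian=False)
+    >>> np.allclose(vf.jac(np.array([1.0, 2.0])), [[2.0, 1.0], [1.0, -4.0]])
+    True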
+ """ + def __init__(self, fun, x0, jac, hess, + finite_diff_rel_step, finite_diff_jac_sparsity, + finite_diff_bounds, sparse_jacobian): + if not callable(jac) and jac not in FD_METHODS: + raise ValueError(f"`jac` must be either callable or one of {FD_METHODS}.") + + if not (callable(hess) or hess in FD_METHODS + or isinstance(hess, HessianUpdateStrategy)): + raise ValueError("`hess` must be either callable," + f"HessianUpdateStrategy or one of {FD_METHODS}.") + + if jac in FD_METHODS and hess in FD_METHODS: + raise ValueError("Whenever the Jacobian is estimated via " + "finite-differences, we require the Hessian to " + "be estimated using one of the quasi-Newton " + "strategies.") + + self.xp = xp = array_namespace(x0) + _x = atleast_nd(x0, ndim=1, xp=xp) + _dtype = xp.float64 + if xp.isdtype(_x.dtype, "real floating"): + _dtype = _x.dtype + + # promotes to floating + self.x = xp.astype(_x, _dtype) + self.x_dtype = _dtype + + self.n = self.x.size + self.nfev = 0 + self.njev = 0 + self.nhev = 0 + self.f_updated = False + self.J_updated = False + self.H_updated = False + + finite_diff_options = {} + if jac in FD_METHODS: + finite_diff_options["method"] = jac + finite_diff_options["rel_step"] = finite_diff_rel_step + if finite_diff_jac_sparsity is not None: + sparsity_groups = group_columns(finite_diff_jac_sparsity) + finite_diff_options["sparsity"] = (finite_diff_jac_sparsity, + sparsity_groups) + finite_diff_options["bounds"] = finite_diff_bounds + self.x_diff = np.copy(self.x) + if hess in FD_METHODS: + finite_diff_options["method"] = hess + finite_diff_options["rel_step"] = finite_diff_rel_step + finite_diff_options["as_linear_operator"] = True + self.x_diff = np.copy(self.x) + if jac in FD_METHODS and hess in FD_METHODS: + raise ValueError("Whenever the Jacobian is estimated via " + "finite-differences, we require the Hessian to " + "be estimated using one of the quasi-Newton " + "strategies.") + + # Function evaluation + def fun_wrapped(x): + self.nfev += 1 + return np.atleast_1d(fun(x)) + + def update_fun(): + self.f = fun_wrapped(self.x) + + self._update_fun_impl = update_fun + update_fun() + + self.v = np.zeros_like(self.f) + self.m = self.v.size + + # Jacobian Evaluation + if callable(jac): + self.J = jac(self.x) + self.J_updated = True + self.njev += 1 + + if (sparse_jacobian or + sparse_jacobian is None and sps.issparse(self.J)): + def jac_wrapped(x): + self.njev += 1 + return sps.csr_matrix(jac(x)) + self.J = sps.csr_matrix(self.J) + self.sparse_jacobian = True + + elif sps.issparse(self.J): + def jac_wrapped(x): + self.njev += 1 + return jac(x).toarray() + self.J = self.J.toarray() + self.sparse_jacobian = False + + else: + def jac_wrapped(x): + self.njev += 1 + return np.atleast_2d(jac(x)) + self.J = np.atleast_2d(self.J) + self.sparse_jacobian = False + + def update_jac(): + self.J = jac_wrapped(self.x) + + elif jac in FD_METHODS: + self.J = approx_derivative(fun_wrapped, self.x, f0=self.f, + **finite_diff_options) + self.J_updated = True + + if (sparse_jacobian or + sparse_jacobian is None and sps.issparse(self.J)): + def update_jac(): + self._update_fun() + self.J = sps.csr_matrix( + approx_derivative(fun_wrapped, self.x, f0=self.f, + **finite_diff_options)) + self.J = sps.csr_matrix(self.J) + self.sparse_jacobian = True + + elif sps.issparse(self.J): + def update_jac(): + self._update_fun() + self.J = approx_derivative(fun_wrapped, self.x, f0=self.f, + **finite_diff_options).toarray() + self.J = self.J.toarray() + self.sparse_jacobian = False + + else: + def update_jac(): 
+ self._update_fun() + self.J = np.atleast_2d( + approx_derivative(fun_wrapped, self.x, f0=self.f, + **finite_diff_options)) + self.J = np.atleast_2d(self.J) + self.sparse_jacobian = False + + self._update_jac_impl = update_jac + + # Define Hessian + if callable(hess): + self.H = hess(self.x, self.v) + self.H_updated = True + self.nhev += 1 + + if sps.issparse(self.H): + def hess_wrapped(x, v): + self.nhev += 1 + return sps.csr_matrix(hess(x, v)) + self.H = sps.csr_matrix(self.H) + + elif isinstance(self.H, LinearOperator): + def hess_wrapped(x, v): + self.nhev += 1 + return hess(x, v) + + else: + def hess_wrapped(x, v): + self.nhev += 1 + return np.atleast_2d(np.asarray(hess(x, v))) + self.H = np.atleast_2d(np.asarray(self.H)) + + def update_hess(): + self.H = hess_wrapped(self.x, self.v) + elif hess in FD_METHODS: + def jac_dot_v(x, v): + return jac_wrapped(x).T.dot(v) + + def update_hess(): + self._update_jac() + self.H = approx_derivative(jac_dot_v, self.x, + f0=self.J.T.dot(self.v), + args=(self.v,), + **finite_diff_options) + update_hess() + self.H_updated = True + elif isinstance(hess, HessianUpdateStrategy): + self.H = hess + self.H.initialize(self.n, 'hess') + self.H_updated = True + self.x_prev = None + self.J_prev = None + + def update_hess(): + self._update_jac() + # When v is updated before x was updated, then x_prev and + # J_prev are None and we need this check. + if self.x_prev is not None and self.J_prev is not None: + delta_x = self.x - self.x_prev + delta_g = self.J.T.dot(self.v) - self.J_prev.T.dot(self.v) + self.H.update(delta_x, delta_g) + + self._update_hess_impl = update_hess + + if isinstance(hess, HessianUpdateStrategy): + def update_x(x): + self._update_jac() + self.x_prev = self.x + self.J_prev = self.J + _x = atleast_nd(x, ndim=1, xp=self.xp) + self.x = self.xp.astype(_x, self.x_dtype) + self.f_updated = False + self.J_updated = False + self.H_updated = False + self._update_hess() + else: + def update_x(x): + _x = atleast_nd(x, ndim=1, xp=self.xp) + self.x = self.xp.astype(_x, self.x_dtype) + self.f_updated = False + self.J_updated = False + self.H_updated = False + + self._update_x_impl = update_x + + def _update_v(self, v): + if not np.array_equal(v, self.v): + self.v = v + self.H_updated = False + + def _update_x(self, x): + if not np.array_equal(x, self.x): + self._update_x_impl(x) + + def _update_fun(self): + if not self.f_updated: + self._update_fun_impl() + self.f_updated = True + + def _update_jac(self): + if not self.J_updated: + self._update_jac_impl() + self.J_updated = True + + def _update_hess(self): + if not self.H_updated: + self._update_hess_impl() + self.H_updated = True + + def fun(self, x): + self._update_x(x) + self._update_fun() + return self.f + + def jac(self, x): + self._update_x(x) + self._update_jac() + return self.J + + def hess(self, x, v): + # v should be updated before x. + self._update_v(v) + self._update_x(x) + self._update_hess() + return self.H + + +class LinearVectorFunction: + """Linear vector function and its derivatives. + + Defines a linear function F = A x, where x is N-D vector and + A is m-by-n matrix. The Jacobian is constant and equals to A. The Hessian + is identically zero and it is returned as a csr matrix. 
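+
+    Examples
+    --------
+    A minimal sketch (this assumes the module is importable as
+    ``scipy.optimize._differentiable_functions``):
+
+    >>> import numpy as np
+    >>> from scipy.optimize._differentiable_functions import (
+    ...     LinearVectorFunction)
+    >>> A = np.array([[1.0, 2.0], [3.0, 4.0]])
+    >>> lvf = LinearVectorFunction(A, x0=np.zeros(2), sparse_jacobian=False)
+    >>> lvf.fun(np.array([1.0, 1.0]))
+    array([3., 7.])
+    >>> lvf.jac(np.array([1.0, 1.0])) is lvf.J  # the Jacobian is constant
+    True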
+ """ + def __init__(self, A, x0, sparse_jacobian): + if sparse_jacobian or sparse_jacobian is None and sps.issparse(A): + self.J = sps.csr_matrix(A) + self.sparse_jacobian = True + elif sps.issparse(A): + self.J = A.toarray() + self.sparse_jacobian = False + else: + # np.asarray makes sure A is ndarray and not matrix + self.J = np.atleast_2d(np.asarray(A)) + self.sparse_jacobian = False + + self.m, self.n = self.J.shape + + self.xp = xp = array_namespace(x0) + _x = atleast_nd(x0, ndim=1, xp=xp) + _dtype = xp.float64 + if xp.isdtype(_x.dtype, "real floating"): + _dtype = _x.dtype + + # promotes to floating + self.x = xp.astype(_x, _dtype) + self.x_dtype = _dtype + + self.f = self.J.dot(self.x) + self.f_updated = True + + self.v = np.zeros(self.m, dtype=float) + self.H = sps.csr_matrix((self.n, self.n)) + + def _update_x(self, x): + if not np.array_equal(x, self.x): + _x = atleast_nd(x, ndim=1, xp=self.xp) + self.x = self.xp.astype(_x, self.x_dtype) + self.f_updated = False + + def fun(self, x): + self._update_x(x) + if not self.f_updated: + self.f = self.J.dot(x) + self.f_updated = True + return self.f + + def jac(self, x): + self._update_x(x) + return self.J + + def hess(self, x, v): + self._update_x(x) + self.v = v + return self.H + + +class IdentityVectorFunction(LinearVectorFunction): + """Identity vector function and its derivatives. + + The Jacobian is the identity matrix, returned as a dense array when + `sparse_jacobian=False` and as a csr matrix otherwise. The Hessian is + identically zero and it is returned as a csr matrix. + """ + def __init__(self, x0, sparse_jacobian): + n = len(x0) + if sparse_jacobian or sparse_jacobian is None: + A = sps.eye(n, format='csr') + sparse_jacobian = True + else: + A = np.eye(n) + sparse_jacobian = False + super().__init__(A, x0, sparse_jacobian) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_differentiate.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_differentiate.py new file mode 100644 index 0000000000000000000000000000000000000000..0b59a4fdd9924857d9191b5532387ff085a466a5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_differentiate.py @@ -0,0 +1,669 @@ +# mypy: disable-error-code="attr-defined" +import numpy as np +import scipy._lib._elementwise_iterative_method as eim +from scipy._lib._util import _RichResult + +_EERRORINCREASE = -1 # used in _differentiate + +def _differentiate_iv(func, x, args, atol, rtol, maxiter, order, initial_step, + step_factor, step_direction, preserve_shape, callback): + # Input validation for `_differentiate` + + if not callable(func): + raise ValueError('`func` must be callable.') + + # x has more complex IV that is taken care of during initialization + x = np.asarray(x) + dtype = x.dtype if np.issubdtype(x.dtype, np.inexact) else np.float64 + + if not np.iterable(args): + args = (args,) + + if atol is None: + atol = np.finfo(dtype).tiny + + if rtol is None: + rtol = np.sqrt(np.finfo(dtype).eps) + + message = 'Tolerances and step parameters must be non-negative scalars.' 
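+    # The four scalar parameters are validated together below; note that only
+    # `initial_step` and `step_factor` are then cast to the working dtype,
+    # while `atol` and `rtol` keep the values assigned above.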
+ tols = np.asarray([atol, rtol, initial_step, step_factor]) + if (not np.issubdtype(tols.dtype, np.number) + or np.any(tols < 0) + or tols.shape != (4,)): + raise ValueError(message) + initial_step, step_factor = tols[2:].astype(dtype) + + maxiter_int = int(maxiter) + if maxiter != maxiter_int or maxiter <= 0: + raise ValueError('`maxiter` must be a positive integer.') + + order_int = int(order) + if order_int != order or order <= 0: + raise ValueError('`order` must be a positive integer.') + + step_direction = np.sign(step_direction).astype(dtype) + x, step_direction = np.broadcast_arrays(x, step_direction) + x, step_direction = x[()], step_direction[()] + + message = '`preserve_shape` must be True or False.' + if preserve_shape not in {True, False}: + raise ValueError(message) + + if callback is not None and not callable(callback): + raise ValueError('`callback` must be callable.') + + return (func, x, args, atol, rtol, maxiter_int, order_int, initial_step, + step_factor, step_direction, preserve_shape, callback) + + +def _differentiate(func, x, *, args=(), atol=None, rtol=None, maxiter=10, + order=8, initial_step=0.5, step_factor=2.0, + step_direction=0, preserve_shape=False, callback=None): + """Evaluate the derivative of an elementwise scalar function numerically. + + Parameters + ---------- + func : callable + The function whose derivative is desired. The signature must be:: + + func(x: ndarray, *fargs) -> ndarray + + where each element of ``x`` is a finite real and ``fargs`` is a tuple, + which may contain an arbitrary number of arrays that are broadcastable + with `x`. ``func`` must be an elementwise function: each element + ``func(x)[i]`` must equal ``func(x[i])`` for all indices ``i``. + x : array_like + Abscissae at which to evaluate the derivative. + args : tuple, optional + Additional positional arguments to be passed to `func`. Must be arrays + broadcastable with `x`. If the callable to be differentiated requires + arguments that are not broadcastable with `x`, wrap that callable with + `func`. See Examples. + atol, rtol : float, optional + Absolute and relative tolerances for the stopping condition: iteration + will stop when ``res.error < atol + rtol * abs(res.df)``. The default + `atol` is the smallest normal number of the appropriate dtype, and + the default `rtol` is the square root of the precision of the + appropriate dtype. + order : int, default: 8 + The (positive integer) order of the finite difference formula to be + used. Odd integers will be rounded up to the next even integer. + initial_step : float, default: 0.5 + The (absolute) initial step size for the finite difference derivative + approximation. + step_factor : float, default: 2.0 + The factor by which the step size is *reduced* in each iteration; i.e. + the step size in iteration 1 is ``initial_step/step_factor``. If + ``step_factor < 1``, subsequent steps will be greater than the initial + step; this may be useful if steps smaller than some threshold are + undesirable (e.g. due to subtractive cancellation error). + maxiter : int, default: 10 + The maximum number of iterations of the algorithm to perform. See + notes. + step_direction : array_like + An array representing the direction of the finite difference steps (for + use when `x` lies near to the boundary of the domain of the function.) + Must be broadcastable with `x` and all `args`. + Where 0 (default), central differences are used; where negative (e.g. + -1), steps are non-positive; and where positive (e.g. 1), all steps are + non-negative. 
+ preserve_shape : bool, default: False + In the following, "arguments of `func`" refers to the array ``x`` and + any arrays within ``fargs``. Let ``shape`` be the broadcasted shape + of `x` and all elements of `args` (which is conceptually + distinct from ``fargs`` passed into `f`). + + - When ``preserve_shape=False`` (default), `f` must accept arguments + of *any* broadcastable shapes. + + - When ``preserve_shape=True``, `f` must accept arguments of shape + ``shape`` *or* ``shape + (n,)``, where ``(n,)`` is the number of + abscissae at which the function is being evaluated. + + In either case, for each scalar element ``xi`` within `x`, the array + returned by `f` must include the scalar ``f(xi)`` at the same index. + Consequently, the shape of the output is always the shape of the input + ``x``. + + See Examples. + callback : callable, optional + An optional user-supplied function to be called before the first + iteration and after each iteration. + Called as ``callback(res)``, where ``res`` is a ``_RichResult`` + similar to that returned by `_differentiate` (but containing the + current iterate's values of all variables). If `callback` raises a + ``StopIteration``, the algorithm will terminate immediately and + `_differentiate` will return a result. + + Returns + ------- + res : _RichResult + An instance of `scipy._lib._util._RichResult` with the following + attributes. (The descriptions are written as though the values will be + scalars; however, if `func` returns an array, the outputs will be + arrays of the same shape.) + + success : bool + ``True`` when the algorithm terminated successfully (status ``0``). + status : int + An integer representing the exit status of the algorithm. + ``0`` : The algorithm converged to the specified tolerances. + ``-1`` : The error estimate increased, so iteration was terminated. + ``-2`` : The maximum number of iterations was reached. + ``-3`` : A non-finite value was encountered. + ``-4`` : Iteration was terminated by `callback`. + ``1`` : The algorithm is proceeding normally (in `callback` only). + df : float + The derivative of `func` at `x`, if the algorithm terminated + successfully. + error : float + An estimate of the error: the magnitude of the difference between + the current estimate of the derivative and the estimate in the + previous iteration. + nit : int + The number of iterations performed. + nfev : int + The number of points at which `func` was evaluated. + x : float + The value at which the derivative of `func` was evaluated + (after broadcasting with `args` and `step_direction`). + + Notes + ----- + The implementation was inspired by jacobi [1]_, numdifftools [2]_, and + DERIVEST [3]_, but the implementation follows the theory of Taylor series + more straightforwardly (and arguably naively so). + In the first iteration, the derivative is estimated using a finite + difference formula of order `order` with maximum step size `initial_step`. + Each subsequent iteration, the maximum step size is reduced by + `step_factor`, and the derivative is estimated again until a termination + condition is reached. The error estimate is the magnitude of the difference + between the current derivative approximation and that of the previous + iteration. 
+
+    The stencils of the finite difference formulae are designed such that
+    abscissae are "nested": after `func` is evaluated at ``order + 1``
+    points in the first iteration, `func` is evaluated at only two new points
+    in each subsequent iteration; ``order - 1`` previously evaluated function
+    values required by the finite difference formula are reused, and two
+    function values (evaluations at the points furthest from `x`) are unused.
+
+    Step sizes are absolute. When the step size is small relative to the
+    magnitude of `x`, precision is lost; for example, if `x` is ``1e20``, the
+    default initial step size of ``0.5`` cannot be resolved. Accordingly,
+    consider using larger initial step sizes for large magnitudes of `x`.
+
+    The default tolerances are challenging to satisfy at points where the
+    true derivative is exactly zero. If the derivative may be exactly zero,
+    consider specifying an absolute tolerance (e.g. ``atol=1e-16``) to
+    improve convergence.
+
+    References
+    ----------
+    .. [1] Hans Dembinski (@HDembinski). jacobi.
+           https://github.com/HDembinski/jacobi
+    .. [2] Per A. Brodtkorb and John D'Errico. numdifftools.
+           https://numdifftools.readthedocs.io/en/latest/
+    .. [3] John D'Errico. DERIVEST: Adaptive Robust Numerical Differentiation.
+           https://www.mathworks.com/matlabcentral/fileexchange/13490-adaptive-robust-numerical-differentiation
+    .. [4] Numerical Differentiation. Wikipedia.
+           https://en.wikipedia.org/wiki/Numerical_differentiation
+
+    Examples
+    --------
+    Evaluate the derivative of ``np.exp`` at several points ``x``.
+
+    >>> import numpy as np
+    >>> from scipy.optimize._differentiate import _differentiate
+    >>> f = np.exp
+    >>> df = np.exp  # true derivative
+    >>> x = np.linspace(1, 2, 5)
+    >>> res = _differentiate(f, x)
+    >>> res.df  # approximation of the derivative
+    array([2.71828183, 3.49034296, 4.48168907, 5.75460268, 7.3890561 ])
+    >>> res.error  # estimate of the error
+    array(
+        [7.12940817e-12, 9.16688947e-12, 1.17594823e-11, 1.50972568e-11, 1.93942640e-11]
+    )
+    >>> abs(res.df - df(x))  # true error
+    array(
+        [3.06421555e-14, 3.01980663e-14, 5.06261699e-14, 6.30606678e-14, 8.34887715e-14]
+    )
+
+    Show the convergence of the approximation as the step size is reduced.
+    In each iteration, the step size is reduced by `step_factor`, so for a
+    sufficiently small initial step, each iteration reduces the error by a
+    factor of ``1/step_factor**order`` until finite precision arithmetic
+    inhibits further improvement.
+
+    >>> import matplotlib.pyplot as plt
+    >>> iter = list(range(1, 12))  # maximum iterations
+    >>> hfac = 2  # step size reduction per iteration
+    >>> hdir = [-1, 0, 1]  # compare left-, central-, and right- steps
+    >>> order = 4  # order of differentiation formula
+    >>> x = 1
+    >>> ref = df(x)
+    >>> errors = []  # true error
+    >>> for i in iter:
+    ...     res = _differentiate(f, x, maxiter=i, step_factor=hfac,
+    ...                          step_direction=hdir, order=order,
+    ...                          atol=0, rtol=0)  # prevent early termination
+    ...     errors.append(abs(res.df - ref))
+    >>> errors = np.array(errors)
+    >>> plt.semilogy(iter, errors[:, 0], label='left differences')
+    >>> plt.semilogy(iter, errors[:, 1], label='central differences')
+    >>> plt.semilogy(iter, errors[:, 2], label='right differences')
+    >>> plt.xlabel('iteration')
+    >>> plt.ylabel('error')
+    >>> plt.legend()
+    >>> plt.show()
+    >>> (errors[1, 1] / errors[0, 1], 1 / hfac**order)
+    (0.06215223140159822, 0.0625)
+
+    The implementation is vectorized over `x`, `step_direction`, and `args`.
+    The function is evaluated once before the first iteration to perform input
+    validation and standardization, and once per iteration thereafter.
+
+    >>> def f(x, p):
+    ...     f.nit += 1
+    ...     return x**p
+    >>> f.nit = 0
+    >>> def df(x, p):
+    ...     return p*x**(p-1)
+    >>> x = np.arange(1, 5)
+    >>> p = np.arange(1, 6).reshape((-1, 1))
+    >>> hdir = np.arange(-1, 2).reshape((-1, 1, 1))
+    >>> res = _differentiate(f, x, args=(p,), step_direction=hdir, maxiter=1)
+    >>> np.allclose(res.df, df(x, p))
+    True
+    >>> res.df.shape
+    (3, 5, 4)
+    >>> f.nit
+    2
+
+    By default, `preserve_shape` is False, and therefore the callable
+    `f` may be called with arrays of any broadcastable shapes.
+    For example:
+
+    >>> shapes = []
+    >>> def f(x, c):
+    ...     shape = np.broadcast_shapes(x.shape, c.shape)
+    ...     shapes.append(shape)
+    ...     return np.sin(c*x)
+    >>>
+    >>> c = [1, 5, 10, 20]
+    >>> res = _differentiate(f, 0, args=(c,))
+    >>> shapes
+    [(4,), (4, 8), (4, 2), (3, 2), (2, 2), (1, 2)]
+
+    To understand where these shapes are coming from - and to better
+    understand how `_differentiate` computes accurate results - note that
+    higher values of ``c`` correspond with higher frequency sinusoids.
+    The higher frequency sinusoids make the function's derivative change
+    faster, so more function evaluations are required to achieve the target
+    accuracy:
+
+    >>> res.nfev
+    array([11, 13, 15, 17])
+
+    The initial ``shape``, ``(4,)``, corresponds with evaluating the
+    function at a single abscissa and all four frequencies; this is used
+    for input validation and to determine the size and dtype of the arrays
+    that store results. The next shape corresponds with evaluating the
+    function at an initial grid of abscissae and all four frequencies.
+    Successive calls to the function evaluate the function at two more
+    abscissae, increasing the effective order of the approximation by two.
+    However, in later function evaluations, the function is evaluated at
+    fewer frequencies because the corresponding derivative has already
+    converged to the required tolerance. This saves function evaluations to
+    improve performance, but it requires the function to accept arguments of
+    any shape.
+
+    "Vector-valued" functions are unlikely to satisfy this requirement.
+    For example, consider
+
+    >>> def f(x):
+    ...     return [x, np.sin(3*x), x+np.sin(10*x), np.sin(20*x)*(x-1)**2]
+
+    This function is not compatible with `_differentiate` as written; for instance,
+    the shape of the output will not be the same as the shape of ``x``. Such a
+    function *could* be converted to a compatible form with the introduction of
+    additional parameters, but this would be inconvenient. In such cases,
+    a simpler solution would be to use `preserve_shape`.
+
+    >>> shapes = []
+    >>> def f(x):
+    ...     shapes.append(x.shape)
+    ...     x0, x1, x2, x3 = x
+    ...     return [x0, np.sin(3*x1), x2+np.sin(10*x2), np.sin(20*x3)*(x3-1)**2]
+    >>>
+    >>> x = np.zeros(4)
+    >>> res = _differentiate(f, x, preserve_shape=True)
+    >>> shapes
+    [(4,), (4, 8), (4, 2), (4, 2), (4, 2), (4, 2)]
+
+    Here, the shape of ``x`` is ``(4,)``. With ``preserve_shape=True``, the
+    function may be called with argument ``x`` of shape ``(4,)`` or ``(4, n)``,
+    and this is what we observe.
+
+    """
+    # TODO (followup):
+    # - investigate behavior at saddle points
+    # - array initial_step / step_factor?
+    # - multivariate functions?
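+
+    # Implementation overview: input validation and standardization happen in
+    # `_differentiate_iv`, a single function evaluation (via `eim._initialize`)
+    # fixes the result shape and dtype, and the iteration itself is delegated
+    # to `eim._loop`, which repeatedly calls the `pre_func_eval`,
+    # `post_func_eval`, and `check_termination` callables defined below.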
+ + res = _differentiate_iv(func, x, args, atol, rtol, maxiter, order, initial_step, + step_factor, step_direction, preserve_shape, callback) + (func, x, args, atol, rtol, maxiter, order, + h0, fac, hdir, preserve_shape, callback) = res + + # Initialization + # Since f(x) (no step) is not needed for central differences, it may be + # possible to eliminate this function evaluation. However, it's useful for + # input validation and standardization, and everything else is designed to + # reduce function calls, so let's keep it simple. + temp = eim._initialize(func, (x,), args, preserve_shape=preserve_shape) + func, xs, fs, args, shape, dtype = temp + x, f = xs[0], fs[0] + df = np.full_like(f, np.nan) + # Ideally we'd broadcast the shape of `hdir` in `_elementwise_algo_init`, but + # it's simpler to do it here than to generalize `_elementwise_algo_init` further. + # `hdir` and `x` are already broadcasted in `_differentiate_iv`, so we know + # that `hdir` can be broadcasted to the final shape. + hdir = np.broadcast_to(hdir, shape).flatten() + + status = np.full_like(x, eim._EINPROGRESS, dtype=int) # in progress + nit, nfev = 0, 1 # one function evaluations performed above + # Boolean indices of left, central, right, and (all) one-sided steps + il = hdir < 0 + ic = hdir == 0 + ir = hdir > 0 + io = il | ir + + # Most of these attributes are reasonably obvious, but: + # - `fs` holds all the function values of all active `x`. The zeroth + # axis corresponds with active points `x`, the first axis corresponds + # with the different steps (in the order described in + # `_differentiate_weights`). + # - `terms` (which could probably use a better name) is half the `order`, + # which is always even. + work = _RichResult(x=x, df=df, fs=f[:, np.newaxis], error=np.nan, h=h0, + df_last=np.nan, error_last=np.nan, h0=h0, fac=fac, + atol=atol, rtol=rtol, nit=nit, nfev=nfev, + status=status, dtype=dtype, terms=(order+1)//2, + hdir=hdir, il=il, ic=ic, ir=ir, io=io) + # This is the correspondence between terms in the `work` object and the + # final result. In this case, the mapping is trivial. Note that `success` + # is prepended automatically. + res_work_pairs = [('status', 'status'), ('df', 'df'), ('error', 'error'), + ('nit', 'nit'), ('nfev', 'nfev'), ('x', 'x')] + + def pre_func_eval(work): + """Determine the abscissae at which the function needs to be evaluated. + + See `_differentiate_weights` for a description of the stencil (pattern + of the abscissae). + + In the first iteration, there is only one stored function value in + `work.fs`, `f(x)`, so we need to evaluate at `order` new points. In + subsequent iterations, we evaluate at two new points. Note that + `work.x` is always flattened into a 1D array after broadcasting with + all `args`, so we add a new axis at the end and evaluate all point + in one call to the function. + + For improvement: + - Consider measuring the step size actually taken, since `(x + h) - x` + is not identically equal to `h` with floating point arithmetic. + - Adjust the step size automatically if `x` is too big to resolve the + step. + - We could probably save some work if there are no central difference + steps or no one-sided steps. 
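+
+        For illustration, with ``h = 0.5``, ``c = 2``, and ``n = 2`` (i.e.
+        ``order=4``), the first-iteration central offsets evaluate to:
+
+        >>> import numpy as np
+        >>> h, c, n = 0.5, 2.0, 2
+        >>> hc = h / c**np.arange(n)
+        >>> np.concatenate((-hc[::-1], hc)).tolist()
+        [-0.25, -0.5, 0.5, 0.25]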
+ """ + n = work.terms # half the order + h = work.h # step size + c = work.fac # step reduction factor + d = c**0.5 # square root of step reduction factor (one-sided stencil) + # Note - no need to be careful about dtypes until we allocate `x_eval` + + if work.nit == 0: + hc = h / c**np.arange(n) + hc = np.concatenate((-hc[::-1], hc)) + else: + hc = np.asarray([-h, h]) / c**(n-1) + + if work.nit == 0: + hr = h / d**np.arange(2*n) + else: + hr = np.asarray([h, h/d]) / c**(n-1) + + n_new = 2*n if work.nit == 0 else 2 # number of new abscissae + x_eval = np.zeros((len(work.hdir), n_new), dtype=work.dtype) + il, ic, ir = work.il, work.ic, work.ir + x_eval[ir] = work.x[ir, np.newaxis] + hr + x_eval[ic] = work.x[ic, np.newaxis] + hc + x_eval[il] = work.x[il, np.newaxis] - hr + return x_eval + + def post_func_eval(x, f, work): + """ Estimate the derivative and error from the function evaluations + + As in `pre_func_eval`: in the first iteration, there is only one stored + function value in `work.fs`, `f(x)`, so we need to add the `order` new + points. In subsequent iterations, we add two new points. The tricky + part is getting the order to match that of the weights, which is + described in `_differentiate_weights`. + + For improvement: + - Change the order of the weights (and steps in `pre_func_eval`) to + simplify `work_fc` concatenation and eliminate `fc` concatenation. + - It would be simple to do one-step Richardson extrapolation with `df` + and `df_last` to increase the order of the estimate and/or improve + the error estimate. + - Process the function evaluations in a more numerically favorable + way. For instance, combining the pairs of central difference evals + into a second-order approximation and using Richardson extrapolation + to produce a higher order approximation seemed to retain accuracy up + to very high order. + - Alternatively, we could use `polyfit` like Jacobi. An advantage of + fitting polynomial to more points than necessary is improved noise + tolerance. + """ + n = work.terms + n_new = n if work.nit == 0 else 1 + il, ic, io = work.il, work.ic, work.io + + # Central difference + # `work_fc` is *all* the points at which the function has been evaluated + # `fc` is the points we're using *this iteration* to produce the estimate + work_fc = (f[ic, :n_new], work.fs[ic, :], f[ic, -n_new:]) + work_fc = np.concatenate(work_fc, axis=-1) + if work.nit == 0: + fc = work_fc + else: + fc = (work_fc[:, :n], work_fc[:, n:n+1], work_fc[:, -n:]) + fc = np.concatenate(fc, axis=-1) + + # One-sided difference + work_fo = np.concatenate((work.fs[io, :], f[io, :]), axis=-1) + if work.nit == 0: + fo = work_fo + else: + fo = np.concatenate((work_fo[:, 0:1], work_fo[:, -2*n:]), axis=-1) + + work.fs = np.zeros((len(ic), work.fs.shape[-1] + 2*n_new)) + work.fs[ic] = work_fc + work.fs[io] = work_fo + + wc, wo = _differentiate_weights(work, n) + work.df_last = work.df.copy() + work.df[ic] = fc @ wc / work.h + work.df[io] = fo @ wo / work.h + work.df[il] *= -1 + + work.h /= work.fac + work.error_last = work.error + # Simple error estimate - the difference in derivative estimates between + # this iteration and the last. This is typically conservative because if + # convergence has begin, the true error is much closer to the difference + # between the current estimate and the *next* error estimate. 
However, + # we could use Richarson extrapolation to produce an error estimate that + # is one order higher, and take the difference between that and + # `work.df` (which would just be constant factor that depends on `fac`.) + work.error = abs(work.df - work.df_last) + + def check_termination(work): + """Terminate due to convergence, non-finite values, or error increase""" + stop = np.zeros_like(work.df).astype(bool) + + i = work.error < work.atol + work.rtol*abs(work.df) + work.status[i] = eim._ECONVERGED + stop[i] = True + + if work.nit > 0: + i = ~((np.isfinite(work.x) & np.isfinite(work.df)) | stop) + work.df[i], work.status[i] = np.nan, eim._EVALUEERR + stop[i] = True + + # With infinite precision, there is a step size below which + # all smaller step sizes will reduce the error. But in floating point + # arithmetic, catastrophic cancellation will begin to cause the error + # to increase again. This heuristic tries to avoid step sizes that are + # too small. There may be more theoretically sound approaches for + # detecting a step size that minimizes the total error, but this + # heuristic seems simple and effective. + i = (work.error > work.error_last*10) & ~stop + work.status[i] = _EERRORINCREASE + stop[i] = True + + return stop + + def post_termination_check(work): + return + + def customize_result(res, shape): + return shape + + return eim._loop(work, callback, shape, maxiter, func, args, dtype, + pre_func_eval, post_func_eval, check_termination, + post_termination_check, customize_result, res_work_pairs, + preserve_shape) + + +def _differentiate_weights(work, n): + # This produces the weights of the finite difference formula for a given + # stencil. In experiments, use of a second-order central difference formula + # with Richardson extrapolation was more accurate numerically, but it was + # more complicated, and it would have become even more complicated when + # adding support for one-sided differences. However, now that all the + # function evaluation values are stored, they can be processed in whatever + # way is desired to produce the derivative estimate. We leave alternative + # approaches to future work. To be more self-contained, here is the theory + # for deriving the weights below. + # + # Recall that the Taylor expansion of a univariate, scalar-values function + # about a point `x` may be expressed as: + # f(x + h) = f(x) + f'(x)*h + f''(x)/2!*h**2 + O(h**3) + # Suppose we evaluate f(x), f(x+h), and f(x-h). We have: + # f(x) = f(x) + # f(x + h) = f(x) + f'(x)*h + f''(x)/2!*h**2 + O(h**3) + # f(x - h) = f(x) - f'(x)*h + f''(x)/2!*h**2 + O(h**3) + # We can solve for weights `wi` such that: + # w1*f(x) = w1*(f(x)) + # + w2*f(x + h) = w2*(f(x) + f'(x)*h + f''(x)/2!*h**2) + O(h**3) + # + w3*f(x - h) = w3*(f(x) - f'(x)*h + f''(x)/2!*h**2) + O(h**3) + # = 0 + f'(x)*h + 0 + O(h**3) + # Then + # f'(x) ~ (w1*f(x) + w2*f(x+h) + w3*f(x-h))/h + # is a finite difference derivative approximation with error O(h**2), + # and so it is said to be a "second-order" approximation. Under certain + # conditions (e.g. well-behaved function, `h` sufficiently small), the + # error in the approximation will decrease with h**2; that is, if `h` is + # reduced by a factor of 2, the error is reduced by a factor of 4. + # + # By default, we use eighth-order formulae. Our central-difference formula + # uses abscissae: + # x-h/c**3, x-h/c**2, x-h/c, x-h, x, x+h, x+h/c, x+h/c**2, x+h/c**3 + # where `c` is the step factor. 
(Typically, the step factor is greater than + # one, so the outermost points - as written above - are actually closest to + # `x`.) This "stencil" is chosen so that each iteration, the step can be + # reduced by the factor `c`, and most of the function evaluations can be + # reused with the new step size. For example, in the next iteration, we + # will have: + # x-h/c**4, x-h/c**3, x-h/c**2, x-h/c, x, x+h/c, x+h/c**2, x+h/c**3, x+h/c**4 + # We do not reuse `x-h` and `x+h` for the new derivative estimate. + # While this would increase the order of the formula and thus the + # theoretical convergence rate, it is also less stable numerically. + # (As noted above, there are other ways of processing the values that are + # more stable. Thus, even now we store `f(x-h)` and `f(x+h)` in `work.fs` + # to simplify future development of this sort of improvement.) + # + # The (right) one-sided formula is produced similarly using abscissae + # x, x+h, x+h/d, x+h/d**2, ..., x+h/d**6, x+h/d**7, x+h/d**7 + # where `d` is the square root of `c`. (The left one-sided formula simply + # uses -h.) When the step size is reduced by factor `c = d**2`, we have + # abscissae: + # x, x+h/d**2, x+h/d**3..., x+h/d**8, x+h/d**9, x+h/d**9 + # `d` is chosen as the square root of `c` so that the rate of the step-size + # reduction is the same per iteration as in the central difference case. + # Note that because the central difference formulas are inherently of even + # order, for simplicity, we use only even-order formulas for one-sided + # differences, too. + + # It's possible for the user to specify `fac` in, say, double precision but + # `x` and `args` in single precision. `fac` gets converted to single + # precision, but we should always use double precision for the intermediate + # calculations here to avoid additional error in the weights. + fac = work.fac.astype(np.float64) + + # Note that if the user switches back to floating point precision with + # `x` and `args`, then `fac` will not necessarily equal the (lower + # precision) cached `_differentiate_weights.fac`, and the weights will + # need to be recalculated. This could be fixed, but it's late, and of + # low consequence. + if fac != _differentiate_weights.fac: + _differentiate_weights.central = [] + _differentiate_weights.right = [] + _differentiate_weights.fac = fac + + if len(_differentiate_weights.central) != 2*n + 1: + # Central difference weights. Consider refactoring this; it could + # probably be more compact. + i = np.arange(-n, n + 1) + p = np.abs(i) - 1. # center point has power `p` -1, but sign `s` is 0 + s = np.sign(i) + + h = s / fac ** p + A = np.vander(h, increasing=True).T + b = np.zeros(2*n + 1) + b[1] = 1 + weights = np.linalg.solve(A, b) + + # Enforce identities to improve accuracy + weights[n] = 0 + for i in range(n): + weights[-i-1] = -weights[i] + + # Cache the weights. We only need to calculate them once unless + # the step factor changes. + _differentiate_weights.central = weights + + # One-sided difference weights. The left one-sided weights (with + # negative steps) are simply the negative of the right one-sided + # weights, so no need to compute them separately. + i = np.arange(2*n + 1) + p = i - 1. 
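+        # For the one-sided stencil, the point with index 0 is `x` itself:
+        # its sign `s` is 0, so its normalized step `h` below is 0. The
+        # remaining points have powers 0, 1, ..., 2*n - 1 of 1/sqrt(fac).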
+ s = np.sign(i) + + h = s / np.sqrt(fac) ** p + A = np.vander(h, increasing=True).T + b = np.zeros(2 * n + 1) + b[1] = 1 + weights = np.linalg.solve(A, b) + + _differentiate_weights.right = weights + + return (_differentiate_weights.central.astype(work.dtype, copy=False), + _differentiate_weights.right.astype(work.dtype, copy=False)) +_differentiate_weights.central = [] +_differentiate_weights.right = [] +_differentiate_weights.fac = None diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_direct.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_direct.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..52d4dd69994a38d7db77949e42c646280926879f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_direct.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_direct_py.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_direct_py.py new file mode 100644 index 0000000000000000000000000000000000000000..440cbb5ae866462b6299b1e12d4a6ba1e407fd62 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_direct_py.py @@ -0,0 +1,278 @@ +from __future__ import annotations +from typing import ( # noqa: UP035 + Any, Callable, Iterable, TYPE_CHECKING +) + +import numpy as np +from scipy.optimize import OptimizeResult +from ._constraints import old_bound_to_new, Bounds +from ._direct import direct as _direct # type: ignore + +if TYPE_CHECKING: + import numpy.typing as npt + +__all__ = ['direct'] + +ERROR_MESSAGES = ( + "Number of function evaluations done is larger than maxfun={}", + "Number of iterations is larger than maxiter={}", + "u[i] < l[i] for some i", + "maxfun is too large", + "Initialization failed", + "There was an error in the creation of the sample points", + "An error occurred while the function was sampled", + "Maximum number of levels has been reached.", + "Forced stop", + "Invalid arguments", + "Out of memory", +) + +SUCCESS_MESSAGES = ( + ("The best function value found is within a relative error={} " + "of the (known) global optimum f_min"), + ("The volume of the hyperrectangle containing the lowest function value " + "found is below vol_tol={}"), + ("The side length measure of the hyperrectangle containing the lowest " + "function value found is below len_tol={}"), +) + + +def direct( + func: Callable[[npt.ArrayLike, tuple[Any]], float], + bounds: Iterable | Bounds, + *, + args: tuple = (), + eps: float = 1e-4, + maxfun: int | None = None, + maxiter: int = 1000, + locally_biased: bool = True, + f_min: float = -np.inf, + f_min_rtol: float = 1e-4, + vol_tol: float = 1e-16, + len_tol: float = 1e-6, + callback: Callable[[npt.ArrayLike], None] | None = None +) -> OptimizeResult: + """ + Finds the global minimum of a function using the + DIRECT algorithm. + + Parameters + ---------- + func : callable + The objective function to be minimized. + ``func(x, *args) -> float`` + where ``x`` is an 1-D array with shape (n,) and ``args`` is a tuple of + the fixed parameters needed to completely specify the function. + bounds : sequence or `Bounds` + Bounds for variables. There are two ways to specify the bounds: + + 1. Instance of `Bounds` class. + 2. ``(min, max)`` pairs for each element in ``x``. + + args : tuple, optional + Any additional fixed parameters needed to + completely specify the objective function. 
+ eps : float, optional + Minimal required difference of the objective function values + between the current best hyperrectangle and the next potentially + optimal hyperrectangle to be divided. In consequence, `eps` serves as a + tradeoff between local and global search: the smaller, the more local + the search becomes. Default is 1e-4. + maxfun : int or None, optional + Approximate upper bound on objective function evaluations. + If `None`, will be automatically set to ``1000 * N`` where ``N`` + represents the number of dimensions. Will be capped if necessary to + limit DIRECT's RAM usage to app. 1GiB. This will only occur for very + high dimensional problems and excessive `max_fun`. Default is `None`. + maxiter : int, optional + Maximum number of iterations. Default is 1000. + locally_biased : bool, optional + If `True` (default), use the locally biased variant of the + algorithm known as DIRECT_L. If `False`, use the original unbiased + DIRECT algorithm. For hard problems with many local minima, + `False` is recommended. + f_min : float, optional + Function value of the global optimum. Set this value only if the + global optimum is known. Default is ``-np.inf``, so that this + termination criterion is deactivated. + f_min_rtol : float, optional + Terminate the optimization once the relative error between the + current best minimum `f` and the supplied global minimum `f_min` + is smaller than `f_min_rtol`. This parameter is only used if + `f_min` is also set. Must lie between 0 and 1. Default is 1e-4. + vol_tol : float, optional + Terminate the optimization once the volume of the hyperrectangle + containing the lowest function value is smaller than `vol_tol` + of the complete search space. Must lie between 0 and 1. + Default is 1e-16. + len_tol : float, optional + If `locally_biased=True`, terminate the optimization once half of + the normalized maximal side length of the hyperrectangle containing + the lowest function value is smaller than `len_tol`. + If `locally_biased=False`, terminate the optimization once half of + the normalized diagonal of the hyperrectangle containing the lowest + function value is smaller than `len_tol`. Must lie between 0 and 1. + Default is 1e-6. + callback : callable, optional + A callback function with signature ``callback(xk)`` where ``xk`` + represents the best function value found so far. + + Returns + ------- + res : OptimizeResult + The optimization result represented as a ``OptimizeResult`` object. + Important attributes are: ``x`` the solution array, ``success`` a + Boolean flag indicating if the optimizer exited successfully and + ``message`` which describes the cause of the termination. See + `OptimizeResult` for a description of other attributes. + + Notes + ----- + DIviding RECTangles (DIRECT) is a deterministic global + optimization algorithm capable of minimizing a black box function with + its variables subject to lower and upper bound constraints by sampling + potential solutions in the search space [1]_. The algorithm starts by + normalising the search space to an n-dimensional unit hypercube. + It samples the function at the center of this hypercube and at 2n + (n is the number of variables) more points, 2 in each coordinate + direction. Using these function values, DIRECT then divides the + domain into hyperrectangles, each having exactly one of the sampling + points as its center. In each iteration, DIRECT chooses, using the `eps` + parameter which defaults to 1e-4, some of the existing hyperrectangles + to be further divided. 
This division process continues until either the + maximum number of iterations or maximum function evaluations allowed + are exceeded, or the hyperrectangle containing the minimal value found + so far becomes small enough. If `f_min` is specified, the optimization + will stop once this function value is reached within a relative tolerance. + The locally biased variant of DIRECT (originally called DIRECT_L) [2]_ is + used by default. It makes the search more locally biased and more + efficient for cases with only a few local minima. + + A note about termination criteria: `vol_tol` refers to the volume of the + hyperrectangle containing the lowest function value found so far. This + volume decreases exponentially with increasing dimensionality of the + problem. Therefore `vol_tol` should be decreased to avoid premature + termination of the algorithm for higher dimensions. This does not hold + for `len_tol`: it refers either to half of the maximal side length + (for ``locally_biased=True``) or half of the diagonal of the + hyperrectangle (for ``locally_biased=False``). + + This code is based on the DIRECT 2.0.4 Fortran code by Gablonsky et al. at + https://ctk.math.ncsu.edu/SOFTWARE/DIRECTv204.tar.gz . + This original version was initially converted via f2c and then cleaned up + and reorganized by Steven G. Johnson, August 2007, for the NLopt project. + The `direct` function wraps the C implementation. + + .. versionadded:: 1.9.0 + + References + ---------- + .. [1] Jones, D.R., Perttunen, C.D. & Stuckman, B.E. Lipschitzian + optimization without the Lipschitz constant. J Optim Theory Appl + 79, 157-181 (1993). + .. [2] Gablonsky, J., Kelley, C. A Locally-Biased form of the DIRECT + Algorithm. Journal of Global Optimization 21, 27-37 (2001). + + Examples + -------- + The following example is a 2-D problem with four local minima: minimizing + the Styblinski-Tang function + (https://en.wikipedia.org/wiki/Test_functions_for_optimization). + + >>> from scipy.optimize import direct, Bounds + >>> def styblinski_tang(pos): + ... x, y = pos + ... return 0.5 * (x**4 - 16*x**2 + 5*x + y**4 - 16*y**2 + 5*y) + >>> bounds = Bounds([-4., -4.], [4., 4.]) + >>> result = direct(styblinski_tang, bounds) + >>> result.x, result.fun, result.nfev + array([-2.90321597, -2.90321597]), -78.3323279095383, 2011 + + The correct global minimum was found but with a huge number of function + evaluations (2011). Loosening the termination tolerances `vol_tol` and + `len_tol` can be used to stop DIRECT earlier. 
+ + >>> result = direct(styblinski_tang, bounds, len_tol=1e-3) + >>> result.x, result.fun, result.nfev + array([-2.9044353, -2.9044353]), -78.33230330754142, 207 + + """ + # convert bounds to new Bounds class if necessary + if not isinstance(bounds, Bounds): + if isinstance(bounds, list) or isinstance(bounds, tuple): + lb, ub = old_bound_to_new(bounds) + bounds = Bounds(lb, ub) + else: + message = ("bounds must be a sequence or " + "instance of Bounds class") + raise ValueError(message) + + lb = np.ascontiguousarray(bounds.lb, dtype=np.float64) + ub = np.ascontiguousarray(bounds.ub, dtype=np.float64) + + # validate bounds + # check that lower bounds are smaller than upper bounds + if not np.all(lb < ub): + raise ValueError('Bounds are not consistent min < max') + # check for infs + if (np.any(np.isinf(lb)) or np.any(np.isinf(ub))): + raise ValueError("Bounds must not be inf.") + + # validate tolerances + if (vol_tol < 0 or vol_tol > 1): + raise ValueError("vol_tol must be between 0 and 1.") + if (len_tol < 0 or len_tol > 1): + raise ValueError("len_tol must be between 0 and 1.") + if (f_min_rtol < 0 or f_min_rtol > 1): + raise ValueError("f_min_rtol must be between 0 and 1.") + + # validate maxfun and maxiter + if maxfun is None: + maxfun = 1000 * lb.shape[0] + if not isinstance(maxfun, int): + raise ValueError("maxfun must be of type int.") + if maxfun < 0: + raise ValueError("maxfun must be > 0.") + if not isinstance(maxiter, int): + raise ValueError("maxiter must be of type int.") + if maxiter < 0: + raise ValueError("maxiter must be > 0.") + + # validate boolean parameters + if not isinstance(locally_biased, bool): + raise ValueError("locally_biased must be True or False.") + + def _func_wrap(x, args=None): + x = np.asarray(x) + if args is None: + f = func(x) + else: + f = func(x, *args) + # always return a float + return np.asarray(f).item() + + # TODO: fix disp argument + x, fun, ret_code, nfev, nit = _direct( + _func_wrap, + np.asarray(lb), np.asarray(ub), + args, + False, eps, maxfun, maxiter, + locally_biased, + f_min, f_min_rtol, + vol_tol, len_tol, callback + ) + + format_val = (maxfun, maxiter, f_min_rtol, vol_tol, len_tol) + if ret_code > 2: + message = SUCCESS_MESSAGES[ret_code - 3].format( + format_val[ret_code - 1]) + elif 0 < ret_code <= 2: + message = ERROR_MESSAGES[ret_code - 1].format(format_val[ret_code - 1]) + elif 0 > ret_code > -100: + message = ERROR_MESSAGES[abs(ret_code) + 1] + else: + message = ERROR_MESSAGES[ret_code + 99] + + return OptimizeResult(x=np.asarray(x), fun=fun, status=ret_code, + success=ret_code > 2, message=message, + nfev=nfev, nit=nit) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_dual_annealing.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_dual_annealing.py new file mode 100644 index 0000000000000000000000000000000000000000..0dd9eed9dbf9670ee6c435fa2cbd60e55971e804 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_dual_annealing.py @@ -0,0 +1,715 @@ +# Dual Annealing implementation. +# Copyright (c) 2018 Sylvain Gubian , +# Yang Xiang +# Author: Sylvain Gubian, Yang Xiang, PMP S.A. 
+ +""" +A Dual Annealing global optimization algorithm +""" + +import numpy as np +from scipy.optimize import OptimizeResult +from scipy.optimize import minimize, Bounds +from scipy.special import gammaln +from scipy._lib._util import check_random_state +from scipy.optimize._constraints import new_bounds_to_old + +__all__ = ['dual_annealing'] + + +class VisitingDistribution: + """ + Class used to generate new coordinates based on the distorted + Cauchy-Lorentz distribution. Depending on the steps within the strategy + chain, the class implements the strategy for generating new location + changes. + + Parameters + ---------- + lb : array_like + A 1-D NumPy ndarray containing lower bounds of the generated + components. Neither NaN or inf are allowed. + ub : array_like + A 1-D NumPy ndarray containing upper bounds for the generated + components. Neither NaN or inf are allowed. + visiting_param : float + Parameter for visiting distribution. Default value is 2.62. + Higher values give the visiting distribution a heavier tail, this + makes the algorithm jump to a more distant region. + The value range is (1, 3]. Its value is fixed for the life of the + object. + rand_gen : {`~numpy.random.RandomState`, `~numpy.random.Generator`} + A `~numpy.random.RandomState`, `~numpy.random.Generator` object + for using the current state of the created random generator container. + + """ + TAIL_LIMIT = 1.e8 + MIN_VISIT_BOUND = 1.e-10 + + def __init__(self, lb, ub, visiting_param, rand_gen): + # if you wish to make _visiting_param adjustable during the life of + # the object then _factor2, _factor3, _factor5, _d1, _factor6 will + # have to be dynamically calculated in `visit_fn`. They're factored + # out here so they don't need to be recalculated all the time. + self._visiting_param = visiting_param + self.rand_gen = rand_gen + self.lower = lb + self.upper = ub + self.bound_range = ub - lb + + # these are invariant numbers unless visiting_param changes + self._factor2 = np.exp((4.0 - self._visiting_param) * np.log( + self._visiting_param - 1.0)) + self._factor3 = np.exp((2.0 - self._visiting_param) * np.log(2.0) + / (self._visiting_param - 1.0)) + self._factor4_p = np.sqrt(np.pi) * self._factor2 / (self._factor3 * ( + 3.0 - self._visiting_param)) + + self._factor5 = 1.0 / (self._visiting_param - 1.0) - 0.5 + self._d1 = 2.0 - self._factor5 + self._factor6 = np.pi * (1.0 - self._factor5) / np.sin( + np.pi * (1.0 - self._factor5)) / np.exp(gammaln(self._d1)) + + def visiting(self, x, step, temperature): + """ Based on the step in the strategy chain, new coordinates are + generated by changing all components is the same time or only + one of them, the new values are computed with visit_fn method + """ + dim = x.size + if step < dim: + # Changing all coordinates with a new visiting value + visits = self.visit_fn(temperature, dim) + upper_sample, lower_sample = self.rand_gen.uniform(size=2) + visits[visits > self.TAIL_LIMIT] = self.TAIL_LIMIT * upper_sample + visits[visits < -self.TAIL_LIMIT] = -self.TAIL_LIMIT * lower_sample + x_visit = visits + x + a = x_visit - self.lower + b = np.fmod(a, self.bound_range) + self.bound_range + x_visit = np.fmod(b, self.bound_range) + self.lower + x_visit[np.fabs( + x_visit - self.lower) < self.MIN_VISIT_BOUND] += 1.e-10 + else: + # Changing only one coordinate at a time based on strategy + # chain step + x_visit = np.copy(x) + visit = self.visit_fn(temperature, 1)[0] + if visit > self.TAIL_LIMIT: + visit = self.TAIL_LIMIT * self.rand_gen.uniform() + elif visit < -self.TAIL_LIMIT: 
+                visit = -self.TAIL_LIMIT * self.rand_gen.uniform()
+            index = step - dim
+            x_visit[index] = visit + x[index]
+            a = x_visit[index] - self.lower[index]
+            b = np.fmod(a, self.bound_range[index]) + self.bound_range[index]
+            x_visit[index] = np.fmod(b, self.bound_range[
+                index]) + self.lower[index]
+            if np.fabs(x_visit[index] - self.lower[
+                    index]) < self.MIN_VISIT_BOUND:
+                x_visit[index] += self.MIN_VISIT_BOUND
+        return x_visit
+
+    def visit_fn(self, temperature, dim):
+        """ Formula Visita from p. 405 of reference [2] """
+        x, y = self.rand_gen.normal(size=(dim, 2)).T
+
+        factor1 = np.exp(np.log(temperature) / (self._visiting_param - 1.0))
+        factor4 = self._factor4_p * factor1
+
+        # sigmax
+        x *= np.exp(-(self._visiting_param - 1.0) * np.log(
+            self._factor6 / factor4) / (3.0 - self._visiting_param))
+
+        den = np.exp((self._visiting_param - 1.0) * np.log(np.fabs(y)) /
+                     (3.0 - self._visiting_param))
+
+        return x / den
+
+
+class EnergyState:
+    """
+    Class used to record the energy state. At any time, it knows the
+    currently used coordinates and the most recent best location.
+
+    Parameters
+    ----------
+    lower : array_like
+        A 1-D NumPy ndarray containing lower bounds for generating the
+        initial random components in the `reset` method.
+    upper : array_like
+        A 1-D NumPy ndarray containing upper bounds for generating the
+        initial random components in the `reset` method.
+        Neither NaN nor inf is allowed.
+    callback : callable, ``callback(x, f, context)``, optional
+        A callback function which will be called for all minima found.
+        ``x`` and ``f`` are the coordinates and function value of the
+        latest minimum found, and ``context`` has value in [0, 1, 2].
+    """
+    # Maximum number of trials for generating a valid starting point
+    MAX_REINIT_COUNT = 1000
+
+    def __init__(self, lower, upper, callback=None):
+        self.ebest = None
+        self.current_energy = None
+        self.current_location = None
+        self.xbest = None
+        self.lower = lower
+        self.upper = upper
+        self.callback = callback
+
+    def reset(self, func_wrapper, rand_gen, x0=None):
+        """
+        Initialize the current location in the search domain. If `x0` is not
+        provided, a random location within the bounds is generated.
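+        If the objective function returns a non-finite value at the chosen
+        location, a new random location is drawn (up to ``MAX_REINIT_COUNT``
+        times) before an error is raised.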
+ """ + if x0 is None: + self.current_location = rand_gen.uniform(self.lower, self.upper, + size=len(self.lower)) + else: + self.current_location = np.copy(x0) + init_error = True + reinit_counter = 0 + while init_error: + self.current_energy = func_wrapper.fun(self.current_location) + if self.current_energy is None: + raise ValueError('Objective function is returning None') + if (not np.isfinite(self.current_energy) or np.isnan( + self.current_energy)): + if reinit_counter >= EnergyState.MAX_REINIT_COUNT: + init_error = False + message = ( + 'Stopping algorithm because function ' + 'create NaN or (+/-) infinity values even with ' + 'trying new random parameters' + ) + raise ValueError(message) + self.current_location = rand_gen.uniform(self.lower, + self.upper, + size=self.lower.size) + reinit_counter += 1 + else: + init_error = False + # If first time reset, initialize ebest and xbest + if self.ebest is None and self.xbest is None: + self.ebest = self.current_energy + self.xbest = np.copy(self.current_location) + # Otherwise, we keep them in case of reannealing reset + + def update_best(self, e, x, context): + self.ebest = e + self.xbest = np.copy(x) + if self.callback is not None: + val = self.callback(x, e, context) + if val is not None: + if val: + return ('Callback function requested to stop early by ' + 'returning True') + + def update_current(self, e, x): + self.current_energy = e + self.current_location = np.copy(x) + + +class StrategyChain: + """ + Class that implements within a Markov chain the strategy for location + acceptance and local search decision making. + + Parameters + ---------- + acceptance_param : float + Parameter for acceptance distribution. It is used to control the + probability of acceptance. The lower the acceptance parameter, the + smaller the probability of acceptance. Default value is -5.0 with + a range (-1e4, -5]. + visit_dist : VisitingDistribution + Instance of `VisitingDistribution` class. + func_wrapper : ObjectiveFunWrapper + Instance of `ObjectiveFunWrapper` class. + minimizer_wrapper: LocalSearchWrapper + Instance of `LocalSearchWrapper` class. + rand_gen : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance then + that instance is used. + energy_state: EnergyState + Instance of `EnergyState` class. 
+ + """ + + def __init__(self, acceptance_param, visit_dist, func_wrapper, + minimizer_wrapper, rand_gen, energy_state): + # Local strategy chain minimum energy and location + self.emin = energy_state.current_energy + self.xmin = np.array(energy_state.current_location) + # Global optimizer state + self.energy_state = energy_state + # Acceptance parameter + self.acceptance_param = acceptance_param + # Visiting distribution instance + self.visit_dist = visit_dist + # Wrapper to objective function + self.func_wrapper = func_wrapper + # Wrapper to the local minimizer + self.minimizer_wrapper = minimizer_wrapper + self.not_improved_idx = 0 + self.not_improved_max_idx = 1000 + self._rand_gen = rand_gen + self.temperature_step = 0 + self.K = 100 * len(energy_state.current_location) + + def accept_reject(self, j, e, x_visit): + r = self._rand_gen.uniform() + pqv_temp = 1.0 - ((1.0 - self.acceptance_param) * + (e - self.energy_state.current_energy) / self.temperature_step) + if pqv_temp <= 0.: + pqv = 0. + else: + pqv = np.exp(np.log(pqv_temp) / ( + 1. - self.acceptance_param)) + + if r <= pqv: + # We accept the new location and update state + self.energy_state.update_current(e, x_visit) + self.xmin = np.copy(self.energy_state.current_location) + + # No improvement for a long time + if self.not_improved_idx >= self.not_improved_max_idx: + if j == 0 or self.energy_state.current_energy < self.emin: + self.emin = self.energy_state.current_energy + self.xmin = np.copy(self.energy_state.current_location) + + def run(self, step, temperature): + self.temperature_step = temperature / float(step + 1) + self.not_improved_idx += 1 + for j in range(self.energy_state.current_location.size * 2): + if j == 0: + if step == 0: + self.energy_state_improved = True + else: + self.energy_state_improved = False + x_visit = self.visit_dist.visiting( + self.energy_state.current_location, j, temperature) + # Calling the objective function + e = self.func_wrapper.fun(x_visit) + if e < self.energy_state.current_energy: + # We have got a better energy value + self.energy_state.update_current(e, x_visit) + if e < self.energy_state.ebest: + val = self.energy_state.update_best(e, x_visit, 0) + if val is not None: + if val: + return val + self.energy_state_improved = True + self.not_improved_idx = 0 + else: + # We have not improved but do we accept the new location? 
+ self.accept_reject(j, e, x_visit) + if self.func_wrapper.nfev >= self.func_wrapper.maxfun: + return ('Maximum number of function call reached ' + 'during annealing') + # End of StrategyChain loop + + def local_search(self): + # Decision making for performing a local search + # based on strategy chain results + # If energy has been improved or no improvement since too long, + # performing a local search with the best strategy chain location + if self.energy_state_improved: + # Global energy has improved, let's see if LS improves further + e, x = self.minimizer_wrapper.local_search(self.energy_state.xbest, + self.energy_state.ebest) + if e < self.energy_state.ebest: + self.not_improved_idx = 0 + val = self.energy_state.update_best(e, x, 1) + if val is not None: + if val: + return val + self.energy_state.update_current(e, x) + if self.func_wrapper.nfev >= self.func_wrapper.maxfun: + return ('Maximum number of function call reached ' + 'during local search') + # Check probability of a need to perform a LS even if no improvement + do_ls = False + if self.K < 90 * len(self.energy_state.current_location): + pls = np.exp(self.K * ( + self.energy_state.ebest - self.energy_state.current_energy) / + self.temperature_step) + if pls >= self._rand_gen.uniform(): + do_ls = True + # Global energy not improved, let's see what LS gives + # on the best strategy chain location + if self.not_improved_idx >= self.not_improved_max_idx: + do_ls = True + if do_ls: + e, x = self.minimizer_wrapper.local_search(self.xmin, self.emin) + self.xmin = np.copy(x) + self.emin = e + self.not_improved_idx = 0 + self.not_improved_max_idx = self.energy_state.current_location.size + if e < self.energy_state.ebest: + val = self.energy_state.update_best( + self.emin, self.xmin, 2) + if val is not None: + if val: + return val + self.energy_state.update_current(e, x) + if self.func_wrapper.nfev >= self.func_wrapper.maxfun: + return ('Maximum number of function call reached ' + 'during dual annealing') + + +class ObjectiveFunWrapper: + + def __init__(self, func, maxfun=1e7, *args): + self.func = func + self.args = args + # Number of objective function evaluations + self.nfev = 0 + # Number of gradient function evaluation if used + self.ngev = 0 + # Number of hessian of the objective function if used + self.nhev = 0 + self.maxfun = maxfun + + def fun(self, x): + self.nfev += 1 + return self.func(x, *self.args) + + +class LocalSearchWrapper: + """ + Class used to wrap around the minimizer used for local search + Default local minimizer is SciPy minimizer L-BFGS-B + """ + + LS_MAXITER_RATIO = 6 + LS_MAXITER_MIN = 100 + LS_MAXITER_MAX = 1000 + + def __init__(self, search_bounds, func_wrapper, *args, **kwargs): + self.func_wrapper = func_wrapper + self.kwargs = kwargs + self.jac = self.kwargs.get('jac', None) + self.minimizer = minimize + bounds_list = list(zip(*search_bounds)) + self.lower = np.array(bounds_list[0]) + self.upper = np.array(bounds_list[1]) + + # If no minimizer specified, use SciPy minimize with 'L-BFGS-B' method + if not self.kwargs: + n = len(self.lower) + ls_max_iter = min(max(n * self.LS_MAXITER_RATIO, + self.LS_MAXITER_MIN), + self.LS_MAXITER_MAX) + self.kwargs['method'] = 'L-BFGS-B' + self.kwargs['options'] = { + 'maxiter': ls_max_iter, + } + self.kwargs['bounds'] = list(zip(self.lower, self.upper)) + elif callable(self.jac): + def wrapped_jac(x): + return self.jac(x, *args) + self.kwargs['jac'] = wrapped_jac + + def local_search(self, x, e): + # Run local search from the given x location where energy value is e + 
x_tmp = np.copy(x) + mres = self.minimizer(self.func_wrapper.fun, x, **self.kwargs) + if 'njev' in mres: + self.func_wrapper.ngev += mres.njev + if 'nhev' in mres: + self.func_wrapper.nhev += mres.nhev + # Check if is valid value + is_finite = np.all(np.isfinite(mres.x)) and np.isfinite(mres.fun) + in_bounds = np.all(mres.x >= self.lower) and np.all( + mres.x <= self.upper) + is_valid = is_finite and in_bounds + + # Use the new point only if it is valid and return a better results + if is_valid and mres.fun < e: + return mres.fun, mres.x + else: + return e, x_tmp + + +def dual_annealing(func, bounds, args=(), maxiter=1000, + minimizer_kwargs=None, initial_temp=5230., + restart_temp_ratio=2.e-5, visit=2.62, accept=-5.0, + maxfun=1e7, seed=None, no_local_search=False, + callback=None, x0=None): + """ + Find the global minimum of a function using Dual Annealing. + + Parameters + ---------- + func : callable + The objective function to be minimized. Must be in the form + ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array + and ``args`` is a tuple of any additional fixed parameters needed to + completely specify the function. + bounds : sequence or `Bounds` + Bounds for variables. There are two ways to specify the bounds: + + 1. Instance of `Bounds` class. + 2. Sequence of ``(min, max)`` pairs for each element in `x`. + + args : tuple, optional + Any additional fixed parameters needed to completely specify the + objective function. + maxiter : int, optional + The maximum number of global search iterations. Default value is 1000. + minimizer_kwargs : dict, optional + Extra keyword arguments to be passed to the local minimizer + (`minimize`). Some important options could be: + ``method`` for the minimizer method to use and ``args`` for + objective function additional arguments. + initial_temp : float, optional + The initial temperature, use higher values to facilitates a wider + search of the energy landscape, allowing dual_annealing to escape + local minima that it is trapped in. Default value is 5230. Range is + (0.01, 5.e4]. + restart_temp_ratio : float, optional + During the annealing process, temperature is decreasing, when it + reaches ``initial_temp * restart_temp_ratio``, the reannealing process + is triggered. Default value of the ratio is 2e-5. Range is (0, 1). + visit : float, optional + Parameter for visiting distribution. Default value is 2.62. Higher + values give the visiting distribution a heavier tail, this makes + the algorithm jump to a more distant region. The value range is (1, 3]. + accept : float, optional + Parameter for acceptance distribution. It is used to control the + probability of acceptance. The lower the acceptance parameter, the + smaller the probability of acceptance. Default value is -5.0 with + a range (-1e4, -5]. + maxfun : int, optional + Soft limit for the number of objective function calls. If the + algorithm is in the middle of a local search, this number will be + exceeded, the algorithm will stop just after the local search is + done. Default value is 1e7. + seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance then + that instance is used. + Specify `seed` for repeatable minimizations. 
The random numbers + generated with this seed only affect the visiting distribution function + and new coordinates generation. + no_local_search : bool, optional + If `no_local_search` is set to True, a traditional Generalized + Simulated Annealing will be performed with no local search + strategy applied. + callback : callable, optional + A callback function with signature ``callback(x, f, context)``, + which will be called for all minima found. + ``x`` and ``f`` are the coordinates and function value of the + latest minimum found, and ``context`` has value in [0, 1, 2], with the + following meaning: + + - 0: minimum detected in the annealing process. + - 1: detection occurred in the local search process. + - 2: detection done in the dual annealing process. + + If the callback implementation returns True, the algorithm will stop. + x0 : ndarray, shape(n,), optional + Coordinates of a single N-D starting point. + + Returns + ------- + res : OptimizeResult + The optimization result represented as a `OptimizeResult` object. + Important attributes are: ``x`` the solution array, ``fun`` the value + of the function at the solution, and ``message`` which describes the + cause of the termination. + See `OptimizeResult` for a description of other attributes. + + Notes + ----- + This function implements the Dual Annealing optimization. This stochastic + approach derived from [3]_ combines the generalization of CSA (Classical + Simulated Annealing) and FSA (Fast Simulated Annealing) [1]_ [2]_ coupled + to a strategy for applying a local search on accepted locations [4]_. + An alternative implementation of this same algorithm is described in [5]_ + and benchmarks are presented in [6]_. This approach introduces an advanced + method to refine the solution found by the generalized annealing + process. This algorithm uses a distorted Cauchy-Lorentz visiting + distribution, with its shape controlled by the parameter :math:`q_{v}` + + .. math:: + + g_{q_{v}}(\\Delta x(t)) \\propto \\frac{ \\ + \\left[T_{q_{v}}(t) \\right]^{-\\frac{D}{3-q_{v}}}}{ \\ + \\left[{1+(q_{v}-1)\\frac{(\\Delta x(t))^{2}} { \\ + \\left[T_{q_{v}}(t)\\right]^{\\frac{2}{3-q_{v}}}}}\\right]^{ \\ + \\frac{1}{q_{v}-1}+\\frac{D-1}{2}}} + + Where :math:`t` is the artificial time. This visiting distribution is used + to generate a trial jump distance :math:`\\Delta x(t)` of variable + :math:`x(t)` under artificial temperature :math:`T_{q_{v}}(t)`. + + From the starting point, after calling the visiting distribution + function, the acceptance probability is computed as follows: + + .. math:: + + p_{q_{a}} = \\min{\\{1,\\left[1-(1-q_{a}) \\beta \\Delta E \\right]^{ \\ + \\frac{1}{1-q_{a}}}\\}} + + Where :math:`q_{a}` is a acceptance parameter. For :math:`q_{a}<1`, zero + acceptance probability is assigned to the cases where + + .. math:: + + [1-(1-q_{a}) \\beta \\Delta E] < 0 + + The artificial temperature :math:`T_{q_{v}}(t)` is decreased according to + + .. math:: + + T_{q_{v}}(t) = T_{q_{v}}(1) \\frac{2^{q_{v}-1}-1}{\\left( \\ + 1 + t\\right)^{q_{v}-1}-1} + + Where :math:`q_{v}` is the visiting parameter. + + .. versionadded:: 1.2.0 + + References + ---------- + .. [1] Tsallis C. Possible generalization of Boltzmann-Gibbs + statistics. Journal of Statistical Physics, 52, 479-487 (1998). + .. [2] Tsallis C, Stariolo DA. Generalized Simulated Annealing. + Physica A, 233, 395-406 (1996). + .. [3] Xiang Y, Sun DY, Fan W, Gong XG. Generalized Simulated + Annealing Algorithm and Its Application to the Thomson Model. 
+ Physics Letters A, 233, 216-220 (1997). + .. [4] Xiang Y, Gong XG. Efficiency of Generalized Simulated + Annealing. Physical Review E, 62, 4473 (2000). + .. [5] Xiang Y, Gubian S, Suomela B, Hoeng J. Generalized + Simulated Annealing for Efficient Global Optimization: the GenSA + Package for R. The R Journal, Volume 5/1 (2013). + .. [6] Mullen, K. Continuous Global Optimization in R. Journal of + Statistical Software, 60(6), 1 - 45, (2014). + :doi:`10.18637/jss.v060.i06` + + Examples + -------- + The following example is a 10-D problem, with many local minima. + The function involved is called Rastrigin + (https://en.wikipedia.org/wiki/Rastrigin_function) + + >>> import numpy as np + >>> from scipy.optimize import dual_annealing + >>> func = lambda x: np.sum(x*x - 10*np.cos(2*np.pi*x)) + 10*np.size(x) + >>> lw = [-5.12] * 10 + >>> up = [5.12] * 10 + >>> ret = dual_annealing(func, bounds=list(zip(lw, up))) + >>> ret.x + array([-4.26437714e-09, -3.91699361e-09, -1.86149218e-09, -3.97165720e-09, + -6.29151648e-09, -6.53145322e-09, -3.93616815e-09, -6.55623025e-09, + -6.05775280e-09, -5.00668935e-09]) # random + >>> ret.fun + 0.000000 + + """ + + if isinstance(bounds, Bounds): + bounds = new_bounds_to_old(bounds.lb, bounds.ub, len(bounds.lb)) + + if x0 is not None and not len(x0) == len(bounds): + raise ValueError('Bounds size does not match x0') + + lu = list(zip(*bounds)) + lower = np.array(lu[0]) + upper = np.array(lu[1]) + # Check that restart temperature ratio is correct + if restart_temp_ratio <= 0. or restart_temp_ratio >= 1.: + raise ValueError('Restart temperature ratio has to be in range (0, 1)') + # Checking bounds are valid + if (np.any(np.isinf(lower)) or np.any(np.isinf(upper)) or np.any( + np.isnan(lower)) or np.any(np.isnan(upper))): + raise ValueError('Some bounds values are inf values or nan values') + # Checking that bounds are consistent + if not np.all(lower < upper): + raise ValueError('Bounds are not consistent min < max') + # Checking that bounds are the same length + if not len(lower) == len(upper): + raise ValueError('Bounds do not have the same dimensions') + + # Wrapper for the objective function + func_wrapper = ObjectiveFunWrapper(func, maxfun, *args) + + # minimizer_kwargs has to be a dict, not None + minimizer_kwargs = minimizer_kwargs or {} + + minimizer_wrapper = LocalSearchWrapper( + bounds, func_wrapper, *args, **minimizer_kwargs) + + # Initialization of random Generator for reproducible runs if seed provided + rand_state = check_random_state(seed) + # Initialization of the energy state + energy_state = EnergyState(lower, upper, callback) + energy_state.reset(func_wrapper, rand_state, x0) + # Minimum value of annealing temperature reached to perform + # re-annealing + temperature_restart = initial_temp * restart_temp_ratio + # VisitingDistribution instance + visit_dist = VisitingDistribution(lower, upper, visit, rand_state) + # Strategy chain instance + strategy_chain = StrategyChain(accept, visit_dist, func_wrapper, + minimizer_wrapper, rand_state, energy_state) + need_to_stop = False + iteration = 0 + message = [] + # OptimizeResult object to be returned + optimize_res = OptimizeResult() + optimize_res.success = True + optimize_res.status = 0 + + t1 = np.exp((visit - 1) * np.log(2.0)) - 1.0 + # Run the search loop + while not need_to_stop: + for i in range(maxiter): + # Compute temperature for this step + s = float(i) + 2.0 + t2 = np.exp((visit - 1) * np.log(s)) - 1.0 + temperature = initial_temp * t1 / t2 + if iteration >= maxiter: + 
message.append("Maximum number of iteration reached") + need_to_stop = True + break + # Need a re-annealing process? + if temperature < temperature_restart: + energy_state.reset(func_wrapper, rand_state) + break + # starting strategy chain + val = strategy_chain.run(i, temperature) + if val is not None: + message.append(val) + need_to_stop = True + optimize_res.success = False + break + # Possible local search at the end of the strategy chain + if not no_local_search: + val = strategy_chain.local_search() + if val is not None: + message.append(val) + need_to_stop = True + optimize_res.success = False + break + iteration += 1 + + # Setting the OptimizeResult values + optimize_res.x = energy_state.xbest + optimize_res.fun = energy_state.ebest + optimize_res.nit = iteration + optimize_res.nfev = func_wrapper.nfev + optimize_res.njev = func_wrapper.ngev + optimize_res.nhev = func_wrapper.nhev + optimize_res.message = message + return optimize_res diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_group_columns.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_group_columns.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..1b74ea88cdf158f14688b803c4304a5d06412fbc Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_group_columns.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_hessian_update_strategy.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_hessian_update_strategy.py new file mode 100644 index 0000000000000000000000000000000000000000..b8529e51e83b968fea24b0859c434b2440c47575 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_hessian_update_strategy.py @@ -0,0 +1,430 @@ +"""Hessian update strategies for quasi-Newton optimization methods.""" +import numpy as np +from numpy.linalg import norm +from scipy.linalg import get_blas_funcs +from warnings import warn + + +__all__ = ['HessianUpdateStrategy', 'BFGS', 'SR1'] + + +class HessianUpdateStrategy: + """Interface for implementing Hessian update strategies. + + Many optimization methods make use of Hessian (or inverse Hessian) + approximations, such as the quasi-Newton methods BFGS, SR1, L-BFGS. + Some of these approximations, however, do not actually need to store + the entire matrix or can compute the internal matrix product with a + given vector in a very efficiently manner. This class serves as an + abstract interface between the optimization algorithm and the + quasi-Newton update strategies, giving freedom of implementation + to store and update the internal matrix as efficiently as possible. + Different choices of initialization and update procedure will result + in different quasi-Newton strategies. + + Four methods should be implemented in derived classes: ``initialize``, + ``update``, ``dot`` and ``get_matrix``. + + Notes + ----- + Any instance of a class that implements this interface, + can be accepted by the method ``minimize`` and used by + the compatible solvers to approximate the Hessian (or + inverse Hessian) used by the optimization algorithms. + """ + + def initialize(self, n, approx_type): + """Initialize internal matrix. + + Allocate internal memory for storing and updating + the Hessian or its inverse. + + Parameters + ---------- + n : int + Problem dimension. + approx_type : {'hess', 'inv_hess'} + Selects either the Hessian or the inverse Hessian. 
+ When set to 'hess' the Hessian will be stored and updated. + When set to 'inv_hess' its inverse will be used instead. + """ + raise NotImplementedError("The method ``initialize(n, approx_type)``" + " is not implemented.") + + def update(self, delta_x, delta_grad): + """Update internal matrix. + + Update Hessian matrix or its inverse (depending on how 'approx_type' + is defined) using information about the last evaluated points. + + Parameters + ---------- + delta_x : ndarray + The difference between two points the gradient + function have been evaluated at: ``delta_x = x2 - x1``. + delta_grad : ndarray + The difference between the gradients: + ``delta_grad = grad(x2) - grad(x1)``. + """ + raise NotImplementedError("The method ``update(delta_x, delta_grad)``" + " is not implemented.") + + def dot(self, p): + """Compute the product of the internal matrix with the given vector. + + Parameters + ---------- + p : array_like + 1-D array representing a vector. + + Returns + ------- + Hp : array + 1-D represents the result of multiplying the approximation matrix + by vector p. + """ + raise NotImplementedError("The method ``dot(p)``" + " is not implemented.") + + def get_matrix(self): + """Return current internal matrix. + + Returns + ------- + H : ndarray, shape (n, n) + Dense matrix containing either the Hessian + or its inverse (depending on how 'approx_type' + is defined). + """ + raise NotImplementedError("The method ``get_matrix(p)``" + " is not implemented.") + + +class FullHessianUpdateStrategy(HessianUpdateStrategy): + """Hessian update strategy with full dimensional internal representation. + """ + _syr = get_blas_funcs('syr', dtype='d') # Symmetric rank 1 update + _syr2 = get_blas_funcs('syr2', dtype='d') # Symmetric rank 2 update + # Symmetric matrix-vector product + _symv = get_blas_funcs('symv', dtype='d') + + def __init__(self, init_scale='auto'): + self.init_scale = init_scale + # Until initialize is called we can't really use the class, + # so it makes sense to set everything to None. + self.first_iteration = None + self.approx_type = None + self.B = None + self.H = None + + def initialize(self, n, approx_type): + """Initialize internal matrix. + + Allocate internal memory for storing and updating + the Hessian or its inverse. + + Parameters + ---------- + n : int + Problem dimension. + approx_type : {'hess', 'inv_hess'} + Selects either the Hessian or the inverse Hessian. + When set to 'hess' the Hessian will be stored and updated. + When set to 'inv_hess' its inverse will be used instead. + """ + self.first_iteration = True + self.n = n + self.approx_type = approx_type + if approx_type not in ('hess', 'inv_hess'): + raise ValueError("`approx_type` must be 'hess' or 'inv_hess'.") + # Create matrix + if self.approx_type == 'hess': + self.B = np.eye(n, dtype=float) + else: + self.H = np.eye(n, dtype=float) + + def _auto_scale(self, delta_x, delta_grad): + # Heuristic to scale matrix at first iteration. + # Described in Nocedal and Wright "Numerical Optimization" + # p.143 formula (6.20). + s_norm2 = np.dot(delta_x, delta_x) + y_norm2 = np.dot(delta_grad, delta_grad) + ys = np.abs(np.dot(delta_grad, delta_x)) + if ys == 0.0 or y_norm2 == 0 or s_norm2 == 0: + return 1 + if self.approx_type == 'hess': + return y_norm2 / ys + else: + return ys / y_norm2 + + def _update_implementation(self, delta_x, delta_grad): + raise NotImplementedError("The method ``_update_implementation``" + " is not implemented.") + + def update(self, delta_x, delta_grad): + """Update internal matrix. 
+ + Update Hessian matrix or its inverse (depending on how 'approx_type' + is defined) using information about the last evaluated points. + + Parameters + ---------- + delta_x : ndarray + The difference between two points the gradient + function have been evaluated at: ``delta_x = x2 - x1``. + delta_grad : ndarray + The difference between the gradients: + ``delta_grad = grad(x2) - grad(x1)``. + """ + if np.all(delta_x == 0.0): + return + if np.all(delta_grad == 0.0): + warn('delta_grad == 0.0. Check if the approximated ' + 'function is linear. If the function is linear ' + 'better results can be obtained by defining the ' + 'Hessian as zero instead of using quasi-Newton ' + 'approximations.', + UserWarning, stacklevel=2) + return + if self.first_iteration: + # Get user specific scale + if self.init_scale == "auto": + scale = self._auto_scale(delta_x, delta_grad) + else: + scale = float(self.init_scale) + # Scale initial matrix with ``scale * np.eye(n)`` + if self.approx_type == 'hess': + self.B *= scale + else: + self.H *= scale + self.first_iteration = False + self._update_implementation(delta_x, delta_grad) + + def dot(self, p): + """Compute the product of the internal matrix with the given vector. + + Parameters + ---------- + p : array_like + 1-D array representing a vector. + + Returns + ------- + Hp : array + 1-D represents the result of multiplying the approximation matrix + by vector p. + """ + if self.approx_type == 'hess': + return self._symv(1, self.B, p) + else: + return self._symv(1, self.H, p) + + def get_matrix(self): + """Return the current internal matrix. + + Returns + ------- + M : ndarray, shape (n, n) + Dense matrix containing either the Hessian or its inverse + (depending on how `approx_type` was defined). + """ + if self.approx_type == 'hess': + M = np.copy(self.B) + else: + M = np.copy(self.H) + li = np.tril_indices_from(M, k=-1) + M[li] = M.T[li] + return M + + +class BFGS(FullHessianUpdateStrategy): + """Broyden-Fletcher-Goldfarb-Shanno (BFGS) Hessian update strategy. + + Parameters + ---------- + exception_strategy : {'skip_update', 'damp_update'}, optional + Define how to proceed when the curvature condition is violated. + Set it to 'skip_update' to just skip the update. Or, alternatively, + set it to 'damp_update' to interpolate between the actual BFGS + result and the unmodified matrix. Both exceptions strategies + are explained in [1]_, p.536-537. + min_curvature : float + This number, scaled by a normalization factor, defines the + minimum curvature ``dot(delta_grad, delta_x)`` allowed to go + unaffected by the exception strategy. By default is equal to + 1e-8 when ``exception_strategy = 'skip_update'`` and equal + to 0.2 when ``exception_strategy = 'damp_update'``. + init_scale : {float, 'auto'} + Matrix scale at first iteration. At the first + iteration the Hessian matrix or its inverse will be initialized + with ``init_scale*np.eye(n)``, where ``n`` is the problem dimension. + Set it to 'auto' in order to use an automatic heuristic for choosing + the initial scale. The heuristic is described in [1]_, p.143. + By default uses 'auto'. + + Notes + ----- + The update is based on the description in [1]_, p.140. + + References + ---------- + .. [1] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization" + Second Edition (2006). 
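+
+    The block below is an editorial usage sketch (not part of the upstream
+    docstring) exercising the interface defined above on the quadratic
+    ``f(x) = x[0]**2 + 4*x[1]**2``; after one update the approximation
+    satisfies the secant equation ``B @ delta_x == delta_grad`` up to
+    rounding:
+
+    >>> import numpy as np
+    >>> from scipy.optimize import BFGS
+    >>> approx = BFGS()
+    >>> approx.initialize(n=2, approx_type='hess')
+    >>> grad = lambda x: np.array([2.0*x[0], 8.0*x[1]])
+    >>> x0, x1 = np.array([1.0, 1.0]), np.array([0.8, 0.5])
+    >>> approx.update(x1 - x0, grad(x1) - grad(x0))
+    >>> np.allclose(approx.dot(x1 - x0), grad(x1) - grad(x0))
+    True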
+ """ + + def __init__(self, exception_strategy='skip_update', min_curvature=None, + init_scale='auto'): + if exception_strategy == 'skip_update': + if min_curvature is not None: + self.min_curvature = min_curvature + else: + self.min_curvature = 1e-8 + elif exception_strategy == 'damp_update': + if min_curvature is not None: + self.min_curvature = min_curvature + else: + self.min_curvature = 0.2 + else: + raise ValueError("`exception_strategy` must be 'skip_update' " + "or 'damp_update'.") + + super().__init__(init_scale) + self.exception_strategy = exception_strategy + + def _update_inverse_hessian(self, ys, Hy, yHy, s): + """Update the inverse Hessian matrix. + + BFGS update using the formula: + + ``H <- H + ((H*y).T*y + s.T*y)/(s.T*y)^2 * (s*s.T) + - 1/(s.T*y) * ((H*y)*s.T + s*(H*y).T)`` + + where ``s = delta_x`` and ``y = delta_grad``. This formula is + equivalent to (6.17) in [1]_ written in a more efficient way + for implementation. + + References + ---------- + .. [1] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization" + Second Edition (2006). + """ + self.H = self._syr2(-1.0 / ys, s, Hy, a=self.H) + self.H = self._syr((ys+yHy)/ys**2, s, a=self.H) + + def _update_hessian(self, ys, Bs, sBs, y): + """Update the Hessian matrix. + + BFGS update using the formula: + + ``B <- B - (B*s)*(B*s).T/s.T*(B*s) + y*y^T/s.T*y`` + + where ``s`` is short for ``delta_x`` and ``y`` is short + for ``delta_grad``. Formula (6.19) in [1]_. + + References + ---------- + .. [1] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization" + Second Edition (2006). + """ + self.B = self._syr(1.0 / ys, y, a=self.B) + self.B = self._syr(-1.0 / sBs, Bs, a=self.B) + + def _update_implementation(self, delta_x, delta_grad): + # Auxiliary variables w and z + if self.approx_type == 'hess': + w = delta_x + z = delta_grad + else: + w = delta_grad + z = delta_x + # Do some common operations + wz = np.dot(w, z) + Mw = self.dot(w) + wMw = Mw.dot(w) + # Guarantee that wMw > 0 by reinitializing matrix. + # While this is always true in exact arithmetic, + # indefinite matrix may appear due to roundoff errors. + if wMw <= 0.0: + scale = self._auto_scale(delta_x, delta_grad) + # Reinitialize matrix + if self.approx_type == 'hess': + self.B = scale * np.eye(self.n, dtype=float) + else: + self.H = scale * np.eye(self.n, dtype=float) + # Do common operations for new matrix + Mw = self.dot(w) + wMw = Mw.dot(w) + # Check if curvature condition is violated + if wz <= self.min_curvature * wMw: + # If the option 'skip_update' is set + # we just skip the update when the condition + # is violated. + if self.exception_strategy == 'skip_update': + return + # If the option 'damp_update' is set we + # interpolate between the actual BFGS + # result and the unmodified matrix. + elif self.exception_strategy == 'damp_update': + update_factor = (1-self.min_curvature) / (1 - wz/wMw) + z = update_factor*z + (1-update_factor)*Mw + wz = np.dot(w, z) + # Update matrix + if self.approx_type == 'hess': + self._update_hessian(wz, Mw, wMw, z) + else: + self._update_inverse_hessian(wz, Mw, wMw, z) + + +class SR1(FullHessianUpdateStrategy): + """Symmetric-rank-1 Hessian update strategy. + + Parameters + ---------- + min_denominator : float + This number, scaled by a normalization factor, + defines the minimum denominator magnitude allowed + in the update. When the condition is violated we skip + the update. By default uses ``1e-8``. + init_scale : {float, 'auto'}, optional + Matrix scale at first iteration. 
At the first + iteration the Hessian matrix or its inverse will be initialized + with ``init_scale*np.eye(n)``, where ``n`` is the problem dimension. + Set it to 'auto' in order to use an automatic heuristic for choosing + the initial scale. The heuristic is described in [1]_, p.143. + By default uses 'auto'. + + Notes + ----- + The update is based on the description in [1]_, p.144-146. + + References + ---------- + .. [1] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization" + Second Edition (2006). + """ + + def __init__(self, min_denominator=1e-8, init_scale='auto'): + self.min_denominator = min_denominator + super().__init__(init_scale) + + def _update_implementation(self, delta_x, delta_grad): + # Auxiliary variables w and z + if self.approx_type == 'hess': + w = delta_x + z = delta_grad + else: + w = delta_grad + z = delta_x + # Do some common operations + Mw = self.dot(w) + z_minus_Mw = z - Mw + denominator = np.dot(w, z_minus_Mw) + # If the denominator is too small + # we just skip the update. + if np.abs(denominator) <= self.min_denominator*norm(w)*norm(z_minus_Mw): + return + # Update matrix + if self.approx_type == 'hess': + self.B = self._syr(1/denominator, z_minus_Mw, a=self.B) + else: + self.H = self._syr(1/denominator, z_minus_Mw, a=self.H) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_highs/_highs_constants.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_highs/_highs_constants.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..5b4aa4a483ef8bab2a33e8f2dac7875b508d26f5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_highs/_highs_constants.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsLp.pxd b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsLp.pxd new file mode 100644 index 0000000000000000000000000000000000000000..0944f083743f1c34847c3060278f5b7c40869251 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsLp.pxd @@ -0,0 +1,46 @@ +# cython: language_level=3 + +from libcpp cimport bool +from libcpp.string cimport string +from libcpp.vector cimport vector + +from .HConst cimport HighsBasisStatus, ObjSense, HighsVarType +from .HighsSparseMatrix cimport HighsSparseMatrix + + +cdef extern from "HighsLp.h" nogil: + # From HiGHS/src/lp_data/HighsLp.h + cdef cppclass HighsLp: + int num_col_ + int num_row_ + + vector[double] col_cost_ + vector[double] col_lower_ + vector[double] col_upper_ + vector[double] row_lower_ + vector[double] row_upper_ + + HighsSparseMatrix a_matrix_ + + ObjSense sense_ + double offset_ + + string model_name_ + + vector[string] row_names_ + vector[string] col_names_ + + vector[HighsVarType] integrality_ + + bool isMip() const + + cdef cppclass HighsSolution: + vector[double] col_value + vector[double] col_dual + vector[double] row_value + vector[double] row_dual + + cdef cppclass HighsBasis: + bool valid_ + vector[HighsBasisStatus] col_status + vector[HighsBasisStatus] row_status diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lbfgsb.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lbfgsb.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..e28140caec1a7873b565c4d2128f7aa80c51c946 Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lbfgsb.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lbfgsb_py.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lbfgsb_py.py new file mode 100644 index 0000000000000000000000000000000000000000..42ad9038ef0ce4c29b0cf22c5c9d2a1c029827c3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lbfgsb_py.py @@ -0,0 +1,543 @@ +""" +Functions +--------- +.. autosummary:: + :toctree: generated/ + + fmin_l_bfgs_b + +""" + +## License for the Python wrapper +## ============================== + +## Copyright (c) 2004 David M. Cooke + +## Permission is hereby granted, free of charge, to any person obtaining a +## copy of this software and associated documentation files (the "Software"), +## to deal in the Software without restriction, including without limitation +## the rights to use, copy, modify, merge, publish, distribute, sublicense, +## and/or sell copies of the Software, and to permit persons to whom the +## Software is furnished to do so, subject to the following conditions: + +## The above copyright notice and this permission notice shall be included in +## all copies or substantial portions of the Software. + +## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +## FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +## DEALINGS IN THE SOFTWARE. + +## Modifications by Travis Oliphant and Enthought, Inc. for inclusion in SciPy + +import numpy as np +from numpy import array, asarray, float64, zeros +from . import _lbfgsb +from ._optimize import (MemoizeJac, OptimizeResult, _call_callback_maybe_halt, + _wrap_callback, _check_unknown_options, + _prepare_scalar_function) +from ._constraints import old_bound_to_new + +from scipy.sparse.linalg import LinearOperator + +__all__ = ['fmin_l_bfgs_b', 'LbfgsInvHessProduct'] + + +def fmin_l_bfgs_b(func, x0, fprime=None, args=(), + approx_grad=0, + bounds=None, m=10, factr=1e7, pgtol=1e-5, + epsilon=1e-8, + iprint=-1, maxfun=15000, maxiter=15000, disp=None, + callback=None, maxls=20): + """ + Minimize a function func using the L-BFGS-B algorithm. + + Parameters + ---------- + func : callable f(x,*args) + Function to minimize. + x0 : ndarray + Initial guess. + fprime : callable fprime(x,*args), optional + The gradient of `func`. If None, then `func` returns the function + value and the gradient (``f, g = func(x, *args)``), unless + `approx_grad` is True in which case `func` returns only ``f``. + args : sequence, optional + Arguments to pass to `func` and `fprime`. + approx_grad : bool, optional + Whether to approximate the gradient numerically (in which case + `func` returns only the function value). + bounds : list, optional + ``(min, max)`` pairs for each element in ``x``, defining + the bounds on that parameter. Use None or +-inf for one of ``min`` or + ``max`` when there is no bound in that direction. + m : int, optional + The maximum number of variable metric corrections + used to define the limited memory matrix. (The limited memory BFGS + method does not store the full hessian but uses this many terms in an + approximation to it.) 
+ factr : float, optional + The iteration stops when + ``(f^k - f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= factr * eps``, + where ``eps`` is the machine precision, which is automatically + generated by the code. Typical values for `factr` are: 1e12 for + low accuracy; 1e7 for moderate accuracy; 10.0 for extremely + high accuracy. See Notes for relationship to `ftol`, which is exposed + (instead of `factr`) by the `scipy.optimize.minimize` interface to + L-BFGS-B. + pgtol : float, optional + The iteration will stop when + ``max{|proj g_i | i = 1, ..., n} <= pgtol`` + where ``proj g_i`` is the i-th component of the projected gradient. + epsilon : float, optional + Step size used when `approx_grad` is True, for numerically + calculating the gradient + iprint : int, optional + Controls the frequency of output. ``iprint < 0`` means no output; + ``iprint = 0`` print only one line at the last iteration; + ``0 < iprint < 99`` print also f and ``|proj g|`` every iprint iterations; + ``iprint = 99`` print details of every iteration except n-vectors; + ``iprint = 100`` print also the changes of active set and final x; + ``iprint > 100`` print details of every iteration including x and g. + disp : int, optional + If zero, then no output. If a positive number, then this over-rides + `iprint` (i.e., `iprint` gets the value of `disp`). + maxfun : int, optional + Maximum number of function evaluations. Note that this function + may violate the limit because of evaluating gradients by numerical + differentiation. + maxiter : int, optional + Maximum number of iterations. + callback : callable, optional + Called after each iteration, as ``callback(xk)``, where ``xk`` is the + current parameter vector. + maxls : int, optional + Maximum number of line search steps (per iteration). Default is 20. + + Returns + ------- + x : array_like + Estimated position of the minimum. + f : float + Value of `func` at the minimum. + d : dict + Information dictionary. + + * d['warnflag'] is + + - 0 if converged, + - 1 if too many function evaluations or too many iterations, + - 2 if stopped for another reason, given in d['task'] + + * d['grad'] is the gradient at the minimum (should be 0 ish) + * d['funcalls'] is the number of function calls made. + * d['nit'] is the number of iterations. + + See also + -------- + minimize: Interface to minimization algorithms for multivariate + functions. See the 'L-BFGS-B' `method` in particular. Note that the + `ftol` option is made available via that interface, while `factr` is + provided via this interface, where `factr` is the factor multiplying + the default machine floating-point precision to arrive at `ftol`: + ``ftol = factr * numpy.finfo(float).eps``. + + Notes + ----- + License of L-BFGS-B (FORTRAN code): + + The version included here (in fortran code) is 3.0 + (released April 25, 2011). It was written by Ciyou Zhu, Richard Byrd, + and Jorge Nocedal . It carries the following + condition for use: + + This software is freely available, but we expect that all publications + describing work using this software, or all commercial products using it, + quote at least one of the references given below. This software is released + under the BSD License. + + References + ---------- + * R. H. Byrd, P. Lu and J. Nocedal. A Limited Memory Algorithm for Bound + Constrained Optimization, (1995), SIAM Journal on Scientific and + Statistical Computing, 16, 5, pp. 1190-1208. + * C. Zhu, R. H. Byrd and J. Nocedal. 
L-BFGS-B: Algorithm 778: L-BFGS-B, + FORTRAN routines for large scale bound constrained optimization (1997), + ACM Transactions on Mathematical Software, 23, 4, pp. 550 - 560. + * J.L. Morales and J. Nocedal. L-BFGS-B: Remark on Algorithm 778: L-BFGS-B, + FORTRAN routines for large scale bound constrained optimization (2011), + ACM Transactions on Mathematical Software, 38, 1. + + Examples + -------- + Solve a linear regression problem via `fmin_l_bfgs_b`. To do this, first we define + an objective function ``f(m, b) = (y - y_model)**2``, where `y` describes the + observations and `y_model` the prediction of the linear model as + ``y_model = m*x + b``. The bounds for the parameters, ``m`` and ``b``, are arbitrarily + chosen as ``(0,5)`` and ``(5,10)`` for this example. + + >>> import numpy as np + >>> from scipy.optimize import fmin_l_bfgs_b + >>> X = np.arange(0, 10, 1) + >>> M = 2 + >>> B = 3 + >>> Y = M * X + B + >>> def func(parameters, *args): + ... x = args[0] + ... y = args[1] + ... m, b = parameters + ... y_model = m*x + b + ... error = sum(np.power((y - y_model), 2)) + ... return error + + >>> initial_values = np.array([0.0, 1.0]) + + >>> x_opt, f_opt, info = fmin_l_bfgs_b(func, x0=initial_values, args=(X, Y), + ... approx_grad=True) + >>> x_opt, f_opt + array([1.99999999, 3.00000006]), 1.7746231151323805e-14 # may vary + + The optimized parameters in ``x_opt`` agree with the ground truth parameters + ``m`` and ``b``. Next, let us perform a bound contrained optimization using the `bounds` + parameter. + + >>> bounds = [(0, 5), (5, 10)] + >>> x_opt, f_op, info = fmin_l_bfgs_b(func, x0=initial_values, args=(X, Y), + ... approx_grad=True, bounds=bounds) + >>> x_opt, f_opt + array([1.65990508, 5.31649385]), 15.721334516453945 # may vary + """ + # handle fprime/approx_grad + if approx_grad: + fun = func + jac = None + elif fprime is None: + fun = MemoizeJac(func) + jac = fun.derivative + else: + fun = func + jac = fprime + + # build options + callback = _wrap_callback(callback) + opts = {'disp': disp, + 'iprint': iprint, + 'maxcor': m, + 'ftol': factr * np.finfo(float).eps, + 'gtol': pgtol, + 'eps': epsilon, + 'maxfun': maxfun, + 'maxiter': maxiter, + 'callback': callback, + 'maxls': maxls} + + res = _minimize_lbfgsb(fun, x0, args=args, jac=jac, bounds=bounds, + **opts) + d = {'grad': res['jac'], + 'task': res['message'], + 'funcalls': res['nfev'], + 'nit': res['nit'], + 'warnflag': res['status']} + f = res['fun'] + x = res['x'] + + return x, f, d + + +def _minimize_lbfgsb(fun, x0, args=(), jac=None, bounds=None, + disp=None, maxcor=10, ftol=2.2204460492503131e-09, + gtol=1e-5, eps=1e-8, maxfun=15000, maxiter=15000, + iprint=-1, callback=None, maxls=20, + finite_diff_rel_step=None, **unknown_options): + """ + Minimize a scalar function of one or more variables using the L-BFGS-B + algorithm. + + Options + ------- + disp : None or int + If `disp is None` (the default), then the supplied version of `iprint` + is used. If `disp is not None`, then it overrides the supplied version + of `iprint` with the behaviour you outlined. + maxcor : int + The maximum number of variable metric corrections used to + define the limited memory matrix. (The limited memory BFGS + method does not store the full hessian but uses this many terms + in an approximation to it.) + ftol : float + The iteration stops when ``(f^k - + f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= ftol``. 
+ gtol : float + The iteration will stop when ``max{|proj g_i | i = 1, ..., n} + <= gtol`` where ``proj g_i`` is the i-th component of the + projected gradient. + eps : float or ndarray + If `jac is None` the absolute step size used for numerical + approximation of the jacobian via forward differences. + maxfun : int + Maximum number of function evaluations. Note that this function + may violate the limit because of evaluating gradients by numerical + differentiation. + maxiter : int + Maximum number of iterations. + iprint : int, optional + Controls the frequency of output. ``iprint < 0`` means no output; + ``iprint = 0`` print only one line at the last iteration; + ``0 < iprint < 99`` print also f and ``|proj g|`` every iprint iterations; + ``iprint = 99`` print details of every iteration except n-vectors; + ``iprint = 100`` print also the changes of active set and final x; + ``iprint > 100`` print details of every iteration including x and g. + maxls : int, optional + Maximum number of line search steps (per iteration). Default is 20. + finite_diff_rel_step : None or array_like, optional + If `jac in ['2-point', '3-point', 'cs']` the relative step size to + use for numerical approximation of the jacobian. The absolute step + size is computed as ``h = rel_step * sign(x) * max(1, abs(x))``, + possibly adjusted to fit into the bounds. For ``method='3-point'`` + the sign of `h` is ignored. If None (default) then step is selected + automatically. + + Notes + ----- + The option `ftol` is exposed via the `scipy.optimize.minimize` interface, + but calling `scipy.optimize.fmin_l_bfgs_b` directly exposes `factr`. The + relationship between the two is ``ftol = factr * numpy.finfo(float).eps``. + I.e., `factr` multiplies the default machine floating-point precision to + arrive at `ftol`. + + """ + _check_unknown_options(unknown_options) + m = maxcor + pgtol = gtol + factr = ftol / np.finfo(float).eps + + x0 = asarray(x0).ravel() + n, = x0.shape + + # historically old-style bounds were/are expected by lbfgsb. + # That's still the case but we'll deal with new-style from here on, + # it's easier + if bounds is None: + pass + elif len(bounds) != n: + raise ValueError('length of x0 != length of bounds') + else: + bounds = np.array(old_bound_to_new(bounds)) + + # check bounds + if (bounds[0] > bounds[1]).any(): + raise ValueError( + "LBFGSB - one of the lower bounds is greater than an upper bound." + ) + + # initial vector must lie within the bounds. 
Otherwise ScalarFunction and + # approx_derivative will cause problems + x0 = np.clip(x0, bounds[0], bounds[1]) + + if disp is not None: + if disp == 0: + iprint = -1 + else: + iprint = disp + + # _prepare_scalar_function can use bounds=None to represent no bounds + sf = _prepare_scalar_function(fun, x0, jac=jac, args=args, epsilon=eps, + bounds=bounds, + finite_diff_rel_step=finite_diff_rel_step) + + func_and_grad = sf.fun_and_grad + + fortran_int = _lbfgsb.types.intvar.dtype + + nbd = zeros(n, fortran_int) + low_bnd = zeros(n, float64) + upper_bnd = zeros(n, float64) + bounds_map = {(-np.inf, np.inf): 0, + (1, np.inf): 1, + (1, 1): 2, + (-np.inf, 1): 3} + + if bounds is not None: + for i in range(0, n): + l, u = bounds[0, i], bounds[1, i] + if not np.isinf(l): + low_bnd[i] = l + l = 1 + if not np.isinf(u): + upper_bnd[i] = u + u = 1 + nbd[i] = bounds_map[l, u] + + if not maxls > 0: + raise ValueError('maxls must be positive.') + + x = array(x0, float64) + f = array(0.0, float64) + g = zeros((n,), float64) + wa = zeros(2*m*n + 5*n + 11*m*m + 8*m, float64) + iwa = zeros(3*n, fortran_int) + task = zeros(1, 'S60') + csave = zeros(1, 'S60') + lsave = zeros(4, fortran_int) + isave = zeros(44, fortran_int) + dsave = zeros(29, float64) + + task[:] = 'START' + + n_iterations = 0 + + while 1: + # g may become float32 if a user provides a function that calculates + # the Jacobian in float32 (see gh-18730). The underlying Fortran code + # expects float64, so upcast it + g = g.astype(np.float64) + # x, f, g, wa, iwa, task, csave, lsave, isave, dsave = \ + _lbfgsb.setulb(m, x, low_bnd, upper_bnd, nbd, f, g, factr, + pgtol, wa, iwa, task, iprint, csave, lsave, + isave, dsave, maxls) + task_str = task.tobytes() + if task_str.startswith(b'FG'): + # The minimization routine wants f and g at the current x. + # Note that interruptions due to maxfun are postponed + # until the completion of the current minimization iteration. + # Overwrite f and g: + f, g = func_and_grad(x) + elif task_str.startswith(b'NEW_X'): + # new iteration + n_iterations += 1 + + intermediate_result = OptimizeResult(x=x, fun=f) + if _call_callback_maybe_halt(callback, intermediate_result): + task[:] = 'STOP: CALLBACK REQUESTED HALT' + if n_iterations >= maxiter: + task[:] = 'STOP: TOTAL NO. of ITERATIONS REACHED LIMIT' + elif sf.nfev > maxfun: + task[:] = ('STOP: TOTAL NO. of f AND g EVALUATIONS ' + 'EXCEEDS LIMIT') + else: + break + + task_str = task.tobytes().strip(b'\x00').strip() + if task_str.startswith(b'CONV'): + warnflag = 0 + elif sf.nfev > maxfun or n_iterations >= maxiter: + warnflag = 1 + else: + warnflag = 2 + + # These two portions of the workspace are described in the mainlb + # subroutine in lbfgsb.f. See line 363. + s = wa[0: m*n].reshape(m, n) + y = wa[m*n: 2*m*n].reshape(m, n) + + # See lbfgsb.f line 160 for this portion of the workspace. + # isave(31) = the total number of BFGS updates prior the current iteration; + n_bfgs_updates = isave[30] + + n_corrs = min(n_bfgs_updates, maxcor) + hess_inv = LbfgsInvHessProduct(s[:n_corrs], y[:n_corrs]) + + task_str = task_str.decode() + return OptimizeResult(fun=f, jac=g, nfev=sf.nfev, + njev=sf.ngev, + nit=n_iterations, status=warnflag, message=task_str, + x=x, success=(warnflag == 0), hess_inv=hess_inv) + + +class LbfgsInvHessProduct(LinearOperator): + """Linear operator for the L-BFGS approximate inverse Hessian. 
+ + This operator computes the product of a vector with the approximate inverse + of the Hessian of the objective function, using the L-BFGS limited + memory approximation to the inverse Hessian, accumulated during the + optimization. + + Objects of this class implement the ``scipy.sparse.linalg.LinearOperator`` + interface. + + Parameters + ---------- + sk : array_like, shape=(n_corr, n) + Array of `n_corr` most recent updates to the solution vector. + (See [1]). + yk : array_like, shape=(n_corr, n) + Array of `n_corr` most recent updates to the gradient. (See [1]). + + References + ---------- + .. [1] Nocedal, Jorge. "Updating quasi-Newton matrices with limited + storage." Mathematics of computation 35.151 (1980): 773-782. + + """ + + def __init__(self, sk, yk): + """Construct the operator.""" + if sk.shape != yk.shape or sk.ndim != 2: + raise ValueError('sk and yk must have matching shape, (n_corrs, n)') + n_corrs, n = sk.shape + + super().__init__(dtype=np.float64, shape=(n, n)) + + self.sk = sk + self.yk = yk + self.n_corrs = n_corrs + self.rho = 1 / np.einsum('ij,ij->i', sk, yk) + + def _matvec(self, x): + """Efficient matrix-vector multiply with the BFGS matrices. + + This calculation is described in Section (4) of [1]. + + Parameters + ---------- + x : ndarray + An array with shape (n,) or (n,1). + + Returns + ------- + y : ndarray + The matrix-vector product + + """ + s, y, n_corrs, rho = self.sk, self.yk, self.n_corrs, self.rho + q = np.array(x, dtype=self.dtype, copy=True) + if q.ndim == 2 and q.shape[1] == 1: + q = q.reshape(-1) + + alpha = np.empty(n_corrs) + + for i in range(n_corrs-1, -1, -1): + alpha[i] = rho[i] * np.dot(s[i], q) + q = q - alpha[i]*y[i] + + r = q + for i in range(n_corrs): + beta = rho[i] * np.dot(y[i], r) + r = r + s[i] * (alpha[i] - beta) + + return r + + def todense(self): + """Return a dense array representation of this operator. + + Returns + ------- + arr : ndarray, shape=(n, n) + An array with the same shape and containing + the same data represented by this `LinearOperator`. + + """ + s, y, n_corrs, rho = self.sk, self.yk, self.n_corrs, self.rho + I = np.eye(*self.shape, dtype=self.dtype) + Hk = I + + for i in range(n_corrs): + A1 = I - s[i][:, np.newaxis] * y[i][np.newaxis, :] * rho[i] + A2 = I - y[i][:, np.newaxis] * s[i][np.newaxis, :] * rho[i] + + Hk = np.dot(A1, np.dot(Hk, A2)) + (rho[i] * s[i][:, np.newaxis] * + s[i][np.newaxis, :]) + return Hk diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_linesearch.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_linesearch.py new file mode 100644 index 0000000000000000000000000000000000000000..bf6038d2b95b13f7a2f42558a0e68023fb127352 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_linesearch.py @@ -0,0 +1,897 @@ +""" +Functions +--------- +.. 
autosummary:: + :toctree: generated/ + + line_search_armijo + line_search_wolfe1 + line_search_wolfe2 + scalar_search_wolfe1 + scalar_search_wolfe2 + +""" +from warnings import warn + +from scipy.optimize import _minpack2 as minpack2 # noqa: F401 +from ._dcsrch import DCSRCH +import numpy as np + +__all__ = ['LineSearchWarning', 'line_search_wolfe1', 'line_search_wolfe2', + 'scalar_search_wolfe1', 'scalar_search_wolfe2', + 'line_search_armijo'] + +class LineSearchWarning(RuntimeWarning): + pass + + +def _check_c1_c2(c1, c2): + if not (0 < c1 < c2 < 1): + raise ValueError("'c1' and 'c2' do not satisfy" + "'0 < c1 < c2 < 1'.") + + +#------------------------------------------------------------------------------ +# Minpack's Wolfe line and scalar searches +#------------------------------------------------------------------------------ + +def line_search_wolfe1(f, fprime, xk, pk, gfk=None, + old_fval=None, old_old_fval=None, + args=(), c1=1e-4, c2=0.9, amax=50, amin=1e-8, + xtol=1e-14): + """ + As `scalar_search_wolfe1` but do a line search to direction `pk` + + Parameters + ---------- + f : callable + Function `f(x)` + fprime : callable + Gradient of `f` + xk : array_like + Current point + pk : array_like + Search direction + gfk : array_like, optional + Gradient of `f` at point `xk` + old_fval : float, optional + Value of `f` at point `xk` + old_old_fval : float, optional + Value of `f` at point preceding `xk` + + The rest of the parameters are the same as for `scalar_search_wolfe1`. + + Returns + ------- + stp, f_count, g_count, fval, old_fval + As in `line_search_wolfe1` + gval : array + Gradient of `f` at the final point + + Notes + ----- + Parameters `c1` and `c2` must satisfy ``0 < c1 < c2 < 1``. + + """ + if gfk is None: + gfk = fprime(xk, *args) + + gval = [gfk] + gc = [0] + fc = [0] + + def phi(s): + fc[0] += 1 + return f(xk + s*pk, *args) + + def derphi(s): + gval[0] = fprime(xk + s*pk, *args) + gc[0] += 1 + return np.dot(gval[0], pk) + + derphi0 = np.dot(gfk, pk) + + stp, fval, old_fval = scalar_search_wolfe1( + phi, derphi, old_fval, old_old_fval, derphi0, + c1=c1, c2=c2, amax=amax, amin=amin, xtol=xtol) + + return stp, fc[0], gc[0], fval, old_fval, gval[0] + + +def scalar_search_wolfe1(phi, derphi, phi0=None, old_phi0=None, derphi0=None, + c1=1e-4, c2=0.9, + amax=50, amin=1e-8, xtol=1e-14): + """ + Scalar function search for alpha that satisfies strong Wolfe conditions + + alpha > 0 is assumed to be a descent direction. + + Parameters + ---------- + phi : callable phi(alpha) + Function at point `alpha` + derphi : callable phi'(alpha) + Objective function derivative. Returns a scalar. + phi0 : float, optional + Value of phi at 0 + old_phi0 : float, optional + Value of phi at previous point + derphi0 : float, optional + Value derphi at 0 + c1 : float, optional + Parameter for Armijo condition rule. + c2 : float, optional + Parameter for curvature condition rule. + amax, amin : float, optional + Maximum and minimum step size + xtol : float, optional + Relative tolerance for an acceptable step. + + Returns + ------- + alpha : float + Step size, or None if no suitable step was found + phi : float + Value of `phi` at the new point `alpha` + phi0 : float + Value of `phi` at `alpha=0` + + Notes + ----- + Uses routine DCSRCH from MINPACK. + + Parameters `c1` and `c2` must satisfy ``0 < c1 < c2 < 1`` as described in [1]_. + + References + ---------- + + .. [1] Nocedal, J., & Wright, S. J. (2006). Numerical optimization. + In Springer Series in Operations Research and Financial Engineering. 
+ (Springer Series in Operations Research and Financial Engineering). + Springer Nature. + + """ + _check_c1_c2(c1, c2) + + if phi0 is None: + phi0 = phi(0.) + if derphi0 is None: + derphi0 = derphi(0.) + + if old_phi0 is not None and derphi0 != 0: + alpha1 = min(1.0, 1.01*2*(phi0 - old_phi0)/derphi0) + if alpha1 < 0: + alpha1 = 1.0 + else: + alpha1 = 1.0 + + maxiter = 100 + + dcsrch = DCSRCH(phi, derphi, c1, c2, xtol, amin, amax) + stp, phi1, phi0, task = dcsrch( + alpha1, phi0=phi0, derphi0=derphi0, maxiter=maxiter + ) + + return stp, phi1, phi0 + + +line_search = line_search_wolfe1 + + +#------------------------------------------------------------------------------ +# Pure-Python Wolfe line and scalar searches +#------------------------------------------------------------------------------ + +# Note: `line_search_wolfe2` is the public `scipy.optimize.line_search` + +def line_search_wolfe2(f, myfprime, xk, pk, gfk=None, old_fval=None, + old_old_fval=None, args=(), c1=1e-4, c2=0.9, amax=None, + extra_condition=None, maxiter=10): + """Find alpha that satisfies strong Wolfe conditions. + + Parameters + ---------- + f : callable f(x,*args) + Objective function. + myfprime : callable f'(x,*args) + Objective function gradient. + xk : ndarray + Starting point. + pk : ndarray + Search direction. The search direction must be a descent direction + for the algorithm to converge. + gfk : ndarray, optional + Gradient value for x=xk (xk being the current parameter + estimate). Will be recomputed if omitted. + old_fval : float, optional + Function value for x=xk. Will be recomputed if omitted. + old_old_fval : float, optional + Function value for the point preceding x=xk. + args : tuple, optional + Additional arguments passed to objective function. + c1 : float, optional + Parameter for Armijo condition rule. + c2 : float, optional + Parameter for curvature condition rule. + amax : float, optional + Maximum step size + extra_condition : callable, optional + A callable of the form ``extra_condition(alpha, x, f, g)`` + returning a boolean. Arguments are the proposed step ``alpha`` + and the corresponding ``x``, ``f`` and ``g`` values. The line search + accepts the value of ``alpha`` only if this + callable returns ``True``. If the callable returns ``False`` + for the step length, the algorithm will continue with + new iterates. The callable is only called for iterates + satisfying the strong Wolfe conditions. + maxiter : int, optional + Maximum number of iterations to perform. + + Returns + ------- + alpha : float or None + Alpha for which ``x_new = x0 + alpha * pk``, + or None if the line search algorithm did not converge. + fc : int + Number of function evaluations made. + gc : int + Number of gradient evaluations made. + new_fval : float or None + New function value ``f(x_new)=f(x0+alpha*pk)``, + or None if the line search algorithm did not converge. + old_fval : float + Old function value ``f(x0)``. + new_slope : float or None + The local slope along the search direction at the + new value ````, + or None if the line search algorithm did not converge. + + + Notes + ----- + Uses the line search algorithm to enforce strong Wolfe + conditions. See Wright and Nocedal, 'Numerical Optimization', + 1999, pp. 59-61. + + The search direction `pk` must be a descent direction (e.g. + ``-myfprime(xk)``) to find a step length that satisfies the strong Wolfe + conditions. If the search direction is not a descent direction (e.g. + ``myfprime(xk)``), then `alpha`, `new_fval`, and `new_slope` will be None. 
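+
+    Editorial note (added for clarity): writing ``phi(alpha) = f(xk + alpha*pk)``
+    and ``derphi(alpha) = dot(fprime(xk + alpha*pk), pk)``, a step accepted by
+    this search satisfies the sufficient-decrease (Armijo) condition
+    ``phi(alpha) <= phi(0) + c1*alpha*derphi(0)`` and the strong curvature
+    condition ``abs(derphi(alpha)) <= c2*abs(derphi(0))``.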
+ + Examples + -------- + >>> import numpy as np + >>> from scipy.optimize import line_search + + A objective function and its gradient are defined. + + >>> def obj_func(x): + ... return (x[0])**2+(x[1])**2 + >>> def obj_grad(x): + ... return [2*x[0], 2*x[1]] + + We can find alpha that satisfies strong Wolfe conditions. + + >>> start_point = np.array([1.8, 1.7]) + >>> search_gradient = np.array([-1.0, -1.0]) + >>> line_search(obj_func, obj_grad, start_point, search_gradient) + (1.0, 2, 1, 1.1300000000000001, 6.13, [1.6, 1.4]) + + """ + fc = [0] + gc = [0] + gval = [None] + gval_alpha = [None] + + def phi(alpha): + fc[0] += 1 + return f(xk + alpha * pk, *args) + + fprime = myfprime + + def derphi(alpha): + gc[0] += 1 + gval[0] = fprime(xk + alpha * pk, *args) # store for later use + gval_alpha[0] = alpha + return np.dot(gval[0], pk) + + if gfk is None: + gfk = fprime(xk, *args) + derphi0 = np.dot(gfk, pk) + + if extra_condition is not None: + # Add the current gradient as argument, to avoid needless + # re-evaluation + def extra_condition2(alpha, phi): + if gval_alpha[0] != alpha: + derphi(alpha) + x = xk + alpha * pk + return extra_condition(alpha, x, phi, gval[0]) + else: + extra_condition2 = None + + alpha_star, phi_star, old_fval, derphi_star = scalar_search_wolfe2( + phi, derphi, old_fval, old_old_fval, derphi0, c1, c2, amax, + extra_condition2, maxiter=maxiter) + + if derphi_star is None: + warn('The line search algorithm did not converge', + LineSearchWarning, stacklevel=2) + else: + # derphi_star is a number (derphi) -- so use the most recently + # calculated gradient used in computing it derphi = gfk*pk + # this is the gradient at the next step no need to compute it + # again in the outer loop. + derphi_star = gval[0] + + return alpha_star, fc[0], gc[0], phi_star, old_fval, derphi_star + + +def scalar_search_wolfe2(phi, derphi, phi0=None, + old_phi0=None, derphi0=None, + c1=1e-4, c2=0.9, amax=None, + extra_condition=None, maxiter=10): + """Find alpha that satisfies strong Wolfe conditions. + + alpha > 0 is assumed to be a descent direction. + + Parameters + ---------- + phi : callable phi(alpha) + Objective scalar function. + derphi : callable phi'(alpha) + Objective function derivative. Returns a scalar. + phi0 : float, optional + Value of phi at 0. + old_phi0 : float, optional + Value of phi at previous point. + derphi0 : float, optional + Value of derphi at 0 + c1 : float, optional + Parameter for Armijo condition rule. + c2 : float, optional + Parameter for curvature condition rule. + amax : float, optional + Maximum step size. + extra_condition : callable, optional + A callable of the form ``extra_condition(alpha, phi_value)`` + returning a boolean. The line search accepts the value + of ``alpha`` only if this callable returns ``True``. + If the callable returns ``False`` for the step length, + the algorithm will continue with new iterates. + The callable is only called for iterates satisfying + the strong Wolfe conditions. + maxiter : int, optional + Maximum number of iterations to perform. + + Returns + ------- + alpha_star : float or None + Best alpha, or None if the line search algorithm did not converge. + phi_star : float + phi at alpha_star. + phi0 : float + phi at 0. + derphi_star : float or None + derphi at alpha_star, or None if the line search algorithm + did not converge. + + Notes + ----- + Uses the line search algorithm to enforce strong Wolfe + conditions. See Wright and Nocedal, 'Numerical Optimization', + 1999, pp. 59-61. 
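+
+    As an editorial illustration (not part of the upstream docstring), the
+    search can be exercised on a simple one-dimensional quadratic; the
+    private import path below is the module defined in this file:
+
+    >>> from scipy.optimize._linesearch import scalar_search_wolfe2
+    >>> phi = lambda a: (a - 2.0)**2
+    >>> derphi = lambda a: 2.0*(a - 2.0)
+    >>> alpha, phi_a, phi0, derphi_a = scalar_search_wolfe2(phi, derphi)
+    >>> alpha, phi_a
+    (1.0, 1.0)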
+ + """ + _check_c1_c2(c1, c2) + + if phi0 is None: + phi0 = phi(0.) + + if derphi0 is None: + derphi0 = derphi(0.) + + alpha0 = 0 + if old_phi0 is not None and derphi0 != 0: + alpha1 = min(1.0, 1.01*2*(phi0 - old_phi0)/derphi0) + else: + alpha1 = 1.0 + + if alpha1 < 0: + alpha1 = 1.0 + + if amax is not None: + alpha1 = min(alpha1, amax) + + phi_a1 = phi(alpha1) + #derphi_a1 = derphi(alpha1) evaluated below + + phi_a0 = phi0 + derphi_a0 = derphi0 + + if extra_condition is None: + def extra_condition(alpha, phi): + return True + + for i in range(maxiter): + if alpha1 == 0 or (amax is not None and alpha0 > amax): + # alpha1 == 0: This shouldn't happen. Perhaps the increment has + # slipped below machine precision? + alpha_star = None + phi_star = phi0 + phi0 = old_phi0 + derphi_star = None + + if alpha1 == 0: + msg = 'Rounding errors prevent the line search from converging' + else: + msg = "The line search algorithm could not find a solution " + \ + "less than or equal to amax: %s" % amax + + warn(msg, LineSearchWarning, stacklevel=2) + break + + not_first_iteration = i > 0 + if (phi_a1 > phi0 + c1 * alpha1 * derphi0) or \ + ((phi_a1 >= phi_a0) and not_first_iteration): + alpha_star, phi_star, derphi_star = \ + _zoom(alpha0, alpha1, phi_a0, + phi_a1, derphi_a0, phi, derphi, + phi0, derphi0, c1, c2, extra_condition) + break + + derphi_a1 = derphi(alpha1) + if (abs(derphi_a1) <= -c2*derphi0): + if extra_condition(alpha1, phi_a1): + alpha_star = alpha1 + phi_star = phi_a1 + derphi_star = derphi_a1 + break + + if (derphi_a1 >= 0): + alpha_star, phi_star, derphi_star = \ + _zoom(alpha1, alpha0, phi_a1, + phi_a0, derphi_a1, phi, derphi, + phi0, derphi0, c1, c2, extra_condition) + break + + alpha2 = 2 * alpha1 # increase by factor of two on each iteration + if amax is not None: + alpha2 = min(alpha2, amax) + alpha0 = alpha1 + alpha1 = alpha2 + phi_a0 = phi_a1 + phi_a1 = phi(alpha1) + derphi_a0 = derphi_a1 + + else: + # stopping test maxiter reached + alpha_star = alpha1 + phi_star = phi_a1 + derphi_star = None + warn('The line search algorithm did not converge', + LineSearchWarning, stacklevel=2) + + return alpha_star, phi_star, phi0, derphi_star + + +def _cubicmin(a, fa, fpa, b, fb, c, fc): + """ + Finds the minimizer for a cubic polynomial that goes through the + points (a,fa), (b,fb), and (c,fc) with derivative at a of fpa. + + If no minimizer can be found, return None. + + """ + # f(x) = A *(x-a)^3 + B*(x-a)^2 + C*(x-a) + D + + with np.errstate(divide='raise', over='raise', invalid='raise'): + try: + C = fpa + db = b - a + dc = c - a + denom = (db * dc) ** 2 * (db - dc) + d1 = np.empty((2, 2)) + d1[0, 0] = dc ** 2 + d1[0, 1] = -db ** 2 + d1[1, 0] = -dc ** 3 + d1[1, 1] = db ** 3 + [A, B] = np.dot(d1, np.asarray([fb - fa - C * db, + fc - fa - C * dc]).flatten()) + A /= denom + B /= denom + radical = B * B - 3 * A * C + xmin = a + (-B + np.sqrt(radical)) / (3 * A) + except ArithmeticError: + return None + if not np.isfinite(xmin): + return None + return xmin + + +def _quadmin(a, fa, fpa, b, fb): + """ + Finds the minimizer for a quadratic polynomial that goes through + the points (a,fa), (b,fb) with derivative at a of fpa. 
+ + """ + # f(x) = B*(x-a)^2 + C*(x-a) + D + with np.errstate(divide='raise', over='raise', invalid='raise'): + try: + D = fa + C = fpa + db = b - a * 1.0 + B = (fb - D - C * db) / (db * db) + xmin = a - C / (2.0 * B) + except ArithmeticError: + return None + if not np.isfinite(xmin): + return None + return xmin + + +def _zoom(a_lo, a_hi, phi_lo, phi_hi, derphi_lo, + phi, derphi, phi0, derphi0, c1, c2, extra_condition): + """Zoom stage of approximate linesearch satisfying strong Wolfe conditions. + + Part of the optimization algorithm in `scalar_search_wolfe2`. + + Notes + ----- + Implements Algorithm 3.6 (zoom) in Wright and Nocedal, + 'Numerical Optimization', 1999, pp. 61. + + """ + + maxiter = 10 + i = 0 + delta1 = 0.2 # cubic interpolant check + delta2 = 0.1 # quadratic interpolant check + phi_rec = phi0 + a_rec = 0 + while True: + # interpolate to find a trial step length between a_lo and + # a_hi Need to choose interpolation here. Use cubic + # interpolation and then if the result is within delta * + # dalpha or outside of the interval bounded by a_lo or a_hi + # then use quadratic interpolation, if the result is still too + # close, then use bisection + + dalpha = a_hi - a_lo + if dalpha < 0: + a, b = a_hi, a_lo + else: + a, b = a_lo, a_hi + + # minimizer of cubic interpolant + # (uses phi_lo, derphi_lo, phi_hi, and the most recent value of phi) + # + # if the result is too close to the end points (or out of the + # interval), then use quadratic interpolation with phi_lo, + # derphi_lo and phi_hi if the result is still too close to the + # end points (or out of the interval) then use bisection + + if (i > 0): + cchk = delta1 * dalpha + a_j = _cubicmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi, + a_rec, phi_rec) + if (i == 0) or (a_j is None) or (a_j > b - cchk) or (a_j < a + cchk): + qchk = delta2 * dalpha + a_j = _quadmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi) + if (a_j is None) or (a_j > b-qchk) or (a_j < a+qchk): + a_j = a_lo + 0.5*dalpha + + # Check new value of a_j + + phi_aj = phi(a_j) + if (phi_aj > phi0 + c1*a_j*derphi0) or (phi_aj >= phi_lo): + phi_rec = phi_hi + a_rec = a_hi + a_hi = a_j + phi_hi = phi_aj + else: + derphi_aj = derphi(a_j) + if abs(derphi_aj) <= -c2*derphi0 and extra_condition(a_j, phi_aj): + a_star = a_j + val_star = phi_aj + valprime_star = derphi_aj + break + if derphi_aj*(a_hi - a_lo) >= 0: + phi_rec = phi_hi + a_rec = a_hi + a_hi = a_lo + phi_hi = phi_lo + else: + phi_rec = phi_lo + a_rec = a_lo + a_lo = a_j + phi_lo = phi_aj + derphi_lo = derphi_aj + i += 1 + if (i > maxiter): + # Failed to find a conforming step size + a_star = None + val_star = None + valprime_star = None + break + return a_star, val_star, valprime_star + + +#------------------------------------------------------------------------------ +# Armijo line and scalar searches +#------------------------------------------------------------------------------ + +def line_search_armijo(f, xk, pk, gfk, old_fval, args=(), c1=1e-4, alpha0=1): + """Minimize over alpha, the function ``f(xk+alpha pk)``. + + Parameters + ---------- + f : callable + Function to be minimized. + xk : array_like + Current point. + pk : array_like + Search direction. + gfk : array_like + Gradient of `f` at point `xk`. + old_fval : float + Value of `f` at point `xk`. + args : tuple, optional + Optional arguments. + c1 : float, optional + Value to control stopping criterion. + alpha0 : scalar, optional + Value of `alpha` at start of the optimization. 
+ + Returns + ------- + alpha + f_count + f_val_at_alpha + + Notes + ----- + Uses the interpolation algorithm (Armijo backtracking) as suggested by + Wright and Nocedal in 'Numerical Optimization', 1999, pp. 56-57 + + """ + xk = np.atleast_1d(xk) + fc = [0] + + def phi(alpha1): + fc[0] += 1 + return f(xk + alpha1*pk, *args) + + if old_fval is None: + phi0 = phi(0.) + else: + phi0 = old_fval # compute f(xk) -- done in past loop + + derphi0 = np.dot(gfk, pk) + alpha, phi1 = scalar_search_armijo(phi, phi0, derphi0, c1=c1, + alpha0=alpha0) + return alpha, fc[0], phi1 + + +def line_search_BFGS(f, xk, pk, gfk, old_fval, args=(), c1=1e-4, alpha0=1): + """ + Compatibility wrapper for `line_search_armijo` + """ + r = line_search_armijo(f, xk, pk, gfk, old_fval, args=args, c1=c1, + alpha0=alpha0) + return r[0], r[1], 0, r[2] + + +def scalar_search_armijo(phi, phi0, derphi0, c1=1e-4, alpha0=1, amin=0): + """Minimize over alpha, the function ``phi(alpha)``. + + Uses the interpolation algorithm (Armijo backtracking) as suggested by + Wright and Nocedal in 'Numerical Optimization', 1999, pp. 56-57 + + alpha > 0 is assumed to be a descent direction. + + Returns + ------- + alpha + phi1 + + """ + phi_a0 = phi(alpha0) + if phi_a0 <= phi0 + c1*alpha0*derphi0: + return alpha0, phi_a0 + + # Otherwise, compute the minimizer of a quadratic interpolant: + + alpha1 = -(derphi0) * alpha0**2 / 2.0 / (phi_a0 - phi0 - derphi0 * alpha0) + phi_a1 = phi(alpha1) + + if (phi_a1 <= phi0 + c1*alpha1*derphi0): + return alpha1, phi_a1 + + # Otherwise, loop with cubic interpolation until we find an alpha which + # satisfies the first Wolfe condition (since we are backtracking, we will + # assume that the value of alpha is not too small and satisfies the second + # condition. + + while alpha1 > amin: # we are assuming alpha>0 is a descent direction + factor = alpha0**2 * alpha1**2 * (alpha1-alpha0) + a = alpha0**2 * (phi_a1 - phi0 - derphi0*alpha1) - \ + alpha1**2 * (phi_a0 - phi0 - derphi0*alpha0) + a = a / factor + b = -alpha0**3 * (phi_a1 - phi0 - derphi0*alpha1) + \ + alpha1**3 * (phi_a0 - phi0 - derphi0*alpha0) + b = b / factor + + alpha2 = (-b + np.sqrt(abs(b**2 - 3 * a * derphi0))) / (3.0*a) + phi_a2 = phi(alpha2) + + if (phi_a2 <= phi0 + c1*alpha2*derphi0): + return alpha2, phi_a2 + + if (alpha1 - alpha2) > alpha1 / 2.0 or (1 - alpha2/alpha1) < 0.96: + alpha2 = alpha1 / 2.0 + + alpha0 = alpha1 + alpha1 = alpha2 + phi_a0 = phi_a1 + phi_a1 = phi_a2 + + # Failed to find a suitable step length + return None, phi_a1 + + +#------------------------------------------------------------------------------ +# Non-monotone line search for DF-SANE +#------------------------------------------------------------------------------ + +def _nonmonotone_line_search_cruz(f, x_k, d, prev_fs, eta, + gamma=1e-4, tau_min=0.1, tau_max=0.5): + """ + Nonmonotone backtracking line search as described in [1]_ + + Parameters + ---------- + f : callable + Function returning a tuple ``(f, F)`` where ``f`` is the value + of a merit function and ``F`` the residual. + x_k : ndarray + Initial position. + d : ndarray + Search direction. + prev_fs : float + List of previous merit function values. Should have ``len(prev_fs) <= M`` + where ``M`` is the nonmonotonicity window parameter. 
+ eta : float + Allowed merit function increase, see [1]_ + gamma, tau_min, tau_max : float, optional + Search parameters, see [1]_ + + Returns + ------- + alpha : float + Step length + xp : ndarray + Next position + fp : float + Merit function value at next position + Fp : ndarray + Residual at next position + + References + ---------- + [1] "Spectral residual method without gradient information for solving + large-scale nonlinear systems of equations." W. La Cruz, + J.M. Martinez, M. Raydan. Math. Comp. **75**, 1429 (2006). + + """ + f_k = prev_fs[-1] + f_bar = max(prev_fs) + + alpha_p = 1 + alpha_m = 1 + alpha = 1 + + while True: + xp = x_k + alpha_p * d + fp, Fp = f(xp) + + if fp <= f_bar + eta - gamma * alpha_p**2 * f_k: + alpha = alpha_p + break + + alpha_tp = alpha_p**2 * f_k / (fp + (2*alpha_p - 1)*f_k) + + xp = x_k - alpha_m * d + fp, Fp = f(xp) + + if fp <= f_bar + eta - gamma * alpha_m**2 * f_k: + alpha = -alpha_m + break + + alpha_tm = alpha_m**2 * f_k / (fp + (2*alpha_m - 1)*f_k) + + alpha_p = np.clip(alpha_tp, tau_min * alpha_p, tau_max * alpha_p) + alpha_m = np.clip(alpha_tm, tau_min * alpha_m, tau_max * alpha_m) + + return alpha, xp, fp, Fp + + +def _nonmonotone_line_search_cheng(f, x_k, d, f_k, C, Q, eta, + gamma=1e-4, tau_min=0.1, tau_max=0.5, + nu=0.85): + """ + Nonmonotone line search from [1] + + Parameters + ---------- + f : callable + Function returning a tuple ``(f, F)`` where ``f`` is the value + of a merit function and ``F`` the residual. + x_k : ndarray + Initial position. + d : ndarray + Search direction. + f_k : float + Initial merit function value. + C, Q : float + Control parameters. On the first iteration, give values + Q=1.0, C=f_k + eta : float + Allowed merit function increase, see [1]_ + nu, gamma, tau_min, tau_max : float, optional + Search parameters, see [1]_ + + Returns + ------- + alpha : float + Step length + xp : ndarray + Next position + fp : float + Merit function value at next position + Fp : ndarray + Residual at next position + C : float + New value for the control parameter C + Q : float + New value for the control parameter Q + + References + ---------- + .. [1] W. Cheng & D.-H. Li, ''A derivative-free nonmonotone line + search and its application to the spectral residual + method'', IMA J. Numer. Anal. 29, 814 (2009). + + """ + alpha_p = 1 + alpha_m = 1 + alpha = 1 + + while True: + xp = x_k + alpha_p * d + fp, Fp = f(xp) + + if fp <= C + eta - gamma * alpha_p**2 * f_k: + alpha = alpha_p + break + + alpha_tp = alpha_p**2 * f_k / (fp + (2*alpha_p - 1)*f_k) + + xp = x_k - alpha_m * d + fp, Fp = f(xp) + + if fp <= C + eta - gamma * alpha_m**2 * f_k: + alpha = -alpha_m + break + + alpha_tm = alpha_m**2 * f_k / (fp + (2*alpha_m - 1)*f_k) + + alpha_p = np.clip(alpha_tp, tau_min * alpha_p, tau_max * alpha_p) + alpha_m = np.clip(alpha_tm, tau_min * alpha_m, tau_max * alpha_m) + + # Update C and Q + Q_next = nu * Q + 1 + C = (nu * Q * (C + eta) + fp) / Q_next + Q = Q_next + + return alpha, xp, fp, Fp, C, Q diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_linprog.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_linprog.py new file mode 100644 index 0000000000000000000000000000000000000000..5deb51bd455857e9c3767bb13157bb9ab8a86a34 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_linprog.py @@ -0,0 +1,714 @@ +""" +A top-level linear programming interface. + +.. versionadded:: 0.15.0 + +Functions +--------- +.. 
autosummary:: + :toctree: generated/ + + linprog + linprog_verbose_callback + linprog_terse_callback + +""" + +import numpy as np + +from ._optimize import OptimizeResult, OptimizeWarning +from warnings import warn +from ._linprog_highs import _linprog_highs +from ._linprog_ip import _linprog_ip +from ._linprog_simplex import _linprog_simplex +from ._linprog_rs import _linprog_rs +from ._linprog_doc import (_linprog_highs_doc, _linprog_ip_doc, # noqa: F401 + _linprog_rs_doc, _linprog_simplex_doc, + _linprog_highs_ipm_doc, _linprog_highs_ds_doc) +from ._linprog_util import ( + _parse_linprog, _presolve, _get_Abc, _LPProblem, _autoscale, + _postsolve, _check_result, _display_summary) +from copy import deepcopy + +__all__ = ['linprog', 'linprog_verbose_callback', 'linprog_terse_callback'] + +__docformat__ = "restructuredtext en" + +LINPROG_METHODS = [ + 'simplex', 'revised simplex', 'interior-point', 'highs', 'highs-ds', 'highs-ipm' +] + + +def linprog_verbose_callback(res): + """ + A sample callback function demonstrating the linprog callback interface. + This callback produces detailed output to sys.stdout before each iteration + and after the final iteration of the simplex algorithm. + + Parameters + ---------- + res : A `scipy.optimize.OptimizeResult` consisting of the following fields: + + x : 1-D array + The independent variable vector which optimizes the linear + programming problem. + fun : float + Value of the objective function. + success : bool + True if the algorithm succeeded in finding an optimal solution. + slack : 1-D array + The values of the slack variables. Each slack variable corresponds + to an inequality constraint. If the slack is zero, then the + corresponding constraint is active. + con : 1-D array + The (nominally zero) residuals of the equality constraints, that is, + ``b - A_eq @ x`` + phase : int + The phase of the optimization being executed. In phase 1 a basic + feasible solution is sought and the T has an additional row + representing an alternate objective function. + status : int + An integer representing the exit status of the optimization:: + + 0 : Optimization terminated successfully + 1 : Iteration limit reached + 2 : Problem appears to be infeasible + 3 : Problem appears to be unbounded + 4 : Serious numerical difficulties encountered + + nit : int + The number of iterations performed. + message : str + A string descriptor of the exit status of the optimization. + """ + x = res['x'] + fun = res['fun'] + phase = res['phase'] + status = res['status'] + nit = res['nit'] + message = res['message'] + complete = res['complete'] + + saved_printoptions = np.get_printoptions() + np.set_printoptions(linewidth=500, + formatter={'float': lambda x: f"{x: 12.4f}"}) + if status: + print('--------- Simplex Early Exit -------\n') + print(f'The simplex method exited early with status {status:d}') + print(message) + elif complete: + print('--------- Simplex Complete --------\n') + print(f'Iterations required: {nit}') + else: + print(f'--------- Iteration {nit:d} ---------\n') + + if nit > 0: + if phase == 1: + print('Current Pseudo-Objective Value:') + else: + print('Current Objective Value:') + print('f = ', fun) + print() + print('Current Solution Vector:') + print('x = ', x) + print() + + np.set_printoptions(**saved_printoptions) + + +def linprog_terse_callback(res): + """ + A sample callback function demonstrating the linprog callback interface. 
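As a brief illustration of the callback interface documented here, the sketch below passes the terse callback to `linprog`. It assumes the callbacks are re-exported from `scipy.optimize`; since the HiGHS front ends do not support callbacks, a legacy method has to be requested explicitly (which also triggers the deprecation warning mentioned later in this file).

from scipy.optimize import linprog, linprog_terse_callback

c = [-1, 4]
A_ub = [[-3, 1], [1, 2]]
b_ub = [6, 4]

# Prints the "Iter: X:" header once, then the iterate x at each simplex step.
res = linprog(c, A_ub=A_ub, b_ub=b_ub, method='simplex',
              callback=linprog_terse_callback)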
+ This callback produces brief output to sys.stdout before each iteration + and after the final iteration of the simplex algorithm. + + Parameters + ---------- + res : A `scipy.optimize.OptimizeResult` consisting of the following fields: + + x : 1-D array + The independent variable vector which optimizes the linear + programming problem. + fun : float + Value of the objective function. + success : bool + True if the algorithm succeeded in finding an optimal solution. + slack : 1-D array + The values of the slack variables. Each slack variable corresponds + to an inequality constraint. If the slack is zero, then the + corresponding constraint is active. + con : 1-D array + The (nominally zero) residuals of the equality constraints, that is, + ``b - A_eq @ x``. + phase : int + The phase of the optimization being executed. In phase 1 a basic + feasible solution is sought and the T has an additional row + representing an alternate objective function. + status : int + An integer representing the exit status of the optimization:: + + 0 : Optimization terminated successfully + 1 : Iteration limit reached + 2 : Problem appears to be infeasible + 3 : Problem appears to be unbounded + 4 : Serious numerical difficulties encountered + + nit : int + The number of iterations performed. + message : str + A string descriptor of the exit status of the optimization. + """ + nit = res['nit'] + x = res['x'] + + if nit == 0: + print("Iter: X:") + print(f"{nit: <5d} ", end="") + print(x) + + +def linprog(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, + bounds=(0, None), method='highs', callback=None, + options=None, x0=None, integrality=None): + r""" + Linear programming: minimize a linear objective function subject to linear + equality and inequality constraints. + + Linear programming solves problems of the following form: + + .. math:: + + \min_x \ & c^T x \\ + \mbox{such that} \ & A_{ub} x \leq b_{ub},\\ + & A_{eq} x = b_{eq},\\ + & l \leq x \leq u , + + where :math:`x` is a vector of decision variables; :math:`c`, + :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and + :math:`A_{ub}` and :math:`A_{eq}` are matrices. + + Alternatively, that's: + + - minimize :: + + c @ x + + - such that :: + + A_ub @ x <= b_ub + A_eq @ x == b_eq + lb <= x <= ub + + Note that by default ``lb = 0`` and ``ub = None``. Other bounds can be + specified with ``bounds``. + + Parameters + ---------- + c : 1-D array + The coefficients of the linear objective function to be minimized. + A_ub : 2-D array, optional + The inequality constraint matrix. Each row of ``A_ub`` specifies the + coefficients of a linear inequality constraint on ``x``. + b_ub : 1-D array, optional + The inequality constraint vector. Each element represents an + upper bound on the corresponding value of ``A_ub @ x``. + A_eq : 2-D array, optional + The equality constraint matrix. Each row of ``A_eq`` specifies the + coefficients of a linear equality constraint on ``x``. + b_eq : 1-D array, optional + The equality constraint vector. Each element of ``A_eq @ x`` must equal + the corresponding element of ``b_eq``. + bounds : sequence, optional + A sequence of ``(min, max)`` pairs for each element in ``x``, defining + the minimum and maximum values of that decision variable. + If a single tuple ``(min, max)`` is provided, then ``min`` and ``max`` + will serve as bounds for all decision variables. + Use ``None`` to indicate that there is no bound. 
For instance, the + default bound ``(0, None)`` means that all decision variables are + non-negative, and the pair ``(None, None)`` means no bounds at all, + i.e. all variables are allowed to be any real. + method : str, optional + The algorithm used to solve the standard form problem. + :ref:`'highs' ` (default), + :ref:`'highs-ds' `, + :ref:`'highs-ipm' `, + :ref:`'interior-point' ` (legacy), + :ref:`'revised simplex' ` (legacy), + and + :ref:`'simplex' ` (legacy) are supported. + The legacy methods are deprecated and will be removed in SciPy 1.11.0. + callback : callable, optional + If a callback function is provided, it will be called at least once per + iteration of the algorithm. The callback function must accept a single + `scipy.optimize.OptimizeResult` consisting of the following fields: + + x : 1-D array + The current solution vector. + fun : float + The current value of the objective function ``c @ x``. + success : bool + ``True`` when the algorithm has completed successfully. + slack : 1-D array + The (nominally positive) values of the slack, + ``b_ub - A_ub @ x``. + con : 1-D array + The (nominally zero) residuals of the equality constraints, + ``b_eq - A_eq @ x``. + phase : int + The phase of the algorithm being executed. + status : int + An integer representing the status of the algorithm. + + ``0`` : Optimization proceeding nominally. + + ``1`` : Iteration limit reached. + + ``2`` : Problem appears to be infeasible. + + ``3`` : Problem appears to be unbounded. + + ``4`` : Numerical difficulties encountered. + + nit : int + The current iteration number. + message : str + A string descriptor of the algorithm status. + + Callback functions are not currently supported by the HiGHS methods. + + options : dict, optional + A dictionary of solver options. All methods accept the following + options: + + maxiter : int + Maximum number of iterations to perform. + Default: see method-specific documentation. + disp : bool + Set to ``True`` to print convergence messages. + Default: ``False``. + presolve : bool + Set to ``False`` to disable automatic presolve. + Default: ``True``. + + All methods except the HiGHS solvers also accept: + + tol : float + A tolerance which determines when a residual is "close enough" to + zero to be considered exactly zero. + autoscale : bool + Set to ``True`` to automatically perform equilibration. + Consider using this option if the numerical values in the + constraints are separated by several orders of magnitude. + Default: ``False``. + rr : bool + Set to ``False`` to disable automatic redundancy removal. + Default: ``True``. + rr_method : string + Method used to identify and remove redundant rows from the + equality constraint matrix after presolve. For problems with + dense input, the available methods for redundancy removal are: + + "SVD": + Repeatedly performs singular value decomposition on + the matrix, detecting redundant rows based on nonzeros + in the left singular vectors that correspond with + zero singular values. May be fast when the matrix is + nearly full rank. + "pivot": + Uses the algorithm presented in [5]_ to identify + redundant rows. + "ID": + Uses a randomized interpolative decomposition. + Identifies columns of the matrix transpose not used in + a full-rank interpolative decomposition of the matrix. + None: + Uses "svd" if the matrix is nearly full rank, that is, + the difference between the matrix rank and the number + of rows is less than five. If not, uses "pivot". 
The + behavior of this default is subject to change without + prior notice. + + Default: None. + For problems with sparse input, this option is ignored, and the + pivot-based algorithm presented in [5]_ is used. + + For method-specific options, see + :func:`show_options('linprog') `. + + x0 : 1-D array, optional + Guess values of the decision variables, which will be refined by + the optimization algorithm. This argument is currently used only by the + 'revised simplex' method, and can only be used if `x0` represents a + basic feasible solution. + + integrality : 1-D array or int, optional + Indicates the type of integrality constraint on each decision variable. + + ``0`` : Continuous variable; no integrality constraint. + + ``1`` : Integer variable; decision variable must be an integer + within `bounds`. + + ``2`` : Semi-continuous variable; decision variable must be within + `bounds` or take value ``0``. + + ``3`` : Semi-integer variable; decision variable must be an integer + within `bounds` or take value ``0``. + + By default, all variables are continuous. + + For mixed integrality constraints, supply an array of shape `c.shape`. + To infer a constraint on each decision variable from shorter inputs, + the argument will be broadcasted to `c.shape` using `np.broadcast_to`. + + This argument is currently used only by the ``'highs'`` method and + ignored otherwise. + + Returns + ------- + res : OptimizeResult + A :class:`scipy.optimize.OptimizeResult` consisting of the fields + below. Note that the return types of the fields may depend on whether + the optimization was successful, therefore it is recommended to check + `OptimizeResult.status` before relying on the other fields: + + x : 1-D array + The values of the decision variables that minimizes the + objective function while satisfying the constraints. + fun : float + The optimal value of the objective function ``c @ x``. + slack : 1-D array + The (nominally positive) values of the slack variables, + ``b_ub - A_ub @ x``. + con : 1-D array + The (nominally zero) residuals of the equality constraints, + ``b_eq - A_eq @ x``. + success : bool + ``True`` when the algorithm succeeds in finding an optimal + solution. + status : int + An integer representing the exit status of the algorithm. + + ``0`` : Optimization terminated successfully. + + ``1`` : Iteration limit reached. + + ``2`` : Problem appears to be infeasible. + + ``3`` : Problem appears to be unbounded. + + ``4`` : Numerical difficulties encountered. + + nit : int + The total number of iterations performed in all phases. + message : str + A string descriptor of the exit status of the algorithm. + + See Also + -------- + show_options : Additional options accepted by the solvers. + + Notes + ----- + This section describes the available solvers that can be selected by the + 'method' parameter. + + `'highs-ds'` and + `'highs-ipm'` are interfaces to the + HiGHS simplex and interior-point method solvers [13]_, respectively. + `'highs'` (default) chooses between + the two automatically. These are the fastest linear + programming solvers in SciPy, especially for large, sparse problems; + which of these two is faster is problem-dependent. + The other solvers (`'interior-point'`, `'revised simplex'`, and + `'simplex'`) are legacy methods and will be removed in SciPy 1.11.0. + + Method *highs-ds* is a wrapper of the C++ high performance dual + revised simplex implementation (HSOL) [13]_, [14]_. 
Method *highs-ipm* + is a wrapper of a C++ implementation of an **i**\ nterior-\ **p**\ oint + **m**\ ethod [13]_; it features a crossover routine, so it is as accurate + as a simplex solver. Method *highs* chooses between the two automatically. + For new code involving `linprog`, we recommend explicitly choosing one of + these three method values. + + .. versionadded:: 1.6.0 + + Method *interior-point* uses the primal-dual path following algorithm + as outlined in [4]_. This algorithm supports sparse constraint matrices and + is typically faster than the simplex methods, especially for large, sparse + problems. Note, however, that the solution returned may be slightly less + accurate than those of the simplex methods and will not, in general, + correspond with a vertex of the polytope defined by the constraints. + + .. versionadded:: 1.0.0 + + Method *revised simplex* uses the revised simplex method as described in + [9]_, except that a factorization [11]_ of the basis matrix, rather than + its inverse, is efficiently maintained and used to solve the linear systems + at each iteration of the algorithm. + + .. versionadded:: 1.3.0 + + Method *simplex* uses a traditional, full-tableau implementation of + Dantzig's simplex algorithm [1]_, [2]_ (*not* the + Nelder-Mead simplex). This algorithm is included for backwards + compatibility and educational purposes. + + .. versionadded:: 0.15.0 + + Before applying *interior-point*, *revised simplex*, or *simplex*, + a presolve procedure based on [8]_ attempts + to identify trivial infeasibilities, trivial unboundedness, and potential + problem simplifications. Specifically, it checks for: + + - rows of zeros in ``A_eq`` or ``A_ub``, representing trivial constraints; + - columns of zeros in ``A_eq`` `and` ``A_ub``, representing unconstrained + variables; + - column singletons in ``A_eq``, representing fixed variables; and + - column singletons in ``A_ub``, representing simple bounds. + + If presolve reveals that the problem is unbounded (e.g. an unconstrained + and unbounded variable has negative cost) or infeasible (e.g., a row of + zeros in ``A_eq`` corresponds with a nonzero in ``b_eq``), the solver + terminates with the appropriate status code. Note that presolve terminates + as soon as any sign of unboundedness is detected; consequently, a problem + may be reported as unbounded when in reality the problem is infeasible + (but infeasibility has not been detected yet). Therefore, if it is + important to know whether the problem is actually infeasible, solve the + problem again with option ``presolve=False``. + + If neither infeasibility nor unboundedness are detected in a single pass + of the presolve, bounds are tightened where possible and fixed + variables are removed from the problem. Then, linearly dependent rows + of the ``A_eq`` matrix are removed, (unless they represent an + infeasibility) to avoid numerical difficulties in the primary solve + routine. Note that rows that are nearly linearly dependent (within a + prescribed tolerance) may also be removed, which can change the optimal + solution in rare cases. If this is a concern, eliminate redundancy from + your problem formulation and run with option ``rr=False`` or + ``presolve=False``. 
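As suggested above, re-solving with presolve (and, for the legacy methods, redundancy removal) disabled is a quick way to check whether a surprising status comes from presolve shortcuts rather than from the main solver. A minimal sketch:

from scipy.optimize import linprog

c = [-1, 4]
A_ub = [[-3, 1], [1, 2]]
b_ub = [6, 4]

res = linprog(c, A_ub=A_ub, b_ub=b_ub, method='interior-point',
              options={'presolve': False, 'rr': False})
print(res.status, res.message)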
+ + Several potential improvements can be made here: additional presolve + checks outlined in [8]_ should be implemented, the presolve routine should + be run multiple times (until no further simplifications can be made), and + more of the efficiency improvements from [5]_ should be implemented in the + redundancy removal routines. + + After presolve, the problem is transformed to standard form by converting + the (tightened) simple bounds to upper bound constraints, introducing + non-negative slack variables for inequality constraints, and expressing + unbounded variables as the difference between two non-negative variables. + Optionally, the problem is automatically scaled via equilibration [12]_. + The selected algorithm solves the standard form problem, and a + postprocessing routine converts the result to a solution to the original + problem. + + References + ---------- + .. [1] Dantzig, George B., Linear programming and extensions. Rand + Corporation Research Study Princeton Univ. Press, Princeton, NJ, + 1963 + .. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to + Mathematical Programming", McGraw-Hill, Chapter 4. + .. [3] Bland, Robert G. New finite pivoting rules for the simplex method. + Mathematics of Operations Research (2), 1977: pp. 103-107. + .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point + optimizer for linear programming: an implementation of the + homogeneous algorithm." High performance optimization. Springer US, + 2000. 197-232. + .. [5] Andersen, Erling D. "Finding all linearly dependent rows in + large-scale linear programming." Optimization Methods and Software + 6.3 (1995): 219-227. + .. [6] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear + Programming based on Newton's Method." Unpublished Course Notes, + March 2004. Available 2/25/2017 at + https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf + .. [7] Fourer, Robert. "Solving Linear Programs by Interior-Point Methods." + Unpublished Course Notes, August 26, 2005. Available 2/25/2017 at + http://www.4er.org/CourseNotes/Book%20B/B-III.pdf + .. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear + programming." Mathematical Programming 71.2 (1995): 221-245. + .. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear + programming." Athena Scientific 1 (1997): 997. + .. [10] Andersen, Erling D., et al. Implementation of interior point + methods for large scale linear programming. HEC/Universite de + Geneve, 1996. + .. [11] Bartels, Richard H. "A stabilization of the simplex method." + Journal in Numerische Mathematik 16.5 (1971): 414-434. + .. [12] Tomlin, J. A. "On scaling linear programming problems." + Mathematical Programming Study 4 (1975): 146-166. + .. [13] Huangfu, Q., Galabova, I., Feldmeier, M., and Hall, J. A. J. + "HiGHS - high performance software for linear optimization." + https://highs.dev/ + .. [14] Huangfu, Q. and Hall, J. A. J. "Parallelizing the dual revised + simplex method." Mathematical Programming Computation, 10 (1), + 119-142, 2018. DOI: 10.1007/s12532-017-0130-5 + + Examples + -------- + Consider the following problem: + + .. math:: + + \min_{x_0, x_1} \ -x_0 + 4x_1 & \\ + \mbox{such that} \ -3x_0 + x_1 & \leq 6,\\ + -x_0 - 2x_1 & \geq -4,\\ + x_1 & \geq -3. + + The problem is not presented in the form accepted by `linprog`. 
This is + easily remedied by converting the "greater than" inequality + constraint to a "less than" inequality constraint by + multiplying both sides by a factor of :math:`-1`. Note also that the last + constraint is really the simple bound :math:`-3 \leq x_1 \leq \infty`. + Finally, since there are no bounds on :math:`x_0`, we must explicitly + specify the bounds :math:`-\infty \leq x_0 \leq \infty`, as the + default is for variables to be non-negative. After collecting coeffecients + into arrays and tuples, the input for this problem is: + + >>> from scipy.optimize import linprog + >>> c = [-1, 4] + >>> A = [[-3, 1], [1, 2]] + >>> b = [6, 4] + >>> x0_bounds = (None, None) + >>> x1_bounds = (-3, None) + >>> res = linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds]) + >>> res.fun + -22.0 + >>> res.x + array([10., -3.]) + >>> res.message + 'Optimization terminated successfully. (HiGHS Status 7: Optimal)' + + The marginals (AKA dual values / shadow prices / Lagrange multipliers) + and residuals (slacks) are also available. + + >>> res.ineqlin + residual: [ 3.900e+01 0.000e+00] + marginals: [-0.000e+00 -1.000e+00] + + For example, because the marginal associated with the second inequality + constraint is -1, we expect the optimal value of the objective function + to decrease by ``eps`` if we add a small amount ``eps`` to the right hand + side of the second inequality constraint: + + >>> eps = 0.05 + >>> b[1] += eps + >>> linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds]).fun + -22.05 + + Also, because the residual on the first inequality constraint is 39, we + can decrease the right hand side of the first constraint by 39 without + affecting the optimal solution. + + >>> b = [6, 4] # reset to original values + >>> b[0] -= 39 + >>> linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds]).fun + -22.0 + + """ + + meth = method.lower() + methods = {"highs", "highs-ds", "highs-ipm", + "simplex", "revised simplex", "interior-point"} + + if meth not in methods: + raise ValueError(f"Unknown solver '{method}'") + + if x0 is not None and meth != "revised simplex": + warning_message = "x0 is used only when method is 'revised simplex'. " + warn(warning_message, OptimizeWarning, stacklevel=2) + + if np.any(integrality) and not meth == "highs": + integrality = None + warning_message = ("Only `method='highs'` supports integer " + "constraints. Ignoring `integrality`.") + warn(warning_message, OptimizeWarning, stacklevel=2) + elif np.any(integrality): + integrality = np.broadcast_to(integrality, np.shape(c)) + + lp = _LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality) + lp, solver_options = _parse_linprog(lp, options, meth) + tol = solver_options.get('tol', 1e-9) + + # Give unmodified problem to HiGHS + if meth.startswith('highs'): + if callback is not None: + raise NotImplementedError("HiGHS solvers do not support the " + "callback interface.") + highs_solvers = {'highs-ipm': 'ipm', 'highs-ds': 'simplex', + 'highs': None} + + sol = _linprog_highs(lp, solver=highs_solvers[meth], + **solver_options) + sol['status'], sol['message'] = ( + _check_result(sol['x'], sol['fun'], sol['status'], sol['slack'], + sol['con'], lp.bounds, tol, sol['message'], + integrality)) + sol['success'] = sol['status'] == 0 + return OptimizeResult(sol) + + warn(f"`method='{meth}'` is deprecated and will be removed in SciPy " + "1.11.0. Please use one of the HiGHS solvers (e.g. 
" + "`method='highs'`) in new code.", DeprecationWarning, stacklevel=2) + + iteration = 0 + complete = False # will become True if solved in presolve + undo = [] + + # Keep the original arrays to calculate slack/residuals for original + # problem. + lp_o = deepcopy(lp) + + # Solve trivial problem, eliminate variables, tighten bounds, etc. + rr_method = solver_options.pop('rr_method', None) # need to pop these; + rr = solver_options.pop('rr', True) # they're not passed to methods + c0 = 0 # we might get a constant term in the objective + if solver_options.pop('presolve', True): + (lp, c0, x, undo, complete, status, message) = _presolve(lp, rr, + rr_method, + tol) + + C, b_scale = 1, 1 # for trivial unscaling if autoscale is not used + postsolve_args = (lp_o._replace(bounds=lp.bounds), undo, C, b_scale) + + if not complete: + A, b, c, c0, x0 = _get_Abc(lp, c0) + if solver_options.pop('autoscale', False): + A, b, c, x0, C, b_scale = _autoscale(A, b, c, x0) + postsolve_args = postsolve_args[:-2] + (C, b_scale) + + if meth == 'simplex': + x, status, message, iteration = _linprog_simplex( + c, c0=c0, A=A, b=b, callback=callback, + postsolve_args=postsolve_args, **solver_options) + elif meth == 'interior-point': + x, status, message, iteration = _linprog_ip( + c, c0=c0, A=A, b=b, callback=callback, + postsolve_args=postsolve_args, **solver_options) + elif meth == 'revised simplex': + x, status, message, iteration = _linprog_rs( + c, c0=c0, A=A, b=b, x0=x0, callback=callback, + postsolve_args=postsolve_args, **solver_options) + + # Eliminate artificial variables, re-introduce presolved variables, etc. + disp = solver_options.get('disp', False) + + x, fun, slack, con = _postsolve(x, postsolve_args, complete) + + status, message = _check_result(x, fun, status, slack, con, lp_o.bounds, + tol, message, integrality) + + if disp: + _display_summary(message, status, fun, iteration) + + sol = { + 'x': x, + 'fun': fun, + 'slack': slack, + 'con': con, + 'status': status, + 'message': message, + 'nit': iteration, + 'success': status == 0} + + return OptimizeResult(sol) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_linprog_ip.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_linprog_ip.py new file mode 100644 index 0000000000000000000000000000000000000000..73bca3037f0e548f2420ba6be220446e94ddeb69 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_linprog_ip.py @@ -0,0 +1,1126 @@ +"""Interior-point method for linear programming + +The *interior-point* method uses the primal-dual path following algorithm +outlined in [1]_. This algorithm supports sparse constraint matrices and +is typically faster than the simplex methods, especially for large, sparse +problems. Note, however, that the solution returned may be slightly less +accurate than those of the simplex methods and will not, in general, +correspond with a vertex of the polytope defined by the constraints. + + .. versionadded:: 1.0.0 + +References +---------- +.. [1] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point + optimizer for linear programming: an implementation of the + homogeneous algorithm." High performance optimization. Springer US, + 2000. 197-232. 
+""" +# Author: Matt Haberland + +import numpy as np +import scipy as sp +import scipy.sparse as sps +from warnings import warn +from scipy.linalg import LinAlgError +from ._optimize import OptimizeWarning, OptimizeResult, _check_unknown_options +from ._linprog_util import _postsolve +has_umfpack = True +has_cholmod = True +try: + import sksparse # noqa: F401 + from sksparse.cholmod import cholesky as cholmod # noqa: F401 + from sksparse.cholmod import analyze as cholmod_analyze +except ImportError: + has_cholmod = False +try: + import scikits.umfpack # test whether to use factorized # noqa: F401 +except ImportError: + has_umfpack = False + + +def _get_solver(M, sparse=False, lstsq=False, sym_pos=True, + cholesky=True, permc_spec='MMD_AT_PLUS_A'): + """ + Given solver options, return a handle to the appropriate linear system + solver. + + Parameters + ---------- + M : 2-D array + As defined in [4] Equation 8.31 + sparse : bool (default = False) + True if the system to be solved is sparse. This is typically set + True when the original ``A_ub`` and ``A_eq`` arrays are sparse. + lstsq : bool (default = False) + True if the system is ill-conditioned and/or (nearly) singular and + thus a more robust least-squares solver is desired. This is sometimes + needed as the solution is approached. + sym_pos : bool (default = True) + True if the system matrix is symmetric positive definite + Sometimes this needs to be set false as the solution is approached, + even when the system should be symmetric positive definite, due to + numerical difficulties. + cholesky : bool (default = True) + True if the system is to be solved by Cholesky, rather than LU, + decomposition. This is typically faster unless the problem is very + small or prone to numerical difficulties. + permc_spec : str (default = 'MMD_AT_PLUS_A') + Sparsity preservation strategy used by SuperLU. Acceptable values are: + + - ``NATURAL``: natural ordering. + - ``MMD_ATA``: minimum degree ordering on the structure of A^T A. + - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A. + - ``COLAMD``: approximate minimum degree column ordering. + + See SuperLU documentation. + + Returns + ------- + solve : function + Handle to the appropriate solver function + + """ + try: + if sparse: + if lstsq: + def solve(r, sym_pos=False): + return sps.linalg.lsqr(M, r)[0] + elif cholesky: + try: + # Will raise an exception in the first call, + # or when the matrix changes due to a new problem + _get_solver.cholmod_factor.cholesky_inplace(M) + except Exception: + _get_solver.cholmod_factor = cholmod_analyze(M) + _get_solver.cholmod_factor.cholesky_inplace(M) + solve = _get_solver.cholmod_factor + else: + if has_umfpack and sym_pos: + solve = sps.linalg.factorized(M) + else: # factorized doesn't pass permc_spec + solve = sps.linalg.splu(M, permc_spec=permc_spec).solve + + else: + if lstsq: # sometimes necessary as solution is approached + def solve(r): + return sp.linalg.lstsq(M, r)[0] + elif cholesky: + L = sp.linalg.cho_factor(M) + + def solve(r): + return sp.linalg.cho_solve(L, r) + else: + # this seems to cache the matrix factorization, so solving + # with multiple right hand sides is much faster + def solve(r, sym_pos=sym_pos): + if sym_pos: + return sp.linalg.solve(M, r, assume_a="pos") + else: + return sp.linalg.solve(M, r) + # There are many things that can go wrong here, and it's hard to say + # what all of them are. It doesn't really matter: if the matrix can't be + # factorized, return None. 
get_solver will be called again with different + # inputs, and a new routine will try to factorize the matrix. + except KeyboardInterrupt: + raise + except Exception: + return None + return solve + + +def _get_delta(A, b, c, x, y, z, tau, kappa, gamma, eta, sparse=False, + lstsq=False, sym_pos=True, cholesky=True, pc=True, ip=False, + permc_spec='MMD_AT_PLUS_A'): + """ + Given standard form problem defined by ``A``, ``b``, and ``c``; + current variable estimates ``x``, ``y``, ``z``, ``tau``, and ``kappa``; + algorithmic parameters ``gamma and ``eta; + and options ``sparse``, ``lstsq``, ``sym_pos``, ``cholesky``, ``pc`` + (predictor-corrector), and ``ip`` (initial point improvement), + get the search direction for increments to the variable estimates. + + Parameters + ---------- + As defined in [4], except: + sparse : bool + True if the system to be solved is sparse. This is typically set + True when the original ``A_ub`` and ``A_eq`` arrays are sparse. + lstsq : bool + True if the system is ill-conditioned and/or (nearly) singular and + thus a more robust least-squares solver is desired. This is sometimes + needed as the solution is approached. + sym_pos : bool + True if the system matrix is symmetric positive definite + Sometimes this needs to be set false as the solution is approached, + even when the system should be symmetric positive definite, due to + numerical difficulties. + cholesky : bool + True if the system is to be solved by Cholesky, rather than LU, + decomposition. This is typically faster unless the problem is very + small or prone to numerical difficulties. + pc : bool + True if the predictor-corrector method of Mehrota is to be used. This + is almost always (if not always) beneficial. Even though it requires + the solution of an additional linear system, the factorization + is typically (implicitly) reused so solution is efficient, and the + number of algorithm iterations is typically reduced. + ip : bool + True if the improved initial point suggestion due to [4] section 4.3 + is desired. It's unclear whether this is beneficial. + permc_spec : str (default = 'MMD_AT_PLUS_A') + (Has effect only with ``sparse = True``, ``lstsq = False``, ``sym_pos = + True``.) A matrix is factorized in each iteration of the algorithm. + This option specifies how to permute the columns of the matrix for + sparsity preservation. Acceptable values are: + + - ``NATURAL``: natural ordering. + - ``MMD_ATA``: minimum degree ordering on the structure of A^T A. + - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A. + - ``COLAMD``: approximate minimum degree column ordering. + + This option can impact the convergence of the + interior point algorithm; test different values to determine which + performs best for your problem. For more information, refer to + ``scipy.sparse.linalg.splu``. + + Returns + ------- + Search directions as defined in [4] + + References + ---------- + .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point + optimizer for linear programming: an implementation of the + homogeneous algorithm." High performance optimization. Springer US, + 2000. 197-232. + + """ + if A.shape[0] == 0: + # If there are no constraints, some solvers fail (understandably) + # rather than returning empty solution. This gets the job done. 
+ sparse, lstsq, sym_pos, cholesky = False, False, True, False + n_x = len(x) + + # [4] Equation 8.8 + r_P = b * tau - A.dot(x) + r_D = c * tau - A.T.dot(y) - z + r_G = c.dot(x) - b.transpose().dot(y) + kappa + mu = (x.dot(z) + tau * kappa) / (n_x + 1) + + # Assemble M from [4] Equation 8.31 + Dinv = x / z + + if sparse: + M = A.dot(sps.diags(Dinv, 0, format="csc").dot(A.T)) + else: + M = A.dot(Dinv.reshape(-1, 1) * A.T) + solve = _get_solver(M, sparse, lstsq, sym_pos, cholesky, permc_spec) + + # pc: "predictor-corrector" [4] Section 4.1 + # In development this option could be turned off + # but it always seems to improve performance substantially + n_corrections = 1 if pc else 0 + + i = 0 + alpha, d_x, d_z, d_tau, d_kappa = 0, 0, 0, 0, 0 + while i <= n_corrections: + # Reference [4] Eq. 8.6 + rhatp = eta(gamma) * r_P + rhatd = eta(gamma) * r_D + rhatg = eta(gamma) * r_G + + # Reference [4] Eq. 8.7 + rhatxs = gamma * mu - x * z + rhattk = gamma * mu - tau * kappa + + if i == 1: + if ip: # if the correction is to get "initial point" + # Reference [4] Eq. 8.23 + rhatxs = ((1 - alpha) * gamma * mu - + x * z - alpha**2 * d_x * d_z) + rhattk = ((1 - alpha) * gamma * mu - + tau * kappa - + alpha**2 * d_tau * d_kappa) + else: # if the correction is for "predictor-corrector" + # Reference [4] Eq. 8.13 + rhatxs -= d_x * d_z + rhattk -= d_tau * d_kappa + + # sometimes numerical difficulties arise as the solution is approached + # this loop tries to solve the equations using a sequence of functions + # for solve. For dense systems, the order is: + # 1. scipy.linalg.cho_factor/scipy.linalg.cho_solve, + # 2. scipy.linalg.solve w/ sym_pos = True, + # 3. scipy.linalg.solve w/ sym_pos = False, and if all else fails + # 4. scipy.linalg.lstsq + # For sparse systems, the order is: + # 1. sksparse.cholmod.cholesky (if available) + # 2. scipy.sparse.linalg.factorized (if umfpack available) + # 3. scipy.sparse.linalg.splu + # 4. scipy.sparse.linalg.lsqr + solved = False + while not solved: + try: + # [4] Equation 8.28 + p, q = _sym_solve(Dinv, A, c, b, solve) + # [4] Equation 8.29 + u, v = _sym_solve(Dinv, A, rhatd - + (1 / x) * rhatxs, rhatp, solve) + if np.any(np.isnan(p)) or np.any(np.isnan(q)): + raise LinAlgError + solved = True + except (LinAlgError, ValueError, TypeError) as e: + # Usually this doesn't happen. If it does, it happens when + # there are redundant constraints or when approaching the + # solution. If so, change solver. + if cholesky: + cholesky = False + warn( + "Solving system with option 'cholesky':True " + "failed. It is normal for this to happen " + "occasionally, especially as the solution is " + "approached. However, if you see this frequently, " + "consider setting option 'cholesky' to False.", + OptimizeWarning, stacklevel=5) + elif sym_pos: + sym_pos = False + warn( + "Solving system with option 'sym_pos':True " + "failed. It is normal for this to happen " + "occasionally, especially as the solution is " + "approached. However, if you see this frequently, " + "consider setting option 'sym_pos' to False.", + OptimizeWarning, stacklevel=5) + elif not lstsq: + lstsq = True + warn( + "Solving system with option 'sym_pos':False " + "failed. This may happen occasionally, " + "especially as the solution is " + "approached. However, if you see this frequently, " + "your problem may be numerically challenging. " + "If you cannot improve the formulation, consider " + "setting 'lstsq' to True. 
Consider also setting " + "`presolve` to True, if it is not already.", + OptimizeWarning, stacklevel=5) + else: + raise e + solve = _get_solver(M, sparse, lstsq, sym_pos, + cholesky, permc_spec) + # [4] Results after 8.29 + d_tau = ((rhatg + 1 / tau * rhattk - (-c.dot(u) + b.dot(v))) / + (1 / tau * kappa + (-c.dot(p) + b.dot(q)))) + d_x = u + p * d_tau + d_y = v + q * d_tau + + # [4] Relations between after 8.25 and 8.26 + d_z = (1 / x) * (rhatxs - z * d_x) + d_kappa = 1 / tau * (rhattk - kappa * d_tau) + + # [4] 8.12 and "Let alpha be the maximal possible step..." before 8.23 + alpha = _get_step(x, d_x, z, d_z, tau, d_tau, kappa, d_kappa, 1) + if ip: # initial point - see [4] 4.4 + gamma = 10 + else: # predictor-corrector, [4] definition after 8.12 + beta1 = 0.1 # [4] pg. 220 (Table 8.1) + gamma = (1 - alpha)**2 * min(beta1, (1 - alpha)) + i += 1 + + return d_x, d_y, d_z, d_tau, d_kappa + + +def _sym_solve(Dinv, A, r1, r2, solve): + """ + An implementation of [4] equation 8.31 and 8.32 + + References + ---------- + .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point + optimizer for linear programming: an implementation of the + homogeneous algorithm." High performance optimization. Springer US, + 2000. 197-232. + + """ + # [4] 8.31 + r = r2 + A.dot(Dinv * r1) + v = solve(r) + # [4] 8.32 + u = Dinv * (A.T.dot(v) - r1) + return u, v + + +def _get_step(x, d_x, z, d_z, tau, d_tau, kappa, d_kappa, alpha0): + """ + An implementation of [4] equation 8.21 + + References + ---------- + .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point + optimizer for linear programming: an implementation of the + homogeneous algorithm." High performance optimization. Springer US, + 2000. 197-232. + + """ + # [4] 4.3 Equation 8.21, ignoring 8.20 requirement + # same step is taken in primal and dual spaces + # alpha0 is basically beta3 from [4] Table 8.1, but instead of beta3 + # the value 1 is used in Mehrota corrector and initial point correction + i_x = d_x < 0 + i_z = d_z < 0 + alpha_x = alpha0 * np.min(x[i_x] / -d_x[i_x]) if np.any(i_x) else 1 + alpha_tau = alpha0 * tau / -d_tau if d_tau < 0 else 1 + alpha_z = alpha0 * np.min(z[i_z] / -d_z[i_z]) if np.any(i_z) else 1 + alpha_kappa = alpha0 * kappa / -d_kappa if d_kappa < 0 else 1 + alpha = np.min([1, alpha_x, alpha_tau, alpha_z, alpha_kappa]) + return alpha + + +def _get_message(status): + """ + Given problem status code, return a more detailed message. + + Parameters + ---------- + status : int + An integer representing the exit status of the optimization:: + + 0 : Optimization terminated successfully + 1 : Iteration limit reached + 2 : Problem appears to be infeasible + 3 : Problem appears to be unbounded + 4 : Serious numerical difficulties encountered + + Returns + ------- + message : str + A string descriptor of the exit status of the optimization. + + """ + messages = ( + ["Optimization terminated successfully.", + "The iteration limit was reached before the algorithm converged.", + "The algorithm terminated successfully and determined that the " + "problem is infeasible.", + "The algorithm terminated successfully and determined that the " + "problem is unbounded.", + "Numerical difficulties were encountered before the problem " + "converged. Please check your problem formulation for errors, " + "independence of linear equality constraints, and reasonable " + "scaling and matrix condition numbers. If you continue to " + "encounter this error, please submit a bug report." 
+ ]) + return messages[status] + + +def _do_step(x, y, z, tau, kappa, d_x, d_y, d_z, d_tau, d_kappa, alpha): + """ + An implementation of [4] Equation 8.9 + + References + ---------- + .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point + optimizer for linear programming: an implementation of the + homogeneous algorithm." High performance optimization. Springer US, + 2000. 197-232. + + """ + x = x + alpha * d_x + tau = tau + alpha * d_tau + z = z + alpha * d_z + kappa = kappa + alpha * d_kappa + y = y + alpha * d_y + return x, y, z, tau, kappa + + +def _get_blind_start(shape): + """ + Return the starting point from [4] 4.4 + + References + ---------- + .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point + optimizer for linear programming: an implementation of the + homogeneous algorithm." High performance optimization. Springer US, + 2000. 197-232. + + """ + m, n = shape + x0 = np.ones(n) + y0 = np.zeros(m) + z0 = np.ones(n) + tau0 = 1 + kappa0 = 1 + return x0, y0, z0, tau0, kappa0 + + +def _indicators(A, b, c, c0, x, y, z, tau, kappa): + """ + Implementation of several equations from [4] used as indicators of + the status of optimization. + + References + ---------- + .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point + optimizer for linear programming: an implementation of the + homogeneous algorithm." High performance optimization. Springer US, + 2000. 197-232. + + """ + + # residuals for termination are relative to initial values + x0, y0, z0, tau0, kappa0 = _get_blind_start(A.shape) + + # See [4], Section 4 - The Homogeneous Algorithm, Equation 8.8 + def r_p(x, tau): + return b * tau - A.dot(x) + + def r_d(y, z, tau): + return c * tau - A.T.dot(y) - z + + def r_g(x, y, kappa): + return kappa + c.dot(x) - b.dot(y) + + # np.dot unpacks if they are arrays of size one + def mu(x, tau, z, kappa): + return (x.dot(z) + np.dot(tau, kappa)) / (len(x) + 1) + + obj = c.dot(x / tau) + c0 + + def norm(a): + return np.linalg.norm(a) + + # See [4], Section 4.5 - The Stopping Criteria + r_p0 = r_p(x0, tau0) + r_d0 = r_d(y0, z0, tau0) + r_g0 = r_g(x0, y0, kappa0) + mu_0 = mu(x0, tau0, z0, kappa0) + rho_A = norm(c.T.dot(x) - b.T.dot(y)) / (tau + norm(b.T.dot(y))) + rho_p = norm(r_p(x, tau)) / max(1, norm(r_p0)) + rho_d = norm(r_d(y, z, tau)) / max(1, norm(r_d0)) + rho_g = norm(r_g(x, y, kappa)) / max(1, norm(r_g0)) + rho_mu = mu(x, tau, z, kappa) / mu_0 + return rho_p, rho_d, rho_A, rho_g, rho_mu, obj + + +def _display_iter(rho_p, rho_d, rho_g, alpha, rho_mu, obj, header=False): + """ + Print indicators of optimization status to the console. + + Parameters + ---------- + rho_p : float + The (normalized) primal feasibility, see [4] 4.5 + rho_d : float + The (normalized) dual feasibility, see [4] 4.5 + rho_g : float + The (normalized) duality gap, see [4] 4.5 + alpha : float + The step size, see [4] 4.3 + rho_mu : float + The (normalized) path parameter, see [4] 4.5 + obj : float + The objective function value of the current iterate + header : bool + True if a header is to be printed + + References + ---------- + .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point + optimizer for linear programming: an implementation of the + homogeneous algorithm." High performance optimization. Springer US, + 2000. 197-232. 
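The ratio test behind `_get_step` (whose result is then applied by `_do_step`) can be sketched for a single nonnegative vector. The helper name below is hypothetical, and only the `alpha0` scaling of [4] Eq. 8.21 is reproduced.

import numpy as np

def max_feasible_step(x, d_x, alpha0=0.99995):
    # Largest alpha in (0, 1] keeping x + alpha*d_x nonnegative, scaled by
    # alpha0 so the iterate stays strictly in the interior.
    neg = d_x < 0
    if not np.any(neg):
        return 1.0
    return min(1.0, alpha0 * float(np.min(x[neg] / -d_x[neg])))

x = np.array([1.0, 2.0, 0.5])
d_x = np.array([-0.5, 1.0, -1.0])
print(max_feasible_step(x, d_x))   # ~0.5, limited by the last component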
+ + """ + if header: + print("Primal Feasibility ", + "Dual Feasibility ", + "Duality Gap ", + "Step ", + "Path Parameter ", + "Objective ") + + # no clue why this works + fmt = '{0:<20.13}{1:<20.13}{2:<20.13}{3:<17.13}{4:<20.13}{5:<20.13}' + print(fmt.format( + float(rho_p), + float(rho_d), + float(rho_g), + alpha if isinstance(alpha, str) else float(alpha), + float(rho_mu), + float(obj))) + + +def _ip_hsd(A, b, c, c0, alpha0, beta, maxiter, disp, tol, sparse, lstsq, + sym_pos, cholesky, pc, ip, permc_spec, callback, postsolve_args): + r""" + Solve a linear programming problem in standard form: + + Minimize:: + + c @ x + + Subject to:: + + A @ x == b + x >= 0 + + using the interior point method of [4]. + + Parameters + ---------- + A : 2-D array + 2-D array such that ``A @ x``, gives the values of the equality + constraints at ``x``. + b : 1-D array + 1-D array of values representing the RHS of each equality constraint + (row) in ``A`` (for standard form problem). + c : 1-D array + Coefficients of the linear objective function to be minimized (for + standard form problem). + c0 : float + Constant term in objective function due to fixed (and eliminated) + variables. (Purely for display.) + alpha0 : float + The maximal step size for Mehrota's predictor-corrector search + direction; see :math:`\beta_3`of [4] Table 8.1 + beta : float + The desired reduction of the path parameter :math:`\mu` (see [6]_) + maxiter : int + The maximum number of iterations of the algorithm. + disp : bool + Set to ``True`` if indicators of optimization status are to be printed + to the console each iteration. + tol : float + Termination tolerance; see [4]_ Section 4.5. + sparse : bool + Set to ``True`` if the problem is to be treated as sparse. However, + the inputs ``A_eq`` and ``A_ub`` should nonetheless be provided as + (dense) arrays rather than sparse matrices. + lstsq : bool + Set to ``True`` if the problem is expected to be very poorly + conditioned. This should always be left as ``False`` unless severe + numerical difficulties are frequently encountered, and a better option + would be to improve the formulation of the problem. + sym_pos : bool + Leave ``True`` if the problem is expected to yield a well conditioned + symmetric positive definite normal equation matrix (almost always). + cholesky : bool + Set to ``True`` if the normal equations are to be solved by explicit + Cholesky decomposition followed by explicit forward/backward + substitution. This is typically faster for moderate, dense problems + that are numerically well-behaved. + pc : bool + Leave ``True`` if the predictor-corrector method of Mehrota is to be + used. This is almost always (if not always) beneficial. + ip : bool + Set to ``True`` if the improved initial point suggestion due to [4]_ + Section 4.3 is desired. It's unclear whether this is beneficial. + permc_spec : str (default = 'MMD_AT_PLUS_A') + (Has effect only with ``sparse = True``, ``lstsq = False``, ``sym_pos = + True``.) A matrix is factorized in each iteration of the algorithm. + This option specifies how to permute the columns of the matrix for + sparsity preservation. Acceptable values are: + + - ``NATURAL``: natural ordering. + - ``MMD_ATA``: minimum degree ordering on the structure of A^T A. + - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A. + - ``COLAMD``: approximate minimum degree column ordering. 
+ + This option can impact the convergence of the + interior point algorithm; test different values to determine which + performs best for your problem. For more information, refer to + ``scipy.sparse.linalg.splu``. + callback : callable, optional + If a callback function is provided, it will be called within each + iteration of the algorithm. The callback function must accept a single + `scipy.optimize.OptimizeResult` consisting of the following fields: + + x : 1-D array + Current solution vector + fun : float + Current value of the objective function + success : bool + True only when an algorithm has completed successfully, + so this is always False as the callback function is called + only while the algorithm is still iterating. + slack : 1-D array + The values of the slack variables. Each slack variable + corresponds to an inequality constraint. If the slack is zero, + the corresponding constraint is active. + con : 1-D array + The (nominally zero) residuals of the equality constraints, + that is, ``b - A_eq @ x`` + phase : int + The phase of the algorithm being executed. This is always + 1 for the interior-point method because it has only one phase. + status : int + For revised simplex, this is always 0 because if a different + status is detected, the algorithm terminates. + nit : int + The number of iterations performed. + message : str + A string descriptor of the exit status of the optimization. + postsolve_args : tuple + Data needed by _postsolve to convert the solution to the standard-form + problem into the solution to the original problem. + + Returns + ------- + x_hat : float + Solution vector (for standard form problem). + status : int + An integer representing the exit status of the optimization:: + + 0 : Optimization terminated successfully + 1 : Iteration limit reached + 2 : Problem appears to be infeasible + 3 : Problem appears to be unbounded + 4 : Serious numerical difficulties encountered + + message : str + A string descriptor of the exit status of the optimization. + iteration : int + The number of iterations taken to solve the problem + + References + ---------- + .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point + optimizer for linear programming: an implementation of the + homogeneous algorithm." High performance optimization. Springer US, + 2000. 197-232. + .. [6] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear + Programming based on Newton's Method." Unpublished Course Notes, + March 2004. Available 2/25/2017 at: + https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf + + """ + + iteration = 0 + + # default initial point + x, y, z, tau, kappa = _get_blind_start(A.shape) + + # first iteration is special improvement of initial point + ip = ip if pc else False + + # [4] 4.5 + rho_p, rho_d, rho_A, rho_g, rho_mu, obj = _indicators( + A, b, c, c0, x, y, z, tau, kappa) + go = rho_p > tol or rho_d > tol or rho_A > tol # we might get lucky : ) + + if disp: + _display_iter(rho_p, rho_d, rho_g, "-", rho_mu, obj, header=True) + if callback is not None: + x_o, fun, slack, con = _postsolve(x/tau, postsolve_args) + res = OptimizeResult({'x': x_o, 'fun': fun, 'slack': slack, + 'con': con, 'nit': iteration, 'phase': 1, + 'complete': False, 'status': 0, + 'message': "", 'success': False}) + callback(res) + + status = 0 + message = "Optimization terminated successfully." 
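    # Outline of the main loop that follows (cf. [4] Sections 4.1-4.5):
    #   1. pick the centering parameter gamma and the weight eta(gamma)
    #      (gamma = 1 for the optional initial-point step, gamma = 0 for
    #      Mehrotra's predictor step, or beta*mean(z*x) when pc is False);
    #   2. solve for the search direction (d_x, d_y, d_z, d_tau, d_kappa)
    #      with _get_delta;
    #   3. take the largest safe step alpha (_get_step) and update the
    #      iterate with _do_step;
    #   4. recompute rho_p, rho_d, rho_A, rho_g, rho_mu and stop on
    #      convergence, detected infeasibility/unboundedness, or maxiter.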
+ + if sparse: + A = sps.csc_matrix(A) + + while go: + + iteration += 1 + + if ip: # initial point + # [4] Section 4.4 + gamma = 1 + + def eta(g): + return 1 + else: + # gamma = 0 in predictor step according to [4] 4.1 + # if predictor/corrector is off, use mean of complementarity [6] + # 5.1 / [4] Below Figure 10-4 + gamma = 0 if pc else beta * np.mean(z * x) + # [4] Section 4.1 + + def eta(g=gamma): + return 1 - g + + try: + # Solve [4] 8.6 and 8.7/8.13/8.23 + d_x, d_y, d_z, d_tau, d_kappa = _get_delta( + A, b, c, x, y, z, tau, kappa, gamma, eta, + sparse, lstsq, sym_pos, cholesky, pc, ip, permc_spec) + + if ip: # initial point + # [4] 4.4 + # Formula after 8.23 takes a full step regardless if this will + # take it negative + alpha = 1.0 + x, y, z, tau, kappa = _do_step( + x, y, z, tau, kappa, d_x, d_y, + d_z, d_tau, d_kappa, alpha) + x[x < 1] = 1 + z[z < 1] = 1 + tau = max(1, tau) + kappa = max(1, kappa) + ip = False # done with initial point + else: + # [4] Section 4.3 + alpha = _get_step(x, d_x, z, d_z, tau, + d_tau, kappa, d_kappa, alpha0) + # [4] Equation 8.9 + x, y, z, tau, kappa = _do_step( + x, y, z, tau, kappa, d_x, d_y, d_z, d_tau, d_kappa, alpha) + + except (LinAlgError, FloatingPointError, + ValueError, ZeroDivisionError): + # this can happen when sparse solver is used and presolve + # is turned off. Also observed ValueError in AppVeyor Python 3.6 + # Win32 build (PR #8676). I've never seen it otherwise. + status = 4 + message = _get_message(status) + break + + # [4] 4.5 + rho_p, rho_d, rho_A, rho_g, rho_mu, obj = _indicators( + A, b, c, c0, x, y, z, tau, kappa) + go = rho_p > tol or rho_d > tol or rho_A > tol + + if disp: + _display_iter(rho_p, rho_d, rho_g, alpha, rho_mu, obj) + if callback is not None: + x_o, fun, slack, con = _postsolve(x/tau, postsolve_args) + res = OptimizeResult({'x': x_o, 'fun': fun, 'slack': slack, + 'con': con, 'nit': iteration, 'phase': 1, + 'complete': False, 'status': 0, + 'message': "", 'success': False}) + callback(res) + + # [4] 4.5 + inf1 = (rho_p < tol and rho_d < tol and rho_g < tol and tau < tol * + max(1, kappa)) + inf2 = rho_mu < tol and tau < tol * min(1, kappa) + if inf1 or inf2: + # [4] Lemma 8.4 / Theorem 8.3 + if b.transpose().dot(y) > tol: + status = 2 + else: # elif c.T.dot(x) < tol: ? Probably not necessary. + status = 3 + message = _get_message(status) + break + elif iteration >= maxiter: + status = 1 + message = _get_message(status) + break + + x_hat = x / tau + # [4] Statement after Theorem 8.2 + return x_hat, status, message, iteration + + +def _linprog_ip(c, c0, A, b, callback, postsolve_args, maxiter=1000, tol=1e-8, + disp=False, alpha0=.99995, beta=0.1, sparse=False, lstsq=False, + sym_pos=True, cholesky=None, pc=True, ip=False, + permc_spec='MMD_AT_PLUS_A', **unknown_options): + r""" + Minimize a linear objective function subject to linear + equality and non-negativity constraints using the interior point method + of [4]_. Linear programming is intended to solve problems + of the following form: + + Minimize:: + + c @ x + + Subject to:: + + A @ x == b + x >= 0 + + User-facing documentation is in _linprog_doc.py. + + Parameters + ---------- + c : 1-D array + Coefficients of the linear objective function to be minimized. + c0 : float + Constant term in objective function due to fixed (and eliminated) + variables. (Purely for display.) + A : 2-D array + 2-D array such that ``A @ x``, gives the values of the equality + constraints at ``x``. 
+ b : 1-D array
+ 1-D array of values representing the right hand side of each equality
+ constraint (row) in ``A``.
+ callback : callable, optional
+ Callback function to be executed once per iteration.
+ postsolve_args : tuple
+ Data needed by _postsolve to convert the solution to the standard-form
+ problem into the solution to the original problem.
+
+ Options
+ -------
+ maxiter : int (default = 1000)
+ The maximum number of iterations of the algorithm.
+ tol : float (default = 1e-8)
+ Termination tolerance to be used for all termination criteria;
+ see [4]_ Section 4.5.
+ disp : bool (default = False)
+ Set to ``True`` if indicators of optimization status are to be printed
+ to the console each iteration.
+ alpha0 : float (default = 0.99995)
+ The maximal step size for Mehrotra's predictor-corrector search
+ direction; see :math:`\beta_{3}` of [4]_ Table 8.1.
+ beta : float (default = 0.1)
+ The desired reduction of the path parameter :math:`\mu` (see [6]_)
+ when Mehrotra's predictor-corrector is not in use (uncommon).
+ sparse : bool (default = False)
+ Set to ``True`` if the problem is to be treated as sparse after
+ presolve. If either ``A_eq`` or ``A_ub`` is a sparse matrix,
+ this option will automatically be set ``True``, and the problem
+ will be treated as sparse even during presolve. If your constraint
+ matrices contain mostly zeros and the problem is not very small (less
+ than about 100 constraints or variables), consider setting ``True``
+ or providing ``A_eq`` and ``A_ub`` as sparse matrices.
+ lstsq : bool (default = False)
+ Set to ``True`` if the problem is expected to be very poorly
+ conditioned. This should always be left ``False`` unless severe
+ numerical difficulties are encountered. Leave this at the default
+ unless you receive a warning message suggesting otherwise.
+ sym_pos : bool (default = True)
+ Leave ``True`` if the problem is expected to yield a well conditioned
+ symmetric positive definite normal equation matrix
+ (almost always). Leave this at the default unless you receive
+ a warning message suggesting otherwise.
+ cholesky : bool (default = True)
+ Set to ``True`` if the normal equations are to be solved by explicit
+ Cholesky decomposition followed by explicit forward/backward
+ substitution. This is typically faster for problems
+ that are numerically well-behaved.
+ pc : bool (default = True)
+ Leave ``True`` if the predictor-corrector method of Mehrotra is to be
+ used. This is almost always (if not always) beneficial.
+ ip : bool (default = False)
+ Set to ``True`` if the improved initial point suggestion due to [4]_
+ Section 4.3 is desired. Whether this is beneficial or not
+ depends on the problem.
+ permc_spec : str (default = 'MMD_AT_PLUS_A')
+ (Has effect only with ``sparse = True``, ``lstsq = False``, ``sym_pos =
+ True``, and no SuiteSparse.)
+ A matrix is factorized in each iteration of the algorithm.
+ This option specifies how to permute the columns of the matrix for
+ sparsity preservation. Acceptable values are:
+
+ - ``NATURAL``: natural ordering.
+ - ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
+ - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
+ - ``COLAMD``: approximate minimum degree column ordering.
+
+ This option can impact the convergence of the
+ interior point algorithm; test different values to determine which
+ performs best for your problem. For more information, refer to
+ ``scipy.sparse.linalg.splu``.
+ unknown_options : dict
+ Optional arguments not used by this particular solver. If
+ `unknown_options` is non-empty a warning is issued listing all
+ unused options.
+
+ Returns
+ -------
+ x : 1-D array
+ Solution vector.
+ status : int
+ An integer representing the exit status of the optimization::
+
+ 0 : Optimization terminated successfully
+ 1 : Iteration limit reached
+ 2 : Problem appears to be infeasible
+ 3 : Problem appears to be unbounded
+ 4 : Serious numerical difficulties encountered
+
+ message : str
+ A string descriptor of the exit status of the optimization.
+ iteration : int
+ The number of iterations taken to solve the problem.
+
+ Notes
+ -----
+ This method implements the algorithm outlined in [4]_ with ideas from [8]_
+ and a structure inspired by the simpler methods of [6]_.
+
+ The primal-dual path following method begins with initial 'guesses' of
+ the primal and dual variables of the standard form problem and iteratively
+ attempts to solve the (nonlinear) Karush-Kuhn-Tucker conditions for the
+ problem with a gradually reduced logarithmic barrier term added to the
+ objective. This particular implementation uses a homogeneous self-dual
+ formulation, which provides certificates of infeasibility or unboundedness
+ where applicable.
+
+ The default initial point for the primal and dual variables is that
+ defined in [4]_ Section 4.4 Equation 8.22. Optionally (by setting initial
+ point option ``ip=True``), an alternate (potentially improved) starting
+ point can be calculated according to the additional recommendations of
+ [4]_ Section 4.4.
+
+ A search direction is calculated using the predictor-corrector method
+ (single correction) proposed by Mehrotra and detailed in [4]_ Section 4.1.
+ (A potential improvement would be to implement the method of multiple
+ corrections described in [4]_ Section 4.2.) In practice, this is
+ accomplished by solving the normal equations, [4]_ Section 5.1 Equations
+ 8.31 and 8.32, derived from the Newton equations [4]_ Section 5 Equations
+ 8.25 (compare to [4]_ Section 4 Equations 8.6-8.8). The advantage of
+ solving the normal equations rather than 8.25 directly is that the
+ matrices involved are symmetric positive definite, so Cholesky
+ decomposition can be used rather than the more expensive LU factorization.
+
+ With default options, the solver used to perform the factorization depends
+ on third-party software availability and the conditioning of the problem.
+
+ For dense problems, solvers are tried in the following order:
+
+ 1. ``scipy.linalg.cho_factor``
+
+ 2. ``scipy.linalg.solve`` with option ``sym_pos=True``
+
+ 3. ``scipy.linalg.solve`` with option ``sym_pos=False``
+
+ 4. ``scipy.linalg.lstsq``
+
+ For sparse problems:
+
+ 1. ``sksparse.cholmod.cholesky`` (if scikit-sparse and SuiteSparse are installed)
+
+ 2. ``scipy.sparse.linalg.factorized``
+ (if scikit-umfpack and SuiteSparse are installed)
+
+ 3. ``scipy.sparse.linalg.splu`` (which uses SuperLU distributed with SciPy)
+
+ 4. ``scipy.sparse.linalg.lsqr``
+
+ If the solver fails for any reason, successively more robust (but slower)
+ solvers are attempted in the order indicated. Attempting, failing, and
+ re-starting factorization can be time consuming, so if the problem is
+ numerically challenging, options can be set to bypass solvers that are
+ failing. Setting ``cholesky=False`` skips to solver 2,
+ ``sym_pos=False`` skips to solver 3, and ``lstsq=True`` skips
+ to solver 4 for both sparse and dense problems.
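+
+ For example (an illustrative sketch, not an excerpt from the test suite),
+ a numerically troublesome problem can be sent straight to the most robust
+ solver through the public interface, assuming ``c``, ``A_eq`` and ``b_eq``
+ are already defined::
+
+ from scipy.optimize import linprog
+ res = linprog(c, A_eq=A_eq, b_eq=b_eq, method='interior-point',
+ options={'lstsq': True})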
+ + Potential improvements for combatting issues associated with dense + columns in otherwise sparse problems are outlined in [4]_ Section 5.3 and + [10]_ Section 4.1-4.2; the latter also discusses the alleviation of + accuracy issues associated with the substitution approach to free + variables. + + After calculating the search direction, the maximum possible step size + that does not activate the non-negativity constraints is calculated, and + the smaller of this step size and unity is applied (as in [4]_ Section + 4.1.) [4]_ Section 4.3 suggests improvements for choosing the step size. + + The new point is tested according to the termination conditions of [4]_ + Section 4.5. The same tolerance, which can be set using the ``tol`` option, + is used for all checks. (A potential improvement would be to expose + the different tolerances to be set independently.) If optimality, + unboundedness, or infeasibility is detected, the solve procedure + terminates; otherwise it repeats. + + The expected problem formulation differs between the top level ``linprog`` + module and the method specific solvers. The method specific solvers expect a + problem in standard form: + + Minimize:: + + c @ x + + Subject to:: + + A @ x == b + x >= 0 + + Whereas the top level ``linprog`` module expects a problem of form: + + Minimize:: + + c @ x + + Subject to:: + + A_ub @ x <= b_ub + A_eq @ x == b_eq + lb <= x <= ub + + where ``lb = 0`` and ``ub = None`` unless set in ``bounds``. + + The original problem contains equality, upper-bound and variable constraints + whereas the method specific solver requires equality constraints and + variable non-negativity. + + ``linprog`` module converts the original problem to standard form by + converting the simple bounds to upper bound constraints, introducing + non-negative slack variables for inequality constraints, and expressing + unbounded variables as the difference between two non-negative variables. + + + References + ---------- + .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point + optimizer for linear programming: an implementation of the + homogeneous algorithm." High performance optimization. Springer US, + 2000. 197-232. + .. [6] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear + Programming based on Newton's Method." Unpublished Course Notes, + March 2004. Available 2/25/2017 at + https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf + .. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear + programming." Mathematical Programming 71.2 (1995): 221-245. + .. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear + programming." Athena Scientific 1 (1997): 997. + .. [10] Andersen, Erling D., et al. Implementation of interior point methods + for large scale linear programming. HEC/Universite de Geneve, 1996. + + """ + + _check_unknown_options(unknown_options) + + # These should be warnings, not errors + if (cholesky or cholesky is None) and sparse and not has_cholmod: + if cholesky: + warn("Sparse cholesky is only available with scikit-sparse. 
" + "Setting `cholesky = False`", + OptimizeWarning, stacklevel=3) + cholesky = False + + if sparse and lstsq: + warn("Option combination 'sparse':True and 'lstsq':True " + "is not recommended.", + OptimizeWarning, stacklevel=3) + + if lstsq and cholesky: + warn("Invalid option combination 'lstsq':True " + "and 'cholesky':True; option 'cholesky' has no effect when " + "'lstsq' is set True.", + OptimizeWarning, stacklevel=3) + + valid_permc_spec = ('NATURAL', 'MMD_ATA', 'MMD_AT_PLUS_A', 'COLAMD') + if permc_spec.upper() not in valid_permc_spec: + warn("Invalid permc_spec option: '" + str(permc_spec) + "'. " + "Acceptable values are 'NATURAL', 'MMD_ATA', 'MMD_AT_PLUS_A', " + "and 'COLAMD'. Reverting to default.", + OptimizeWarning, stacklevel=3) + permc_spec = 'MMD_AT_PLUS_A' + + # This can be an error + if not sym_pos and cholesky: + raise ValueError( + "Invalid option combination 'sym_pos':False " + "and 'cholesky':True: Cholesky decomposition is only possible " + "for symmetric positive definite matrices.") + + cholesky = cholesky or (cholesky is None and sym_pos and not lstsq) + + x, status, message, iteration = _ip_hsd(A, b, c, c0, alpha0, beta, + maxiter, disp, tol, sparse, + lstsq, sym_pos, cholesky, + pc, ip, permc_spec, callback, + postsolve_args) + + return x, status, message, iteration diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_linprog_util.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_linprog_util.py new file mode 100644 index 0000000000000000000000000000000000000000..3d25cee4d9ce6b1c5a40cc474d97ec13474ebafc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_linprog_util.py @@ -0,0 +1,1522 @@ +""" +Method agnostic utility functions for linear programming +""" + +import numpy as np +import scipy.sparse as sps +from warnings import warn +from ._optimize import OptimizeWarning +from scipy.optimize._remove_redundancy import ( + _remove_redundancy_svd, _remove_redundancy_pivot_sparse, + _remove_redundancy_pivot_dense, _remove_redundancy_id + ) +from collections import namedtuple + +_LPProblem = namedtuple('_LPProblem', + 'c A_ub b_ub A_eq b_eq bounds x0 integrality') +_LPProblem.__new__.__defaults__ = (None,) * 7 # make c the only required arg +_LPProblem.__doc__ = \ + """ Represents a linear-programming problem. + + Attributes + ---------- + c : 1D array + The coefficients of the linear objective function to be minimized. + A_ub : 2D array, optional + The inequality constraint matrix. Each row of ``A_ub`` specifies the + coefficients of a linear inequality constraint on ``x``. + b_ub : 1D array, optional + The inequality constraint vector. Each element represents an + upper bound on the corresponding value of ``A_ub @ x``. + A_eq : 2D array, optional + The equality constraint matrix. Each row of ``A_eq`` specifies the + coefficients of a linear equality constraint on ``x``. + b_eq : 1D array, optional + The equality constraint vector. Each element of ``A_eq @ x`` must equal + the corresponding element of ``b_eq``. + bounds : various valid formats, optional + The bounds of ``x``, as ``min`` and ``max`` pairs. + If bounds are specified for all N variables separately, valid formats + are: + * a 2D array (N x 2); + * a sequence of N sequences, each with 2 values. + If all variables have the same bounds, the bounds can be specified as + a 1-D or 2-D array or sequence with 2 scalar values. + If all variables have a lower bound of 0 and no upper bound, the bounds + parameter can be omitted (or given as None). 
+ Absent lower and/or upper bounds can be specified as -numpy.inf (no + lower bound), numpy.inf (no upper bound) or None (both). + x0 : 1D array, optional + Guess values of the decision variables, which will be refined by + the optimization algorithm. This argument is currently used only by the + 'revised simplex' method, and can only be used if `x0` represents a + basic feasible solution. + integrality : 1-D array or int, optional + Indicates the type of integrality constraint on each decision variable. + + ``0`` : Continuous variable; no integrality constraint. + + ``1`` : Integer variable; decision variable must be an integer + within `bounds`. + + ``2`` : Semi-continuous variable; decision variable must be within + `bounds` or take value ``0``. + + ``3`` : Semi-integer variable; decision variable must be an integer + within `bounds` or take value ``0``. + + By default, all variables are continuous. + + For mixed integrality constraints, supply an array of shape `c.shape`. + To infer a constraint on each decision variable from shorter inputs, + the argument will be broadcasted to `c.shape` using `np.broadcast_to`. + + This argument is currently used only by the ``'highs'`` method and + ignored otherwise. + + Notes + ----- + This namedtuple supports 2 ways of initialization: + >>> lp1 = _LPProblem(c=[-1, 4], A_ub=[[-3, 1], [1, 2]], b_ub=[6, 4]) + >>> lp2 = _LPProblem([-1, 4], [[-3, 1], [1, 2]], [6, 4]) + + Note that only ``c`` is a required argument here, whereas all other arguments + ``A_ub``, ``b_ub``, ``A_eq``, ``b_eq``, ``bounds``, ``x0`` are optional with + default values of None. + For example, ``A_eq`` and ``b_eq`` can be set without ``A_ub`` or ``b_ub``: + >>> lp3 = _LPProblem(c=[-1, 4], A_eq=[[2, 1]], b_eq=[10]) + """ + + +def _check_sparse_inputs(options, meth, A_ub, A_eq): + """ + Check the provided ``A_ub`` and ``A_eq`` matrices conform to the specified + optional sparsity variables. + + Parameters + ---------- + A_ub : 2-D array, optional + 2-D array such that ``A_ub @ x`` gives the values of the upper-bound + inequality constraints at ``x``. + A_eq : 2-D array, optional + 2-D array such that ``A_eq @ x`` gives the values of the equality + constraints at ``x``. + options : dict + A dictionary of solver options. All methods accept the following + generic options: + + maxiter : int + Maximum number of iterations to perform. + disp : bool + Set to True to print convergence messages. + + For method-specific options, see :func:`show_options('linprog')`. + method : str, optional + The algorithm used to solve the standard form problem. + + Returns + ------- + A_ub : 2-D array, optional + 2-D array such that ``A_ub @ x`` gives the values of the upper-bound + inequality constraints at ``x``. + A_eq : 2-D array, optional + 2-D array such that ``A_eq @ x`` gives the values of the equality + constraints at ``x``. + options : dict + A dictionary of solver options. All methods accept the following + generic options: + + maxiter : int + Maximum number of iterations to perform. + disp : bool + Set to True to print convergence messages. + + For method-specific options, see :func:`show_options('linprog')`. 
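+
+ Examples
+ --------
+ An illustrative sketch (this is a private helper, so the example is not
+ part of the public API): passing a sparse ``A_eq`` to the interior-point
+ method switches the ``sparse`` option on and emits an ``OptimizeWarning``.
+
+ >>> import scipy.sparse as sps
+ >>> opts, A_ub, A_eq = _check_sparse_inputs({}, 'interior-point',
+ ... None, sps.eye(3))
+ >>> opts['sparse']
+ True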
+ """ + # This is an undocumented option for unit testing sparse presolve + _sparse_presolve = options.pop('_sparse_presolve', False) + if _sparse_presolve and A_eq is not None: + A_eq = sps.coo_matrix(A_eq) + if _sparse_presolve and A_ub is not None: + A_ub = sps.coo_matrix(A_ub) + + sparse_constraint = sps.issparse(A_eq) or sps.issparse(A_ub) + + preferred_methods = {"highs", "highs-ds", "highs-ipm"} + dense_methods = {"simplex", "revised simplex"} + if meth in dense_methods and sparse_constraint: + raise ValueError(f"Method '{meth}' does not support sparse " + "constraint matrices. Please consider using one of " + f"{preferred_methods}.") + + sparse = options.get('sparse', False) + if not sparse and sparse_constraint and meth == 'interior-point': + options['sparse'] = True + warn("Sparse constraint matrix detected; setting 'sparse':True.", + OptimizeWarning, stacklevel=4) + return options, A_ub, A_eq + + +def _format_A_constraints(A, n_x, sparse_lhs=False): + """Format the left hand side of the constraints to a 2-D array + + Parameters + ---------- + A : 2-D array + 2-D array such that ``A @ x`` gives the values of the upper-bound + (in)equality constraints at ``x``. + n_x : int + The number of variables in the linear programming problem. + sparse_lhs : bool + Whether either of `A_ub` or `A_eq` are sparse. If true return a + coo_matrix instead of a numpy array. + + Returns + ------- + np.ndarray or sparse.coo_matrix + 2-D array such that ``A @ x`` gives the values of the upper-bound + (in)equality constraints at ``x``. + + """ + if sparse_lhs: + return sps.coo_matrix( + (0, n_x) if A is None else A, dtype=float, copy=True + ) + elif A is None: + return np.zeros((0, n_x), dtype=float) + else: + return np.array(A, dtype=float, copy=True) + + +def _format_b_constraints(b): + """Format the upper bounds of the constraints to a 1-D array + + Parameters + ---------- + b : 1-D array + 1-D array of values representing the upper-bound of each (in)equality + constraint (row) in ``A``. + + Returns + ------- + 1-D np.array + 1-D array of values representing the upper-bound of each (in)equality + constraint (row) in ``A``. + + """ + if b is None: + return np.array([], dtype=float) + b = np.array(b, dtype=float, copy=True).squeeze() + return b if b.size != 1 else b.reshape(-1) + + +def _clean_inputs(lp): + """ + Given user inputs for a linear programming problem, return the + objective vector, upper bound constraints, equality constraints, + and simple bounds in a preferred format. + + Parameters + ---------- + lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields: + + c : 1D array + The coefficients of the linear objective function to be minimized. + A_ub : 2D array, optional + The inequality constraint matrix. Each row of ``A_ub`` specifies the + coefficients of a linear inequality constraint on ``x``. + b_ub : 1D array, optional + The inequality constraint vector. Each element represents an + upper bound on the corresponding value of ``A_ub @ x``. + A_eq : 2D array, optional + The equality constraint matrix. Each row of ``A_eq`` specifies the + coefficients of a linear equality constraint on ``x``. + b_eq : 1D array, optional + The equality constraint vector. Each element of ``A_eq @ x`` must equal + the corresponding element of ``b_eq``. + bounds : various valid formats, optional + The bounds of ``x``, as ``min`` and ``max`` pairs. 
+ If bounds are specified for all N variables separately, valid formats are: + * a 2D array (2 x N or N x 2); + * a sequence of N sequences, each with 2 values. + If all variables have the same bounds, a single pair of values can + be specified. Valid formats are: + * a sequence with 2 scalar values; + * a sequence with a single element containing 2 scalar values. + If all variables have a lower bound of 0 and no upper bound, the bounds + parameter can be omitted (or given as None). + x0 : 1D array, optional + Guess values of the decision variables, which will be refined by + the optimization algorithm. This argument is currently used only by the + 'revised simplex' method, and can only be used if `x0` represents a + basic feasible solution. + + Returns + ------- + lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields: + + c : 1D array + The coefficients of the linear objective function to be minimized. + A_ub : 2D array, optional + The inequality constraint matrix. Each row of ``A_ub`` specifies the + coefficients of a linear inequality constraint on ``x``. + b_ub : 1D array, optional + The inequality constraint vector. Each element represents an + upper bound on the corresponding value of ``A_ub @ x``. + A_eq : 2D array, optional + The equality constraint matrix. Each row of ``A_eq`` specifies the + coefficients of a linear equality constraint on ``x``. + b_eq : 1D array, optional + The equality constraint vector. Each element of ``A_eq @ x`` must equal + the corresponding element of ``b_eq``. + bounds : 2D array + The bounds of ``x``, as ``min`` and ``max`` pairs, one for each of the N + elements of ``x``. The N x 2 array contains lower bounds in the first + column and upper bounds in the 2nd. Unbounded variables have lower + bound -np.inf and/or upper bound np.inf. + x0 : 1D array, optional + Guess values of the decision variables, which will be refined by + the optimization algorithm. This argument is currently used only by the + 'revised simplex' method, and can only be used if `x0` represents a + basic feasible solution. + + """ + c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality = lp + + if c is None: + raise TypeError + + try: + c = np.array(c, dtype=np.float64, copy=True).squeeze() + except ValueError as e: + raise TypeError( + "Invalid input for linprog: c must be a 1-D array of numerical " + "coefficients") from e + else: + # If c is a single value, convert it to a 1-D array. 
+ if c.size == 1: + c = c.reshape(-1) + + n_x = len(c) + if n_x == 0 or len(c.shape) != 1: + raise ValueError( + "Invalid input for linprog: c must be a 1-D array and must " + "not have more than one non-singleton dimension") + if not np.isfinite(c).all(): + raise ValueError( + "Invalid input for linprog: c must not contain values " + "inf, nan, or None") + + sparse_lhs = sps.issparse(A_eq) or sps.issparse(A_ub) + try: + A_ub = _format_A_constraints(A_ub, n_x, sparse_lhs=sparse_lhs) + except ValueError as e: + raise TypeError( + "Invalid input for linprog: A_ub must be a 2-D array " + "of numerical values") from e + else: + n_ub = A_ub.shape[0] + if len(A_ub.shape) != 2 or A_ub.shape[1] != n_x: + raise ValueError( + "Invalid input for linprog: A_ub must have exactly two " + "dimensions, and the number of columns in A_ub must be " + "equal to the size of c") + if (sps.issparse(A_ub) and not np.isfinite(A_ub.data).all() + or not sps.issparse(A_ub) and not np.isfinite(A_ub).all()): + raise ValueError( + "Invalid input for linprog: A_ub must not contain values " + "inf, nan, or None") + + try: + b_ub = _format_b_constraints(b_ub) + except ValueError as e: + raise TypeError( + "Invalid input for linprog: b_ub must be a 1-D array of " + "numerical values, each representing the upper bound of an " + "inequality constraint (row) in A_ub") from e + else: + if b_ub.shape != (n_ub,): + raise ValueError( + "Invalid input for linprog: b_ub must be a 1-D array; b_ub " + "must not have more than one non-singleton dimension and " + "the number of rows in A_ub must equal the number of values " + "in b_ub") + if not np.isfinite(b_ub).all(): + raise ValueError( + "Invalid input for linprog: b_ub must not contain values " + "inf, nan, or None") + + try: + A_eq = _format_A_constraints(A_eq, n_x, sparse_lhs=sparse_lhs) + except ValueError as e: + raise TypeError( + "Invalid input for linprog: A_eq must be a 2-D array " + "of numerical values") from e + else: + n_eq = A_eq.shape[0] + if len(A_eq.shape) != 2 or A_eq.shape[1] != n_x: + raise ValueError( + "Invalid input for linprog: A_eq must have exactly two " + "dimensions, and the number of columns in A_eq must be " + "equal to the size of c") + + if (sps.issparse(A_eq) and not np.isfinite(A_eq.data).all() + or not sps.issparse(A_eq) and not np.isfinite(A_eq).all()): + raise ValueError( + "Invalid input for linprog: A_eq must not contain values " + "inf, nan, or None") + + try: + b_eq = _format_b_constraints(b_eq) + except ValueError as e: + raise TypeError( + "Invalid input for linprog: b_eq must be a dense, 1-D array of " + "numerical values, each representing the right hand side of an " + "equality constraint (row) in A_eq") from e + else: + if b_eq.shape != (n_eq,): + raise ValueError( + "Invalid input for linprog: b_eq must be a 1-D array; b_eq " + "must not have more than one non-singleton dimension and " + "the number of rows in A_eq must equal the number of values " + "in b_eq") + if not np.isfinite(b_eq).all(): + raise ValueError( + "Invalid input for linprog: b_eq must not contain values " + "inf, nan, or None") + + # x0 gives a (optional) starting solution to the solver. If x0 is None, + # skip the checks. Initial solution will be generated automatically. 
+ if x0 is not None: + try: + x0 = np.array(x0, dtype=float, copy=True).squeeze() + except ValueError as e: + raise TypeError( + "Invalid input for linprog: x0 must be a 1-D array of " + "numerical coefficients") from e + if x0.ndim == 0: + x0 = x0.reshape(-1) + if len(x0) == 0 or x0.ndim != 1: + raise ValueError( + "Invalid input for linprog: x0 should be a 1-D array; it " + "must not have more than one non-singleton dimension") + if not x0.size == c.size: + raise ValueError( + "Invalid input for linprog: x0 and c should contain the " + "same number of elements") + if not np.isfinite(x0).all(): + raise ValueError( + "Invalid input for linprog: x0 must not contain values " + "inf, nan, or None") + + # Bounds can be one of these formats: + # (1) a 2-D array or sequence, with shape N x 2 + # (2) a 1-D or 2-D sequence or array with 2 scalars + # (3) None (or an empty sequence or array) + # Unspecified bounds can be represented by None or (-)np.inf. + # All formats are converted into a N x 2 np.array with (-)np.inf where + # bounds are unspecified. + + # Prepare clean bounds array + bounds_clean = np.zeros((n_x, 2), dtype=float) + + # Convert to a numpy array. + # np.array(..,dtype=float) raises an error if dimensions are inconsistent + # or if there are invalid data types in bounds. Just add a linprog prefix + # to the error and re-raise. + # Creating at least a 2-D array simplifies the cases to distinguish below. + if bounds is None or np.array_equal(bounds, []) or np.array_equal(bounds, [[]]): + bounds = (0, np.inf) + try: + bounds_conv = np.atleast_2d(np.array(bounds, dtype=float)) + except ValueError as e: + raise ValueError( + "Invalid input for linprog: unable to interpret bounds, " + "check values and dimensions: " + e.args[0]) from e + except TypeError as e: + raise TypeError( + "Invalid input for linprog: unable to interpret bounds, " + "check values and dimensions: " + e.args[0]) from e + + # Check bounds options + bsh = bounds_conv.shape + if len(bsh) > 2: + # Do not try to handle multidimensional bounds input + raise ValueError( + "Invalid input for linprog: provide a 2-D array for bounds, " + f"not a {len(bsh):d}-D array.") + elif np.all(bsh == (n_x, 2)): + # Regular N x 2 array + bounds_clean = bounds_conv + elif (np.all(bsh == (2, 1)) or np.all(bsh == (1, 2))): + # 2 values: interpret as overall lower and upper bound + bounds_flat = bounds_conv.flatten() + bounds_clean[:, 0] = bounds_flat[0] + bounds_clean[:, 1] = bounds_flat[1] + elif np.all(bsh == (2, n_x)): + # Reject a 2 x N array + raise ValueError( + f"Invalid input for linprog: provide a {n_x:d} x 2 array for bounds, " + f"not a 2 x {n_x:d} array.") + else: + raise ValueError( + "Invalid input for linprog: unable to interpret bounds with this " + f"dimension tuple: {bsh}.") + + # The process above creates nan-s where the input specified None + # Convert the nan-s in the 1st column to -np.inf and in the 2nd column + # to np.inf + i_none = np.isnan(bounds_clean[:, 0]) + bounds_clean[i_none, 0] = -np.inf + i_none = np.isnan(bounds_clean[:, 1]) + bounds_clean[i_none, 1] = np.inf + + return _LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds_clean, x0, integrality) + + +def _presolve(lp, rr, rr_method, tol=1e-9): + """ + Given inputs for a linear programming problem in preferred format, + presolve the problem: identify trivial infeasibilities, redundancies, + and unboundedness, tighten bounds where possible, and eliminate fixed + variables. 
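+
+ A small illustration (a hedged sketch, not taken from the reference
+ documentation): if ``bounds[i] == (2.0, 2.0)``, the variable ``x[i]`` is
+ fixed, so presolve adds ``2.0 * c[i]`` to the constant term ``c0``, drops
+ column ``i`` from ``A_ub`` and ``A_eq``, and appends a function to
+ ``revstack`` that reinserts the value 2.0 into the solution vector during
+ postsolve.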
+ + Parameters + ---------- + lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields: + + c : 1D array + The coefficients of the linear objective function to be minimized. + A_ub : 2D array, optional + The inequality constraint matrix. Each row of ``A_ub`` specifies the + coefficients of a linear inequality constraint on ``x``. + b_ub : 1D array, optional + The inequality constraint vector. Each element represents an + upper bound on the corresponding value of ``A_ub @ x``. + A_eq : 2D array, optional + The equality constraint matrix. Each row of ``A_eq`` specifies the + coefficients of a linear equality constraint on ``x``. + b_eq : 1D array, optional + The equality constraint vector. Each element of ``A_eq @ x`` must equal + the corresponding element of ``b_eq``. + bounds : 2D array + The bounds of ``x``, as ``min`` and ``max`` pairs, one for each of the N + elements of ``x``. The N x 2 array contains lower bounds in the first + column and upper bounds in the 2nd. Unbounded variables have lower + bound -np.inf and/or upper bound np.inf. + x0 : 1D array, optional + Guess values of the decision variables, which will be refined by + the optimization algorithm. This argument is currently used only by the + 'revised simplex' method, and can only be used if `x0` represents a + basic feasible solution. + + rr : bool + If ``True`` attempts to eliminate any redundant rows in ``A_eq``. + Set False if ``A_eq`` is known to be of full row rank, or if you are + looking for a potential speedup (at the expense of reliability). + rr_method : string + Method used to identify and remove redundant rows from the + equality constraint matrix after presolve. + tol : float + The tolerance which determines when a solution is "close enough" to + zero in Phase 1 to be considered a basic feasible solution or close + enough to positive to serve as an optimal solution. + + Returns + ------- + lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields: + + c : 1D array + The coefficients of the linear objective function to be minimized. + A_ub : 2D array, optional + The inequality constraint matrix. Each row of ``A_ub`` specifies the + coefficients of a linear inequality constraint on ``x``. + b_ub : 1D array, optional + The inequality constraint vector. Each element represents an + upper bound on the corresponding value of ``A_ub @ x``. + A_eq : 2D array, optional + The equality constraint matrix. Each row of ``A_eq`` specifies the + coefficients of a linear equality constraint on ``x``. + b_eq : 1D array, optional + The equality constraint vector. Each element of ``A_eq @ x`` must equal + the corresponding element of ``b_eq``. + bounds : 2D array + The bounds of ``x``, as ``min`` and ``max`` pairs, possibly tightened. + x0 : 1D array, optional + Guess values of the decision variables, which will be refined by + the optimization algorithm. This argument is currently used only by the + 'revised simplex' method, and can only be used if `x0` represents a + basic feasible solution. + + c0 : 1D array + Constant term in objective function due to fixed (and eliminated) + variables. 
+ x : 1D array + Solution vector (when the solution is trivial and can be determined + in presolve) + revstack: list of functions + the functions in the list reverse the operations of _presolve() + the function signature is x_org = f(x_mod), where x_mod is the result + of a presolve step and x_org the value at the start of the step + (currently, the revstack contains only one function) + complete: bool + Whether the solution is complete (solved or determined to be infeasible + or unbounded in presolve) + status : int + An integer representing the exit status of the optimization:: + + 0 : Optimization terminated successfully + 1 : Iteration limit reached + 2 : Problem appears to be infeasible + 3 : Problem appears to be unbounded + 4 : Serious numerical difficulties encountered + + message : str + A string descriptor of the exit status of the optimization. + + References + ---------- + .. [5] Andersen, Erling D. "Finding all linearly dependent rows in + large-scale linear programming." Optimization Methods and Software + 6.3 (1995): 219-227. + .. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear + programming." Mathematical Programming 71.2 (1995): 221-245. + + """ + # ideas from Reference [5] by Andersen and Andersen + # however, unlike the reference, this is performed before converting + # problem to standard form + # There are a few advantages: + # * artificial variables have not been added, so matrices are smaller + # * bounds have not been converted to constraints yet. (It is better to + # do that after presolve because presolve may adjust the simple bounds.) + # There are many improvements that can be made, namely: + # * implement remaining checks from [5] + # * loop presolve until no additional changes are made + # * implement additional efficiency improvements in redundancy removal [2] + + c, A_ub, b_ub, A_eq, b_eq, bounds, x0, _ = lp + + revstack = [] # record of variables eliminated from problem + # constant term in cost function may be added if variables are eliminated + c0 = 0 + complete = False # complete is True if detected infeasible/unbounded + x = np.zeros(c.shape) # this is solution vector if completed in presolve + + status = 0 # all OK unless determined otherwise + message = "" + + # Lower and upper bounds. Copy to prevent feedback. + lb = bounds[:, 0].copy() + ub = bounds[:, 1].copy() + + m_eq, n = A_eq.shape + m_ub, n = A_ub.shape + + if (rr_method is not None + and rr_method.lower() not in {"svd", "pivot", "id"}): + message = ("'" + str(rr_method) + "' is not a valid option " + "for redundancy removal. 
Valid options are 'SVD', "
+ "'pivot', and 'ID'.")
+ raise ValueError(message)
+
+ if sps.issparse(A_eq):
+ A_eq = A_eq.tocsr()
+ A_ub = A_ub.tocsr()
+
+ def where(A):
+ return A.nonzero()
+
+ vstack = sps.vstack
+ else:
+ where = np.where
+ vstack = np.vstack
+
+ # upper bounds > lower bounds
+ if np.any(ub < lb) or np.any(lb == np.inf) or np.any(ub == -np.inf):
+ status = 2
+ message = ("The problem is (trivially) infeasible since one "
+ "or more upper bounds are smaller than the corresponding "
+ "lower bounds, a lower bound is np.inf or an upper bound "
+ "is -np.inf.")
+ complete = True
+ return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
+ c0, x, revstack, complete, status, message)
+
+ # zero row in equality constraints
+ zero_row = np.array(np.sum(A_eq != 0, axis=1) == 0).flatten()
+ if np.any(zero_row):
+ if np.any(
+ np.logical_and(
+ zero_row,
+ np.abs(b_eq) > tol)): # test_zero_row_1
+ # infeasible if RHS is not zero
+ status = 2
+ message = ("The problem is (trivially) infeasible due to a row "
+ "of zeros in the equality constraint matrix with a "
+ "nonzero corresponding constraint value.")
+ complete = True
+ return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
+ c0, x, revstack, complete, status, message)
+ else: # test_zero_row_2
+ # if RHS is zero, we can eliminate this equation entirely
+ A_eq = A_eq[np.logical_not(zero_row), :]
+ b_eq = b_eq[np.logical_not(zero_row)]
+
+ # zero row in inequality constraints
+ zero_row = np.array(np.sum(A_ub != 0, axis=1) == 0).flatten()
+ if np.any(zero_row):
+ if np.any(np.logical_and(zero_row, b_ub < -tol)): # test_zero_row_1
+ # infeasible if RHS is less than zero (because LHS is zero)
+ status = 2
+ message = ("The problem is (trivially) infeasible due to a row "
+ "of zeros in the inequality constraint matrix with a "
+ "negative corresponding constraint value.")
+ complete = True
+ return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
+ c0, x, revstack, complete, status, message)
+ else: # test_zero_row_2
+ # if RHS is >= 0, we can eliminate this constraint entirely
+ A_ub = A_ub[np.logical_not(zero_row), :]
+ b_ub = b_ub[np.logical_not(zero_row)]
+
+ # zero column in (both) constraints
+ # this indicates that a variable isn't constrained and can be removed
+ A = vstack((A_eq, A_ub))
+ if A.shape[0] > 0:
+ zero_col = np.array(np.sum(A != 0, axis=0) == 0).flatten()
+ # variable will be at upper or lower bound, depending on objective
+ x[np.logical_and(zero_col, c < 0)] = ub[
+ np.logical_and(zero_col, c < 0)]
+ x[np.logical_and(zero_col, c > 0)] = lb[
+ np.logical_and(zero_col, c > 0)]
+ if np.any(np.isinf(x)): # if an unconstrained variable has no bound
+ status = 3
+ message = ("If feasible, the problem is (trivially) unbounded "
+ "due to a zero column in the constraint matrices. 
If " + "you wish to check whether the problem is infeasible, " + "turn presolve off.") + complete = True + return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0), + c0, x, revstack, complete, status, message) + # variables will equal upper/lower bounds will be removed later + lb[np.logical_and(zero_col, c < 0)] = ub[ + np.logical_and(zero_col, c < 0)] + ub[np.logical_and(zero_col, c > 0)] = lb[ + np.logical_and(zero_col, c > 0)] + + # row singleton in equality constraints + # this fixes a variable and removes the constraint + singleton_row = np.array(np.sum(A_eq != 0, axis=1) == 1).flatten() + rows = where(singleton_row)[0] + cols = where(A_eq[rows, :])[1] + if len(rows) > 0: + for row, col in zip(rows, cols): + val = b_eq[row] / A_eq[row, col] + if not lb[col] - tol <= val <= ub[col] + tol: + # infeasible if fixed value is not within bounds + status = 2 + message = ("The problem is (trivially) infeasible because a " + "singleton row in the equality constraints is " + "inconsistent with the bounds.") + complete = True + return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0), + c0, x, revstack, complete, status, message) + else: + # sets upper and lower bounds at that fixed value - variable + # will be removed later + lb[col] = val + ub[col] = val + A_eq = A_eq[np.logical_not(singleton_row), :] + b_eq = b_eq[np.logical_not(singleton_row)] + + # row singleton in inequality constraints + # this indicates a simple bound and the constraint can be removed + # simple bounds may be adjusted here + # After all of the simple bound information is combined here, get_Abc will + # turn the simple bounds into constraints + singleton_row = np.array(np.sum(A_ub != 0, axis=1) == 1).flatten() + cols = where(A_ub[singleton_row, :])[1] + rows = where(singleton_row)[0] + if len(rows) > 0: + for row, col in zip(rows, cols): + val = b_ub[row] / A_ub[row, col] + if A_ub[row, col] > 0: # upper bound + if val < lb[col] - tol: # infeasible + complete = True + elif val < ub[col]: # new upper bound + ub[col] = val + else: # lower bound + if val > ub[col] + tol: # infeasible + complete = True + elif val > lb[col]: # new lower bound + lb[col] = val + if complete: + status = 2 + message = ("The problem is (trivially) infeasible because a " + "singleton row in the upper bound constraints is " + "inconsistent with the bounds.") + return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0), + c0, x, revstack, complete, status, message) + A_ub = A_ub[np.logical_not(singleton_row), :] + b_ub = b_ub[np.logical_not(singleton_row)] + + # identical bounds indicate that variable can be removed + i_f = np.abs(lb - ub) < tol # indices of "fixed" variables + i_nf = np.logical_not(i_f) # indices of "not fixed" variables + + # test_bounds_equal_but_infeasible + if np.all(i_f): # if bounds define solution, check for consistency + residual = b_eq - A_eq.dot(lb) + slack = b_ub - A_ub.dot(lb) + if ((A_ub.size > 0 and np.any(slack < 0)) or + (A_eq.size > 0 and not np.allclose(residual, 0))): + status = 2 + message = ("The problem is (trivially) infeasible because the " + "bounds fix all variables to values inconsistent with " + "the constraints") + complete = True + return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0), + c0, x, revstack, complete, status, message) + + ub_mod = ub + lb_mod = lb + if np.any(i_f): + c0 += c[i_f].dot(lb[i_f]) + b_eq = b_eq - A_eq[:, i_f].dot(lb[i_f]) + b_ub = b_ub - A_ub[:, i_f].dot(lb[i_f]) + c = c[i_nf] + x_undo = lb[i_f] # not x[i_f], x is just zeroes + x = x[i_nf] + # user guess x0 stays separate 
from presolve solution x
+ if x0 is not None:
+ x0 = x0[i_nf]
+ A_eq = A_eq[:, i_nf]
+ A_ub = A_ub[:, i_nf]
+ # modify bounds
+ lb_mod = lb[i_nf]
+ ub_mod = ub[i_nf]
+
+ def rev(x_mod):
+ # Function to restore x: insert x_undo into x_mod.
+ # When elements have been removed at positions k1, k2, k3, ...
+ # then these must be replaced at (after) positions k1-1, k2-2,
+ # k3-3, ... in the modified array to recreate the original
+ i = np.flatnonzero(i_f)
+ # Number of variables to restore
+ N = len(i)
+ index_offset = np.arange(N)
+ # Create insert indices
+ insert_indices = i - index_offset
+ x_rev = np.insert(x_mod.astype(float), insert_indices, x_undo)
+ return x_rev
+
+ # Use revstack as a list of functions, currently just this one.
+ revstack.append(rev)
+
+ # no constraints indicates that problem is trivial
+ if A_eq.size == 0 and A_ub.size == 0:
+ b_eq = np.array([])
+ b_ub = np.array([])
+ # test_empty_constraint_1
+ if c.size == 0:
+ status = 0
+ message = ("The solution was determined in presolve as there are "
+ "no non-trivial constraints.")
+ elif (np.any(np.logical_and(c < 0, ub_mod == np.inf)) or
+ np.any(np.logical_and(c > 0, lb_mod == -np.inf))):
+ # test_no_constraints()
+ # test_unbounded_no_nontrivial_constraints_1
+ # test_unbounded_no_nontrivial_constraints_2
+ status = 3
+ message = ("The problem is (trivially) unbounded "
+ "because there are no non-trivial constraints and "
+ "a) at least one decision variable is unbounded "
+ "above and its corresponding cost is negative, or "
+ "b) at least one decision variable is unbounded below "
+ "and its corresponding cost is positive. ")
+ else: # test_empty_constraint_2
+ status = 0
+ message = ("The solution was determined in presolve as there are "
+ "no non-trivial constraints.")
+ complete = True
+ x[c < 0] = ub_mod[c < 0]
+ x[c > 0] = lb_mod[c > 0]
+ # where c is zero, set x to a finite bound or zero
+ x_zero_c = ub_mod[c == 0]
+ x_zero_c[np.isinf(x_zero_c)] = lb_mod[c == 0][np.isinf(x_zero_c)]
+ x_zero_c[np.isinf(x_zero_c)] = 0
+ x[c == 0] = x_zero_c
+ # if this is not the last step of presolve, should convert bounds back
+ # to array and return here
+
+ # Convert modified lb and ub back into N x 2 bounds
+ bounds = np.hstack((lb_mod[:, np.newaxis], ub_mod[:, np.newaxis]))
+
+ # remove redundant (linearly dependent) rows from equality constraints
+ n_rows_A = A_eq.shape[0]
+ redundancy_warning = ("A_eq does not appear to be of full row rank. To "
+ "improve performance, check the problem formulation "
+ "for redundant equality constraints.")
+ if (sps.issparse(A_eq)):
+ if rr and A_eq.size > 0: # TODO: Fast sparse rank check?
+ rr_res = _remove_redundancy_pivot_sparse(A_eq, b_eq)
+ A_eq, b_eq, status, message = rr_res
+ if A_eq.shape[0] < n_rows_A:
+ warn(redundancy_warning, OptimizeWarning, stacklevel=1)
+ if status != 0:
+ complete = True
+ return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
+ c0, x, revstack, complete, status, message)
+
+ # This is a wild guess for which redundancy removal algorithm will be
+ # faster. More testing would be good.
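+ # The guess, concretely: estimate rank(A_eq); if the apparent number of
+ # redundant rows (A_eq.shape[0] - rank) is at most `small_nullspace`, try
+ # the SVD-based routine first and fall back to the pivot-based routine if
+ # it reports numerical trouble; otherwise use the pivot-based routine
+ # directly. An explicit `rr_method` overrides this choice.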
+ small_nullspace = 5 + if rr and A_eq.size > 0: + try: # TODO: use results of first SVD in _remove_redundancy_svd + rank = np.linalg.matrix_rank(A_eq) + # oh well, we'll have to go with _remove_redundancy_pivot_dense + except Exception: + rank = 0 + if rr and A_eq.size > 0 and rank < A_eq.shape[0]: + warn(redundancy_warning, OptimizeWarning, stacklevel=3) + dim_row_nullspace = A_eq.shape[0]-rank + if rr_method is None: + if dim_row_nullspace <= small_nullspace: + rr_res = _remove_redundancy_svd(A_eq, b_eq) + A_eq, b_eq, status, message = rr_res + if dim_row_nullspace > small_nullspace or status == 4: + rr_res = _remove_redundancy_pivot_dense(A_eq, b_eq) + A_eq, b_eq, status, message = rr_res + + else: + rr_method = rr_method.lower() + if rr_method == "svd": + rr_res = _remove_redundancy_svd(A_eq, b_eq) + A_eq, b_eq, status, message = rr_res + elif rr_method == "pivot": + rr_res = _remove_redundancy_pivot_dense(A_eq, b_eq) + A_eq, b_eq, status, message = rr_res + elif rr_method == "id": + rr_res = _remove_redundancy_id(A_eq, b_eq, rank) + A_eq, b_eq, status, message = rr_res + else: # shouldn't get here; option validity checked above + pass + if A_eq.shape[0] < rank: + message = ("Due to numerical issues, redundant equality " + "constraints could not be removed automatically. " + "Try providing your constraint matrices as sparse " + "matrices to activate sparse presolve, try turning " + "off redundancy removal, or try turning off presolve " + "altogether.") + status = 4 + if status != 0: + complete = True + return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0), + c0, x, revstack, complete, status, message) + + +def _parse_linprog(lp, options, meth): + """ + Parse the provided linear programming problem + + ``_parse_linprog`` employs two main steps ``_check_sparse_inputs`` and + ``_clean_inputs``. ``_check_sparse_inputs`` checks for sparsity in the + provided constraints (``A_ub`` and ``A_eq) and if these match the provided + sparsity optional values. + + ``_clean inputs`` checks of the provided inputs. If no violations are + identified the objective vector, upper bound constraints, equality + constraints, and simple bounds are returned in the expected format. + + Parameters + ---------- + lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields: + + c : 1D array + The coefficients of the linear objective function to be minimized. + A_ub : 2D array, optional + The inequality constraint matrix. Each row of ``A_ub`` specifies the + coefficients of a linear inequality constraint on ``x``. + b_ub : 1D array, optional + The inequality constraint vector. Each element represents an + upper bound on the corresponding value of ``A_ub @ x``. + A_eq : 2D array, optional + The equality constraint matrix. Each row of ``A_eq`` specifies the + coefficients of a linear equality constraint on ``x``. + b_eq : 1D array, optional + The equality constraint vector. Each element of ``A_eq @ x`` must equal + the corresponding element of ``b_eq``. + bounds : various valid formats, optional + The bounds of ``x``, as ``min`` and ``max`` pairs. + If bounds are specified for all N variables separately, valid formats are: + * a 2D array (2 x N or N x 2); + * a sequence of N sequences, each with 2 values. + If all variables have the same bounds, a single pair of values can + be specified. Valid formats are: + * a sequence with 2 scalar values; + * a sequence with a single element containing 2 scalar values. 
+ If all variables have a lower bound of 0 and no upper bound, the bounds + parameter can be omitted (or given as None). + x0 : 1D array, optional + Guess values of the decision variables, which will be refined by + the optimization algorithm. This argument is currently used only by the + 'revised simplex' method, and can only be used if `x0` represents a + basic feasible solution. + + options : dict + A dictionary of solver options. All methods accept the following + generic options: + + maxiter : int + Maximum number of iterations to perform. + disp : bool + Set to True to print convergence messages. + + For method-specific options, see :func:`show_options('linprog')`. + + Returns + ------- + lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields: + + c : 1D array + The coefficients of the linear objective function to be minimized. + A_ub : 2D array, optional + The inequality constraint matrix. Each row of ``A_ub`` specifies the + coefficients of a linear inequality constraint on ``x``. + b_ub : 1D array, optional + The inequality constraint vector. Each element represents an + upper bound on the corresponding value of ``A_ub @ x``. + A_eq : 2D array, optional + The equality constraint matrix. Each row of ``A_eq`` specifies the + coefficients of a linear equality constraint on ``x``. + b_eq : 1D array, optional + The equality constraint vector. Each element of ``A_eq @ x`` must equal + the corresponding element of ``b_eq``. + bounds : 2D array + The bounds of ``x``, as ``min`` and ``max`` pairs, one for each of the N + elements of ``x``. The N x 2 array contains lower bounds in the first + column and upper bounds in the 2nd. Unbounded variables have lower + bound -np.inf and/or upper bound np.inf. + x0 : 1D array, optional + Guess values of the decision variables, which will be refined by + the optimization algorithm. This argument is currently used only by the + 'revised simplex' method, and can only be used if `x0` represents a + basic feasible solution. + + options : dict, optional + A dictionary of solver options. All methods accept the following + generic options: + + maxiter : int + Maximum number of iterations to perform. + disp : bool + Set to True to print convergence messages. + + For method-specific options, see :func:`show_options('linprog')`. + + """ + if options is None: + options = {} + + solver_options = {k: v for k, v in options.items()} + solver_options, A_ub, A_eq = _check_sparse_inputs(solver_options, meth, + lp.A_ub, lp.A_eq) + # Convert lists to numpy arrays, etc... + lp = _clean_inputs(lp._replace(A_ub=A_ub, A_eq=A_eq)) + return lp, solver_options + + +def _get_Abc(lp, c0): + """ + Given a linear programming problem of the form: + + Minimize:: + + c @ x + + Subject to:: + + A_ub @ x <= b_ub + A_eq @ x == b_eq + lb <= x <= ub + + where ``lb = 0`` and ``ub = None`` unless set in ``bounds``. + + Return the problem in standard form: + + Minimize:: + + c @ x + + Subject to:: + + A @ x == b + x >= 0 + + by adding slack variables and making variable substitutions as necessary. + + Parameters + ---------- + lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields: + + c : 1D array + The coefficients of the linear objective function to be minimized. + A_ub : 2D array, optional + The inequality constraint matrix. Each row of ``A_ub`` specifies the + coefficients of a linear inequality constraint on ``x``. + b_ub : 1D array, optional + The inequality constraint vector. 
Each element represents an + upper bound on the corresponding value of ``A_ub @ x``. + A_eq : 2D array, optional + The equality constraint matrix. Each row of ``A_eq`` specifies the + coefficients of a linear equality constraint on ``x``. + b_eq : 1D array, optional + The equality constraint vector. Each element of ``A_eq @ x`` must equal + the corresponding element of ``b_eq``. + bounds : 2D array + The bounds of ``x``, lower bounds in the 1st column, upper + bounds in the 2nd column. The bounds are possibly tightened + by the presolve procedure. + x0 : 1D array, optional + Guess values of the decision variables, which will be refined by + the optimization algorithm. This argument is currently used only by the + 'revised simplex' method, and can only be used if `x0` represents a + basic feasible solution. + + c0 : float + Constant term in objective function due to fixed (and eliminated) + variables. + + Returns + ------- + A : 2-D array + 2-D array such that ``A`` @ ``x``, gives the values of the equality + constraints at ``x``. + b : 1-D array + 1-D array of values representing the RHS of each equality constraint + (row) in A (for standard form problem). + c : 1-D array + Coefficients of the linear objective function to be minimized (for + standard form problem). + c0 : float + Constant term in objective function due to fixed (and eliminated) + variables. + x0 : 1-D array + Starting values of the independent variables, which will be refined by + the optimization algorithm + + References + ---------- + .. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear + programming." Athena Scientific 1 (1997): 997. + + """ + c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality = lp + + if sps.issparse(A_eq): + sparse = True + A_eq = sps.csr_matrix(A_eq) + A_ub = sps.csr_matrix(A_ub) + + def hstack(blocks): + return sps.hstack(blocks, format="csr") + + def vstack(blocks): + return sps.vstack(blocks, format="csr") + + zeros = sps.csr_matrix + eye = sps.eye + else: + sparse = False + hstack = np.hstack + vstack = np.vstack + zeros = np.zeros + eye = np.eye + + # Variables lbs and ubs (see below) may be changed, which feeds back into + # bounds, so copy. + bounds = np.array(bounds, copy=True) + + # modify problem such that all variables have only non-negativity bounds + lbs = bounds[:, 0] + ubs = bounds[:, 1] + m_ub, n_ub = A_ub.shape + + lb_none = np.equal(lbs, -np.inf) + ub_none = np.equal(ubs, np.inf) + lb_some = np.logical_not(lb_none) + ub_some = np.logical_not(ub_none) + + # unbounded below: substitute xi = -xi' (unbounded above) + # if -inf <= xi <= ub, then -ub <= -xi <= inf, so swap and invert bounds + l_nolb_someub = np.logical_and(lb_none, ub_some) + i_nolb = np.nonzero(l_nolb_someub)[0] + lbs[l_nolb_someub], ubs[l_nolb_someub] = ( + -ubs[l_nolb_someub], -lbs[l_nolb_someub]) + lb_none = np.equal(lbs, -np.inf) + ub_none = np.equal(ubs, np.inf) + lb_some = np.logical_not(lb_none) + ub_some = np.logical_not(ub_none) + c[i_nolb] *= -1 + if x0 is not None: + x0[i_nolb] *= -1 + if len(i_nolb) > 0: + if A_ub.shape[0] > 0: # sometimes needed for sparse arrays... 
weird + A_ub[:, i_nolb] *= -1 + if A_eq.shape[0] > 0: + A_eq[:, i_nolb] *= -1 + + # upper bound: add inequality constraint + i_newub, = ub_some.nonzero() + ub_newub = ubs[ub_some] + n_bounds = len(i_newub) + if n_bounds > 0: + shape = (n_bounds, A_ub.shape[1]) + if sparse: + idxs = (np.arange(n_bounds), i_newub) + A_ub = vstack((A_ub, sps.csr_matrix((np.ones(n_bounds), idxs), + shape=shape))) + else: + A_ub = vstack((A_ub, np.zeros(shape))) + A_ub[np.arange(m_ub, A_ub.shape[0]), i_newub] = 1 + b_ub = np.concatenate((b_ub, np.zeros(n_bounds))) + b_ub[m_ub:] = ub_newub + + A1 = vstack((A_ub, A_eq)) + b = np.concatenate((b_ub, b_eq)) + c = np.concatenate((c, np.zeros((A_ub.shape[0],)))) + if x0 is not None: + x0 = np.concatenate((x0, np.zeros((A_ub.shape[0],)))) + # unbounded: substitute xi = xi+ + xi- + l_free = np.logical_and(lb_none, ub_none) + i_free = np.nonzero(l_free)[0] + n_free = len(i_free) + c = np.concatenate((c, np.zeros(n_free))) + if x0 is not None: + x0 = np.concatenate((x0, np.zeros(n_free))) + A1 = hstack((A1[:, :n_ub], -A1[:, i_free])) + c[n_ub:n_ub+n_free] = -c[i_free] + if x0 is not None: + i_free_neg = x0[i_free] < 0 + x0[np.arange(n_ub, A1.shape[1])[i_free_neg]] = -x0[i_free[i_free_neg]] + x0[i_free[i_free_neg]] = 0 + + # add slack variables + A2 = vstack([eye(A_ub.shape[0]), zeros((A_eq.shape[0], A_ub.shape[0]))]) + + A = hstack([A1, A2]) + + # lower bound: substitute xi = xi' + lb + # now there is a constant term in objective + i_shift = np.nonzero(lb_some)[0] + lb_shift = lbs[lb_some].astype(float) + c0 += np.sum(lb_shift * c[i_shift]) + if sparse: + b = b.reshape(-1, 1) + A = A.tocsc() + b -= (A[:, i_shift] * sps.diags(lb_shift)).sum(axis=1) + b = b.ravel() + else: + b -= (A[:, i_shift] * lb_shift).sum(axis=1) + if x0 is not None: + x0[i_shift] -= lb_shift + + return A, b, c, c0, x0 + + +def _round_to_power_of_two(x): + """ + Round elements of the array to the nearest power of two. + """ + return 2**np.around(np.log2(x)) + + +def _autoscale(A, b, c, x0): + """ + Scales the problem according to equilibration from [12]. + Also normalizes the right hand side vector by its maximum element. + """ + m, n = A.shape + + C = 1 + R = 1 + + if A.size > 0: + + R = np.max(np.abs(A), axis=1) + if sps.issparse(A): + R = R.toarray().flatten() + R[R == 0] = 1 + R = 1/_round_to_power_of_two(R) + A = sps.diags(R)*A if sps.issparse(A) else A*R.reshape(m, 1) + b = b*R + + C = np.max(np.abs(A), axis=0) + if sps.issparse(A): + C = C.toarray().flatten() + C[C == 0] = 1 + C = 1/_round_to_power_of_two(C) + A = A*sps.diags(C) if sps.issparse(A) else A*C + c = c*C + + b_scale = np.max(np.abs(b)) if b.size > 0 else 1 + if b_scale == 0: + b_scale = 1. + b = b/b_scale + + if x0 is not None: + x0 = x0/b_scale*(1/C) + return A, b, c, x0, C, b_scale + + +def _unscale(x, C, b_scale): + """ + Converts solution to _autoscale problem -> solution to original problem. + """ + + try: + n = len(C) + # fails if sparse or scalar; that's OK. + # this is only needed for original simplex (never sparse) + except TypeError: + n = len(x) + + return x[:n]*b_scale*C + + +def _display_summary(message, status, fun, iteration): + """ + Print the termination summary of the linear program + + Parameters + ---------- + message : str + A string descriptor of the exit status of the optimization. 
+ status : int + An integer representing the exit status of the optimization:: + + 0 : Optimization terminated successfully + 1 : Iteration limit reached + 2 : Problem appears to be infeasible + 3 : Problem appears to be unbounded + 4 : Serious numerical difficulties encountered + + fun : float + Value of the objective function. + iteration : iteration + The number of iterations performed. + """ + print(message) + if status in (0, 1): + print(f" Current function value: {fun: <12.6f}") + print(f" Iterations: {iteration:d}") + + +def _postsolve(x, postsolve_args, complete=False): + """ + Given solution x to presolved, standard form linear program x, add + fixed variables back into the problem and undo the variable substitutions + to get solution to original linear program. Also, calculate the objective + function value, slack in original upper bound constraints, and residuals + in original equality constraints. + + Parameters + ---------- + x : 1-D array + Solution vector to the standard-form problem. + postsolve_args : tuple + Data needed by _postsolve to convert the solution to the standard-form + problem into the solution to the original problem, including: + + lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields: + + c : 1D array + The coefficients of the linear objective function to be minimized. + A_ub : 2D array, optional + The inequality constraint matrix. Each row of ``A_ub`` specifies the + coefficients of a linear inequality constraint on ``x``. + b_ub : 1D array, optional + The inequality constraint vector. Each element represents an + upper bound on the corresponding value of ``A_ub @ x``. + A_eq : 2D array, optional + The equality constraint matrix. Each row of ``A_eq`` specifies the + coefficients of a linear equality constraint on ``x``. + b_eq : 1D array, optional + The equality constraint vector. Each element of ``A_eq @ x`` must equal + the corresponding element of ``b_eq``. + bounds : 2D array + The bounds of ``x``, lower bounds in the 1st column, upper + bounds in the 2nd column. The bounds are possibly tightened + by the presolve procedure. + x0 : 1D array, optional + Guess values of the decision variables, which will be refined by + the optimization algorithm. This argument is currently used only by the + 'revised simplex' method, and can only be used if `x0` represents a + basic feasible solution. 
+ + revstack: list of functions + the functions in the list reverse the operations of _presolve() + the function signature is x_org = f(x_mod), where x_mod is the result + of a presolve step and x_org the value at the start of the step + complete : bool + Whether the solution is was determined in presolve (``True`` if so) + + Returns + ------- + x : 1-D array + Solution vector to original linear programming problem + fun: float + optimal objective value for original problem + slack : 1-D array + The (non-negative) slack in the upper bound constraints, that is, + ``b_ub - A_ub @ x`` + con : 1-D array + The (nominally zero) residuals of the equality constraints, that is, + ``b - A_eq @ x`` + """ + # note that all the inputs are the ORIGINAL, unmodified versions + # no rows, columns have been removed + + c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality = postsolve_args[0] + revstack, C, b_scale = postsolve_args[1:] + + x = _unscale(x, C, b_scale) + + # Undo variable substitutions of _get_Abc() + # if "complete", problem was solved in presolve; don't do anything here + n_x = bounds.shape[0] + if not complete and bounds is not None: # bounds are never none, probably + n_unbounded = 0 + for i, bi in enumerate(bounds): + lbi = bi[0] + ubi = bi[1] + if lbi == -np.inf and ubi == np.inf: + n_unbounded += 1 + x[i] = x[i] - x[n_x + n_unbounded - 1] + else: + if lbi == -np.inf: + x[i] = ubi - x[i] + else: + x[i] += lbi + # all the rest of the variables were artificial + x = x[:n_x] + + # If there were variables removed from the problem, add them back into the + # solution vector + # Apply the functions in revstack (reverse direction) + for rev in reversed(revstack): + x = rev(x) + + fun = x.dot(c) + slack = b_ub - A_ub.dot(x) # report slack for ORIGINAL UB constraints + # report residuals of ORIGINAL EQ constraints + con = b_eq - A_eq.dot(x) + + return x, fun, slack, con + + +def _check_result(x, fun, status, slack, con, bounds, tol, message, + integrality): + """ + Check the validity of the provided solution. + + A valid (optimal) solution satisfies all bounds, all slack variables are + negative and all equality constraint residuals are strictly non-zero. + Further, the lower-bounds, upper-bounds, slack and residuals contain + no nan values. + + Parameters + ---------- + x : 1-D array + Solution vector to original linear programming problem + fun: float + optimal objective value for original problem + status : int + An integer representing the exit status of the optimization:: + + 0 : Optimization terminated successfully + 1 : Iteration limit reached + 2 : Problem appears to be infeasible + 3 : Problem appears to be unbounded + 4 : Serious numerical difficulties encountered + + slack : 1-D array + The (non-negative) slack in the upper bound constraints, that is, + ``b_ub - A_ub @ x`` + con : 1-D array + The (nominally zero) residuals of the equality constraints, that is, + ``b - A_eq @ x`` + bounds : 2D array + The bounds on the original variables ``x`` + message : str + A string descriptor of the exit status of the optimization. + tol : float + Termination tolerance; see [1]_ Section 4.5. + + Returns + ------- + status : int + An integer representing the exit status of the optimization:: + + 0 : Optimization terminated successfully + 1 : Iteration limit reached + 2 : Problem appears to be infeasible + 3 : Problem appears to be unbounded + 4 : Serious numerical difficulties encountered + + message : str + A string descriptor of the exit status of the optimization. 
+ """ + # Somewhat arbitrary + tol = np.sqrt(tol) * 10 + + if x is None: + # HiGHS does not provide x if infeasible/unbounded + if status == 0: # Observed with HiGHS Simplex Primal + status = 4 + message = ("The solver did not provide a solution nor did it " + "report a failure. Please submit a bug report.") + return status, message + + contains_nans = ( + np.isnan(x).any() + or np.isnan(fun) + or np.isnan(slack).any() + or np.isnan(con).any() + ) + + if contains_nans: + is_feasible = False + else: + if integrality is None: + integrality = 0 + valid_bounds = (x >= bounds[:, 0] - tol) & (x <= bounds[:, 1] + tol) + # When integrality is 2 or 3, x must be within bounds OR take value 0 + valid_bounds |= (integrality > 1) & np.isclose(x, 0, atol=tol) + invalid_bounds = not np.all(valid_bounds) + + invalid_slack = status != 3 and (slack < -tol).any() + invalid_con = status != 3 and (np.abs(con) > tol).any() + is_feasible = not (invalid_bounds or invalid_slack or invalid_con) + + if status == 0 and not is_feasible: + status = 4 + message = ("The solution does not satisfy the constraints within the " + "required tolerance of " + f"{tol:.2E}" + ", yet " + "no errors were raised and there is no certificate of " + "infeasibility or unboundedness. Check whether " + "the slack and constraint residuals are acceptable; " + "if not, consider enabling presolve, adjusting the " + "tolerance option(s), and/or using a different method. " + "Please consider submitting a bug report.") + elif status == 2 and is_feasible: + # Occurs if the simplex method exits after phase one with a very + # nearly basic feasible solution. Postsolving can make the solution + # basic, however, this solution is NOT optimal + status = 4 + message = ("The solution is feasible, but the solver did not report " + "that the solution was optimal. 
Please try a different " + "method.") + + return status, message diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsap.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsap.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..3113dfbb89439ae3c73682ba36114b036d5a43c6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsap.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/__init__.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f60adcc891304e34ac9d85d108b6a232b4bf0c93 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/__init__.py @@ -0,0 +1,5 @@ +"""This module contains least-squares algorithms.""" +from .least_squares import least_squares +from .lsq_linear import lsq_linear + +__all__ = ['least_squares', 'lsq_linear'] diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6c6c6f97a952c3be7c5a977884c5fd21f31f7693 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/bvls.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/bvls.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4464dc6d94d55ffdef3b893b2178c0a7a6fe841b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/bvls.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/common.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e7d1bcfdbec5493c89522d80a7e4f85960029030 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/common.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/dogbox.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/dogbox.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ca2be364ef6ba1bbbfe70aba17aa0f7f1789294b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/dogbox.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/least_squares.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/least_squares.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..af13e7ad031b04ad4dfbe9ba53c48a949433f208 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/least_squares.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/lsq_linear.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/lsq_linear.cpython-310.pyc new file 
mode 100644 index 0000000000000000000000000000000000000000..616f327993d7b95787e36bf42e7264e15ac4816d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/lsq_linear.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/trf.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/trf.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4939062928366f50a50791342c6aeadfcaa659ea Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/trf.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/trf_linear.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/trf_linear.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..935c5aa74616f1b2eee5e65a961b1a45c66fab2c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/trf_linear.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/trf.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/trf.py new file mode 100644 index 0000000000000000000000000000000000000000..9154bdba5b2cc41883811ba1820dfc251e515d6c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/trf.py @@ -0,0 +1,560 @@ +"""Trust Region Reflective algorithm for least-squares optimization. + +The algorithm is based on ideas from paper [STIR]_. The main idea is to +account for the presence of the bounds by appropriate scaling of the variables (or, +equivalently, changing a trust-region shape). Let's introduce a vector v: + + | ub[i] - x[i], if g[i] < 0 and ub[i] < np.inf + v[i] = | x[i] - lb[i], if g[i] > 0 and lb[i] > -np.inf + | 1, otherwise + +where g is the gradient of a cost function and lb, ub are the bounds. Its +components are distances to the bounds at which the anti-gradient points (if +this distance is finite). Define a scaling matrix D = diag(v**0.5). +First-order optimality conditions can be stated as + + D^2 g(x) = 0. + +Meaning that components of the gradient should be zero for strictly interior +variables, and components must point inside the feasible region for variables +on the bound. + +Now consider this system of equations as a new optimization problem. If the +point x is strictly interior (not on the bound), then the left-hand side is +differentiable and the Newton step for it satisfies + + (D^2 H + diag(g) Jv) p = -D^2 g + +where H is the Hessian matrix (or its J^T J approximation in least squares), +Jv is the Jacobian matrix of v with components -1, 1 or 0, such that all +elements of matrix C = diag(g) Jv are non-negative. Introduce the change +of the variables x = D x_h (_h would be "hat" in LaTeX). In the new variables, +we have a Newton step satisfying + + B_h p_h = -g_h, + +where B_h = D H D + C, g_h = D g. In least squares B_h = J_h^T J_h, where +J_h = J D. Note that J_h and g_h are proper Jacobian and gradient with respect +to "hat" variables. 
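+
+As an illustrative sketch only (assuming dense NumPy arrays; the actual
+implementation uses the CL_scaling_vector helper from .common), these
+quantities could be formed as
+
+    v = np.ones_like(x)
+    mask = (g < 0) & (ub < np.inf)
+    v[mask] = ub[mask] - x[mask]
+    mask = (g > 0) & (lb > -np.inf)
+    v[mask] = x[mask] - lb[mask]
+    d = v**0.5        # diagonal of D
+    J_h = J * d       # J_h = J D (column scaling)
+    g_h = d * g       # g_h = D g
+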
To guarantee global convergence we formulate a +trust-region problem based on the Newton step in the new variables: + + 0.5 * p_h^T B_h p + g_h^T p_h -> min, ||p_h|| <= Delta + +In the original space B = H + D^{-1} C D^{-1}, and the equivalent trust-region +problem is + + 0.5 * p^T B p + g^T p -> min, ||D^{-1} p|| <= Delta + +Here, the meaning of the matrix D becomes more clear: it alters the shape +of a trust-region, such that large steps towards the bounds are not allowed. +In the implementation, the trust-region problem is solved in "hat" space, +but handling of the bounds is done in the original space (see below and read +the code). + +The introduction of the matrix D doesn't allow to ignore bounds, the algorithm +must keep iterates strictly feasible (to satisfy aforementioned +differentiability), the parameter theta controls step back from the boundary +(see the code for details). + +The algorithm does another important trick. If the trust-region solution +doesn't fit into the bounds, then a reflected (from a firstly encountered +bound) search direction is considered. For motivation and analysis refer to +[STIR]_ paper (and other papers of the authors). In practice, it doesn't need +a lot of justifications, the algorithm simply chooses the best step among +three: a constrained trust-region step, a reflected step and a constrained +Cauchy step (a minimizer along -g_h in "hat" space, or -D^2 g in the original +space). + +Another feature is that a trust-region radius control strategy is modified to +account for appearance of the diagonal C matrix (called diag_h in the code). + +Note that all described peculiarities are completely gone as we consider +problems without bounds (the algorithm becomes a standard trust-region type +algorithm very similar to ones implemented in MINPACK). + +The implementation supports two methods of solving the trust-region problem. +The first, called 'exact', applies SVD on Jacobian and then solves the problem +very accurately using the algorithm described in [JJMore]_. It is not +applicable to large problem. The second, called 'lsmr', uses the 2-D subspace +approach (sometimes called "indefinite dogleg"), where the problem is solved +in a subspace spanned by the gradient and the approximate Gauss-Newton step +found by ``scipy.sparse.linalg.lsmr``. A 2-D trust-region problem is +reformulated as a 4th order algebraic equation and solved very accurately by +``numpy.roots``. The subspace approach allows to solve very large problems +(up to couple of millions of residuals on a regular PC), provided the Jacobian +matrix is sufficiently sparse. + +References +---------- +.. [STIR] Branch, M.A., T.F. Coleman, and Y. Li, "A Subspace, Interior, + and Conjugate Gradient Method for Large-Scale Bound-Constrained + Minimization Problems," SIAM Journal on Scientific Computing, + Vol. 21, Number 1, pp 1-23, 1999. +.. [JJMore] More, J. J., "The Levenberg-Marquardt Algorithm: Implementation + and Theory," Numerical Analysis, ed. G. A. 
Watson, Lecture +""" +import numpy as np +from numpy.linalg import norm +from scipy.linalg import svd, qr +from scipy.sparse.linalg import lsmr +from scipy.optimize import OptimizeResult + +from .common import ( + step_size_to_bound, find_active_constraints, in_bounds, + make_strictly_feasible, intersect_trust_region, solve_lsq_trust_region, + solve_trust_region_2d, minimize_quadratic_1d, build_quadratic_1d, + evaluate_quadratic, right_multiplied_operator, regularized_lsq_operator, + CL_scaling_vector, compute_grad, compute_jac_scale, check_termination, + update_tr_radius, scale_for_robust_loss_function, print_header_nonlinear, + print_iteration_nonlinear) + + +def trf(fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale, + loss_function, tr_solver, tr_options, verbose): + # For efficiency, it makes sense to run the simplified version of the + # algorithm when no bounds are imposed. We decided to write the two + # separate functions. It violates the DRY principle, but the individual + # functions are kept the most readable. + if np.all(lb == -np.inf) and np.all(ub == np.inf): + return trf_no_bounds( + fun, jac, x0, f0, J0, ftol, xtol, gtol, max_nfev, x_scale, + loss_function, tr_solver, tr_options, verbose) + else: + return trf_bounds( + fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale, + loss_function, tr_solver, tr_options, verbose) + + +def select_step(x, J_h, diag_h, g_h, p, p_h, d, Delta, lb, ub, theta): + """Select the best step according to Trust Region Reflective algorithm.""" + if in_bounds(x + p, lb, ub): + p_value = evaluate_quadratic(J_h, g_h, p_h, diag=diag_h) + return p, p_h, -p_value + + p_stride, hits = step_size_to_bound(x, p, lb, ub) + + # Compute the reflected direction. + r_h = np.copy(p_h) + r_h[hits.astype(bool)] *= -1 + r = d * r_h + + # Restrict trust-region step, such that it hits the bound. + p *= p_stride + p_h *= p_stride + x_on_bound = x + p + + # Reflected direction will cross first either feasible region or trust + # region boundary. + _, to_tr = intersect_trust_region(p_h, r_h, Delta) + to_bound, _ = step_size_to_bound(x_on_bound, r, lb, ub) + + # Find lower and upper bounds on a step size along the reflected + # direction, considering the strict feasibility requirement. There is no + # single correct way to do that, the chosen approach seems to work best + # on test problems. + r_stride = min(to_bound, to_tr) + if r_stride > 0: + r_stride_l = (1 - theta) * p_stride / r_stride + if r_stride == to_bound: + r_stride_u = theta * to_bound + else: + r_stride_u = to_tr + else: + r_stride_l = 0 + r_stride_u = -1 + + # Check if reflection step is available. + if r_stride_l <= r_stride_u: + a, b, c = build_quadratic_1d(J_h, g_h, r_h, s0=p_h, diag=diag_h) + r_stride, r_value = minimize_quadratic_1d( + a, b, r_stride_l, r_stride_u, c=c) + r_h *= r_stride + r_h += p_h + r = r_h * d + else: + r_value = np.inf + + # Now correct p_h to make it strictly interior. 
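+    # p is pulled back by theta below so that the iterate stays strictly
+    # interior; p_value is then the change of the quadratic model along this
+    # constrained trust-region step. It is compared with r_value (reflected
+    # step) and ag_value (constrained Cauchy step), and the smallest is
+    # returned, negated, as the predicted reduction.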
+ p *= theta + p_h *= theta + p_value = evaluate_quadratic(J_h, g_h, p_h, diag=diag_h) + + ag_h = -g_h + ag = d * ag_h + + to_tr = Delta / norm(ag_h) + to_bound, _ = step_size_to_bound(x, ag, lb, ub) + if to_bound < to_tr: + ag_stride = theta * to_bound + else: + ag_stride = to_tr + + a, b = build_quadratic_1d(J_h, g_h, ag_h, diag=diag_h) + ag_stride, ag_value = minimize_quadratic_1d(a, b, 0, ag_stride) + ag_h *= ag_stride + ag *= ag_stride + + if p_value < r_value and p_value < ag_value: + return p, p_h, -p_value + elif r_value < p_value and r_value < ag_value: + return r, r_h, -r_value + else: + return ag, ag_h, -ag_value + + +def trf_bounds(fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, + x_scale, loss_function, tr_solver, tr_options, verbose): + x = x0.copy() + + f = f0 + f_true = f.copy() + nfev = 1 + + J = J0 + njev = 1 + m, n = J.shape + + if loss_function is not None: + rho = loss_function(f) + cost = 0.5 * np.sum(rho[0]) + J, f = scale_for_robust_loss_function(J, f, rho) + else: + cost = 0.5 * np.dot(f, f) + + g = compute_grad(J, f) + + jac_scale = isinstance(x_scale, str) and x_scale == 'jac' + if jac_scale: + scale, scale_inv = compute_jac_scale(J) + else: + scale, scale_inv = x_scale, 1 / x_scale + + v, dv = CL_scaling_vector(x, g, lb, ub) + v[dv != 0] *= scale_inv[dv != 0] + Delta = norm(x0 * scale_inv / v**0.5) + if Delta == 0: + Delta = 1.0 + + g_norm = norm(g * v, ord=np.inf) + + f_augmented = np.zeros(m + n) + if tr_solver == 'exact': + J_augmented = np.empty((m + n, n)) + elif tr_solver == 'lsmr': + reg_term = 0.0 + regularize = tr_options.pop('regularize', True) + + if max_nfev is None: + max_nfev = x0.size * 100 + + alpha = 0.0 # "Levenberg-Marquardt" parameter + + termination_status = None + iteration = 0 + step_norm = None + actual_reduction = None + + if verbose == 2: + print_header_nonlinear() + + while True: + v, dv = CL_scaling_vector(x, g, lb, ub) + + g_norm = norm(g * v, ord=np.inf) + if g_norm < gtol: + termination_status = 1 + + if verbose == 2: + print_iteration_nonlinear(iteration, nfev, cost, actual_reduction, + step_norm, g_norm) + + if termination_status is not None or nfev == max_nfev: + break + + # Now compute variables in "hat" space. Here, we also account for + # scaling introduced by `x_scale` parameter. This part is a bit tricky, + # you have to write down the formulas and see how the trust-region + # problem is formulated when the two types of scaling are applied. + # The idea is that first we apply `x_scale` and then apply Coleman-Li + # approach in the new variables. + + # v is recomputed in the variables after applying `x_scale`, note that + # components which were identically 1 not affected. + v[dv != 0] *= scale_inv[dv != 0] + + # Here, we apply two types of scaling. + d = v**0.5 * scale + + # C = diag(g * scale) Jv + diag_h = g * dv * scale + + # After all this has been done, we continue normally. + + # "hat" gradient. + g_h = d * g + + f_augmented[:m] = f + if tr_solver == 'exact': + J_augmented[:m] = J * d + J_h = J_augmented[:m] # Memory view. 
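+            # The rows filled in below are diag(diag_h)**0.5, so the augmented
+            # least-squares problem min ||J_augmented p_h + f_augmented||^2
+            # has normal equations (J_h^T J_h + diag(diag_h)) p_h = -g_h,
+            # i.e. the B_h p_h = -g_h system from the module docstring.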
+ J_augmented[m:] = np.diag(diag_h**0.5) + U, s, V = svd(J_augmented, full_matrices=False) + V = V.T + uf = U.T.dot(f_augmented) + elif tr_solver == 'lsmr': + J_h = right_multiplied_operator(J, d) + + if regularize: + a, b = build_quadratic_1d(J_h, g_h, -g_h, diag=diag_h) + to_tr = Delta / norm(g_h) + ag_value = minimize_quadratic_1d(a, b, 0, to_tr)[1] + reg_term = -ag_value / Delta**2 + + lsmr_op = regularized_lsq_operator(J_h, (diag_h + reg_term)**0.5) + gn_h = lsmr(lsmr_op, f_augmented, **tr_options)[0] + S = np.vstack((g_h, gn_h)).T + S, _ = qr(S, mode='economic') + JS = J_h.dot(S) # LinearOperator does dot too. + B_S = np.dot(JS.T, JS) + np.dot(S.T * diag_h, S) + g_S = S.T.dot(g_h) + + # theta controls step back step ratio from the bounds. + theta = max(0.995, 1 - g_norm) + + actual_reduction = -1 + while actual_reduction <= 0 and nfev < max_nfev: + if tr_solver == 'exact': + p_h, alpha, n_iter = solve_lsq_trust_region( + n, m, uf, s, V, Delta, initial_alpha=alpha) + elif tr_solver == 'lsmr': + p_S, _ = solve_trust_region_2d(B_S, g_S, Delta) + p_h = S.dot(p_S) + + p = d * p_h # Trust-region solution in the original space. + step, step_h, predicted_reduction = select_step( + x, J_h, diag_h, g_h, p, p_h, d, Delta, lb, ub, theta) + + x_new = make_strictly_feasible(x + step, lb, ub, rstep=0) + f_new = fun(x_new) + nfev += 1 + + step_h_norm = norm(step_h) + + if not np.all(np.isfinite(f_new)): + Delta = 0.25 * step_h_norm + continue + + # Usual trust-region step quality estimation. + if loss_function is not None: + cost_new = loss_function(f_new, cost_only=True) + else: + cost_new = 0.5 * np.dot(f_new, f_new) + actual_reduction = cost - cost_new + Delta_new, ratio = update_tr_radius( + Delta, actual_reduction, predicted_reduction, + step_h_norm, step_h_norm > 0.95 * Delta) + + step_norm = norm(step) + termination_status = check_termination( + actual_reduction, cost, step_norm, norm(x), ratio, ftol, xtol) + if termination_status is not None: + break + + alpha *= Delta / Delta_new + Delta = Delta_new + + if actual_reduction > 0: + x = x_new + + f = f_new + f_true = f.copy() + + cost = cost_new + + J = jac(x, f) + njev += 1 + + if loss_function is not None: + rho = loss_function(f) + J, f = scale_for_robust_loss_function(J, f, rho) + + g = compute_grad(J, f) + + if jac_scale: + scale, scale_inv = compute_jac_scale(J, scale_inv) + else: + step_norm = 0 + actual_reduction = 0 + + iteration += 1 + + if termination_status is None: + termination_status = 0 + + active_mask = find_active_constraints(x, lb, ub, rtol=xtol) + return OptimizeResult( + x=x, cost=cost, fun=f_true, jac=J, grad=g, optimality=g_norm, + active_mask=active_mask, nfev=nfev, njev=njev, + status=termination_status) + + +def trf_no_bounds(fun, jac, x0, f0, J0, ftol, xtol, gtol, max_nfev, + x_scale, loss_function, tr_solver, tr_options, verbose): + x = x0.copy() + + f = f0 + f_true = f.copy() + nfev = 1 + + J = J0 + njev = 1 + m, n = J.shape + + if loss_function is not None: + rho = loss_function(f) + cost = 0.5 * np.sum(rho[0]) + J, f = scale_for_robust_loss_function(J, f, rho) + else: + cost = 0.5 * np.dot(f, f) + + g = compute_grad(J, f) + + jac_scale = isinstance(x_scale, str) and x_scale == 'jac' + if jac_scale: + scale, scale_inv = compute_jac_scale(J) + else: + scale, scale_inv = x_scale, 1 / x_scale + + Delta = norm(x0 * scale_inv) + if Delta == 0: + Delta = 1.0 + + if tr_solver == 'lsmr': + reg_term = 0 + damp = tr_options.pop('damp', 0.0) + regularize = tr_options.pop('regularize', True) + + if max_nfev is None: + 
max_nfev = x0.size * 100 + + alpha = 0.0 # "Levenberg-Marquardt" parameter + + termination_status = None + iteration = 0 + step_norm = None + actual_reduction = None + + if verbose == 2: + print_header_nonlinear() + + while True: + g_norm = norm(g, ord=np.inf) + if g_norm < gtol: + termination_status = 1 + + if verbose == 2: + print_iteration_nonlinear(iteration, nfev, cost, actual_reduction, + step_norm, g_norm) + + if termination_status is not None or nfev == max_nfev: + break + + d = scale + g_h = d * g + + if tr_solver == 'exact': + J_h = J * d + U, s, V = svd(J_h, full_matrices=False) + V = V.T + uf = U.T.dot(f) + elif tr_solver == 'lsmr': + J_h = right_multiplied_operator(J, d) + + if regularize: + a, b = build_quadratic_1d(J_h, g_h, -g_h) + to_tr = Delta / norm(g_h) + ag_value = minimize_quadratic_1d(a, b, 0, to_tr)[1] + reg_term = -ag_value / Delta**2 + + damp_full = (damp**2 + reg_term)**0.5 + gn_h = lsmr(J_h, f, damp=damp_full, **tr_options)[0] + S = np.vstack((g_h, gn_h)).T + S, _ = qr(S, mode='economic') + JS = J_h.dot(S) + B_S = np.dot(JS.T, JS) + g_S = S.T.dot(g_h) + + actual_reduction = -1 + while actual_reduction <= 0 and nfev < max_nfev: + if tr_solver == 'exact': + step_h, alpha, n_iter = solve_lsq_trust_region( + n, m, uf, s, V, Delta, initial_alpha=alpha) + elif tr_solver == 'lsmr': + p_S, _ = solve_trust_region_2d(B_S, g_S, Delta) + step_h = S.dot(p_S) + + predicted_reduction = -evaluate_quadratic(J_h, g_h, step_h) + step = d * step_h + x_new = x + step + f_new = fun(x_new) + nfev += 1 + + step_h_norm = norm(step_h) + + if not np.all(np.isfinite(f_new)): + Delta = 0.25 * step_h_norm + continue + + # Usual trust-region step quality estimation. + if loss_function is not None: + cost_new = loss_function(f_new, cost_only=True) + else: + cost_new = 0.5 * np.dot(f_new, f_new) + actual_reduction = cost - cost_new + + Delta_new, ratio = update_tr_radius( + Delta, actual_reduction, predicted_reduction, + step_h_norm, step_h_norm > 0.95 * Delta) + + step_norm = norm(step) + termination_status = check_termination( + actual_reduction, cost, step_norm, norm(x), ratio, ftol, xtol) + if termination_status is not None: + break + + alpha *= Delta / Delta_new + Delta = Delta_new + + if actual_reduction > 0: + x = x_new + + f = f_new + f_true = f.copy() + + cost = cost_new + + J = jac(x, f) + njev += 1 + + if loss_function is not None: + rho = loss_function(f) + J, f = scale_for_robust_loss_function(J, f, rho) + + g = compute_grad(J, f) + + if jac_scale: + scale, scale_inv = compute_jac_scale(J, scale_inv) + else: + step_norm = 0 + actual_reduction = 0 + + iteration += 1 + + if termination_status is None: + termination_status = 0 + + active_mask = np.zeros_like(x) + return OptimizeResult( + x=x, cost=cost, fun=f_true, jac=J, grad=g, optimality=g_norm, + active_mask=active_mask, nfev=nfev, njev=njev, + status=termination_status) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_milp.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_milp.py new file mode 100644 index 0000000000000000000000000000000000000000..fd9ecf52083f1312ad6fceff3e3917ff262d90ac --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_milp.py @@ -0,0 +1,392 @@ +import warnings +import numpy as np +from scipy.sparse import csc_array, vstack, issparse +from scipy._lib._util import VisibleDeprecationWarning +from ._highs._highs_wrapper import _highs_wrapper # type: ignore[import] +from ._constraints import LinearConstraint, Bounds +from ._optimize import OptimizeResult 
+from ._linprog_highs import _highs_to_scipy_status_message + + +def _constraints_to_components(constraints): + """ + Convert sequence of constraints to a single set of components A, b_l, b_u. + + `constraints` could be + + 1. A LinearConstraint + 2. A tuple representing a LinearConstraint + 3. An invalid object + 4. A sequence of composed entirely of objects of type 1/2 + 5. A sequence containing at least one object of type 3 + + We want to accept 1, 2, and 4 and reject 3 and 5. + """ + message = ("`constraints` (or each element within `constraints`) must be " + "convertible into an instance of " + "`scipy.optimize.LinearConstraint`.") + As = [] + b_ls = [] + b_us = [] + + # Accept case 1 by standardizing as case 4 + if isinstance(constraints, LinearConstraint): + constraints = [constraints] + else: + # Reject case 3 + try: + iter(constraints) + except TypeError as exc: + raise ValueError(message) from exc + + # Accept case 2 by standardizing as case 4 + if len(constraints) == 3: + # argument could be a single tuple representing a LinearConstraint + try: + constraints = [LinearConstraint(*constraints)] + except (TypeError, ValueError, VisibleDeprecationWarning): + # argument was not a tuple representing a LinearConstraint + pass + + # Address cases 4/5 + for constraint in constraints: + # if it's not a LinearConstraint or something that represents a + # LinearConstraint at this point, it's invalid + if not isinstance(constraint, LinearConstraint): + try: + constraint = LinearConstraint(*constraint) + except TypeError as exc: + raise ValueError(message) from exc + As.append(csc_array(constraint.A)) + b_ls.append(np.atleast_1d(constraint.lb).astype(np.float64)) + b_us.append(np.atleast_1d(constraint.ub).astype(np.float64)) + + if len(As) > 1: + A = vstack(As, format="csc") + b_l = np.concatenate(b_ls) + b_u = np.concatenate(b_us) + else: # avoid unnecessary copying + A = As[0] + b_l = b_ls[0] + b_u = b_us[0] + + return A, b_l, b_u + + +def _milp_iv(c, integrality, bounds, constraints, options): + # objective IV + if issparse(c): + raise ValueError("`c` must be a dense array.") + c = np.atleast_1d(c).astype(np.float64) + if c.ndim != 1 or c.size == 0 or not np.all(np.isfinite(c)): + message = ("`c` must be a one-dimensional array of finite numbers " + "with at least one element.") + raise ValueError(message) + + # integrality IV + if issparse(integrality): + raise ValueError("`integrality` must be a dense array.") + message = ("`integrality` must contain integers 0-3 and be broadcastable " + "to `c.shape`.") + if integrality is None: + integrality = 0 + try: + integrality = np.broadcast_to(integrality, c.shape).astype(np.uint8) + except ValueError: + raise ValueError(message) + if integrality.min() < 0 or integrality.max() > 3: + raise ValueError(message) + + # bounds IV + if bounds is None: + bounds = Bounds(0, np.inf) + elif not isinstance(bounds, Bounds): + message = ("`bounds` must be convertible into an instance of " + "`scipy.optimize.Bounds`.") + try: + bounds = Bounds(*bounds) + except TypeError as exc: + raise ValueError(message) from exc + + try: + lb = np.broadcast_to(bounds.lb, c.shape).astype(np.float64) + ub = np.broadcast_to(bounds.ub, c.shape).astype(np.float64) + except (ValueError, TypeError) as exc: + message = ("`bounds.lb` and `bounds.ub` must contain reals and " + "be broadcastable to `c.shape`.") + raise ValueError(message) from exc + + # constraints IV + if not constraints: + constraints = [LinearConstraint(np.empty((0, c.size)), + np.empty((0,)), np.empty((0,)))] + 
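+    # An empty LinearConstraint (0 rows, c.size columns) lets the conversion
+    # below produce A, b_l and b_u with consistent shapes even when the user
+    # passes no constraints.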
try: + A, b_l, b_u = _constraints_to_components(constraints) + except ValueError as exc: + message = ("`constraints` (or each element within `constraints`) must " + "be convertible into an instance of " + "`scipy.optimize.LinearConstraint`.") + raise ValueError(message) from exc + + if A.shape != (b_l.size, c.size): + message = "The shape of `A` must be (len(b_l), len(c))." + raise ValueError(message) + indptr, indices, data = A.indptr, A.indices, A.data.astype(np.float64) + + # options IV + options = options or {} + supported_options = {'disp', 'presolve', 'time_limit', 'node_limit', + 'mip_rel_gap'} + unsupported_options = set(options).difference(supported_options) + if unsupported_options: + message = (f"Unrecognized options detected: {unsupported_options}. " + "These will be passed to HiGHS verbatim.") + warnings.warn(message, RuntimeWarning, stacklevel=3) + options_iv = {'log_to_console': options.pop("disp", False), + 'mip_max_nodes': options.pop("node_limit", None)} + options_iv.update(options) + + return c, integrality, lb, ub, indptr, indices, data, b_l, b_u, options_iv + + +def milp(c, *, integrality=None, bounds=None, constraints=None, options=None): + r""" + Mixed-integer linear programming + + Solves problems of the following form: + + .. math:: + + \min_x \ & c^T x \\ + \mbox{such that} \ & b_l \leq A x \leq b_u,\\ + & l \leq x \leq u, \\ + & x_i \in \mathbb{Z}, i \in X_i + + where :math:`x` is a vector of decision variables; + :math:`c`, :math:`b_l`, :math:`b_u`, :math:`l`, and :math:`u` are vectors; + :math:`A` is a matrix, and :math:`X_i` is the set of indices of + decision variables that must be integral. (In this context, a + variable that can assume only integer values is said to be "integral"; + it has an "integrality" constraint.) + + Alternatively, that's: + + minimize:: + + c @ x + + such that:: + + b_l <= A @ x <= b_u + l <= x <= u + Specified elements of x must be integers + + By default, ``l = 0`` and ``u = np.inf`` unless specified with + ``bounds``. + + Parameters + ---------- + c : 1D dense array_like + The coefficients of the linear objective function to be minimized. + `c` is converted to a double precision array before the problem is + solved. + integrality : 1D dense array_like, optional + Indicates the type of integrality constraint on each decision variable. + + ``0`` : Continuous variable; no integrality constraint. + + ``1`` : Integer variable; decision variable must be an integer + within `bounds`. + + ``2`` : Semi-continuous variable; decision variable must be within + `bounds` or take value ``0``. + + ``3`` : Semi-integer variable; decision variable must be an integer + within `bounds` or take value ``0``. + + By default, all variables are continuous. `integrality` is converted + to an array of integers before the problem is solved. + + bounds : scipy.optimize.Bounds, optional + Bounds on the decision variables. Lower and upper bounds are converted + to double precision arrays before the problem is solved. The + ``keep_feasible`` parameter of the `Bounds` object is ignored. If + not specified, all decision variables are constrained to be + non-negative. + constraints : sequence of scipy.optimize.LinearConstraint, optional + Linear constraints of the optimization problem. Arguments may be + one of the following: + + 1. A single `LinearConstraint` object + 2. A single tuple that can be converted to a `LinearConstraint` object + as ``LinearConstraint(*constraints)`` + 3. A sequence composed entirely of objects of type 1. and 2. 
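+
+        For example, ``constraints=(A, b_l, b_u)`` is treated the same as
+        ``constraints=LinearConstraint(A, b_l, b_u)``.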
+ + Before the problem is solved, all values are converted to double + precision, and the matrices of constraint coefficients are converted to + instances of `scipy.sparse.csc_array`. The ``keep_feasible`` parameter + of `LinearConstraint` objects is ignored. + options : dict, optional + A dictionary of solver options. The following keys are recognized. + + disp : bool (default: ``False``) + Set to ``True`` if indicators of optimization status are to be + printed to the console during optimization. + node_limit : int, optional + The maximum number of nodes (linear program relaxations) to solve + before stopping. Default is no maximum number of nodes. + presolve : bool (default: ``True``) + Presolve attempts to identify trivial infeasibilities, + identify trivial unboundedness, and simplify the problem before + sending it to the main solver. + time_limit : float, optional + The maximum number of seconds allotted to solve the problem. + Default is no time limit. + mip_rel_gap : float, optional + Termination criterion for MIP solver: solver will terminate when + the gap between the primal objective value and the dual objective + bound, scaled by the primal objective value, is <= mip_rel_gap. + + Returns + ------- + res : OptimizeResult + An instance of :class:`scipy.optimize.OptimizeResult`. The object + is guaranteed to have the following attributes. + + status : int + An integer representing the exit status of the algorithm. + + ``0`` : Optimal solution found. + + ``1`` : Iteration or time limit reached. + + ``2`` : Problem is infeasible. + + ``3`` : Problem is unbounded. + + ``4`` : Other; see message for details. + + success : bool + ``True`` when an optimal solution is found and ``False`` otherwise. + + message : str + A string descriptor of the exit status of the algorithm. + + The following attributes will also be present, but the values may be + ``None``, depending on the solution status. + + x : ndarray + The values of the decision variables that minimize the + objective function while satisfying the constraints. + fun : float + The optimal value of the objective function ``c @ x``. + mip_node_count : int + The number of subproblems or "nodes" solved by the MILP solver. + mip_dual_bound : float + The MILP solver's final estimate of the lower bound on the optimal + solution. + mip_gap : float + The difference between the primal objective value and the dual + objective bound, scaled by the primal objective value. + + Notes + ----- + `milp` is a wrapper of the HiGHS linear optimization software [1]_. The + algorithm is deterministic, and it typically finds the global optimum of + moderately challenging mixed-integer linear programs (when it exists). + + References + ---------- + .. [1] Huangfu, Q., Galabova, I., Feldmeier, M., and Hall, J. A. J. + "HiGHS - high performance software for linear optimization." + https://highs.dev/ + .. [2] Huangfu, Q. and Hall, J. A. J. "Parallelizing the dual revised + simplex method." Mathematical Programming Computation, 10 (1), + 119-142, 2018. DOI: 10.1007/s12532-017-0130-5 + + Examples + -------- + Consider the problem at + https://en.wikipedia.org/wiki/Integer_programming#Example, which is + expressed as a maximization problem of two variables. 
Since `milp` requires + that the problem be expressed as a minimization problem, the objective + function coefficients on the decision variables are: + + >>> import numpy as np + >>> c = -np.array([0, 1]) + + Note the negative sign: we maximize the original objective function + by minimizing the negative of the objective function. + + We collect the coefficients of the constraints into arrays like: + + >>> A = np.array([[-1, 1], [3, 2], [2, 3]]) + >>> b_u = np.array([1, 12, 12]) + >>> b_l = np.full_like(b_u, -np.inf) + + Because there is no lower limit on these constraints, we have defined a + variable ``b_l`` full of values representing negative infinity. This may + be unfamiliar to users of `scipy.optimize.linprog`, which only accepts + "less than" (or "upper bound") inequality constraints of the form + ``A_ub @ x <= b_u``. By accepting both ``b_l`` and ``b_u`` of constraints + ``b_l <= A_ub @ x <= b_u``, `milp` makes it easy to specify "greater than" + inequality constraints, "less than" inequality constraints, and equality + constraints concisely. + + These arrays are collected into a single `LinearConstraint` object like: + + >>> from scipy.optimize import LinearConstraint + >>> constraints = LinearConstraint(A, b_l, b_u) + + The non-negativity bounds on the decision variables are enforced by + default, so we do not need to provide an argument for `bounds`. + + Finally, the problem states that both decision variables must be integers: + + >>> integrality = np.ones_like(c) + + We solve the problem like: + + >>> from scipy.optimize import milp + >>> res = milp(c=c, constraints=constraints, integrality=integrality) + >>> res.x + [1.0, 2.0] + + Note that had we solved the relaxed problem (without integrality + constraints): + + >>> res = milp(c=c, constraints=constraints) # OR: + >>> # from scipy.optimize import linprog; res = linprog(c, A, b_u) + >>> res.x + [1.8, 2.8] + + we would not have obtained the correct solution by rounding to the nearest + integers. + + Other examples are given :ref:`in the tutorial `. 
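+
+    Equality constraints fit the same framework: passing identical lower and
+    upper bounds, as in ``LinearConstraint(A, b, b)``, enforces ``A @ x == b``.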
+ + """ + args_iv = _milp_iv(c, integrality, bounds, constraints, options) + c, integrality, lb, ub, indptr, indices, data, b_l, b_u, options = args_iv + + highs_res = _highs_wrapper(c, indptr, indices, data, b_l, b_u, + lb, ub, integrality, options) + + res = {} + + # Convert to scipy-style status and message + highs_status = highs_res.get('status', None) + highs_message = highs_res.get('message', None) + status, message = _highs_to_scipy_status_message(highs_status, + highs_message) + res['status'] = status + res['message'] = message + res['success'] = (status == 0) + x = highs_res.get('x', None) + res['x'] = np.array(x) if x is not None else None + res['fun'] = highs_res.get('fun', None) + res['mip_node_count'] = highs_res.get('mip_node_count', None) + res['mip_dual_bound'] = highs_res.get('mip_dual_bound', None) + res['mip_gap'] = highs_res.get('mip_gap', None) + + return OptimizeResult(res) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_minpack2.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_minpack2.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..e33f9ab02aa2efd0c6c33ed2135942827917f41c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_minpack2.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_minpack_py.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_minpack_py.py new file mode 100644 index 0000000000000000000000000000000000000000..afb2087ac25d0e8c34d81860f6dd26b14895340d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_minpack_py.py @@ -0,0 +1,1157 @@ +import warnings +from . import _minpack + +import numpy as np +from numpy import (atleast_1d, triu, shape, transpose, zeros, prod, greater, + asarray, inf, + finfo, inexact, issubdtype, dtype) +from scipy import linalg +from scipy.linalg import svd, cholesky, solve_triangular, LinAlgError +from scipy._lib._util import _asarray_validated, _lazywhere, _contains_nan +from scipy._lib._util import getfullargspec_no_self as _getfullargspec +from ._optimize import OptimizeResult, _check_unknown_options, OptimizeWarning +from ._lsq import least_squares +# from ._lsq.common import make_strictly_feasible +from ._lsq.least_squares import prepare_bounds +from scipy.optimize._minimize import Bounds + +# deprecated imports to be removed in SciPy 1.13.0 +from numpy import dot, eye, take # noqa: F401 +from numpy.linalg import inv # noqa: F401 + +error = _minpack.error + +__all__ = ['fsolve', 'leastsq', 'fixed_point', 'curve_fit'] + + +def _check_func(checker, argname, thefunc, x0, args, numinputs, + output_shape=None): + res = atleast_1d(thefunc(*((x0[:numinputs],) + args))) + if (output_shape is not None) and (shape(res) != output_shape): + if (output_shape[0] != 1): + if len(output_shape) > 1: + if output_shape[1] == 1: + return shape(res) + msg = f"{checker}: there is a mismatch between the input and output " \ + f"shape of the '{argname}' argument" + func_name = getattr(thefunc, '__name__', None) + if func_name: + msg += " '%s'." % func_name + else: + msg += "." + msg += f'Shape should be {output_shape} but it is {shape(res)}.' 
+ raise TypeError(msg) + if issubdtype(res.dtype, inexact): + dt = res.dtype + else: + dt = dtype(float) + return shape(res), dt + + +def fsolve(func, x0, args=(), fprime=None, full_output=0, + col_deriv=0, xtol=1.49012e-8, maxfev=0, band=None, + epsfcn=None, factor=100, diag=None): + """ + Find the roots of a function. + + Return the roots of the (non-linear) equations defined by + ``func(x) = 0`` given a starting estimate. + + Parameters + ---------- + func : callable ``f(x, *args)`` + A function that takes at least one (possibly vector) argument, + and returns a value of the same length. + x0 : ndarray + The starting estimate for the roots of ``func(x) = 0``. + args : tuple, optional + Any extra arguments to `func`. + fprime : callable ``f(x, *args)``, optional + A function to compute the Jacobian of `func` with derivatives + across the rows. By default, the Jacobian will be estimated. + full_output : bool, optional + If True, return optional outputs. + col_deriv : bool, optional + Specify whether the Jacobian function computes derivatives down + the columns (faster, because there is no transpose operation). + xtol : float, optional + The calculation will terminate if the relative error between two + consecutive iterates is at most `xtol`. + maxfev : int, optional + The maximum number of calls to the function. If zero, then + ``100*(N+1)`` is the maximum where N is the number of elements + in `x0`. + band : tuple, optional + If set to a two-sequence containing the number of sub- and + super-diagonals within the band of the Jacobi matrix, the + Jacobi matrix is considered banded (only for ``fprime=None``). + epsfcn : float, optional + A suitable step length for the forward-difference + approximation of the Jacobian (for ``fprime=None``). If + `epsfcn` is less than the machine precision, it is assumed + that the relative errors in the functions are of the order of + the machine precision. + factor : float, optional + A parameter determining the initial step bound + (``factor * || diag * x||``). Should be in the interval + ``(0.1, 100)``. + diag : sequence, optional + N positive entries that serve as a scale factors for the + variables. + + Returns + ------- + x : ndarray + The solution (or the result of the last iteration for + an unsuccessful call). + infodict : dict + A dictionary of optional outputs with the keys: + + ``nfev`` + number of function calls + ``njev`` + number of Jacobian calls + ``fvec`` + function evaluated at the output + ``fjac`` + the orthogonal matrix, q, produced by the QR + factorization of the final approximate Jacobian + matrix, stored column wise + ``r`` + upper triangular matrix produced by QR factorization + of the same matrix + ``qtf`` + the vector ``(transpose(q) * fvec)`` + + ier : int + An integer flag. Set to 1 if a solution was found, otherwise refer + to `mesg` for more information. + mesg : str + If no solution is found, `mesg` details the cause of failure. + + See Also + -------- + root : Interface to root finding algorithms for multivariate + functions. See the ``method='hybr'`` in particular. + + Notes + ----- + ``fsolve`` is a wrapper around MINPACK's hybrd and hybrj algorithms. + + Examples + -------- + Find a solution to the system of equations: + ``x0*cos(x1) = 4, x1*x0 - x1 = 5``. + + >>> import numpy as np + >>> from scipy.optimize import fsolve + >>> def func(x): + ... return [x[0] * np.cos(x[1]) - 4, + ... 
x[1] * x[0] - x[1] - 5] + >>> root = fsolve(func, [1, 1]) + >>> root + array([6.50409711, 0.90841421]) + >>> np.isclose(func(root), [0.0, 0.0]) # func(root) should be almost 0.0. + array([ True, True]) + + """ + options = {'col_deriv': col_deriv, + 'xtol': xtol, + 'maxfev': maxfev, + 'band': band, + 'eps': epsfcn, + 'factor': factor, + 'diag': diag} + + res = _root_hybr(func, x0, args, jac=fprime, **options) + if full_output: + x = res['x'] + info = {k: res.get(k) + for k in ('nfev', 'njev', 'fjac', 'r', 'qtf') if k in res} + info['fvec'] = res['fun'] + return x, info, res['status'], res['message'] + else: + status = res['status'] + msg = res['message'] + if status == 0: + raise TypeError(msg) + elif status == 1: + pass + elif status in [2, 3, 4, 5]: + warnings.warn(msg, RuntimeWarning, stacklevel=2) + else: + raise TypeError(msg) + return res['x'] + + +def _root_hybr(func, x0, args=(), jac=None, + col_deriv=0, xtol=1.49012e-08, maxfev=0, band=None, eps=None, + factor=100, diag=None, **unknown_options): + """ + Find the roots of a multivariate function using MINPACK's hybrd and + hybrj routines (modified Powell method). + + Options + ------- + col_deriv : bool + Specify whether the Jacobian function computes derivatives down + the columns (faster, because there is no transpose operation). + xtol : float + The calculation will terminate if the relative error between two + consecutive iterates is at most `xtol`. + maxfev : int + The maximum number of calls to the function. If zero, then + ``100*(N+1)`` is the maximum where N is the number of elements + in `x0`. + band : tuple + If set to a two-sequence containing the number of sub- and + super-diagonals within the band of the Jacobi matrix, the + Jacobi matrix is considered banded (only for ``fprime=None``). + eps : float + A suitable step length for the forward-difference + approximation of the Jacobian (for ``fprime=None``). If + `eps` is less than the machine precision, it is assumed + that the relative errors in the functions are of the order of + the machine precision. + factor : float + A parameter determining the initial step bound + (``factor * || diag * x||``). Should be in the interval + ``(0.1, 100)``. + diag : sequence + N positive entries that serve as a scale factors for the + variables. + + """ + _check_unknown_options(unknown_options) + epsfcn = eps + + x0 = asarray(x0).flatten() + n = len(x0) + if not isinstance(args, tuple): + args = (args,) + shape, dtype = _check_func('fsolve', 'func', func, x0, args, n, (n,)) + if epsfcn is None: + epsfcn = finfo(dtype).eps + Dfun = jac + if Dfun is None: + if band is None: + ml, mu = -10, -10 + else: + ml, mu = band[:2] + if maxfev == 0: + maxfev = 200 * (n + 1) + retval = _minpack._hybrd(func, x0, args, 1, xtol, maxfev, + ml, mu, epsfcn, factor, diag) + else: + _check_func('fsolve', 'fprime', Dfun, x0, args, n, (n, n)) + if (maxfev == 0): + maxfev = 100 * (n + 1) + retval = _minpack._hybrj(func, Dfun, x0, args, 1, + col_deriv, xtol, maxfev, factor, diag) + + x, status = retval[0], retval[-1] + + errors = {0: "Improper input parameters were entered.", + 1: "The solution converged.", + 2: "The number of calls to function has " + "reached maxfev = %d." % maxfev, + 3: "xtol=%f is too small, no further improvement " + "in the approximate\n solution " + "is possible." 
% xtol, + 4: "The iteration is not making good progress, as measured " + "by the \n improvement from the last five " + "Jacobian evaluations.", + 5: "The iteration is not making good progress, " + "as measured by the \n improvement from the last " + "ten iterations.", + 'unknown': "An error occurred."} + + info = retval[1] + info['fun'] = info.pop('fvec') + sol = OptimizeResult(x=x, success=(status == 1), status=status, + method="hybr") + sol.update(info) + try: + sol['message'] = errors[status] + except KeyError: + sol['message'] = errors['unknown'] + + return sol + + +LEASTSQ_SUCCESS = [1, 2, 3, 4] +LEASTSQ_FAILURE = [5, 6, 7, 8] + + +def leastsq(func, x0, args=(), Dfun=None, full_output=False, + col_deriv=False, ftol=1.49012e-8, xtol=1.49012e-8, + gtol=0.0, maxfev=0, epsfcn=None, factor=100, diag=None): + """ + Minimize the sum of squares of a set of equations. + + :: + + x = arg min(sum(func(y)**2,axis=0)) + y + + Parameters + ---------- + func : callable + Should take at least one (possibly length ``N`` vector) argument and + returns ``M`` floating point numbers. It must not return NaNs or + fitting might fail. ``M`` must be greater than or equal to ``N``. + x0 : ndarray + The starting estimate for the minimization. + args : tuple, optional + Any extra arguments to func are placed in this tuple. + Dfun : callable, optional + A function or method to compute the Jacobian of func with derivatives + across the rows. If this is None, the Jacobian will be estimated. + full_output : bool, optional + If ``True``, return all optional outputs (not just `x` and `ier`). + col_deriv : bool, optional + If ``True``, specify that the Jacobian function computes derivatives + down the columns (faster, because there is no transpose operation). + ftol : float, optional + Relative error desired in the sum of squares. + xtol : float, optional + Relative error desired in the approximate solution. + gtol : float, optional + Orthogonality desired between the function vector and the columns of + the Jacobian. + maxfev : int, optional + The maximum number of calls to the function. If `Dfun` is provided, + then the default `maxfev` is 100*(N+1) where N is the number of elements + in x0, otherwise the default `maxfev` is 200*(N+1). + epsfcn : float, optional + A variable used in determining a suitable step length for the forward- + difference approximation of the Jacobian (for Dfun=None). + Normally the actual step length will be sqrt(epsfcn)*x + If epsfcn is less than the machine precision, it is assumed that the + relative errors are of the order of the machine precision. + factor : float, optional + A parameter determining the initial step bound + (``factor * || diag * x||``). Should be in interval ``(0.1, 100)``. + diag : sequence, optional + N positive entries that serve as a scale factors for the variables. + + Returns + ------- + x : ndarray + The solution (or the result of the last iteration for an unsuccessful + call). + cov_x : ndarray + The inverse of the Hessian. `fjac` and `ipvt` are used to construct an + estimate of the Hessian. A value of None indicates a singular matrix, + which means the curvature in parameters `x` is numerically flat. To + obtain the covariance matrix of the parameters `x`, `cov_x` must be + multiplied by the variance of the residuals -- see curve_fit. Only + returned if `full_output` is ``True``. 
+ infodict : dict + a dictionary of optional outputs with the keys: + + ``nfev`` + The number of function calls + ``fvec`` + The function evaluated at the output + ``fjac`` + A permutation of the R matrix of a QR + factorization of the final approximate + Jacobian matrix, stored column wise. + Together with ipvt, the covariance of the + estimate can be approximated. + ``ipvt`` + An integer array of length N which defines + a permutation matrix, p, such that + fjac*p = q*r, where r is upper triangular + with diagonal elements of nonincreasing + magnitude. Column j of p is column ipvt(j) + of the identity matrix. + ``qtf`` + The vector (transpose(q) * fvec). + + Only returned if `full_output` is ``True``. + mesg : str + A string message giving information about the cause of failure. + Only returned if `full_output` is ``True``. + ier : int + An integer flag. If it is equal to 1, 2, 3 or 4, the solution was + found. Otherwise, the solution was not found. In either case, the + optional output variable 'mesg' gives more information. + + See Also + -------- + least_squares : Newer interface to solve nonlinear least-squares problems + with bounds on the variables. See ``method='lm'`` in particular. + + Notes + ----- + "leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms. + + cov_x is a Jacobian approximation to the Hessian of the least squares + objective function. + This approximation assumes that the objective function is based on the + difference between some observed target data (ydata) and a (non-linear) + function of the parameters `f(xdata, params)` :: + + func(params) = ydata - f(xdata, params) + + so that the objective function is :: + + min sum((ydata - f(xdata, params))**2, axis=0) + params + + The solution, `x`, is always a 1-D array, regardless of the shape of `x0`, + or whether `x0` is a scalar. + + Examples + -------- + >>> from scipy.optimize import leastsq + >>> def func(x): + ... return 2*(x-3)**2+1 + >>> leastsq(func, 0) + (array([2.99999999]), 1) + + """ + x0 = asarray(x0).flatten() + n = len(x0) + if not isinstance(args, tuple): + args = (args,) + shape, dtype = _check_func('leastsq', 'func', func, x0, args, n) + m = shape[0] + + if n > m: + raise TypeError(f"Improper input: func input vector length N={n} must" + f" not exceed func output vector length M={m}") + + if epsfcn is None: + epsfcn = finfo(dtype).eps + + if Dfun is None: + if maxfev == 0: + maxfev = 200*(n + 1) + retval = _minpack._lmdif(func, x0, args, full_output, ftol, xtol, + gtol, maxfev, epsfcn, factor, diag) + else: + if col_deriv: + _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m)) + else: + _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n)) + if maxfev == 0: + maxfev = 100 * (n + 1) + retval = _minpack._lmder(func, Dfun, x0, args, full_output, + col_deriv, ftol, xtol, gtol, maxfev, + factor, diag) + + errors = {0: ["Improper input parameters.", TypeError], + 1: ["Both actual and predicted relative reductions " + "in the sum of squares\n are at most %f" % ftol, None], + 2: ["The relative error between two consecutive " + "iterates is at most %f" % xtol, None], + 3: ["Both actual and predicted relative reductions in " + f"the sum of squares\n are at most {ftol:f} and the " + "relative error between two consecutive " + f"iterates is at \n most {xtol:f}", None], + 4: ["The cosine of the angle between func(x) and any " + "column of the\n Jacobian is at most %f in " + "absolute value" % gtol, None], + 5: ["Number of calls to function has reached " + "maxfev = %d." 
% maxfev, ValueError], + 6: ["ftol=%f is too small, no further reduction " + "in the sum of squares\n is possible." % ftol, + ValueError], + 7: ["xtol=%f is too small, no further improvement in " + "the approximate\n solution is possible." % xtol, + ValueError], + 8: ["gtol=%f is too small, func(x) is orthogonal to the " + "columns of\n the Jacobian to machine " + "precision." % gtol, ValueError]} + + # The FORTRAN return value (possible return values are >= 0 and <= 8) + info = retval[-1] + + if full_output: + cov_x = None + if info in LEASTSQ_SUCCESS: + # This was + # perm = take(eye(n), retval[1]['ipvt'] - 1, 0) + # r = triu(transpose(retval[1]['fjac'])[:n, :]) + # R = dot(r, perm) + # cov_x = inv(dot(transpose(R), R)) + # but the explicit dot product was not necessary and sometimes + # the result was not symmetric positive definite. See gh-4555. + perm = retval[1]['ipvt'] - 1 + n = len(perm) + r = triu(transpose(retval[1]['fjac'])[:n, :]) + inv_triu = linalg.get_lapack_funcs('trtri', (r,)) + try: + # inverse of permuted matrix is a permutation of matrix inverse + invR, trtri_info = inv_triu(r) # default: upper, non-unit diag + if trtri_info != 0: # explicit comparison for readability + raise LinAlgError(f'trtri returned info {trtri_info}') + invR[perm] = invR.copy() + cov_x = invR @ invR.T + except (LinAlgError, ValueError): + pass + return (retval[0], cov_x) + retval[1:-1] + (errors[info][0], info) + else: + if info in LEASTSQ_FAILURE: + warnings.warn(errors[info][0], RuntimeWarning, stacklevel=2) + elif info == 0: + raise errors[info][1](errors[info][0]) + return retval[0], info + + +def _lightweight_memoizer(f): + # very shallow memoization to address gh-13670: only remember the first set + # of parameters and corresponding function value, and only attempt to use + # them twice (the number of times the function is evaluated at x0). 
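+    # Once a second, distinct parameter vector is seen, `skip_lookup` is set so
+    # that subsequent calls bypass the array comparison and go straight to `f`.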
+ def _memoized_func(params): + if _memoized_func.skip_lookup: + return f(params) + + if np.all(_memoized_func.last_params == params): + return _memoized_func.last_val + elif _memoized_func.last_params is not None: + _memoized_func.skip_lookup = True + + val = f(params) + + if _memoized_func.last_params is None: + _memoized_func.last_params = np.copy(params) + _memoized_func.last_val = val + + return val + + _memoized_func.last_params = None + _memoized_func.last_val = None + _memoized_func.skip_lookup = False + return _memoized_func + + +def _wrap_func(func, xdata, ydata, transform): + if transform is None: + def func_wrapped(params): + return func(xdata, *params) - ydata + elif transform.size == 1 or transform.ndim == 1: + def func_wrapped(params): + return transform * (func(xdata, *params) - ydata) + else: + # Chisq = (y - yd)^T C^{-1} (y-yd) + # transform = L such that C = L L^T + # C^{-1} = L^{-T} L^{-1} + # Chisq = (y - yd)^T L^{-T} L^{-1} (y-yd) + # Define (y-yd)' = L^{-1} (y-yd) + # by solving + # L (y-yd)' = (y-yd) + # and minimize (y-yd)'^T (y-yd)' + def func_wrapped(params): + return solve_triangular(transform, func(xdata, *params) - ydata, lower=True) + return func_wrapped + + +def _wrap_jac(jac, xdata, transform): + if transform is None: + def jac_wrapped(params): + return jac(xdata, *params) + elif transform.ndim == 1: + def jac_wrapped(params): + return transform[:, np.newaxis] * np.asarray(jac(xdata, *params)) + else: + def jac_wrapped(params): + return solve_triangular(transform, + np.asarray(jac(xdata, *params)), + lower=True) + return jac_wrapped + + +def _initialize_feasible(lb, ub): + p0 = np.ones_like(lb) + lb_finite = np.isfinite(lb) + ub_finite = np.isfinite(ub) + + mask = lb_finite & ub_finite + p0[mask] = 0.5 * (lb[mask] + ub[mask]) + + mask = lb_finite & ~ub_finite + p0[mask] = lb[mask] + 1 + + mask = ~lb_finite & ub_finite + p0[mask] = ub[mask] - 1 + + return p0 + + +def curve_fit(f, xdata, ydata, p0=None, sigma=None, absolute_sigma=False, + check_finite=None, bounds=(-np.inf, np.inf), method=None, + jac=None, *, full_output=False, nan_policy=None, + **kwargs): + """ + Use non-linear least squares to fit a function, f, to data. + + Assumes ``ydata = f(xdata, *params) + eps``. + + Parameters + ---------- + f : callable + The model function, f(x, ...). It must take the independent + variable as the first argument and the parameters to fit as + separate remaining arguments. + xdata : array_like + The independent variable where the data is measured. + Should usually be an M-length sequence or an (k,M)-shaped array for + functions with k predictors, and each element should be float + convertible if it is an array like object. + ydata : array_like + The dependent data, a length M array - nominally ``f(xdata, ...)``. + p0 : array_like, optional + Initial guess for the parameters (length N). If None, then the + initial values will all be 1 (if the number of parameters for the + function can be determined using introspection, otherwise a + ValueError is raised). + sigma : None or scalar or M-length sequence or MxM array, optional + Determines the uncertainty in `ydata`. If we define residuals as + ``r = ydata - f(xdata, *popt)``, then the interpretation of `sigma` + depends on its number of dimensions: + + - A scalar or 1-D `sigma` should contain values of standard deviations of + errors in `ydata`. In this case, the optimized function is + ``chisq = sum((r / sigma) ** 2)``. + + - A 2-D `sigma` should contain the covariance matrix of + errors in `ydata`. 
In this case, the optimized function is + ``chisq = r.T @ inv(sigma) @ r``. + + .. versionadded:: 0.19 + + None (default) is equivalent of 1-D `sigma` filled with ones. + absolute_sigma : bool, optional + If True, `sigma` is used in an absolute sense and the estimated parameter + covariance `pcov` reflects these absolute values. + + If False (default), only the relative magnitudes of the `sigma` values matter. + The returned parameter covariance matrix `pcov` is based on scaling + `sigma` by a constant factor. This constant is set by demanding that the + reduced `chisq` for the optimal parameters `popt` when using the + *scaled* `sigma` equals unity. In other words, `sigma` is scaled to + match the sample variance of the residuals after the fit. Default is False. + Mathematically, + ``pcov(absolute_sigma=False) = pcov(absolute_sigma=True) * chisq(popt)/(M-N)`` + check_finite : bool, optional + If True, check that the input arrays do not contain nans of infs, + and raise a ValueError if they do. Setting this parameter to + False may silently produce nonsensical results if the input arrays + do contain nans. Default is True if `nan_policy` is not specified + explicitly and False otherwise. + bounds : 2-tuple of array_like or `Bounds`, optional + Lower and upper bounds on parameters. Defaults to no bounds. + There are two ways to specify the bounds: + + - Instance of `Bounds` class. + + - 2-tuple of array_like: Each element of the tuple must be either + an array with the length equal to the number of parameters, or a + scalar (in which case the bound is taken to be the same for all + parameters). Use ``np.inf`` with an appropriate sign to disable + bounds on all or some parameters. + + method : {'lm', 'trf', 'dogbox'}, optional + Method to use for optimization. See `least_squares` for more details. + Default is 'lm' for unconstrained problems and 'trf' if `bounds` are + provided. The method 'lm' won't work when the number of observations + is less than the number of variables, use 'trf' or 'dogbox' in this + case. + + .. versionadded:: 0.17 + jac : callable, string or None, optional + Function with signature ``jac(x, ...)`` which computes the Jacobian + matrix of the model function with respect to parameters as a dense + array_like structure. It will be scaled according to provided `sigma`. + If None (default), the Jacobian will be estimated numerically. + String keywords for 'trf' and 'dogbox' methods can be used to select + a finite difference scheme, see `least_squares`. + + .. versionadded:: 0.18 + full_output : boolean, optional + If True, this function returns additioal information: `infodict`, + `mesg`, and `ier`. + + .. versionadded:: 1.9 + nan_policy : {'raise', 'omit', None}, optional + Defines how to handle when input contains nan. + The following options are available (default is None): + + * 'raise': throws an error + * 'omit': performs the calculations ignoring nan values + * None: no special handling of NaNs is performed + (except what is done by check_finite); the behavior when NaNs + are present is implementation-dependent and may change. + + Note that if this value is specified explicitly (not None), + `check_finite` will be set as False. + + .. versionadded:: 1.11 + **kwargs + Keyword arguments passed to `leastsq` for ``method='lm'`` or + `least_squares` otherwise. + + Returns + ------- + popt : array + Optimal values for the parameters so that the sum of the squared + residuals of ``f(xdata, *popt) - ydata`` is minimized. 
+ pcov : 2-D array + The estimated approximate covariance of popt. The diagonals provide + the variance of the parameter estimate. To compute one standard + deviation errors on the parameters, use + ``perr = np.sqrt(np.diag(pcov))``. Note that the relationship between + `cov` and parameter error estimates is derived based on a linear + approximation to the model function around the optimum [1]. + When this approximation becomes inaccurate, `cov` may not provide an + accurate measure of uncertainty. + + How the `sigma` parameter affects the estimated covariance + depends on `absolute_sigma` argument, as described above. + + If the Jacobian matrix at the solution doesn't have a full rank, then + 'lm' method returns a matrix filled with ``np.inf``, on the other hand + 'trf' and 'dogbox' methods use Moore-Penrose pseudoinverse to compute + the covariance matrix. Covariance matrices with large condition numbers + (e.g. computed with `numpy.linalg.cond`) may indicate that results are + unreliable. + infodict : dict (returned only if `full_output` is True) + a dictionary of optional outputs with the keys: + + ``nfev`` + The number of function calls. Methods 'trf' and 'dogbox' do not + count function calls for numerical Jacobian approximation, + as opposed to 'lm' method. + ``fvec`` + The residual values evaluated at the solution, for a 1-D `sigma` + this is ``(f(x, *popt) - ydata)/sigma``. + ``fjac`` + A permutation of the R matrix of a QR + factorization of the final approximate + Jacobian matrix, stored column wise. + Together with ipvt, the covariance of the + estimate can be approximated. + Method 'lm' only provides this information. + ``ipvt`` + An integer array of length N which defines + a permutation matrix, p, such that + fjac*p = q*r, where r is upper triangular + with diagonal elements of nonincreasing + magnitude. Column j of p is column ipvt(j) + of the identity matrix. + Method 'lm' only provides this information. + ``qtf`` + The vector (transpose(q) * fvec). + Method 'lm' only provides this information. + + .. versionadded:: 1.9 + mesg : str (returned only if `full_output` is True) + A string message giving information about the solution. + + .. versionadded:: 1.9 + ier : int (returned only if `full_output` is True) + An integer flag. If it is equal to 1, 2, 3 or 4, the solution was + found. Otherwise, the solution was not found. In either case, the + optional output variable `mesg` gives more information. + + .. versionadded:: 1.9 + + Raises + ------ + ValueError + if either `ydata` or `xdata` contain NaNs, or if incompatible options + are used. + + RuntimeError + if the least-squares minimization fails. + + OptimizeWarning + if covariance of the parameters can not be estimated. + + See Also + -------- + least_squares : Minimize the sum of squares of nonlinear functions. + scipy.stats.linregress : Calculate a linear least squares regression for + two sets of measurements. + + Notes + ----- + Users should ensure that inputs `xdata`, `ydata`, and the output of `f` + are ``float64``, or else the optimization may return incorrect results. + + With ``method='lm'``, the algorithm uses the Levenberg-Marquardt algorithm + through `leastsq`. Note that this algorithm can only deal with + unconstrained problems. + + Box constraints can be handled by methods 'trf' and 'dogbox'. Refer to + the docstring of `least_squares` for more information. + + Parameters to be fitted must have similar scale. Differences of multiple + orders of magnitude can lead to incorrect results. 
For the 'trf' and + 'dogbox' methods, the `x_scale` keyword argument can be used to scale + the parameters. + + References + ---------- + [1] K. Vugrin et al. Confidence region estimation techniques for nonlinear + regression in groundwater flow: Three case studies. Water Resources + Research, Vol. 43, W03423, :doi:`10.1029/2005WR004804` + + Examples + -------- + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.optimize import curve_fit + + >>> def func(x, a, b, c): + ... return a * np.exp(-b * x) + c + + Define the data to be fit with some noise: + + >>> xdata = np.linspace(0, 4, 50) + >>> y = func(xdata, 2.5, 1.3, 0.5) + >>> rng = np.random.default_rng() + >>> y_noise = 0.2 * rng.normal(size=xdata.size) + >>> ydata = y + y_noise + >>> plt.plot(xdata, ydata, 'b-', label='data') + + Fit for the parameters a, b, c of the function `func`: + + >>> popt, pcov = curve_fit(func, xdata, ydata) + >>> popt + array([2.56274217, 1.37268521, 0.47427475]) + >>> plt.plot(xdata, func(xdata, *popt), 'r-', + ... label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt)) + + Constrain the optimization to the region of ``0 <= a <= 3``, + ``0 <= b <= 1`` and ``0 <= c <= 0.5``: + + >>> popt, pcov = curve_fit(func, xdata, ydata, bounds=(0, [3., 1., 0.5])) + >>> popt + array([2.43736712, 1. , 0.34463856]) + >>> plt.plot(xdata, func(xdata, *popt), 'g--', + ... label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt)) + + >>> plt.xlabel('x') + >>> plt.ylabel('y') + >>> plt.legend() + >>> plt.show() + + For reliable results, the model `func` should not be overparametrized; + redundant parameters can cause unreliable covariance matrices and, in some + cases, poorer quality fits. As a quick check of whether the model may be + overparameterized, calculate the condition number of the covariance matrix: + + >>> np.linalg.cond(pcov) + 34.571092161547405 # may vary + + The value is small, so it does not raise much concern. If, however, we were + to add a fourth parameter ``d`` to `func` with the same effect as ``a``: + + >>> def func2(x, a, b, c, d): + ... return a * d * np.exp(-b * x) + c # a and d are redundant + >>> popt, pcov = curve_fit(func2, xdata, ydata) + >>> np.linalg.cond(pcov) + 1.13250718925596e+32 # may vary + + Such a large value is cause for concern. The diagonal elements of the + covariance matrix, which is related to uncertainty of the fit, gives more + information: + + >>> np.diag(pcov) + array([1.48814742e+29, 3.78596560e-02, 5.39253738e-03, 2.76417220e+28]) # may vary + + Note that the first and last terms are much larger than the other elements, + suggesting that the optimal values of these parameters are ambiguous and + that only one of these parameters is needed in the model. + + If the optimal parameters of `f` differ by multiple orders of magnitude, the + resulting fit can be inaccurate. Sometimes, `curve_fit` can fail to find any + results: + + >>> ydata = func(xdata, 500000, 0.01, 15) + >>> try: + ... popt, pcov = curve_fit(func, xdata, ydata, method = 'trf') + ... except RuntimeError as e: + ... print(e) + Optimal parameters not found: The maximum number of function evaluations is exceeded. + + If parameter scale is roughly known beforehand, it can be defined in + `x_scale` argument: + + >>> popt, pcov = curve_fit(func, xdata, ydata, method = 'trf', + ... 
x_scale = [1000, 1, 1]) + >>> popt + array([5.00000000e+05, 1.00000000e-02, 1.49999999e+01]) + """ + if p0 is None: + # determine number of parameters by inspecting the function + sig = _getfullargspec(f) + args = sig.args + if len(args) < 2: + raise ValueError("Unable to determine number of fit parameters.") + n = len(args) - 1 + else: + p0 = np.atleast_1d(p0) + n = p0.size + + if isinstance(bounds, Bounds): + lb, ub = bounds.lb, bounds.ub + else: + lb, ub = prepare_bounds(bounds, n) + if p0 is None: + p0 = _initialize_feasible(lb, ub) + + bounded_problem = np.any((lb > -np.inf) | (ub < np.inf)) + if method is None: + if bounded_problem: + method = 'trf' + else: + method = 'lm' + + if method == 'lm' and bounded_problem: + raise ValueError("Method 'lm' only works for unconstrained problems. " + "Use 'trf' or 'dogbox' instead.") + + if check_finite is None: + check_finite = True if nan_policy is None else False + + # optimization may produce garbage for float32 inputs, cast them to float64 + if check_finite: + ydata = np.asarray_chkfinite(ydata, float) + else: + ydata = np.asarray(ydata, float) + + if isinstance(xdata, (list, tuple, np.ndarray)): + # `xdata` is passed straight to the user-defined `f`, so allow + # non-array_like `xdata`. + if check_finite: + xdata = np.asarray_chkfinite(xdata, float) + else: + xdata = np.asarray(xdata, float) + + if ydata.size == 0: + raise ValueError("`ydata` must not be empty!") + + # nan handling is needed only if check_finite is False because if True, + # the x-y data are already checked, and they don't contain nans. + if not check_finite and nan_policy is not None: + if nan_policy == "propagate": + raise ValueError("`nan_policy='propagate'` is not supported " + "by this function.") + + policies = [None, 'raise', 'omit'] + x_contains_nan, nan_policy = _contains_nan(xdata, nan_policy, + policies=policies) + y_contains_nan, nan_policy = _contains_nan(ydata, nan_policy, + policies=policies) + + if (x_contains_nan or y_contains_nan) and nan_policy == 'omit': + # ignore NaNs for N dimensional arrays + has_nan = np.isnan(xdata) + has_nan = has_nan.any(axis=tuple(range(has_nan.ndim-1))) + has_nan |= np.isnan(ydata) + + xdata = xdata[..., ~has_nan] + ydata = ydata[~has_nan] + + # Determine type of sigma + if sigma is not None: + sigma = np.asarray(sigma) + + # if 1-D or a scalar, sigma are errors, define transform = 1/sigma + if sigma.size == 1 or sigma.shape == (ydata.size, ): + transform = 1.0 / sigma + # if 2-D, sigma is the covariance matrix, + # define transform = L such that L L^T = C + elif sigma.shape == (ydata.size, ydata.size): + try: + # scipy.linalg.cholesky requires lower=True to return L L^T = A + transform = cholesky(sigma, lower=True) + except LinAlgError as e: + raise ValueError("`sigma` must be positive definite.") from e + else: + raise ValueError("`sigma` has incorrect shape.") + else: + transform = None + + func = _lightweight_memoizer(_wrap_func(f, xdata, ydata, transform)) + + if callable(jac): + jac = _lightweight_memoizer(_wrap_jac(jac, xdata, transform)) + elif jac is None and method != 'lm': + jac = '2-point' + + if 'args' in kwargs: + # The specification for the model function `f` does not support + # additional arguments. Refer to the `curve_fit` docstring for + # acceptable call signatures of `f`. + raise ValueError("'args' is not a supported keyword argument.") + + if method == 'lm': + # if ydata.size == 1, this might be used for broadcast. 
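+        # `leastsq` requires at least as many residuals as parameters
+        # (M >= N); a scalar `ydata` is exempted since it may broadcast
+        # against the model output.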
+ if ydata.size != 1 and n > ydata.size: + raise TypeError(f"The number of func parameters={n} must not" + f" exceed the number of data points={ydata.size}") + res = leastsq(func, p0, Dfun=jac, full_output=1, **kwargs) + popt, pcov, infodict, errmsg, ier = res + ysize = len(infodict['fvec']) + cost = np.sum(infodict['fvec'] ** 2) + if ier not in [1, 2, 3, 4]: + raise RuntimeError("Optimal parameters not found: " + errmsg) + else: + # Rename maxfev (leastsq) to max_nfev (least_squares), if specified. + if 'max_nfev' not in kwargs: + kwargs['max_nfev'] = kwargs.pop('maxfev', None) + + res = least_squares(func, p0, jac=jac, bounds=bounds, method=method, + **kwargs) + + if not res.success: + raise RuntimeError("Optimal parameters not found: " + res.message) + + infodict = dict(nfev=res.nfev, fvec=res.fun) + ier = res.status + errmsg = res.message + + ysize = len(res.fun) + cost = 2 * res.cost # res.cost is half sum of squares! + popt = res.x + + # Do Moore-Penrose inverse discarding zero singular values. + _, s, VT = svd(res.jac, full_matrices=False) + threshold = np.finfo(float).eps * max(res.jac.shape) * s[0] + s = s[s > threshold] + VT = VT[:s.size] + pcov = np.dot(VT.T / s**2, VT) + + warn_cov = False + if pcov is None or np.isnan(pcov).any(): + # indeterminate covariance + pcov = zeros((len(popt), len(popt)), dtype=float) + pcov.fill(inf) + warn_cov = True + elif not absolute_sigma: + if ysize > p0.size: + s_sq = cost / (ysize - p0.size) + pcov = pcov * s_sq + else: + pcov.fill(inf) + warn_cov = True + + if warn_cov: + warnings.warn('Covariance of the parameters could not be estimated', + category=OptimizeWarning, stacklevel=2) + + if full_output: + return popt, pcov, infodict, errmsg, ier + else: + return popt, pcov + + +def check_gradient(fcn, Dfcn, x0, args=(), col_deriv=0): + """Perform a simple check on the gradient for correctness. + + """ + + x = atleast_1d(x0) + n = len(x) + x = x.reshape((n,)) + fvec = atleast_1d(fcn(x, *args)) + m = len(fvec) + fvec = fvec.reshape((m,)) + ldfjac = m + fjac = atleast_1d(Dfcn(x, *args)) + fjac = fjac.reshape((m, n)) + if col_deriv == 0: + fjac = transpose(fjac) + + xp = zeros((n,), float) + err = zeros((m,), float) + fvecp = None + _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 1, err) + + fvecp = atleast_1d(fcn(xp, *args)) + fvecp = fvecp.reshape((m,)) + _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 2, err) + + good = (prod(greater(err, 0.5), axis=0)) + + return (good, err) + + +def _del2(p0, p1, d): + return p0 - np.square(p1 - p0) / d + + +def _relerr(actual, desired): + return (actual - desired) / desired + + +def _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel): + p0 = x0 + for i in range(maxiter): + p1 = func(p0, *args) + if use_accel: + p2 = func(p1, *args) + d = p2 - 2.0 * p1 + p0 + p = _lazywhere(d != 0, (p0, p1, d), f=_del2, fillvalue=p2) + else: + p = p1 + relerr = _lazywhere(p0 != 0, (p, p0), f=_relerr, fillvalue=p) + if np.all(np.abs(relerr) < xtol): + return p + p0 = p + msg = "Failed to converge after %d iterations, value is %s" % (maxiter, p) + raise RuntimeError(msg) + + +def fixed_point(func, x0, args=(), xtol=1e-8, maxiter=500, method='del2'): + """ + Find a fixed point of the function. + + Given a function of one or more variables and a starting point, find a + fixed point of the function: i.e., where ``func(x0) == x0``. + + Parameters + ---------- + func : function + Function to evaluate. + x0 : array_like + Fixed point of function. + args : tuple, optional + Extra arguments to `func`. 
+ xtol : float, optional + Convergence tolerance, defaults to 1e-08. + maxiter : int, optional + Maximum number of iterations, defaults to 500. + method : {"del2", "iteration"}, optional + Method of finding the fixed-point, defaults to "del2", + which uses Steffensen's Method with Aitken's ``Del^2`` + convergence acceleration [1]_. The "iteration" method simply iterates + the function until convergence is detected, without attempting to + accelerate the convergence. + + References + ---------- + .. [1] Burden, Faires, "Numerical Analysis", 5th edition, pg. 80 + + Examples + -------- + >>> import numpy as np + >>> from scipy import optimize + >>> def func(x, c1, c2): + ... return np.sqrt(c1/(x+c2)) + >>> c1 = np.array([10,12.]) + >>> c2 = np.array([3, 5.]) + >>> optimize.fixed_point(func, [1.2, 1.3], args=(c1,c2)) + array([ 1.4920333 , 1.37228132]) + + """ + use_accel = {'del2': True, 'iteration': False}[method] + x0 = _asarray_validated(x0, as_inexact=True) + return _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_moduleTNC.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_moduleTNC.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..4d5e0f6ea4cafe723f9133d1ac59ecc673c3df89 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_moduleTNC.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_nnls.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_nnls.py new file mode 100644 index 0000000000000000000000000000000000000000..17fcdc9e4cc52b1839cd938f21a78256cfb19436 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_nnls.py @@ -0,0 +1,164 @@ +import numpy as np +from scipy.linalg import solve, LinAlgWarning +import warnings + +__all__ = ['nnls'] + + +def nnls(A, b, maxiter=None, *, atol=None): + """ + Solve ``argmin_x || Ax - b ||_2`` for ``x>=0``. + + This problem, often called as NonNegative Least Squares, is a convex + optimization problem with convex constraints. It typically arises when + the ``x`` models quantities for which only nonnegative values are + attainable; weight of ingredients, component costs and so on. + + Parameters + ---------- + A : (m, n) ndarray + Coefficient array + b : (m,) ndarray, float + Right-hand side vector. + maxiter: int, optional + Maximum number of iterations, optional. Default value is ``3 * n``. + atol: float + Tolerance value used in the algorithm to assess closeness to zero in + the projected residual ``(A.T @ (A x - b)`` entries. Increasing this + value relaxes the solution constraints. A typical relaxation value can + be selected as ``max(m, n) * np.linalg.norm(a, 1) * np.spacing(1.)``. + This value is not set as default since the norm operation becomes + expensive for large problems hence can be used only when necessary. + + Returns + ------- + x : ndarray + Solution vector. + rnorm : float + The 2-norm of the residual, ``|| Ax-b ||_2``. + + See Also + -------- + lsq_linear : Linear least squares with bounds on the variables + + Notes + ----- + The code is based on [2]_ which is an improved version of the classical + algorithm of [1]_. It utilizes an active set method and solves the KKT + (Karush-Kuhn-Tucker) conditions for the non-negative least squares problem. + + References + ---------- + .. 
[1] : Lawson C., Hanson R.J., "Solving Least Squares Problems", SIAM, + 1995, :doi:`10.1137/1.9781611971217` + .. [2] : Bro, Rasmus and de Jong, Sijmen, "A Fast Non-Negativity- + Constrained Least Squares Algorithm", Journal Of Chemometrics, 1997, + :doi:`10.1002/(SICI)1099-128X(199709/10)11:5<393::AID-CEM483>3.0.CO;2-L` + + Examples + -------- + >>> import numpy as np + >>> from scipy.optimize import nnls + ... + >>> A = np.array([[1, 0], [1, 0], [0, 1]]) + >>> b = np.array([2, 1, 1]) + >>> nnls(A, b) + (array([1.5, 1. ]), 0.7071067811865475) + + >>> b = np.array([-1, -1, -1]) + >>> nnls(A, b) + (array([0., 0.]), 1.7320508075688772) + + """ + + A = np.asarray_chkfinite(A) + b = np.asarray_chkfinite(b) + + if len(A.shape) != 2: + raise ValueError("Expected a two-dimensional array (matrix)" + + f", but the shape of A is {A.shape}") + if len(b.shape) != 1: + raise ValueError("Expected a one-dimensional array (vector)" + + f", but the shape of b is {b.shape}") + + m, n = A.shape + + if m != b.shape[0]: + raise ValueError( + "Incompatible dimensions. The first dimension of " + + f"A is {m}, while the shape of b is {(b.shape[0], )}") + + x, rnorm, mode = _nnls(A, b, maxiter, tol=atol) + if mode != 1: + raise RuntimeError("Maximum number of iterations reached.") + + return x, rnorm + + +def _nnls(A, b, maxiter=None, tol=None): + """ + This is a single RHS algorithm from ref [2] above. For multiple RHS + support, the algorithm is given in :doi:`10.1002/cem.889` + """ + m, n = A.shape + + AtA = A.T @ A + Atb = b @ A # Result is 1D - let NumPy figure it out + + if not maxiter: + maxiter = 3*n + if tol is None: + tol = 10 * max(m, n) * np.spacing(1.) + + # Initialize vars + x = np.zeros(n, dtype=np.float64) + s = np.zeros(n, dtype=np.float64) + # Inactive constraint switches + P = np.zeros(n, dtype=bool) + + # Projected residual + w = Atb.copy().astype(np.float64) # x=0. Skip (-AtA @ x) term + + # Overall iteration counter + # Outer loop is not counted, inner iter is counted across outer spins + iter = 0 + + while (not P.all()) and (w[~P] > tol).any(): # B + # Get the "most" active coeff index and move to inactive set + k = np.argmax(w * (~P)) # B.2 + P[k] = True # B.3 + + # Iteration solution + s[:] = 0. + # B.4 + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', message='Ill-conditioned matrix', + category=LinAlgWarning) + s[P] = solve(AtA[np.ix_(P, P)], Atb[P], assume_a='sym', check_finite=False) + + # Inner loop + while (iter < maxiter) and (s[P].min() < 0): # C.1 + iter += 1 + inds = P * (s < 0) + alpha = (x[inds] / (x[inds] - s[inds])).min() # C.2 + x *= (1 - alpha) + x += alpha*s + P[x <= tol] = False + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', message='Ill-conditioned matrix', + category=LinAlgWarning) + s[P] = solve(AtA[np.ix_(P, P)], Atb[P], assume_a='sym', + check_finite=False) + s[~P] = 0 # C.6 + + x[:] = s[:] + w[:] = Atb - AtA @ x + + if iter == maxiter: + # Typically following line should return + # return x, np.linalg.norm(A@x - b), -1 + # however at the top level, -1 raises an exception wasting norm + # Instead return dummy number 0. 
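+            # (The caller `nnls` raises RuntimeError whenever mode != 1, so the
+            # residual norm would be discarded anyway.)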
+ return x, 0., -1 + + return x, np.linalg.norm(A@x - b), 1 diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_nonlin.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_nonlin.py new file mode 100644 index 0000000000000000000000000000000000000000..c3c429fd30da5ff138a7141b871126e4db3a681b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_nonlin.py @@ -0,0 +1,1584 @@ +# Copyright (C) 2009, Pauli Virtanen +# Distributed under the same license as SciPy. + +import inspect +import sys +import warnings + +import numpy as np +from numpy import asarray, dot, vdot + +from scipy.linalg import norm, solve, inv, qr, svd, LinAlgError +import scipy.sparse.linalg +import scipy.sparse +from scipy.linalg import get_blas_funcs +from scipy._lib._util import getfullargspec_no_self as _getfullargspec +from ._linesearch import scalar_search_wolfe1, scalar_search_armijo + + +__all__ = [ + 'broyden1', 'broyden2', 'anderson', 'linearmixing', + 'diagbroyden', 'excitingmixing', 'newton_krylov', + 'BroydenFirst', 'KrylovJacobian', 'InverseJacobian', 'NoConvergence'] + +#------------------------------------------------------------------------------ +# Utility functions +#------------------------------------------------------------------------------ + + +class NoConvergence(Exception): + """Exception raised when nonlinear solver fails to converge within the specified + `maxiter`.""" + pass + + +def maxnorm(x): + return np.absolute(x).max() + + +def _as_inexact(x): + """Return `x` as an array, of either floats or complex floats""" + x = asarray(x) + if not np.issubdtype(x.dtype, np.inexact): + return asarray(x, dtype=np.float64) + return x + + +def _array_like(x, x0): + """Return ndarray `x` as same array subclass and shape as `x0`""" + x = np.reshape(x, np.shape(x0)) + wrap = getattr(x0, '__array_wrap__', x.__array_wrap__) + return wrap(x) + + +def _safe_norm(v): + if not np.isfinite(v).all(): + return np.array(np.inf) + return norm(v) + +#------------------------------------------------------------------------------ +# Generic nonlinear solver machinery +#------------------------------------------------------------------------------ + + +_doc_parts = dict( + params_basic=""" + F : function(x) -> f + Function whose root to find; should take and return an array-like + object. + xin : array_like + Initial guess for the solution + """.strip(), + params_extra=""" + iter : int, optional + Number of iterations to make. If omitted (default), make as many + as required to meet tolerances. + verbose : bool, optional + Print status to stdout on every iteration. + maxiter : int, optional + Maximum number of iterations to make. If more are needed to + meet convergence, `NoConvergence` is raised. + f_tol : float, optional + Absolute tolerance (in max-norm) for the residual. + If omitted, default is 6e-6. + f_rtol : float, optional + Relative tolerance for the residual. If omitted, not used. + x_tol : float, optional + Absolute minimum step size, as determined from the Jacobian + approximation. If the step size is smaller than this, optimization + is terminated as successful. If omitted, not used. + x_rtol : float, optional + Relative minimum step size. If omitted, not used. + tol_norm : function(vector) -> scalar, optional + Norm to use in convergence check. Default is the maximum norm. + line_search : {None, 'armijo' (default), 'wolfe'}, optional + Which type of a line search to use to determine the step size in the + direction given by the Jacobian approximation. 
Defaults to 'armijo'. + callback : function, optional + Optional callback function. It is called on every iteration as + ``callback(x, f)`` where `x` is the current solution and `f` + the corresponding residual. + + Returns + ------- + sol : ndarray + An array (of similar array type as `x0`) containing the final solution. + + Raises + ------ + NoConvergence + When a solution was not found. + + """.strip() +) + + +def _set_doc(obj): + if obj.__doc__: + obj.__doc__ = obj.__doc__ % _doc_parts + + +def nonlin_solve(F, x0, jacobian='krylov', iter=None, verbose=False, + maxiter=None, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None, + tol_norm=None, line_search='armijo', callback=None, + full_output=False, raise_exception=True): + """ + Find a root of a function, in a way suitable for large-scale problems. + + Parameters + ---------- + %(params_basic)s + jacobian : Jacobian + A Jacobian approximation: `Jacobian` object or something that + `asjacobian` can transform to one. Alternatively, a string specifying + which of the builtin Jacobian approximations to use: + + krylov, broyden1, broyden2, anderson + diagbroyden, linearmixing, excitingmixing + + %(params_extra)s + full_output : bool + If true, returns a dictionary `info` containing convergence + information. + raise_exception : bool + If True, a `NoConvergence` exception is raise if no solution is found. + + See Also + -------- + asjacobian, Jacobian + + Notes + ----- + This algorithm implements the inexact Newton method, with + backtracking or full line searches. Several Jacobian + approximations are available, including Krylov and Quasi-Newton + methods. + + References + ---------- + .. [KIM] C. T. Kelley, \"Iterative Methods for Linear and Nonlinear + Equations\". Society for Industrial and Applied Mathematics. (1995) + https://archive.siam.org/books/kelley/fr16/ + + """ + # Can't use default parameters because it's being explicitly passed as None + # from the calling function, so we need to set it here. + tol_norm = maxnorm if tol_norm is None else tol_norm + condition = TerminationCondition(f_tol=f_tol, f_rtol=f_rtol, + x_tol=x_tol, x_rtol=x_rtol, + iter=iter, norm=tol_norm) + + x0 = _as_inexact(x0) + def func(z): + return _as_inexact(F(_array_like(z, x0))).flatten() + x = x0.flatten() + + dx = np.full_like(x, np.inf) + Fx = func(x) + Fx_norm = norm(Fx) + + jacobian = asjacobian(jacobian) + jacobian.setup(x.copy(), Fx, func) + + if maxiter is None: + if iter is not None: + maxiter = iter + 1 + else: + maxiter = 100*(x.size+1) + + if line_search is True: + line_search = 'armijo' + elif line_search is False: + line_search = None + + if line_search not in (None, 'armijo', 'wolfe'): + raise ValueError("Invalid line search") + + # Solver tolerance selection + gamma = 0.9 + eta_max = 0.9999 + eta_treshold = 0.1 + eta = 1e-3 + + for n in range(maxiter): + status = condition.check(Fx, x, dx) + if status: + break + + # The tolerance, as computed for scipy.sparse.linalg.* routines + tol = min(eta, eta*Fx_norm) + dx = -jacobian.solve(Fx, tol=tol) + + if norm(dx) == 0: + raise ValueError("Jacobian inversion yielded zero vector. 
" + "This indicates a bug in the Jacobian " + "approximation.") + + # Line search, or Newton step + if line_search: + s, x, Fx, Fx_norm_new = _nonlin_line_search(func, x, Fx, dx, + line_search) + else: + s = 1.0 + x = x + dx + Fx = func(x) + Fx_norm_new = norm(Fx) + + jacobian.update(x.copy(), Fx) + + if callback: + callback(x, Fx) + + # Adjust forcing parameters for inexact methods + eta_A = gamma * Fx_norm_new**2 / Fx_norm**2 + if gamma * eta**2 < eta_treshold: + eta = min(eta_max, eta_A) + else: + eta = min(eta_max, max(eta_A, gamma*eta**2)) + + Fx_norm = Fx_norm_new + + # Print status + if verbose: + sys.stdout.write("%d: |F(x)| = %g; step %g\n" % ( + n, tol_norm(Fx), s)) + sys.stdout.flush() + else: + if raise_exception: + raise NoConvergence(_array_like(x, x0)) + else: + status = 2 + + if full_output: + info = {'nit': condition.iteration, + 'fun': Fx, + 'status': status, + 'success': status == 1, + 'message': {1: 'A solution was found at the specified ' + 'tolerance.', + 2: 'The maximum number of iterations allowed ' + 'has been reached.' + }[status] + } + return _array_like(x, x0), info + else: + return _array_like(x, x0) + + +_set_doc(nonlin_solve) + + +def _nonlin_line_search(func, x, Fx, dx, search_type='armijo', rdiff=1e-8, + smin=1e-2): + tmp_s = [0] + tmp_Fx = [Fx] + tmp_phi = [norm(Fx)**2] + s_norm = norm(x) / norm(dx) + + def phi(s, store=True): + if s == tmp_s[0]: + return tmp_phi[0] + xt = x + s*dx + v = func(xt) + p = _safe_norm(v)**2 + if store: + tmp_s[0] = s + tmp_phi[0] = p + tmp_Fx[0] = v + return p + + def derphi(s): + ds = (abs(s) + s_norm + 1) * rdiff + return (phi(s+ds, store=False) - phi(s)) / ds + + if search_type == 'wolfe': + s, phi1, phi0 = scalar_search_wolfe1(phi, derphi, tmp_phi[0], + xtol=1e-2, amin=smin) + elif search_type == 'armijo': + s, phi1 = scalar_search_armijo(phi, tmp_phi[0], -tmp_phi[0], + amin=smin) + + if s is None: + # XXX: No suitable step length found. Take the full Newton step, + # and hope for the best. + s = 1.0 + + x = x + s*dx + if s == tmp_s[0]: + Fx = tmp_Fx[0] + else: + Fx = func(x) + Fx_norm = norm(Fx) + + return s, x, Fx, Fx_norm + + +class TerminationCondition: + """ + Termination condition for an iteration. 
It is terminated if + + - |F| < f_rtol*|F_0|, AND + - |F| < f_tol + + AND + + - |dx| < x_rtol*|x|, AND + - |dx| < x_tol + + """ + def __init__(self, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None, + iter=None, norm=maxnorm): + + if f_tol is None: + f_tol = np.finfo(np.float64).eps ** (1./3) + if f_rtol is None: + f_rtol = np.inf + if x_tol is None: + x_tol = np.inf + if x_rtol is None: + x_rtol = np.inf + + self.x_tol = x_tol + self.x_rtol = x_rtol + self.f_tol = f_tol + self.f_rtol = f_rtol + + self.norm = norm + + self.iter = iter + + self.f0_norm = None + self.iteration = 0 + + def check(self, f, x, dx): + self.iteration += 1 + f_norm = self.norm(f) + x_norm = self.norm(x) + dx_norm = self.norm(dx) + + if self.f0_norm is None: + self.f0_norm = f_norm + + if f_norm == 0: + return 1 + + if self.iter is not None: + # backwards compatibility with SciPy 0.6.0 + return 2 * (self.iteration > self.iter) + + # NB: condition must succeed for rtol=inf even if norm == 0 + return int((f_norm <= self.f_tol + and f_norm/self.f_rtol <= self.f0_norm) + and (dx_norm <= self.x_tol + and dx_norm/self.x_rtol <= x_norm)) + + +#------------------------------------------------------------------------------ +# Generic Jacobian approximation +#------------------------------------------------------------------------------ + +class Jacobian: + """ + Common interface for Jacobians or Jacobian approximations. + + The optional methods come useful when implementing trust region + etc., algorithms that often require evaluating transposes of the + Jacobian. + + Methods + ------- + solve + Returns J^-1 * v + update + Updates Jacobian to point `x` (where the function has residual `Fx`) + + matvec : optional + Returns J * v + rmatvec : optional + Returns A^H * v + rsolve : optional + Returns A^-H * v + matmat : optional + Returns A * V, where V is a dense matrix with dimensions (N,K). + todense : optional + Form the dense Jacobian matrix. Necessary for dense trust region + algorithms, and useful for testing. + + Attributes + ---------- + shape + Matrix dimensions (M, N) + dtype + Data type of the matrix. + func : callable, optional + Function the Jacobian corresponds to + + """ + + def __init__(self, **kw): + names = ["solve", "update", "matvec", "rmatvec", "rsolve", + "matmat", "todense", "shape", "dtype"] + for name, value in kw.items(): + if name not in names: + raise ValueError("Unknown keyword argument %s" % name) + if value is not None: + setattr(self, name, kw[name]) + + + if hasattr(self, "todense"): + def __array__(self, dtype=None, copy=None): + if dtype is not None: + raise ValueError(f"`dtype` must be None, was {dtype}") + return self.todense() + + def aspreconditioner(self): + return InverseJacobian(self) + + def solve(self, v, tol=0): + raise NotImplementedError + + def update(self, x, F): + pass + + def setup(self, x, F, func): + self.func = func + self.shape = (F.size, x.size) + self.dtype = F.dtype + if self.__class__.setup is Jacobian.setup: + # Call on the first point unless overridden + self.update(x, F) + + +class InverseJacobian: + def __init__(self, jacobian): + self.jacobian = jacobian + self.matvec = jacobian.solve + self.update = jacobian.update + if hasattr(jacobian, 'setup'): + self.setup = jacobian.setup + if hasattr(jacobian, 'rsolve'): + self.rmatvec = jacobian.rsolve + + @property + def shape(self): + return self.jacobian.shape + + @property + def dtype(self): + return self.jacobian.dtype + + +def asjacobian(J): + """ + Convert given object to one suitable for use as a Jacobian. 
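+
+    Accepted inputs (handled below) include `Jacobian` instances or
+    subclasses, dense arrays, sparse matrices, objects exposing
+    ``shape``/``dtype``/``solve``, callables returning the Jacobian at a
+    point, and strings naming the built-in approximations.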
+ """ + spsolve = scipy.sparse.linalg.spsolve + if isinstance(J, Jacobian): + return J + elif inspect.isclass(J) and issubclass(J, Jacobian): + return J() + elif isinstance(J, np.ndarray): + if J.ndim > 2: + raise ValueError('array must have rank <= 2') + J = np.atleast_2d(np.asarray(J)) + if J.shape[0] != J.shape[1]: + raise ValueError('array must be square') + + return Jacobian(matvec=lambda v: dot(J, v), + rmatvec=lambda v: dot(J.conj().T, v), + solve=lambda v, tol=0: solve(J, v), + rsolve=lambda v, tol=0: solve(J.conj().T, v), + dtype=J.dtype, shape=J.shape) + elif scipy.sparse.issparse(J): + if J.shape[0] != J.shape[1]: + raise ValueError('matrix must be square') + return Jacobian(matvec=lambda v: J @ v, + rmatvec=lambda v: J.conj().T @ v, + solve=lambda v, tol=0: spsolve(J, v), + rsolve=lambda v, tol=0: spsolve(J.conj().T, v), + dtype=J.dtype, shape=J.shape) + elif hasattr(J, 'shape') and hasattr(J, 'dtype') and hasattr(J, 'solve'): + return Jacobian(matvec=getattr(J, 'matvec'), + rmatvec=getattr(J, 'rmatvec'), + solve=J.solve, + rsolve=getattr(J, 'rsolve'), + update=getattr(J, 'update'), + setup=getattr(J, 'setup'), + dtype=J.dtype, + shape=J.shape) + elif callable(J): + # Assume it's a function J(x) that returns the Jacobian + class Jac(Jacobian): + def update(self, x, F): + self.x = x + + def solve(self, v, tol=0): + m = J(self.x) + if isinstance(m, np.ndarray): + return solve(m, v) + elif scipy.sparse.issparse(m): + return spsolve(m, v) + else: + raise ValueError("Unknown matrix type") + + def matvec(self, v): + m = J(self.x) + if isinstance(m, np.ndarray): + return dot(m, v) + elif scipy.sparse.issparse(m): + return m @ v + else: + raise ValueError("Unknown matrix type") + + def rsolve(self, v, tol=0): + m = J(self.x) + if isinstance(m, np.ndarray): + return solve(m.conj().T, v) + elif scipy.sparse.issparse(m): + return spsolve(m.conj().T, v) + else: + raise ValueError("Unknown matrix type") + + def rmatvec(self, v): + m = J(self.x) + if isinstance(m, np.ndarray): + return dot(m.conj().T, v) + elif scipy.sparse.issparse(m): + return m.conj().T @ v + else: + raise ValueError("Unknown matrix type") + return Jac() + elif isinstance(J, str): + return dict(broyden1=BroydenFirst, + broyden2=BroydenSecond, + anderson=Anderson, + diagbroyden=DiagBroyden, + linearmixing=LinearMixing, + excitingmixing=ExcitingMixing, + krylov=KrylovJacobian)[J]() + else: + raise TypeError('Cannot convert object to a Jacobian') + + +#------------------------------------------------------------------------------ +# Broyden +#------------------------------------------------------------------------------ + +class GenericBroyden(Jacobian): + def setup(self, x0, f0, func): + Jacobian.setup(self, x0, f0, func) + self.last_f = f0 + self.last_x = x0 + + if hasattr(self, 'alpha') and self.alpha is None: + # Autoscale the initial Jacobian parameter + # unless we have already guessed the solution. + normf0 = norm(f0) + if normf0: + self.alpha = 0.5*max(norm(x0), 1) / normf0 + else: + self.alpha = 1.0 + + def _update(self, x, f, dx, df, dx_norm, df_norm): + raise NotImplementedError + + def update(self, x, f): + df = f - self.last_f + dx = x - self.last_x + self._update(x, f, dx, df, norm(dx), norm(df)) + self.last_f = f + self.last_x = x + + +class LowRankMatrix: + r""" + A matrix represented as + + .. math:: \alpha I + \sum_{n=0}^{n=M} c_n d_n^\dagger + + However, if the rank of the matrix reaches the dimension of the vectors, + full matrix representation will be used thereon. 
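+
+    As a small illustration (this is SciPy-internal machinery; the import path
+    below is not public API)::
+
+        import numpy as np
+        from scipy.optimize._nonlin import LowRankMatrix
+
+        M = LowRankMatrix(-1.0, 3, float)                  # represents -I
+        M.append(np.array([1., 2., 3.]), np.array([0.5, 0.25, -0.5]))
+        v = np.ones(3)
+        np.allclose(M.matvec(v), np.asarray(M) @ v)        # True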
+ + """ + + def __init__(self, alpha, n, dtype): + self.alpha = alpha + self.cs = [] + self.ds = [] + self.n = n + self.dtype = dtype + self.collapsed = None + + @staticmethod + def _matvec(v, alpha, cs, ds): + axpy, scal, dotc = get_blas_funcs(['axpy', 'scal', 'dotc'], + cs[:1] + [v]) + w = alpha * v + for c, d in zip(cs, ds): + a = dotc(d, v) + w = axpy(c, w, w.size, a) + return w + + @staticmethod + def _solve(v, alpha, cs, ds): + """Evaluate w = M^-1 v""" + if len(cs) == 0: + return v/alpha + + # (B + C D^H)^-1 = B^-1 - B^-1 C (I + D^H B^-1 C)^-1 D^H B^-1 + + axpy, dotc = get_blas_funcs(['axpy', 'dotc'], cs[:1] + [v]) + + c0 = cs[0] + A = alpha * np.identity(len(cs), dtype=c0.dtype) + for i, d in enumerate(ds): + for j, c in enumerate(cs): + A[i,j] += dotc(d, c) + + q = np.zeros(len(cs), dtype=c0.dtype) + for j, d in enumerate(ds): + q[j] = dotc(d, v) + q /= alpha + q = solve(A, q) + + w = v/alpha + for c, qc in zip(cs, q): + w = axpy(c, w, w.size, -qc) + + return w + + def matvec(self, v): + """Evaluate w = M v""" + if self.collapsed is not None: + return np.dot(self.collapsed, v) + return LowRankMatrix._matvec(v, self.alpha, self.cs, self.ds) + + def rmatvec(self, v): + """Evaluate w = M^H v""" + if self.collapsed is not None: + return np.dot(self.collapsed.T.conj(), v) + return LowRankMatrix._matvec(v, np.conj(self.alpha), self.ds, self.cs) + + def solve(self, v, tol=0): + """Evaluate w = M^-1 v""" + if self.collapsed is not None: + return solve(self.collapsed, v) + return LowRankMatrix._solve(v, self.alpha, self.cs, self.ds) + + def rsolve(self, v, tol=0): + """Evaluate w = M^-H v""" + if self.collapsed is not None: + return solve(self.collapsed.T.conj(), v) + return LowRankMatrix._solve(v, np.conj(self.alpha), self.ds, self.cs) + + def append(self, c, d): + if self.collapsed is not None: + self.collapsed += c[:,None] * d[None,:].conj() + return + + self.cs.append(c) + self.ds.append(d) + + if len(self.cs) > c.size: + self.collapse() + + def __array__(self, dtype=None, copy=None): + if dtype is not None: + warnings.warn("LowRankMatrix is scipy-internal code, `dtype` " + f"should only be None but was {dtype} (not handled)", + stacklevel=3) + if copy is not None: + warnings.warn("LowRankMatrix is scipy-internal code, `copy` " + f"should only be None but was {copy} (not handled)", + stacklevel=3) + if self.collapsed is not None: + return self.collapsed + + Gm = self.alpha*np.identity(self.n, dtype=self.dtype) + for c, d in zip(self.cs, self.ds): + Gm += c[:,None]*d[None,:].conj() + return Gm + + def collapse(self): + """Collapse the low-rank matrix to a full-rank one.""" + self.collapsed = np.array(self) + self.cs = None + self.ds = None + self.alpha = None + + def restart_reduce(self, rank): + """ + Reduce the rank of the matrix by dropping all vectors. + """ + if self.collapsed is not None: + return + assert rank > 0 + if len(self.cs) > rank: + del self.cs[:] + del self.ds[:] + + def simple_reduce(self, rank): + """ + Reduce the rank of the matrix by dropping oldest vectors. + """ + if self.collapsed is not None: + return + assert rank > 0 + while len(self.cs) > rank: + del self.cs[0] + del self.ds[0] + + def svd_reduce(self, max_rank, to_retain=None): + """ + Reduce the rank of the matrix by retaining some SVD components. + + This corresponds to the \"Broyden Rank Reduction Inverse\" + algorithm described in [1]_. + + Note that the SVD decomposition can be done by solving only a + problem whose size is the effective rank of this matrix, which + is viable even for large problems. 
+ + Parameters + ---------- + max_rank : int + Maximum rank of this matrix after reduction. + to_retain : int, optional + Number of SVD components to retain when reduction is done + (ie. rank > max_rank). Default is ``max_rank - 2``. + + References + ---------- + .. [1] B.A. van der Rotten, PhD thesis, + \"A limited memory Broyden method to solve high-dimensional + systems of nonlinear equations\". Mathematisch Instituut, + Universiteit Leiden, The Netherlands (2003). + + https://web.archive.org/web/20161022015821/http://www.math.leidenuniv.nl/scripties/Rotten.pdf + + """ + if self.collapsed is not None: + return + + p = max_rank + if to_retain is not None: + q = to_retain + else: + q = p - 2 + + if self.cs: + p = min(p, len(self.cs[0])) + q = max(0, min(q, p-1)) + + m = len(self.cs) + if m < p: + # nothing to do + return + + C = np.array(self.cs).T + D = np.array(self.ds).T + + D, R = qr(D, mode='economic') + C = dot(C, R.T.conj()) + + U, S, WH = svd(C, full_matrices=False) + + C = dot(C, inv(WH)) + D = dot(D, WH.T.conj()) + + for k in range(q): + self.cs[k] = C[:,k].copy() + self.ds[k] = D[:,k].copy() + + del self.cs[q:] + del self.ds[q:] + + +_doc_parts['broyden_params'] = """ + alpha : float, optional + Initial guess for the Jacobian is ``(-1/alpha)``. + reduction_method : str or tuple, optional + Method used in ensuring that the rank of the Broyden matrix + stays low. Can either be a string giving the name of the method, + or a tuple of the form ``(method, param1, param2, ...)`` + that gives the name of the method and values for additional parameters. + + Methods available: + + - ``restart``: drop all matrix columns. Has no extra parameters. + - ``simple``: drop oldest matrix column. Has no extra parameters. + - ``svd``: keep only the most significant SVD components. + Takes an extra parameter, ``to_retain``, which determines the + number of SVD components to retain when rank reduction is done. + Default is ``max_rank - 2``. + + max_rank : int, optional + Maximum rank for the Broyden matrix. + Default is infinity (i.e., no rank reduction). + """.strip() + + +class BroydenFirst(GenericBroyden): + r""" + Find a root of a function, using Broyden's first Jacobian approximation. + + This method is also known as \"Broyden's good method\". + + Parameters + ---------- + %(params_basic)s + %(broyden_params)s + %(params_extra)s + + See Also + -------- + root : Interface to root finding algorithms for multivariate + functions. See ``method='broyden1'`` in particular. + + Notes + ----- + This algorithm implements the inverse Jacobian Quasi-Newton update + + .. math:: H_+ = H + (dx - H df) dx^\dagger H / ( dx^\dagger H df) + + which corresponds to Broyden's first Jacobian update + + .. math:: J_+ = J + (df - J dx) dx^\dagger / dx^\dagger dx + + + References + ---------- + .. [1] B.A. van der Rotten, PhD thesis, + \"A limited memory Broyden method to solve high-dimensional + systems of nonlinear equations\". Mathematisch Instituut, + Universiteit Leiden, The Netherlands (2003). + + https://web.archive.org/web/20161022015821/http://www.math.leidenuniv.nl/scripties/Rotten.pdf + + Examples + -------- + The following functions define a system of nonlinear equations + + >>> def fun(x): + ... return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0, + ... 0.5 * (x[1] - x[0])**3 + x[1]] + + A solution can be obtained as follows. 
+ + >>> from scipy import optimize + >>> sol = optimize.broyden1(fun, [0, 0]) + >>> sol + array([0.84116396, 0.15883641]) + + """ + + def __init__(self, alpha=None, reduction_method='restart', max_rank=None): + GenericBroyden.__init__(self) + self.alpha = alpha + self.Gm = None + + if max_rank is None: + max_rank = np.inf + self.max_rank = max_rank + + if isinstance(reduction_method, str): + reduce_params = () + else: + reduce_params = reduction_method[1:] + reduction_method = reduction_method[0] + reduce_params = (max_rank - 1,) + reduce_params + + if reduction_method == 'svd': + self._reduce = lambda: self.Gm.svd_reduce(*reduce_params) + elif reduction_method == 'simple': + self._reduce = lambda: self.Gm.simple_reduce(*reduce_params) + elif reduction_method == 'restart': + self._reduce = lambda: self.Gm.restart_reduce(*reduce_params) + else: + raise ValueError("Unknown rank reduction method '%s'" % + reduction_method) + + def setup(self, x, F, func): + GenericBroyden.setup(self, x, F, func) + self.Gm = LowRankMatrix(-self.alpha, self.shape[0], self.dtype) + + def todense(self): + return inv(self.Gm) + + def solve(self, f, tol=0): + r = self.Gm.matvec(f) + if not np.isfinite(r).all(): + # singular; reset the Jacobian approximation + self.setup(self.last_x, self.last_f, self.func) + return self.Gm.matvec(f) + return r + + def matvec(self, f): + return self.Gm.solve(f) + + def rsolve(self, f, tol=0): + return self.Gm.rmatvec(f) + + def rmatvec(self, f): + return self.Gm.rsolve(f) + + def _update(self, x, f, dx, df, dx_norm, df_norm): + self._reduce() # reduce first to preserve secant condition + + v = self.Gm.rmatvec(dx) + c = dx - self.Gm.matvec(df) + d = v / vdot(df, v) + + self.Gm.append(c, d) + + +class BroydenSecond(BroydenFirst): + """ + Find a root of a function, using Broyden\'s second Jacobian approximation. + + This method is also known as \"Broyden's bad method\". + + Parameters + ---------- + %(params_basic)s + %(broyden_params)s + %(params_extra)s + + See Also + -------- + root : Interface to root finding algorithms for multivariate + functions. See ``method='broyden2'`` in particular. + + Notes + ----- + This algorithm implements the inverse Jacobian Quasi-Newton update + + .. math:: H_+ = H + (dx - H df) df^\\dagger / ( df^\\dagger df) + + corresponding to Broyden's second method. + + References + ---------- + .. [1] B.A. van der Rotten, PhD thesis, + \"A limited memory Broyden method to solve high-dimensional + systems of nonlinear equations\". Mathematisch Instituut, + Universiteit Leiden, The Netherlands (2003). + + https://web.archive.org/web/20161022015821/http://www.math.leidenuniv.nl/scripties/Rotten.pdf + + Examples + -------- + The following functions define a system of nonlinear equations + + >>> def fun(x): + ... return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0, + ... 0.5 * (x[1] - x[0])**3 + x[1]] + + A solution can be obtained as follows. + + >>> from scipy import optimize + >>> sol = optimize.broyden2(fun, [0, 0]) + >>> sol + array([0.84116365, 0.15883529]) + + """ + + def _update(self, x, f, dx, df, dx_norm, df_norm): + self._reduce() # reduce first to preserve secant condition + + v = df + c = dx - self.Gm.matvec(df) + d = v / df_norm**2 + self.Gm.append(c, d) + + +#------------------------------------------------------------------------------ +# Broyden-like (restricted memory) +#------------------------------------------------------------------------------ + +class Anderson(GenericBroyden): + """ + Find a root of a function, using (extended) Anderson mixing. 
+ + The Jacobian is formed by for a 'best' solution in the space + spanned by last `M` vectors. As a result, only a MxM matrix + inversions and MxN multiplications are required. [Ey]_ + + Parameters + ---------- + %(params_basic)s + alpha : float, optional + Initial guess for the Jacobian is (-1/alpha). + M : float, optional + Number of previous vectors to retain. Defaults to 5. + w0 : float, optional + Regularization parameter for numerical stability. + Compared to unity, good values of the order of 0.01. + %(params_extra)s + + See Also + -------- + root : Interface to root finding algorithms for multivariate + functions. See ``method='anderson'`` in particular. + + References + ---------- + .. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996). + + Examples + -------- + The following functions define a system of nonlinear equations + + >>> def fun(x): + ... return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0, + ... 0.5 * (x[1] - x[0])**3 + x[1]] + + A solution can be obtained as follows. + + >>> from scipy import optimize + >>> sol = optimize.anderson(fun, [0, 0]) + >>> sol + array([0.84116588, 0.15883789]) + + """ + + # Note: + # + # Anderson method maintains a rank M approximation of the inverse Jacobian, + # + # J^-1 v ~ -v*alpha + (dX + alpha dF) A^-1 dF^H v + # A = W + dF^H dF + # W = w0^2 diag(dF^H dF) + # + # so that for w0 = 0 the secant condition applies for last M iterates, i.e., + # + # J^-1 df_j = dx_j + # + # for all j = 0 ... M-1. + # + # Moreover, (from Sherman-Morrison-Woodbury formula) + # + # J v ~ [ b I - b^2 C (I + b dF^H A^-1 C)^-1 dF^H ] v + # C = (dX + alpha dF) A^-1 + # b = -1/alpha + # + # and after simplification + # + # J v ~ -v/alpha + (dX/alpha + dF) (dF^H dX - alpha W)^-1 dF^H v + # + + def __init__(self, alpha=None, w0=0.01, M=5): + GenericBroyden.__init__(self) + self.alpha = alpha + self.M = M + self.dx = [] + self.df = [] + self.gamma = None + self.w0 = w0 + + def solve(self, f, tol=0): + dx = -self.alpha*f + + n = len(self.dx) + if n == 0: + return dx + + df_f = np.empty(n, dtype=f.dtype) + for k in range(n): + df_f[k] = vdot(self.df[k], f) + + try: + gamma = solve(self.a, df_f) + except LinAlgError: + # singular; reset the Jacobian approximation + del self.dx[:] + del self.df[:] + return dx + + for m in range(n): + dx += gamma[m]*(self.dx[m] + self.alpha*self.df[m]) + return dx + + def matvec(self, f): + dx = -f/self.alpha + + n = len(self.dx) + if n == 0: + return dx + + df_f = np.empty(n, dtype=f.dtype) + for k in range(n): + df_f[k] = vdot(self.df[k], f) + + b = np.empty((n, n), dtype=f.dtype) + for i in range(n): + for j in range(n): + b[i,j] = vdot(self.df[i], self.dx[j]) + if i == j and self.w0 != 0: + b[i,j] -= vdot(self.df[i], self.df[i])*self.w0**2*self.alpha + gamma = solve(b, df_f) + + for m in range(n): + dx += gamma[m]*(self.df[m] + self.dx[m]/self.alpha) + return dx + + def _update(self, x, f, dx, df, dx_norm, df_norm): + if self.M == 0: + return + + self.dx.append(dx) + self.df.append(df) + + while len(self.dx) > self.M: + self.dx.pop(0) + self.df.pop(0) + + n = len(self.dx) + a = np.zeros((n, n), dtype=f.dtype) + + for i in range(n): + for j in range(i, n): + if i == j: + wd = self.w0**2 + else: + wd = 0 + a[i,j] = (1+wd)*vdot(self.df[i], self.df[j]) + + a += np.triu(a, 1).T.conj() + self.a = a + +#------------------------------------------------------------------------------ +# Simple iterations +#------------------------------------------------------------------------------ + + +class DiagBroyden(GenericBroyden): + """ + Find a root of a 
function, using diagonal Broyden Jacobian approximation. + + The Jacobian approximation is derived from previous iterations, by + retaining only the diagonal of Broyden matrices. + + .. warning:: + + This algorithm may be useful for specific problems, but whether + it will work may depend strongly on the problem. + + Parameters + ---------- + %(params_basic)s + alpha : float, optional + Initial guess for the Jacobian is (-1/alpha). + %(params_extra)s + + See Also + -------- + root : Interface to root finding algorithms for multivariate + functions. See ``method='diagbroyden'`` in particular. + + Examples + -------- + The following functions define a system of nonlinear equations + + >>> def fun(x): + ... return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0, + ... 0.5 * (x[1] - x[0])**3 + x[1]] + + A solution can be obtained as follows. + + >>> from scipy import optimize + >>> sol = optimize.diagbroyden(fun, [0, 0]) + >>> sol + array([0.84116403, 0.15883384]) + + """ + + def __init__(self, alpha=None): + GenericBroyden.__init__(self) + self.alpha = alpha + + def setup(self, x, F, func): + GenericBroyden.setup(self, x, F, func) + self.d = np.full((self.shape[0],), 1 / self.alpha, dtype=self.dtype) + + def solve(self, f, tol=0): + return -f / self.d + + def matvec(self, f): + return -f * self.d + + def rsolve(self, f, tol=0): + return -f / self.d.conj() + + def rmatvec(self, f): + return -f * self.d.conj() + + def todense(self): + return np.diag(-self.d) + + def _update(self, x, f, dx, df, dx_norm, df_norm): + self.d -= (df + self.d*dx)*dx/dx_norm**2 + + +class LinearMixing(GenericBroyden): + """ + Find a root of a function, using a scalar Jacobian approximation. + + .. warning:: + + This algorithm may be useful for specific problems, but whether + it will work may depend strongly on the problem. + + Parameters + ---------- + %(params_basic)s + alpha : float, optional + The Jacobian approximation is (-1/alpha). + %(params_extra)s + + See Also + -------- + root : Interface to root finding algorithms for multivariate + functions. See ``method='linearmixing'`` in particular. + + """ + + def __init__(self, alpha=None): + GenericBroyden.__init__(self) + self.alpha = alpha + + def solve(self, f, tol=0): + return -f*self.alpha + + def matvec(self, f): + return -f/self.alpha + + def rsolve(self, f, tol=0): + return -f*np.conj(self.alpha) + + def rmatvec(self, f): + return -f/np.conj(self.alpha) + + def todense(self): + return np.diag(np.full(self.shape[0], -1/self.alpha)) + + def _update(self, x, f, dx, df, dx_norm, df_norm): + pass + + +class ExcitingMixing(GenericBroyden): + """ + Find a root of a function, using a tuned diagonal Jacobian approximation. + + The Jacobian matrix is diagonal and is tuned on each iteration. + + .. warning:: + + This algorithm may be useful for specific problems, but whether + it will work may depend strongly on the problem. + + See Also + -------- + root : Interface to root finding algorithms for multivariate + functions. See ``method='excitingmixing'`` in particular. + + Parameters + ---------- + %(params_basic)s + alpha : float, optional + Initial Jacobian approximation is (-1/alpha). + alphamax : float, optional + The entries of the diagonal Jacobian are kept in the range + ``[alpha, alphamax]``. 
+ %(params_extra)s + """ + + def __init__(self, alpha=None, alphamax=1.0): + GenericBroyden.__init__(self) + self.alpha = alpha + self.alphamax = alphamax + self.beta = None + + def setup(self, x, F, func): + GenericBroyden.setup(self, x, F, func) + self.beta = np.full((self.shape[0],), self.alpha, dtype=self.dtype) + + def solve(self, f, tol=0): + return -f*self.beta + + def matvec(self, f): + return -f/self.beta + + def rsolve(self, f, tol=0): + return -f*self.beta.conj() + + def rmatvec(self, f): + return -f/self.beta.conj() + + def todense(self): + return np.diag(-1/self.beta) + + def _update(self, x, f, dx, df, dx_norm, df_norm): + incr = f*self.last_f > 0 + self.beta[incr] += self.alpha + self.beta[~incr] = self.alpha + np.clip(self.beta, 0, self.alphamax, out=self.beta) + + +#------------------------------------------------------------------------------ +# Iterative/Krylov approximated Jacobians +#------------------------------------------------------------------------------ + +class KrylovJacobian(Jacobian): + r""" + Find a root of a function, using Krylov approximation for inverse Jacobian. + + This method is suitable for solving large-scale problems. + + Parameters + ---------- + %(params_basic)s + rdiff : float, optional + Relative step size to use in numerical differentiation. + method : str or callable, optional + Krylov method to use to approximate the Jacobian. Can be a string, + or a function implementing the same interface as the iterative + solvers in `scipy.sparse.linalg`. If a string, needs to be one of: + ``'lgmres'``, ``'gmres'``, ``'bicgstab'``, ``'cgs'``, ``'minres'``, + ``'tfqmr'``. + + The default is `scipy.sparse.linalg.lgmres`. + inner_maxiter : int, optional + Parameter to pass to the "inner" Krylov solver: maximum number of + iterations. Iteration will stop after maxiter steps even if the + specified tolerance has not been achieved. + inner_M : LinearOperator or InverseJacobian + Preconditioner for the inner Krylov iteration. + Note that you can use also inverse Jacobians as (adaptive) + preconditioners. For example, + + >>> from scipy.optimize import BroydenFirst, KrylovJacobian + >>> from scipy.optimize import InverseJacobian + >>> jac = BroydenFirst() + >>> kjac = KrylovJacobian(inner_M=InverseJacobian(jac)) + + If the preconditioner has a method named 'update', it will be called + as ``update(x, f)`` after each nonlinear step, with ``x`` giving + the current point, and ``f`` the current function value. + outer_k : int, optional + Size of the subspace kept across LGMRES nonlinear iterations. + See `scipy.sparse.linalg.lgmres` for details. + inner_kwargs : kwargs + Keyword parameters for the "inner" Krylov solver + (defined with `method`). Parameter names must start with + the `inner_` prefix which will be stripped before passing on + the inner method. See, e.g., `scipy.sparse.linalg.gmres` for details. + %(params_extra)s + + See Also + -------- + root : Interface to root finding algorithms for multivariate + functions. See ``method='krylov'`` in particular. + scipy.sparse.linalg.gmres + scipy.sparse.linalg.lgmres + + Notes + ----- + This function implements a Newton-Krylov solver. The basic idea is + to compute the inverse of the Jacobian with an iterative Krylov + method. These methods require only evaluating the Jacobian-vector + products, which are conveniently approximated by a finite difference: + + .. 
math:: J v \approx (f(x + \omega*v/|v|) - f(x)) / \omega + + Due to the use of iterative matrix inverses, these methods can + deal with large nonlinear problems. + + SciPy's `scipy.sparse.linalg` module offers a selection of Krylov + solvers to choose from. The default here is `lgmres`, which is a + variant of restarted GMRES iteration that reuses some of the + information obtained in the previous Newton steps to invert + Jacobians in subsequent steps. + + For a review on Newton-Krylov methods, see for example [1]_, + and for the LGMRES sparse inverse method, see [2]_. + + References + ---------- + .. [1] C. T. Kelley, Solving Nonlinear Equations with Newton's Method, + SIAM, pp.57-83, 2003. + :doi:`10.1137/1.9780898718898.ch3` + .. [2] D.A. Knoll and D.E. Keyes, J. Comp. Phys. 193, 357 (2004). + :doi:`10.1016/j.jcp.2003.08.010` + .. [3] A.H. Baker and E.R. Jessup and T. Manteuffel, + SIAM J. Matrix Anal. Appl. 26, 962 (2005). + :doi:`10.1137/S0895479803422014` + + Examples + -------- + The following functions define a system of nonlinear equations + + >>> def fun(x): + ... return [x[0] + 0.5 * x[1] - 1.0, + ... 0.5 * (x[1] - x[0]) ** 2] + + A solution can be obtained as follows. + + >>> from scipy import optimize + >>> sol = optimize.newton_krylov(fun, [0, 0]) + >>> sol + array([0.66731771, 0.66536458]) + + """ + + def __init__(self, rdiff=None, method='lgmres', inner_maxiter=20, + inner_M=None, outer_k=10, **kw): + self.preconditioner = inner_M + self.rdiff = rdiff + # Note that this retrieves one of the named functions, or otherwise + # uses `method` as is (i.e., for a user-provided callable). + self.method = dict( + bicgstab=scipy.sparse.linalg.bicgstab, + gmres=scipy.sparse.linalg.gmres, + lgmres=scipy.sparse.linalg.lgmres, + cgs=scipy.sparse.linalg.cgs, + minres=scipy.sparse.linalg.minres, + tfqmr=scipy.sparse.linalg.tfqmr, + ).get(method, method) + + self.method_kw = dict(maxiter=inner_maxiter, M=self.preconditioner) + + if self.method is scipy.sparse.linalg.gmres: + # Replace GMRES's outer iteration with Newton steps + self.method_kw['restart'] = inner_maxiter + self.method_kw['maxiter'] = 1 + self.method_kw.setdefault('atol', 0) + elif self.method in (scipy.sparse.linalg.gcrotmk, + scipy.sparse.linalg.bicgstab, + scipy.sparse.linalg.cgs): + self.method_kw.setdefault('atol', 0) + elif self.method is scipy.sparse.linalg.lgmres: + self.method_kw['outer_k'] = outer_k + # Replace LGMRES's outer iteration with Newton steps + self.method_kw['maxiter'] = 1 + # Carry LGMRES's `outer_v` vectors across nonlinear iterations + self.method_kw.setdefault('outer_v', []) + self.method_kw.setdefault('prepend_outer_v', True) + # But don't carry the corresponding Jacobian*v products, in case + # the Jacobian changes a lot in the nonlinear step + # + # XXX: some trust-region inspired ideas might be more efficient... + # See e.g., Brown & Saad. But needs to be implemented separately + # since it's not an inexact Newton method. 
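+            #
+            # With the settings chosen in this branch (including the two
+            # defaults set just below), `solve` ends up invoking the inner
+            # solver roughly as (illustrative sketch only):
+            #
+            #   lgmres(self.op, rhs, rtol=tol, maxiter=1, M=self.preconditioner,
+            #          outer_k=outer_k, outer_v=[...], prepend_outer_v=True,
+            #          store_outer_Av=False, atol=0)
+            #
+            # i.e. one inner solve per Newton step, with the `outer_v`
+            # vectors shared across nonlinear iterations.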
+ self.method_kw.setdefault('store_outer_Av', False) + self.method_kw.setdefault('atol', 0) + + for key, value in kw.items(): + if not key.startswith('inner_'): + raise ValueError("Unknown parameter %s" % key) + self.method_kw[key[6:]] = value + + def _update_diff_step(self): + mx = abs(self.x0).max() + mf = abs(self.f0).max() + self.omega = self.rdiff * max(1, mx) / max(1, mf) + + def matvec(self, v): + nv = norm(v) + if nv == 0: + return 0*v + sc = self.omega / nv + r = (self.func(self.x0 + sc*v) - self.f0) / sc + if not np.all(np.isfinite(r)) and np.all(np.isfinite(v)): + raise ValueError('Function returned non-finite results') + return r + + def solve(self, rhs, tol=0): + if 'rtol' in self.method_kw: + sol, info = self.method(self.op, rhs, **self.method_kw) + else: + sol, info = self.method(self.op, rhs, rtol=tol, **self.method_kw) + return sol + + def update(self, x, f): + self.x0 = x + self.f0 = f + self._update_diff_step() + + # Update also the preconditioner, if possible + if self.preconditioner is not None: + if hasattr(self.preconditioner, 'update'): + self.preconditioner.update(x, f) + + def setup(self, x, f, func): + Jacobian.setup(self, x, f, func) + self.x0 = x + self.f0 = f + self.op = scipy.sparse.linalg.aslinearoperator(self) + + if self.rdiff is None: + self.rdiff = np.finfo(x.dtype).eps ** (1./2) + + self._update_diff_step() + + # Setup also the preconditioner, if possible + if self.preconditioner is not None: + if hasattr(self.preconditioner, 'setup'): + self.preconditioner.setup(x, f, func) + + +#------------------------------------------------------------------------------ +# Wrapper functions +#------------------------------------------------------------------------------ + +def _nonlin_wrapper(name, jac): + """ + Construct a solver wrapper with given name and Jacobian approx. + + It inspects the keyword arguments of ``jac.__init__``, and allows to + use the same arguments in the wrapper function, in addition to the + keyword arguments of `nonlin_solve` + + """ + signature = _getfullargspec(jac.__init__) + args, varargs, varkw, defaults, kwonlyargs, kwdefaults, _ = signature + kwargs = list(zip(args[-len(defaults):], defaults)) + kw_str = ", ".join([f"{k}={v!r}" for k, v in kwargs]) + if kw_str: + kw_str = ", " + kw_str + kwkw_str = ", ".join([f"{k}={k}" for k, v in kwargs]) + if kwkw_str: + kwkw_str = kwkw_str + ", " + if kwonlyargs: + raise ValueError('Unexpected signature %s' % signature) + + # Construct the wrapper function so that its keyword arguments + # are visible in pydoc.help etc. 
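+    #
+    # As an illustration (not generated verbatim): for `BroydenFirst`, whose
+    # __init__ takes ``alpha=None, reduction_method='restart', max_rank=None``,
+    # the template below expands to approximately
+    #
+    #   def broyden1(F, xin, iter=None, alpha=None, reduction_method='restart',
+    #                max_rank=None, verbose=False, maxiter=None, ..., **kw):
+    #       jac = BroydenFirst(alpha=alpha, reduction_method=reduction_method,
+    #                          max_rank=max_rank, **kw)
+    #       return nonlin_solve(F, xin, jac, iter, verbose, maxiter, ...)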
+ wrapper = """ +def %(name)s(F, xin, iter=None %(kw)s, verbose=False, maxiter=None, + f_tol=None, f_rtol=None, x_tol=None, x_rtol=None, + tol_norm=None, line_search='armijo', callback=None, **kw): + jac = %(jac)s(%(kwkw)s **kw) + return nonlin_solve(F, xin, jac, iter, verbose, maxiter, + f_tol, f_rtol, x_tol, x_rtol, tol_norm, line_search, + callback) +""" + + wrapper = wrapper % dict(name=name, kw=kw_str, jac=jac.__name__, + kwkw=kwkw_str) + ns = {} + ns.update(globals()) + exec(wrapper, ns) + func = ns[name] + func.__doc__ = jac.__doc__ + _set_doc(func) + return func + + +broyden1 = _nonlin_wrapper('broyden1', BroydenFirst) +broyden2 = _nonlin_wrapper('broyden2', BroydenSecond) +anderson = _nonlin_wrapper('anderson', Anderson) +linearmixing = _nonlin_wrapper('linearmixing', LinearMixing) +diagbroyden = _nonlin_wrapper('diagbroyden', DiagBroyden) +excitingmixing = _nonlin_wrapper('excitingmixing', ExcitingMixing) +newton_krylov = _nonlin_wrapper('newton_krylov', KrylovJacobian) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_numdiff.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_numdiff.py new file mode 100644 index 0000000000000000000000000000000000000000..d6bd0d37a460b4279adbb55f4cbf067233aa16ed --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_numdiff.py @@ -0,0 +1,775 @@ +"""Routines for numerical differentiation.""" +import functools +import numpy as np +from numpy.linalg import norm + +from scipy.sparse.linalg import LinearOperator +from ..sparse import issparse, csc_matrix, csr_matrix, coo_matrix, find +from ._group_columns import group_dense, group_sparse +from scipy._lib._array_api import atleast_nd, array_namespace + + +def _adjust_scheme_to_bounds(x0, h, num_steps, scheme, lb, ub): + """Adjust final difference scheme to the presence of bounds. + + Parameters + ---------- + x0 : ndarray, shape (n,) + Point at which we wish to estimate derivative. + h : ndarray, shape (n,) + Desired absolute finite difference steps. + num_steps : int + Number of `h` steps in one direction required to implement finite + difference scheme. For example, 2 means that we need to evaluate + f(x0 + 2 * h) or f(x0 - 2 * h) + scheme : {'1-sided', '2-sided'} + Whether steps in one or both directions are required. In other + words '1-sided' applies to forward and backward schemes, '2-sided' + applies to center schemes. + lb : ndarray, shape (n,) + Lower bounds on independent variables. + ub : ndarray, shape (n,) + Upper bounds on independent variables. + + Returns + ------- + h_adjusted : ndarray, shape (n,) + Adjusted absolute step sizes. Step size decreases only if a sign flip + or switching to one-sided scheme doesn't allow to take a full step. + use_one_sided : ndarray of bool, shape (n,) + Whether to switch to one-sided scheme. Informative only for + ``scheme='2-sided'``. 
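+
+    Examples
+    --------
+    An illustrative sketch: for a 1-sided scheme, a forward step that would
+    cross the upper bound is flipped to a backward step when the full step
+    still fits on the other side.
+
+    >>> import numpy as np
+    >>> h_adj, one_sided = _adjust_scheme_to_bounds(
+    ...     np.array([0.9]), np.array([0.2]), 1, '1-sided',
+    ...     np.array([0.0]), np.array([1.0]))
+    >>> h_adj
+    array([-0.2])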
+ """ + if scheme == '1-sided': + use_one_sided = np.ones_like(h, dtype=bool) + elif scheme == '2-sided': + h = np.abs(h) + use_one_sided = np.zeros_like(h, dtype=bool) + else: + raise ValueError("`scheme` must be '1-sided' or '2-sided'.") + + if np.all((lb == -np.inf) & (ub == np.inf)): + return h, use_one_sided + + h_total = h * num_steps + h_adjusted = h.copy() + + lower_dist = x0 - lb + upper_dist = ub - x0 + + if scheme == '1-sided': + x = x0 + h_total + violated = (x < lb) | (x > ub) + fitting = np.abs(h_total) <= np.maximum(lower_dist, upper_dist) + h_adjusted[violated & fitting] *= -1 + + forward = (upper_dist >= lower_dist) & ~fitting + h_adjusted[forward] = upper_dist[forward] / num_steps + backward = (upper_dist < lower_dist) & ~fitting + h_adjusted[backward] = -lower_dist[backward] / num_steps + elif scheme == '2-sided': + central = (lower_dist >= h_total) & (upper_dist >= h_total) + + forward = (upper_dist >= lower_dist) & ~central + h_adjusted[forward] = np.minimum( + h[forward], 0.5 * upper_dist[forward] / num_steps) + use_one_sided[forward] = True + + backward = (upper_dist < lower_dist) & ~central + h_adjusted[backward] = -np.minimum( + h[backward], 0.5 * lower_dist[backward] / num_steps) + use_one_sided[backward] = True + + min_dist = np.minimum(upper_dist, lower_dist) / num_steps + adjusted_central = (~central & (np.abs(h_adjusted) <= min_dist)) + h_adjusted[adjusted_central] = min_dist[adjusted_central] + use_one_sided[adjusted_central] = False + + return h_adjusted, use_one_sided + + +@functools.lru_cache +def _eps_for_method(x0_dtype, f0_dtype, method): + """ + Calculates relative EPS step to use for a given data type + and numdiff step method. + + Progressively smaller steps are used for larger floating point types. + + Parameters + ---------- + f0_dtype: np.dtype + dtype of function evaluation + + x0_dtype: np.dtype + dtype of parameter vector + + method: {'2-point', '3-point', 'cs'} + + Returns + ------- + EPS: float + relative step size. May be np.float16, np.float32, np.float64 + + Notes + ----- + The default relative step will be np.float64. However, if x0 or f0 are + smaller floating point types (np.float16, np.float32), then the smallest + floating point type is chosen. + """ + # the default EPS value + EPS = np.finfo(np.float64).eps + + x0_is_fp = False + if np.issubdtype(x0_dtype, np.inexact): + # if you're a floating point type then over-ride the default EPS + EPS = np.finfo(x0_dtype).eps + x0_itemsize = np.dtype(x0_dtype).itemsize + x0_is_fp = True + + if np.issubdtype(f0_dtype, np.inexact): + f0_itemsize = np.dtype(f0_dtype).itemsize + # choose the smallest itemsize between x0 and f0 + if x0_is_fp and f0_itemsize < x0_itemsize: + EPS = np.finfo(f0_dtype).eps + + if method in ["2-point", "cs"]: + return EPS**0.5 + elif method in ["3-point"]: + return EPS**(1/3) + else: + raise RuntimeError("Unknown step method, should be one of " + "{'2-point', '3-point', 'cs'}") + + +def _compute_absolute_step(rel_step, x0, f0, method): + """ + Computes an absolute step from a relative step for finite difference + calculation. + + Parameters + ---------- + rel_step: None or array-like + Relative step for the finite difference calculation + x0 : np.ndarray + Parameter vector + f0 : np.ndarray or scalar + method : {'2-point', '3-point', 'cs'} + + Returns + ------- + h : float + The absolute step size + + Notes + ----- + `h` will always be np.float64. However, if `x0` or `f0` are + smaller floating point dtypes (e.g. 
np.float32), then the absolute + step size will be calculated from the smallest floating point size. + """ + # this is used instead of np.sign(x0) because we need + # sign_x0 to be 1 when x0 == 0. + sign_x0 = (x0 >= 0).astype(float) * 2 - 1 + + rstep = _eps_for_method(x0.dtype, f0.dtype, method) + + if rel_step is None: + abs_step = rstep * sign_x0 * np.maximum(1.0, np.abs(x0)) + else: + # User has requested specific relative steps. + # Don't multiply by max(1, abs(x0) because if x0 < 1 then their + # requested step is not used. + abs_step = rel_step * sign_x0 * np.abs(x0) + + # however we don't want an abs_step of 0, which can happen if + # rel_step is 0, or x0 is 0. Instead, substitute a realistic step + dx = ((x0 + abs_step) - x0) + abs_step = np.where(dx == 0, + rstep * sign_x0 * np.maximum(1.0, np.abs(x0)), + abs_step) + + return abs_step + + +def _prepare_bounds(bounds, x0): + """ + Prepares new-style bounds from a two-tuple specifying the lower and upper + limits for values in x0. If a value is not bound then the lower/upper bound + will be expected to be -np.inf/np.inf. + + Examples + -------- + >>> _prepare_bounds([(0, 1, 2), (1, 2, np.inf)], [0.5, 1.5, 2.5]) + (array([0., 1., 2.]), array([ 1., 2., inf])) + """ + lb, ub = (np.asarray(b, dtype=float) for b in bounds) + if lb.ndim == 0: + lb = np.resize(lb, x0.shape) + + if ub.ndim == 0: + ub = np.resize(ub, x0.shape) + + return lb, ub + + +def group_columns(A, order=0): + """Group columns of a 2-D matrix for sparse finite differencing [1]_. + + Two columns are in the same group if in each row at least one of them + has zero. A greedy sequential algorithm is used to construct groups. + + Parameters + ---------- + A : array_like or sparse matrix, shape (m, n) + Matrix of which to group columns. + order : int, iterable of int with shape (n,) or None + Permutation array which defines the order of columns enumeration. + If int or None, a random permutation is used with `order` used as + a random seed. Default is 0, that is use a random permutation but + guarantee repeatability. + + Returns + ------- + groups : ndarray of int, shape (n,) + Contains values from 0 to n_groups-1, where n_groups is the number + of found groups. Each value ``groups[i]`` is an index of a group to + which ith column assigned. The procedure was helpful only if + n_groups is significantly less than n. + + References + ---------- + .. [1] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of + sparse Jacobian matrices", Journal of the Institute of Mathematics + and its Applications, 13 (1974), pp. 117-120. + """ + if issparse(A): + A = csc_matrix(A) + else: + A = np.atleast_2d(A) + A = (A != 0).astype(np.int32) + + if A.ndim != 2: + raise ValueError("`A` must be 2-dimensional.") + + m, n = A.shape + + if order is None or np.isscalar(order): + rng = np.random.RandomState(order) + order = rng.permutation(n) + else: + order = np.asarray(order) + if order.shape != (n,): + raise ValueError("`order` has incorrect shape.") + + A = A[:, order] + + if issparse(A): + groups = group_sparse(m, n, A.indices, A.indptr) + else: + groups = group_dense(m, n, A) + + groups[order] = groups.copy() + + return groups + + +def approx_derivative(fun, x0, method='3-point', rel_step=None, abs_step=None, + f0=None, bounds=(-np.inf, np.inf), sparsity=None, + as_linear_operator=False, args=(), kwargs={}): + """Compute finite difference approximation of the derivatives of a + vector-valued function. 
+ + If a function maps from R^n to R^m, its derivatives form m-by-n matrix + called the Jacobian, where an element (i, j) is a partial derivative of + f[i] with respect to x[j]. + + Parameters + ---------- + fun : callable + Function of which to estimate the derivatives. The argument x + passed to this function is ndarray of shape (n,) (never a scalar + even if n=1). It must return 1-D array_like of shape (m,) or a scalar. + x0 : array_like of shape (n,) or float + Point at which to estimate the derivatives. Float will be converted + to a 1-D array. + method : {'3-point', '2-point', 'cs'}, optional + Finite difference method to use: + - '2-point' - use the first order accuracy forward or backward + difference. + - '3-point' - use central difference in interior points and the + second order accuracy forward or backward difference + near the boundary. + - 'cs' - use a complex-step finite difference scheme. This assumes + that the user function is real-valued and can be + analytically continued to the complex plane. Otherwise, + produces bogus results. + rel_step : None or array_like, optional + Relative step size to use. If None (default) the absolute step size is + computed as ``h = rel_step * sign(x0) * max(1, abs(x0))``, with + `rel_step` being selected automatically, see Notes. Otherwise + ``h = rel_step * sign(x0) * abs(x0)``. For ``method='3-point'`` the + sign of `h` is ignored. The calculated step size is possibly adjusted + to fit into the bounds. + abs_step : array_like, optional + Absolute step size to use, possibly adjusted to fit into the bounds. + For ``method='3-point'`` the sign of `abs_step` is ignored. By default + relative steps are used, only if ``abs_step is not None`` are absolute + steps used. + f0 : None or array_like, optional + If not None it is assumed to be equal to ``fun(x0)``, in this case + the ``fun(x0)`` is not called. Default is None. + bounds : tuple of array_like, optional + Lower and upper bounds on independent variables. Defaults to no bounds. + Each bound must match the size of `x0` or be a scalar, in the latter + case the bound will be the same for all variables. Use it to limit the + range of function evaluation. Bounds checking is not implemented + when `as_linear_operator` is True. + sparsity : {None, array_like, sparse matrix, 2-tuple}, optional + Defines a sparsity structure of the Jacobian matrix. If the Jacobian + matrix is known to have only few non-zero elements in each row, then + it's possible to estimate its several columns by a single function + evaluation [3]_. To perform such economic computations two ingredients + are required: + + * structure : array_like or sparse matrix of shape (m, n). A zero + element means that a corresponding element of the Jacobian + identically equals to zero. + * groups : array_like of shape (n,). A column grouping for a given + sparsity structure, use `group_columns` to obtain it. + + A single array or a sparse matrix is interpreted as a sparsity + structure, and groups are computed inside the function. A tuple is + interpreted as (structure, groups). If None (default), a standard + dense differencing will be used. + + Note, that sparse differencing makes sense only for large Jacobian + matrices where each row contains few non-zero elements. + as_linear_operator : bool, optional + When True the function returns an `scipy.sparse.linalg.LinearOperator`. + Otherwise it returns a dense array or a sparse matrix depending on + `sparsity`. 
The linear operator provides an efficient way of computing + ``J.dot(p)`` for any vector ``p`` of shape (n,), but does not allow + direct access to individual elements of the matrix. By default + `as_linear_operator` is False. + args, kwargs : tuple and dict, optional + Additional arguments passed to `fun`. Both empty by default. + The calling signature is ``fun(x, *args, **kwargs)``. + + Returns + ------- + J : {ndarray, sparse matrix, LinearOperator} + Finite difference approximation of the Jacobian matrix. + If `as_linear_operator` is True returns a LinearOperator + with shape (m, n). Otherwise it returns a dense array or sparse + matrix depending on how `sparsity` is defined. If `sparsity` + is None then a ndarray with shape (m, n) is returned. If + `sparsity` is not None returns a csr_matrix with shape (m, n). + For sparse matrices and linear operators it is always returned as + a 2-D structure, for ndarrays, if m=1 it is returned + as a 1-D gradient array with shape (n,). + + See Also + -------- + check_derivative : Check correctness of a function computing derivatives. + + Notes + ----- + If `rel_step` is not provided, it assigned as ``EPS**(1/s)``, where EPS is + determined from the smallest floating point dtype of `x0` or `fun(x0)`, + ``np.finfo(x0.dtype).eps``, s=2 for '2-point' method and + s=3 for '3-point' method. Such relative step approximately minimizes a sum + of truncation and round-off errors, see [1]_. Relative steps are used by + default. However, absolute steps are used when ``abs_step is not None``. + If any of the absolute or relative steps produces an indistinguishable + difference from the original `x0`, ``(x0 + dx) - x0 == 0``, then a + automatic step size is substituted for that particular entry. + + A finite difference scheme for '3-point' method is selected automatically. + The well-known central difference scheme is used for points sufficiently + far from the boundary, and 3-point forward or backward scheme is used for + points near the boundary. Both schemes have the second-order accuracy in + terms of Taylor expansion. Refer to [2]_ for the formulas of 3-point + forward and backward difference schemes. + + For dense differencing when m=1 Jacobian is returned with a shape (n,), + on the other hand when n=1 Jacobian is returned with a shape (m, 1). + Our motivation is the following: a) It handles a case of gradient + computation (m=1) in a conventional way. b) It clearly separates these two + different cases. b) In all cases np.atleast_2d can be called to get 2-D + Jacobian with correct dimensions. + + References + ---------- + .. [1] W. H. Press et. al. "Numerical Recipes. The Art of Scientific + Computing. 3rd edition", sec. 5.7. + + .. [2] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of + sparse Jacobian matrices", Journal of the Institute of Mathematics + and its Applications, 13 (1974), pp. 117-120. + + .. [3] B. Fornberg, "Generation of Finite Difference Formulas on + Arbitrarily Spaced Grids", Mathematics of Computation 51, 1988. + + Examples + -------- + >>> import numpy as np + >>> from scipy.optimize._numdiff import approx_derivative + >>> + >>> def f(x, c1, c2): + ... return np.array([x[0] * np.sin(c1 * x[1]), + ... x[0] * np.cos(c2 * x[1])]) + ... + >>> x0 = np.array([1.0, 0.5 * np.pi]) + >>> approx_derivative(f, x0, args=(1, 2)) + array([[ 1., 0.], + [-1., 0.]]) + + Bounds can be used to limit the region of function evaluation. + In the example below we compute left and right derivative at point 1.0. + + >>> def g(x): + ... 
return x**2 if x >= 1 else x + ... + >>> x0 = 1.0 + >>> approx_derivative(g, x0, bounds=(-np.inf, 1.0)) + array([ 1.]) + >>> approx_derivative(g, x0, bounds=(1.0, np.inf)) + array([ 2.]) + """ + if method not in ['2-point', '3-point', 'cs']: + raise ValueError("Unknown method '%s'. " % method) + + xp = array_namespace(x0) + _x = atleast_nd(x0, ndim=1, xp=xp) + _dtype = xp.float64 + if xp.isdtype(_x.dtype, "real floating"): + _dtype = _x.dtype + + # promotes to floating + x0 = xp.astype(_x, _dtype) + + if x0.ndim > 1: + raise ValueError("`x0` must have at most 1 dimension.") + + lb, ub = _prepare_bounds(bounds, x0) + + if lb.shape != x0.shape or ub.shape != x0.shape: + raise ValueError("Inconsistent shapes between bounds and `x0`.") + + if as_linear_operator and not (np.all(np.isinf(lb)) + and np.all(np.isinf(ub))): + raise ValueError("Bounds not supported when " + "`as_linear_operator` is True.") + + def fun_wrapped(x): + # send user function same fp type as x0. (but only if cs is not being + # used + if xp.isdtype(x.dtype, "real floating"): + x = xp.astype(x, x0.dtype) + + f = np.atleast_1d(fun(x, *args, **kwargs)) + if f.ndim > 1: + raise RuntimeError("`fun` return value has " + "more than 1 dimension.") + return f + + if f0 is None: + f0 = fun_wrapped(x0) + else: + f0 = np.atleast_1d(f0) + if f0.ndim > 1: + raise ValueError("`f0` passed has more than 1 dimension.") + + if np.any((x0 < lb) | (x0 > ub)): + raise ValueError("`x0` violates bound constraints.") + + if as_linear_operator: + if rel_step is None: + rel_step = _eps_for_method(x0.dtype, f0.dtype, method) + + return _linear_operator_difference(fun_wrapped, x0, + f0, rel_step, method) + else: + # by default we use rel_step + if abs_step is None: + h = _compute_absolute_step(rel_step, x0, f0, method) + else: + # user specifies an absolute step + sign_x0 = (x0 >= 0).astype(float) * 2 - 1 + h = abs_step + + # cannot have a zero step. This might happen if x0 is very large + # or small. In which case fall back to relative step. 
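+            # For example (illustrative): with x0 = 1e20 and abs_step = 1e-8,
+            # ``(x0 + h) - x0`` evaluates to exactly 0.0 in float64, because
+            # 1e-8 is far below the spacing of representable doubles around
+            # 1e20 (about 1.6e4), so the relative-step formula is substituted
+            # for that entry.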
+ dx = ((x0 + h) - x0) + h = np.where(dx == 0, + _eps_for_method(x0.dtype, f0.dtype, method) * + sign_x0 * np.maximum(1.0, np.abs(x0)), + h) + + if method == '2-point': + h, use_one_sided = _adjust_scheme_to_bounds( + x0, h, 1, '1-sided', lb, ub) + elif method == '3-point': + h, use_one_sided = _adjust_scheme_to_bounds( + x0, h, 1, '2-sided', lb, ub) + elif method == 'cs': + use_one_sided = False + + if sparsity is None: + return _dense_difference(fun_wrapped, x0, f0, h, + use_one_sided, method) + else: + if not issparse(sparsity) and len(sparsity) == 2: + structure, groups = sparsity + else: + structure = sparsity + groups = group_columns(sparsity) + + if issparse(structure): + structure = csc_matrix(structure) + else: + structure = np.atleast_2d(structure) + + groups = np.atleast_1d(groups) + return _sparse_difference(fun_wrapped, x0, f0, h, + use_one_sided, structure, + groups, method) + + +def _linear_operator_difference(fun, x0, f0, h, method): + m = f0.size + n = x0.size + + if method == '2-point': + def matvec(p): + if np.array_equal(p, np.zeros_like(p)): + return np.zeros(m) + dx = h / norm(p) + x = x0 + dx*p + df = fun(x) - f0 + return df / dx + + elif method == '3-point': + def matvec(p): + if np.array_equal(p, np.zeros_like(p)): + return np.zeros(m) + dx = 2*h / norm(p) + x1 = x0 - (dx/2)*p + x2 = x0 + (dx/2)*p + f1 = fun(x1) + f2 = fun(x2) + df = f2 - f1 + return df / dx + + elif method == 'cs': + def matvec(p): + if np.array_equal(p, np.zeros_like(p)): + return np.zeros(m) + dx = h / norm(p) + x = x0 + dx*p*1.j + f1 = fun(x) + df = f1.imag + return df / dx + + else: + raise RuntimeError("Never be here.") + + return LinearOperator((m, n), matvec) + + +def _dense_difference(fun, x0, f0, h, use_one_sided, method): + m = f0.size + n = x0.size + J_transposed = np.empty((n, m)) + h_vecs = np.diag(h) + + for i in range(h.size): + if method == '2-point': + x = x0 + h_vecs[i] + dx = x[i] - x0[i] # Recompute dx as exactly representable number. + df = fun(x) - f0 + elif method == '3-point' and use_one_sided[i]: + x1 = x0 + h_vecs[i] + x2 = x0 + 2 * h_vecs[i] + dx = x2[i] - x0[i] + f1 = fun(x1) + f2 = fun(x2) + df = -3.0 * f0 + 4 * f1 - f2 + elif method == '3-point' and not use_one_sided[i]: + x1 = x0 - h_vecs[i] + x2 = x0 + h_vecs[i] + dx = x2[i] - x1[i] + f1 = fun(x1) + f2 = fun(x2) + df = f2 - f1 + elif method == 'cs': + f1 = fun(x0 + h_vecs[i]*1.j) + df = f1.imag + dx = h_vecs[i, i] + else: + raise RuntimeError("Never be here.") + + J_transposed[i] = df / dx + + if m == 1: + J_transposed = np.ravel(J_transposed) + + return J_transposed.T + + +def _sparse_difference(fun, x0, f0, h, use_one_sided, + structure, groups, method): + m = f0.size + n = x0.size + row_indices = [] + col_indices = [] + fractions = [] + + n_groups = np.max(groups) + 1 + for group in range(n_groups): + # Perturb variables which are in the same group simultaneously. + e = np.equal(group, groups) + h_vec = h * e + if method == '2-point': + x = x0 + h_vec + dx = x - x0 + df = fun(x) - f0 + # The result is written to columns which correspond to perturbed + # variables. + cols, = np.nonzero(e) + # Find all non-zero elements in selected columns of Jacobian. + i, j, _ = find(structure[:, cols]) + # Restore column indices in the full array. + j = cols[j] + elif method == '3-point': + # Here we do conceptually the same but separate one-sided + # and two-sided schemes. 
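+            # Within the perturbed group, elements flagged by `use_one_sided`
+            # get the forward pair (x0 + h, x0 + 2*h); the remaining elements
+            # get the central pair (x0 - h, x0 + h). The difference quotient
+            # for each Jacobian row is then assembled from whichever branch
+            # applies to its perturbed column.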
+ x1 = x0.copy() + x2 = x0.copy() + + mask_1 = use_one_sided & e + x1[mask_1] += h_vec[mask_1] + x2[mask_1] += 2 * h_vec[mask_1] + + mask_2 = ~use_one_sided & e + x1[mask_2] -= h_vec[mask_2] + x2[mask_2] += h_vec[mask_2] + + dx = np.zeros(n) + dx[mask_1] = x2[mask_1] - x0[mask_1] + dx[mask_2] = x2[mask_2] - x1[mask_2] + + f1 = fun(x1) + f2 = fun(x2) + + cols, = np.nonzero(e) + i, j, _ = find(structure[:, cols]) + j = cols[j] + + mask = use_one_sided[j] + df = np.empty(m) + + rows = i[mask] + df[rows] = -3 * f0[rows] + 4 * f1[rows] - f2[rows] + + rows = i[~mask] + df[rows] = f2[rows] - f1[rows] + elif method == 'cs': + f1 = fun(x0 + h_vec*1.j) + df = f1.imag + dx = h_vec + cols, = np.nonzero(e) + i, j, _ = find(structure[:, cols]) + j = cols[j] + else: + raise ValueError("Never be here.") + + # All that's left is to compute the fraction. We store i, j and + # fractions as separate arrays and later construct coo_matrix. + row_indices.append(i) + col_indices.append(j) + fractions.append(df[i] / dx[j]) + + row_indices = np.hstack(row_indices) + col_indices = np.hstack(col_indices) + fractions = np.hstack(fractions) + J = coo_matrix((fractions, (row_indices, col_indices)), shape=(m, n)) + return csr_matrix(J) + + +def check_derivative(fun, jac, x0, bounds=(-np.inf, np.inf), args=(), + kwargs={}): + """Check correctness of a function computing derivatives (Jacobian or + gradient) by comparison with a finite difference approximation. + + Parameters + ---------- + fun : callable + Function of which to estimate the derivatives. The argument x + passed to this function is ndarray of shape (n,) (never a scalar + even if n=1). It must return 1-D array_like of shape (m,) or a scalar. + jac : callable + Function which computes Jacobian matrix of `fun`. It must work with + argument x the same way as `fun`. The return value must be array_like + or sparse matrix with an appropriate shape. + x0 : array_like of shape (n,) or float + Point at which to estimate the derivatives. Float will be converted + to 1-D array. + bounds : 2-tuple of array_like, optional + Lower and upper bounds on independent variables. Defaults to no bounds. + Each bound must match the size of `x0` or be a scalar, in the latter + case the bound will be the same for all variables. Use it to limit the + range of function evaluation. + args, kwargs : tuple and dict, optional + Additional arguments passed to `fun` and `jac`. Both empty by default. + The calling signature is ``fun(x, *args, **kwargs)`` and the same + for `jac`. + + Returns + ------- + accuracy : float + The maximum among all relative errors for elements with absolute values + higher than 1 and absolute errors for elements with absolute values + less or equal than 1. If `accuracy` is on the order of 1e-6 or lower, + then it is likely that your `jac` implementation is correct. + + See Also + -------- + approx_derivative : Compute finite difference approximation of derivative. + + Examples + -------- + >>> import numpy as np + >>> from scipy.optimize._numdiff import check_derivative + >>> + >>> + >>> def f(x, c1, c2): + ... return np.array([x[0] * np.sin(c1 * x[1]), + ... x[0] * np.cos(c2 * x[1])]) + ... + >>> def jac(x, c1, c2): + ... return np.array([ + ... [np.sin(c1 * x[1]), c1 * x[0] * np.cos(c1 * x[1])], + ... [np.cos(c2 * x[1]), -c2 * x[0] * np.sin(c2 * x[1])] + ... ]) + ... 
+ >>> + >>> x0 = np.array([1.0, 0.5 * np.pi]) + >>> check_derivative(f, jac, x0, args=(1, 2)) + 2.4492935982947064e-16 + """ + J_to_test = jac(x0, *args, **kwargs) + if issparse(J_to_test): + J_diff = approx_derivative(fun, x0, bounds=bounds, sparsity=J_to_test, + args=args, kwargs=kwargs) + J_to_test = csr_matrix(J_to_test) + abs_err = J_to_test - J_diff + i, j, abs_err_data = find(abs_err) + J_diff_data = np.asarray(J_diff[i, j]).ravel() + return np.max(np.abs(abs_err_data) / + np.maximum(1, np.abs(J_diff_data))) + else: + J_diff = approx_derivative(fun, x0, bounds=bounds, + args=args, kwargs=kwargs) + abs_err = np.abs(J_to_test - J_diff) + return np.max(abs_err / np.maximum(1, np.abs(J_diff))) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_optimize.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_optimize.py new file mode 100644 index 0000000000000000000000000000000000000000..0a1d730c469ec3a9cb3e74065df004abc771997c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_optimize.py @@ -0,0 +1,4092 @@ +#__docformat__ = "restructuredtext en" +# ******NOTICE*************** +# optimize.py module by Travis E. Oliphant +# +# You may copy and use this module as you see fit with no +# guarantee implied provided you keep this notice in all copies. +# *****END NOTICE************ + +# A collection of optimization algorithms. Version 0.5 +# CHANGES +# Added fminbound (July 2001) +# Added brute (Aug. 2002) +# Finished line search satisfying strong Wolfe conditions (Mar. 2004) +# Updated strong Wolfe conditions line search to use +# cubic-interpolation (Mar. 2004) + + +# Minimization routines + +__all__ = ['fmin', 'fmin_powell', 'fmin_bfgs', 'fmin_ncg', 'fmin_cg', + 'fminbound', 'brent', 'golden', 'bracket', 'rosen', 'rosen_der', + 'rosen_hess', 'rosen_hess_prod', 'brute', 'approx_fprime', + 'line_search', 'check_grad', 'OptimizeResult', 'show_options', + 'OptimizeWarning'] + +__docformat__ = "restructuredtext en" + +import math +import warnings +import sys +import inspect +from numpy import (atleast_1d, eye, argmin, zeros, shape, squeeze, + asarray, sqrt) +import numpy as np +from scipy.linalg import cholesky, issymmetric, LinAlgError +from scipy.sparse.linalg import LinearOperator +from ._linesearch import (line_search_wolfe1, line_search_wolfe2, + line_search_wolfe2 as line_search, + LineSearchWarning) +from ._numdiff import approx_derivative +from scipy._lib._util import getfullargspec_no_self as _getfullargspec +from scipy._lib._util import (MapWrapper, check_random_state, _RichResult, + _call_callback_maybe_halt) +from scipy.optimize._differentiable_functions import ScalarFunction, FD_METHODS + + +# standard status messages of optimizers +_status_message = {'success': 'Optimization terminated successfully.', + 'maxfev': 'Maximum number of function evaluations has ' + 'been exceeded.', + 'maxiter': 'Maximum number of iterations has been ' + 'exceeded.', + 'pr_loss': 'Desired error not necessarily achieved due ' + 'to precision loss.', + 'nan': 'NaN result encountered.', + 'out_of_bounds': 'The result is outside of the provided ' + 'bounds.'} + + +class MemoizeJac: + """ Decorator that caches the return values of a function returning `(fun, grad)` + each time it is called. 
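+
+        A minimal usage sketch (assuming an objective that returns both the
+        value and the gradient; the names below are illustrative only):
+
+        >>> import numpy as np
+        >>> def fun_and_grad(x):
+        ...     return np.sum(x**2), 2 * x
+        >>> memo = MemoizeJac(fun_and_grad)
+        >>> float(memo(np.array([1.0, 2.0])))
+        5.0
+        >>> memo.derivative(np.array([1.0, 2.0]))
+        array([2., 4.])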
""" + + def __init__(self, fun): + self.fun = fun + self.jac = None + self._value = None + self.x = None + + def _compute_if_needed(self, x, *args): + if not np.all(x == self.x) or self._value is None or self.jac is None: + self.x = np.asarray(x).copy() + fg = self.fun(x, *args) + self.jac = fg[1] + self._value = fg[0] + + def __call__(self, x, *args): + """ returns the function value """ + self._compute_if_needed(x, *args) + return self._value + + def derivative(self, x, *args): + self._compute_if_needed(x, *args) + return self.jac + + +def _wrap_callback(callback, method=None): + """Wrap a user-provided callback so that attributes can be attached.""" + if callback is None or method in {'tnc', 'slsqp', 'cobyla'}: + return callback # don't wrap + + sig = inspect.signature(callback) + + if set(sig.parameters) == {'intermediate_result'}: + def wrapped_callback(res): + return callback(intermediate_result=res) + elif method == 'trust-constr': + def wrapped_callback(res): + return callback(np.copy(res.x), res) + elif method == 'differential_evolution': + def wrapped_callback(res): + return callback(np.copy(res.x), res.convergence) + else: + def wrapped_callback(res): + return callback(np.copy(res.x)) + + wrapped_callback.stop_iteration = False + return wrapped_callback + + +class OptimizeResult(_RichResult): + """ + Represents the optimization result. + + Attributes + ---------- + x : ndarray + The solution of the optimization. + success : bool + Whether or not the optimizer exited successfully. + status : int + Termination status of the optimizer. Its value depends on the + underlying solver. Refer to `message` for details. + message : str + Description of the cause of the termination. + fun, jac, hess: ndarray + Values of objective function, its Jacobian and its Hessian (if + available). The Hessians may be approximations, see the documentation + of the function in question. + hess_inv : object + Inverse of the objective function's Hessian; may be an approximation. + Not available for all solvers. The type of this attribute may be + either np.ndarray or scipy.sparse.linalg.LinearOperator. + nfev, njev, nhev : int + Number of evaluations of the objective functions and of its + Jacobian and Hessian. + nit : int + Number of iterations performed by the optimizer. + maxcv : float + The maximum constraint violation. + + Notes + ----- + Depending on the specific solver being used, `OptimizeResult` may + not have all attributes listed here, and they may have additional + attributes not listed here. Since this class is essentially a + subclass of dict with attribute accessors, one can see which + attributes are available using the `OptimizeResult.keys` method. + + """ + pass + + +class OptimizeWarning(UserWarning): + pass + +def _check_positive_definite(Hk): + def is_pos_def(A): + if issymmetric(A): + try: + cholesky(A) + return True + except LinAlgError: + return False + else: + return False + if Hk is not None: + if not is_pos_def(Hk): + raise ValueError("'hess_inv0' matrix isn't positive definite.") + + +def _check_unknown_options(unknown_options): + if unknown_options: + msg = ", ".join(map(str, unknown_options.keys())) + # Stack level 4: this is called from _minimize_*, which is + # called from another function in SciPy. Level 4 is the first + # level in user code. + warnings.warn("Unknown solver options: %s" % msg, OptimizeWarning, stacklevel=4) + + +def is_finite_scalar(x): + """Test whether `x` is either a finite scalar or a finite array scalar. 
+ + """ + return np.size(x) == 1 and np.isfinite(x) + + +_epsilon = sqrt(np.finfo(float).eps) + + +def vecnorm(x, ord=2): + if ord == np.inf: + return np.amax(np.abs(x)) + elif ord == -np.inf: + return np.amin(np.abs(x)) + else: + return np.sum(np.abs(x)**ord, axis=0)**(1.0 / ord) + + +def _prepare_scalar_function(fun, x0, jac=None, args=(), bounds=None, + epsilon=None, finite_diff_rel_step=None, + hess=None): + """ + Creates a ScalarFunction object for use with scalar minimizers + (BFGS/LBFGSB/SLSQP/TNC/CG/etc). + + Parameters + ---------- + fun : callable + The objective function to be minimized. + + ``fun(x, *args) -> float`` + + where ``x`` is an 1-D array with shape (n,) and ``args`` + is a tuple of the fixed parameters needed to completely + specify the function. + x0 : ndarray, shape (n,) + Initial guess. Array of real elements of size (n,), + where 'n' is the number of independent variables. + jac : {callable, '2-point', '3-point', 'cs', None}, optional + Method for computing the gradient vector. If it is a callable, it + should be a function that returns the gradient vector: + + ``jac(x, *args) -> array_like, shape (n,)`` + + If one of `{'2-point', '3-point', 'cs'}` is selected then the gradient + is calculated with a relative step for finite differences. If `None`, + then two-point finite differences with an absolute step is used. + args : tuple, optional + Extra arguments passed to the objective function and its + derivatives (`fun`, `jac` functions). + bounds : sequence, optional + Bounds on variables. 'new-style' bounds are required. + eps : float or ndarray + If `jac is None` the absolute step size used for numerical + approximation of the jacobian via forward differences. + finite_diff_rel_step : None or array_like, optional + If `jac in ['2-point', '3-point', 'cs']` the relative step size to + use for numerical approximation of the jacobian. The absolute step + size is computed as ``h = rel_step * sign(x0) * max(1, abs(x0))``, + possibly adjusted to fit into the bounds. For ``jac='3-point'`` + the sign of `h` is ignored. If None (default) then step is selected + automatically. + hess : {callable, '2-point', '3-point', 'cs', None} + Computes the Hessian matrix. If it is callable, it should return the + Hessian matrix: + + ``hess(x, *args) -> {LinearOperator, spmatrix, array}, (n, n)`` + + Alternatively, the keywords {'2-point', '3-point', 'cs'} select a + finite difference scheme for numerical estimation. + Whenever the gradient is estimated via finite-differences, the Hessian + cannot be estimated with options {'2-point', '3-point', 'cs'} and needs + to be estimated using one of the quasi-Newton strategies. + + Returns + ------- + sf : ScalarFunction + """ + if callable(jac): + grad = jac + elif jac in FD_METHODS: + # epsilon is set to None so that ScalarFunction is made to use + # rel_step + epsilon = None + grad = jac + else: + # default (jac is None) is to do 2-point finite differences with + # absolute step size. ScalarFunction has to be provided an + # epsilon value that is not None to use absolute steps. This is + # normally the case from most _minimize* methods. + grad = '2-point' + epsilon = epsilon + + if hess is None: + # ScalarFunction requires something for hess, so we give a dummy + # implementation here if nothing is provided, return a value of None + # so that downstream minimisers halt. The results of `fun.hess` + # should not be used. + def hess(x, *args): + return None + + if bounds is None: + bounds = (-np.inf, np.inf) + + # ScalarFunction caches. 
Reuse of fun(x) during grad + # calculation reduces overall function evaluations. + sf = ScalarFunction(fun, x0, args, grad, hess, + finite_diff_rel_step, bounds, epsilon=epsilon) + + return sf + + +def _clip_x_for_func(func, bounds): + # ensures that x values sent to func are clipped to bounds + + # this is used as a mitigation for gh11403, slsqp/tnc sometimes + # suggest a move that is outside the limits by 1 or 2 ULP. This + # unclean fix makes sure x is strictly within bounds. + def eval(x): + x = _check_clip_x(x, bounds) + return func(x) + + return eval + + +def _check_clip_x(x, bounds): + if (x < bounds[0]).any() or (x > bounds[1]).any(): + warnings.warn("Values in x were outside bounds during a " + "minimize step, clipping to bounds", + RuntimeWarning, stacklevel=3) + x = np.clip(x, bounds[0], bounds[1]) + return x + + return x + + +def rosen(x): + """ + The Rosenbrock function. + + The function computed is:: + + sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0) + + Parameters + ---------- + x : array_like + 1-D array of points at which the Rosenbrock function is to be computed. + + Returns + ------- + f : float + The value of the Rosenbrock function. + + See Also + -------- + rosen_der, rosen_hess, rosen_hess_prod + + Examples + -------- + >>> import numpy as np + >>> from scipy.optimize import rosen + >>> X = 0.1 * np.arange(10) + >>> rosen(X) + 76.56 + + For higher-dimensional input ``rosen`` broadcasts. + In the following example, we use this to plot a 2D landscape. + Note that ``rosen_hess`` does not broadcast in this manner. + + >>> import matplotlib.pyplot as plt + >>> from mpl_toolkits.mplot3d import Axes3D + >>> x = np.linspace(-1, 1, 50) + >>> X, Y = np.meshgrid(x, x) + >>> ax = plt.subplot(111, projection='3d') + >>> ax.plot_surface(X, Y, rosen([X, Y])) + >>> plt.show() + """ + x = asarray(x) + r = np.sum(100.0 * (x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0, + axis=0) + return r + + +def rosen_der(x): + """ + The derivative (i.e. gradient) of the Rosenbrock function. + + Parameters + ---------- + x : array_like + 1-D array of points at which the derivative is to be computed. + + Returns + ------- + rosen_der : (N,) ndarray + The gradient of the Rosenbrock function at `x`. + + See Also + -------- + rosen, rosen_hess, rosen_hess_prod + + Examples + -------- + >>> import numpy as np + >>> from scipy.optimize import rosen_der + >>> X = 0.1 * np.arange(9) + >>> rosen_der(X) + array([ -2. , 10.6, 15.6, 13.4, 6.4, -3. , -12.4, -19.4, 62. ]) + + """ + x = asarray(x) + xm = x[1:-1] + xm_m1 = x[:-2] + xm_p1 = x[2:] + der = np.zeros_like(x) + der[1:-1] = (200 * (xm - xm_m1**2) - + 400 * (xm_p1 - xm**2) * xm - 2 * (1 - xm)) + der[0] = -400 * x[0] * (x[1] - x[0]**2) - 2 * (1 - x[0]) + der[-1] = 200 * (x[-1] - x[-2]**2) + return der + + +def rosen_hess(x): + """ + The Hessian matrix of the Rosenbrock function. + + Parameters + ---------- + x : array_like + 1-D array of points at which the Hessian matrix is to be computed. + + Returns + ------- + rosen_hess : ndarray + The Hessian matrix of the Rosenbrock function at `x`. 
+ + See Also + -------- + rosen, rosen_der, rosen_hess_prod + + Examples + -------- + >>> import numpy as np + >>> from scipy.optimize import rosen_hess + >>> X = 0.1 * np.arange(4) + >>> rosen_hess(X) + array([[-38., 0., 0., 0.], + [ 0., 134., -40., 0.], + [ 0., -40., 130., -80.], + [ 0., 0., -80., 200.]]) + + """ + x = atleast_1d(x) + H = np.diag(-400 * x[:-1], 1) - np.diag(400 * x[:-1], -1) + diagonal = np.zeros(len(x), dtype=x.dtype) + diagonal[0] = 1200 * x[0]**2 - 400 * x[1] + 2 + diagonal[-1] = 200 + diagonal[1:-1] = 202 + 1200 * x[1:-1]**2 - 400 * x[2:] + H = H + np.diag(diagonal) + return H + + +def rosen_hess_prod(x, p): + """ + Product of the Hessian matrix of the Rosenbrock function with a vector. + + Parameters + ---------- + x : array_like + 1-D array of points at which the Hessian matrix is to be computed. + p : array_like + 1-D array, the vector to be multiplied by the Hessian matrix. + + Returns + ------- + rosen_hess_prod : ndarray + The Hessian matrix of the Rosenbrock function at `x` multiplied + by the vector `p`. + + See Also + -------- + rosen, rosen_der, rosen_hess + + Examples + -------- + >>> import numpy as np + >>> from scipy.optimize import rosen_hess_prod + >>> X = 0.1 * np.arange(9) + >>> p = 0.5 * np.arange(9) + >>> rosen_hess_prod(X, p) + array([ -0., 27., -10., -95., -192., -265., -278., -195., -180.]) + + """ + x = atleast_1d(x) + Hp = np.zeros(len(x), dtype=x.dtype) + Hp[0] = (1200 * x[0]**2 - 400 * x[1] + 2) * p[0] - 400 * x[0] * p[1] + Hp[1:-1] = (-400 * x[:-2] * p[:-2] + + (202 + 1200 * x[1:-1]**2 - 400 * x[2:]) * p[1:-1] - + 400 * x[1:-1] * p[2:]) + Hp[-1] = -400 * x[-2] * p[-2] + 200*p[-1] + return Hp + + +def _wrap_scalar_function(function, args): + # wraps a minimizer function to count number of evaluations + # and to easily provide an args kwd. + ncalls = [0] + if function is None: + return ncalls, None + + def function_wrapper(x, *wrapper_args): + ncalls[0] += 1 + # A copy of x is sent to the user function (gh13740) + fx = function(np.copy(x), *(wrapper_args + args)) + # Ideally, we'd like to a have a true scalar returned from f(x). For + # backwards-compatibility, also allow np.array([1.3]), np.array([[1.3]]) etc. + if not np.isscalar(fx): + try: + fx = np.asarray(fx).item() + except (TypeError, ValueError) as e: + raise ValueError("The user-provided objective function " + "must return a scalar value.") from e + return fx + + return ncalls, function_wrapper + + +class _MaxFuncCallError(RuntimeError): + pass + + +def _wrap_scalar_function_maxfun_validation(function, args, maxfun): + # wraps a minimizer function to count number of evaluations + # and to easily provide an args kwd. + ncalls = [0] + if function is None: + return ncalls, None + + def function_wrapper(x, *wrapper_args): + if ncalls[0] >= maxfun: + raise _MaxFuncCallError("Too many function calls") + ncalls[0] += 1 + # A copy of x is sent to the user function (gh13740) + fx = function(np.copy(x), *(wrapper_args + args)) + # Ideally, we'd like to a have a true scalar returned from f(x). For + # backwards-compatibility, also allow np.array([1.3]), + # np.array([[1.3]]) etc. 
+ if not np.isscalar(fx): + try: + fx = np.asarray(fx).item() + except (TypeError, ValueError) as e: + raise ValueError("The user-provided objective function " + "must return a scalar value.") from e + return fx + + return ncalls, function_wrapper + + +def fmin(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None, + full_output=0, disp=1, retall=0, callback=None, initial_simplex=None): + """ + Minimize a function using the downhill simplex algorithm. + + This algorithm only uses function values, not derivatives or second + derivatives. + + Parameters + ---------- + func : callable func(x,*args) + The objective function to be minimized. + x0 : ndarray + Initial guess. + args : tuple, optional + Extra arguments passed to func, i.e., ``f(x,*args)``. + xtol : float, optional + Absolute error in xopt between iterations that is acceptable for + convergence. + ftol : number, optional + Absolute error in func(xopt) between iterations that is acceptable for + convergence. + maxiter : int, optional + Maximum number of iterations to perform. + maxfun : number, optional + Maximum number of function evaluations to make. + full_output : bool, optional + Set to True if fopt and warnflag outputs are desired. + disp : bool, optional + Set to True to print convergence messages. + retall : bool, optional + Set to True to return list of solutions at each iteration. + callback : callable, optional + Called after each iteration, as callback(xk), where xk is the + current parameter vector. + initial_simplex : array_like of shape (N + 1, N), optional + Initial simplex. If given, overrides `x0`. + ``initial_simplex[j,:]`` should contain the coordinates of + the jth vertex of the ``N+1`` vertices in the simplex, where + ``N`` is the dimension. + + Returns + ------- + xopt : ndarray + Parameter that minimizes function. + fopt : float + Value of function at minimum: ``fopt = func(xopt)``. + iter : int + Number of iterations performed. + funcalls : int + Number of function calls made. + warnflag : int + 1 : Maximum number of function evaluations made. + 2 : Maximum number of iterations reached. + allvecs : list + Solution at each iteration. + + See also + -------- + minimize: Interface to minimization algorithms for multivariate + functions. See the 'Nelder-Mead' `method` in particular. + + Notes + ----- + Uses a Nelder-Mead simplex algorithm to find the minimum of function of + one or more variables. + + This algorithm has a long history of successful use in applications. + But it will usually be slower than an algorithm that uses first or + second derivative information. In practice, it can have poor + performance in high-dimensional problems and is not robust to + minimizing complicated functions. Additionally, there currently is no + complete theory describing when the algorithm will successfully + converge to the minimum, or how fast it will if it does. Both the ftol and + xtol criteria must be met for convergence. + + Examples + -------- + >>> def f(x): + ... return x**2 + + >>> from scipy import optimize + + >>> minimum = optimize.fmin(f, 1) + Optimization terminated successfully. + Current function value: 0.000000 + Iterations: 17 + Function evaluations: 34 + >>> minimum[0] + -8.8817841970012523e-16 + + References + ---------- + .. [1] Nelder, J.A. and Mead, R. (1965), "A simplex method for function + minimization", The Computer Journal, 7, pp. 308-313 + + .. [2] Wright, M.H. 
(1996), "Direct Search Methods: Once Scorned, Now + Respectable", in Numerical Analysis 1995, Proceedings of the + 1995 Dundee Biennial Conference in Numerical Analysis, D.F. + Griffiths and G.A. Watson (Eds.), Addison Wesley Longman, + Harlow, UK, pp. 191-208. + + """ + opts = {'xatol': xtol, + 'fatol': ftol, + 'maxiter': maxiter, + 'maxfev': maxfun, + 'disp': disp, + 'return_all': retall, + 'initial_simplex': initial_simplex} + + callback = _wrap_callback(callback) + res = _minimize_neldermead(func, x0, args, callback=callback, **opts) + if full_output: + retlist = res['x'], res['fun'], res['nit'], res['nfev'], res['status'] + if retall: + retlist += (res['allvecs'], ) + return retlist + else: + if retall: + return res['x'], res['allvecs'] + else: + return res['x'] + + +def _minimize_neldermead(func, x0, args=(), callback=None, + maxiter=None, maxfev=None, disp=False, + return_all=False, initial_simplex=None, + xatol=1e-4, fatol=1e-4, adaptive=False, bounds=None, + **unknown_options): + """ + Minimization of scalar function of one or more variables using the + Nelder-Mead algorithm. + + Options + ------- + disp : bool + Set to True to print convergence messages. + maxiter, maxfev : int + Maximum allowed number of iterations and function evaluations. + Will default to ``N*200``, where ``N`` is the number of + variables, if neither `maxiter` or `maxfev` is set. If both + `maxiter` and `maxfev` are set, minimization will stop at the + first reached. + return_all : bool, optional + Set to True to return a list of the best solution at each of the + iterations. + initial_simplex : array_like of shape (N + 1, N) + Initial simplex. If given, overrides `x0`. + ``initial_simplex[j,:]`` should contain the coordinates of + the jth vertex of the ``N+1`` vertices in the simplex, where + ``N`` is the dimension. + xatol : float, optional + Absolute error in xopt between iterations that is acceptable for + convergence. + fatol : number, optional + Absolute error in func(xopt) between iterations that is acceptable for + convergence. + adaptive : bool, optional + Adapt algorithm parameters to dimensionality of problem. Useful for + high-dimensional minimization [1]_. + bounds : sequence or `Bounds`, optional + Bounds on variables. There are two ways to specify the bounds: + + 1. Instance of `Bounds` class. + 2. Sequence of ``(min, max)`` pairs for each element in `x`. None + is used to specify no bound. + + Note that this just clips all vertices in simplex based on + the bounds. + + References + ---------- + .. [1] Gao, F. and Han, L. + Implementing the Nelder-Mead simplex algorithm with adaptive + parameters. 2012. Computational Optimization and Applications. + 51:1, pp. 
259-277 + + """ + _check_unknown_options(unknown_options) + maxfun = maxfev + retall = return_all + + x0 = np.atleast_1d(x0).flatten() + dtype = x0.dtype if np.issubdtype(x0.dtype, np.inexact) else np.float64 + x0 = np.asarray(x0, dtype=dtype) + + if adaptive: + dim = float(len(x0)) + rho = 1 + chi = 1 + 2/dim + psi = 0.75 - 1/(2*dim) + sigma = 1 - 1/dim + else: + rho = 1 + chi = 2 + psi = 0.5 + sigma = 0.5 + + nonzdelt = 0.05 + zdelt = 0.00025 + + if bounds is not None: + lower_bound, upper_bound = bounds.lb, bounds.ub + # check bounds + if (lower_bound > upper_bound).any(): + raise ValueError("Nelder Mead - one of the lower bounds " + "is greater than an upper bound.", + stacklevel=3) + if np.any(lower_bound > x0) or np.any(x0 > upper_bound): + warnings.warn("Initial guess is not within the specified bounds", + OptimizeWarning, stacklevel=3) + + if bounds is not None: + x0 = np.clip(x0, lower_bound, upper_bound) + + if initial_simplex is None: + N = len(x0) + + sim = np.empty((N + 1, N), dtype=x0.dtype) + sim[0] = x0 + for k in range(N): + y = np.array(x0, copy=True) + if y[k] != 0: + y[k] = (1 + nonzdelt)*y[k] + else: + y[k] = zdelt + sim[k + 1] = y + else: + sim = np.atleast_2d(initial_simplex).copy() + dtype = sim.dtype if np.issubdtype(sim.dtype, np.inexact) else np.float64 + sim = np.asarray(sim, dtype=dtype) + if sim.ndim != 2 or sim.shape[0] != sim.shape[1] + 1: + raise ValueError("`initial_simplex` should be an array of shape (N+1,N)") + if len(x0) != sim.shape[1]: + raise ValueError("Size of `initial_simplex` is not consistent with `x0`") + N = sim.shape[1] + + if retall: + allvecs = [sim[0]] + + # If neither are set, then set both to default + if maxiter is None and maxfun is None: + maxiter = N * 200 + maxfun = N * 200 + elif maxiter is None: + # Convert remaining Nones, to np.inf, unless the other is np.inf, in + # which case use the default to avoid unbounded iteration + if maxfun == np.inf: + maxiter = N * 200 + else: + maxiter = np.inf + elif maxfun is None: + if maxiter == np.inf: + maxfun = N * 200 + else: + maxfun = np.inf + + if bounds is not None: + # The default simplex construction may make all entries (for a given + # parameter) greater than an upper bound if x0 is very close to the + # upper bound. If one simply clips the simplex to the bounds this could + # make the simplex entries degenerate. If that occurs reflect into the + # interior. 
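+        # Illustrative example: with an upper bound of 1.0 and a vertex at
+        # 1.002, reflection gives 2*1.0 - 1.002 = 0.998, a point strictly
+        # inside the bound rather than one clipped onto the boundary.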
+ msk = sim > upper_bound + # reflect into the interior + sim = np.where(msk, 2*upper_bound - sim, sim) + # but make sure the reflection is no less than the lower_bound + sim = np.clip(sim, lower_bound, upper_bound) + + one2np1 = list(range(1, N + 1)) + fsim = np.full((N + 1,), np.inf, dtype=float) + + fcalls, func = _wrap_scalar_function_maxfun_validation(func, args, maxfun) + + try: + for k in range(N + 1): + fsim[k] = func(sim[k]) + except _MaxFuncCallError: + pass + finally: + ind = np.argsort(fsim) + sim = np.take(sim, ind, 0) + fsim = np.take(fsim, ind, 0) + + ind = np.argsort(fsim) + fsim = np.take(fsim, ind, 0) + # sort so sim[0,:] has the lowest function value + sim = np.take(sim, ind, 0) + + iterations = 1 + + while (fcalls[0] < maxfun and iterations < maxiter): + try: + if (np.max(np.ravel(np.abs(sim[1:] - sim[0]))) <= xatol and + np.max(np.abs(fsim[0] - fsim[1:])) <= fatol): + break + + xbar = np.add.reduce(sim[:-1], 0) / N + xr = (1 + rho) * xbar - rho * sim[-1] + if bounds is not None: + xr = np.clip(xr, lower_bound, upper_bound) + fxr = func(xr) + doshrink = 0 + + if fxr < fsim[0]: + xe = (1 + rho * chi) * xbar - rho * chi * sim[-1] + if bounds is not None: + xe = np.clip(xe, lower_bound, upper_bound) + fxe = func(xe) + + if fxe < fxr: + sim[-1] = xe + fsim[-1] = fxe + else: + sim[-1] = xr + fsim[-1] = fxr + else: # fsim[0] <= fxr + if fxr < fsim[-2]: + sim[-1] = xr + fsim[-1] = fxr + else: # fxr >= fsim[-2] + # Perform contraction + if fxr < fsim[-1]: + xc = (1 + psi * rho) * xbar - psi * rho * sim[-1] + if bounds is not None: + xc = np.clip(xc, lower_bound, upper_bound) + fxc = func(xc) + + if fxc <= fxr: + sim[-1] = xc + fsim[-1] = fxc + else: + doshrink = 1 + else: + # Perform an inside contraction + xcc = (1 - psi) * xbar + psi * sim[-1] + if bounds is not None: + xcc = np.clip(xcc, lower_bound, upper_bound) + fxcc = func(xcc) + + if fxcc < fsim[-1]: + sim[-1] = xcc + fsim[-1] = fxcc + else: + doshrink = 1 + + if doshrink: + for j in one2np1: + sim[j] = sim[0] + sigma * (sim[j] - sim[0]) + if bounds is not None: + sim[j] = np.clip( + sim[j], lower_bound, upper_bound) + fsim[j] = func(sim[j]) + iterations += 1 + except _MaxFuncCallError: + pass + finally: + ind = np.argsort(fsim) + sim = np.take(sim, ind, 0) + fsim = np.take(fsim, ind, 0) + if retall: + allvecs.append(sim[0]) + intermediate_result = OptimizeResult(x=sim[0], fun=fsim[0]) + if _call_callback_maybe_halt(callback, intermediate_result): + break + + x = sim[0] + fval = np.min(fsim) + warnflag = 0 + + if fcalls[0] >= maxfun: + warnflag = 1 + msg = _status_message['maxfev'] + if disp: + warnings.warn(msg, RuntimeWarning, stacklevel=3) + elif iterations >= maxiter: + warnflag = 2 + msg = _status_message['maxiter'] + if disp: + warnings.warn(msg, RuntimeWarning, stacklevel=3) + else: + msg = _status_message['success'] + if disp: + print(msg) + print(" Current function value: %f" % fval) + print(" Iterations: %d" % iterations) + print(" Function evaluations: %d" % fcalls[0]) + + result = OptimizeResult(fun=fval, nit=iterations, nfev=fcalls[0], + status=warnflag, success=(warnflag == 0), + message=msg, x=x, final_simplex=(sim, fsim)) + if retall: + result['allvecs'] = allvecs + return result + + +def approx_fprime(xk, f, epsilon=_epsilon, *args): + """Finite difference approximation of the derivatives of a + scalar or vector-valued function. 
+ + If a function maps from :math:`R^n` to :math:`R^m`, its derivatives form + an m-by-n matrix + called the Jacobian, where an element :math:`(i, j)` is a partial + derivative of f[i] with respect to ``xk[j]``. + + Parameters + ---------- + xk : array_like + The coordinate vector at which to determine the gradient of `f`. + f : callable + Function of which to estimate the derivatives of. Has the signature + ``f(xk, *args)`` where `xk` is the argument in the form of a 1-D array + and `args` is a tuple of any additional fixed parameters needed to + completely specify the function. The argument `xk` passed to this + function is an ndarray of shape (n,) (never a scalar even if n=1). + It must return a 1-D array_like of shape (m,) or a scalar. + + .. versionchanged:: 1.9.0 + `f` is now able to return a 1-D array-like, with the :math:`(m, n)` + Jacobian being estimated. + + epsilon : {float, array_like}, optional + Increment to `xk` to use for determining the function gradient. + If a scalar, uses the same finite difference delta for all partial + derivatives. If an array, should contain one value per element of + `xk`. Defaults to ``sqrt(np.finfo(float).eps)``, which is approximately + 1.49e-08. + \\*args : args, optional + Any other arguments that are to be passed to `f`. + + Returns + ------- + jac : ndarray + The partial derivatives of `f` to `xk`. + + See Also + -------- + check_grad : Check correctness of gradient function against approx_fprime. + + Notes + ----- + The function gradient is determined by the forward finite difference + formula:: + + f(xk[i] + epsilon[i]) - f(xk[i]) + f'[i] = --------------------------------- + epsilon[i] + + Examples + -------- + >>> import numpy as np + >>> from scipy import optimize + >>> def func(x, c0, c1): + ... "Coordinate vector `x` should be an array of size two." + ... return c0 * x[0]**2 + c1*x[1]**2 + + >>> x = np.ones(2) + >>> c0, c1 = (1, 200) + >>> eps = np.sqrt(np.finfo(float).eps) + >>> optimize.approx_fprime(x, func, [eps, np.sqrt(200) * eps], c0, c1) + array([ 2. , 400.00004198]) + + """ + xk = np.asarray(xk, float) + f0 = f(xk, *args) + + return approx_derivative(f, xk, method='2-point', abs_step=epsilon, + args=args, f0=f0) + + +def check_grad(func, grad, x0, *args, epsilon=_epsilon, + direction='all', seed=None): + """Check the correctness of a gradient function by comparing it against a + (forward) finite-difference approximation of the gradient. + + Parameters + ---------- + func : callable ``func(x0, *args)`` + Function whose derivative is to be checked. + grad : callable ``grad(x0, *args)`` + Jacobian of `func`. + x0 : ndarray + Points to check `grad` against forward difference approximation of grad + using `func`. + args : \\*args, optional + Extra arguments passed to `func` and `grad`. + epsilon : float, optional + Step size used for the finite difference approximation. It defaults to + ``sqrt(np.finfo(float).eps)``, which is approximately 1.49e-08. + direction : str, optional + If set to ``'random'``, then gradients along a random vector + are used to check `grad` against forward difference approximation + using `func`. By default it is ``'all'``, in which case, all + the one hot direction vectors are considered to check `grad`. + If `func` is a vector valued function then only ``'all'`` can be used. + seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. 
+ If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance then + that instance is used. + Specify `seed` for reproducing the return value from this function. + The random numbers generated with this seed affect the random vector + along which gradients are computed to check ``grad``. Note that `seed` + is only used when `direction` argument is set to `'random'`. + + Returns + ------- + err : float + The square root of the sum of squares (i.e., the 2-norm) of the + difference between ``grad(x0, *args)`` and the finite difference + approximation of `grad` using func at the points `x0`. + + See Also + -------- + approx_fprime + + Examples + -------- + >>> import numpy as np + >>> def func(x): + ... return x[0]**2 - 0.5 * x[1]**3 + >>> def grad(x): + ... return [2 * x[0], -1.5 * x[1]**2] + >>> from scipy.optimize import check_grad + >>> check_grad(func, grad, [1.5, -1.5]) + 2.9802322387695312e-08 # may vary + >>> rng = np.random.default_rng() + >>> check_grad(func, grad, [1.5, -1.5], + ... direction='random', seed=rng) + 2.9802322387695312e-08 + + """ + step = epsilon + x0 = np.asarray(x0) + + def g(w, func, x0, v, *args): + return func(x0 + w*v, *args) + + if direction == 'random': + _grad = np.asanyarray(grad(x0, *args)) + if _grad.ndim > 1: + raise ValueError("'random' can only be used with scalar valued" + " func") + random_state = check_random_state(seed) + v = random_state.normal(0, 1, size=(x0.shape)) + _args = (func, x0, v) + args + _func = g + vars = np.zeros((1,)) + analytical_grad = np.dot(_grad, v) + elif direction == 'all': + _args = args + _func = func + vars = x0 + analytical_grad = grad(x0, *args) + else: + raise ValueError(f"{direction} is not a valid string for " + "``direction`` argument") + + return np.sqrt(np.sum(np.abs( + (analytical_grad - approx_fprime(vars, _func, step, *_args))**2 + ))) + + +def approx_fhess_p(x0, p, fprime, epsilon, *args): + # calculate fprime(x0) first, as this may be cached by ScalarFunction + f1 = fprime(*((x0,) + args)) + f2 = fprime(*((x0 + epsilon*p,) + args)) + return (f2 - f1) / epsilon + + +class _LineSearchError(RuntimeError): + pass + + +def _line_search_wolfe12(f, fprime, xk, pk, gfk, old_fval, old_old_fval, + **kwargs): + """ + Same as line_search_wolfe1, but fall back to line_search_wolfe2 if + suitable step length is not found, and raise an exception if a + suitable step length is not found. + + Raises + ------ + _LineSearchError + If no suitable step size is found + + """ + + extra_condition = kwargs.pop('extra_condition', None) + + ret = line_search_wolfe1(f, fprime, xk, pk, gfk, + old_fval, old_old_fval, + **kwargs) + + if ret[0] is not None and extra_condition is not None: + xp1 = xk + ret[0] * pk + if not extra_condition(ret[0], xp1, ret[3], ret[5]): + # Reject step if extra_condition fails + ret = (None,) + + if ret[0] is None: + # line search failed: try different one. 
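+        # Retry with line_search_wolfe2, forwarding only the c1/c2/amax
+        # keywords; its LineSearchWarning is silenced here because a final
+        # failure is reported via _LineSearchError below.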
+ with warnings.catch_warnings(): + warnings.simplefilter('ignore', LineSearchWarning) + kwargs2 = {} + for key in ('c1', 'c2', 'amax'): + if key in kwargs: + kwargs2[key] = kwargs[key] + ret = line_search_wolfe2(f, fprime, xk, pk, gfk, + old_fval, old_old_fval, + extra_condition=extra_condition, + **kwargs2) + + if ret[0] is None: + raise _LineSearchError() + + return ret + + +def fmin_bfgs(f, x0, fprime=None, args=(), gtol=1e-5, norm=np.inf, + epsilon=_epsilon, maxiter=None, full_output=0, disp=1, + retall=0, callback=None, xrtol=0, c1=1e-4, c2=0.9, + hess_inv0=None): + """ + Minimize a function using the BFGS algorithm. + + Parameters + ---------- + f : callable ``f(x,*args)`` + Objective function to be minimized. + x0 : ndarray + Initial guess, shape (n,) + fprime : callable ``f'(x,*args)``, optional + Gradient of f. + args : tuple, optional + Extra arguments passed to f and fprime. + gtol : float, optional + Terminate successfully if gradient norm is less than `gtol` + norm : float, optional + Order of norm (Inf is max, -Inf is min) + epsilon : int or ndarray, optional + If `fprime` is approximated, use this value for the step size. + callback : callable, optional + An optional user-supplied function to call after each + iteration. Called as ``callback(xk)``, where ``xk`` is the + current parameter vector. + maxiter : int, optional + Maximum number of iterations to perform. + full_output : bool, optional + If True, return ``fopt``, ``func_calls``, ``grad_calls``, and + ``warnflag`` in addition to ``xopt``. + disp : bool, optional + Print convergence message if True. + retall : bool, optional + Return a list of results at each iteration if True. + xrtol : float, default: 0 + Relative tolerance for `x`. Terminate successfully if step + size is less than ``xk * xrtol`` where ``xk`` is the current + parameter vector. + c1 : float, default: 1e-4 + Parameter for Armijo condition rule. + c2 : float, default: 0.9 + Parameter for curvature condition rule. + hess_inv0 : None or ndarray, optional`` + Initial inverse hessian estimate, shape (n, n). If None (default) then + the identity matrix is used. + + Returns + ------- + xopt : ndarray + Parameters which minimize f, i.e., ``f(xopt) == fopt``. + fopt : float + Minimum value. + gopt : ndarray + Value of gradient at minimum, f'(xopt), which should be near 0. + Bopt : ndarray + Value of 1/f''(xopt), i.e., the inverse Hessian matrix. + func_calls : int + Number of function_calls made. + grad_calls : int + Number of gradient calls made. + warnflag : integer + 1 : Maximum number of iterations exceeded. + 2 : Gradient and/or function calls not changing. + 3 : NaN result encountered. + allvecs : list + The value of `xopt` at each iteration. Only returned if `retall` is + True. + + Notes + ----- + Optimize the function, `f`, whose gradient is given by `fprime` + using the quasi-Newton method of Broyden, Fletcher, Goldfarb, + and Shanno (BFGS). + + Parameters `c1` and `c2` must satisfy ``0 < c1 < c2 < 1``. + + See Also + -------- + minimize: Interface to minimization algorithms for multivariate + functions. See ``method='BFGS'`` in particular. + + References + ---------- + Wright, and Nocedal 'Numerical Optimization', 1999, p. 198. + + Examples + -------- + >>> import numpy as np + >>> from scipy.optimize import fmin_bfgs + >>> def quadratic_cost(x, Q): + ... return x @ Q @ x + ... 
+ >>> x0 = np.array([-3, -4]) + >>> cost_weight = np.diag([1., 10.]) + >>> # Note that a trailing comma is necessary for a tuple with single element + >>> fmin_bfgs(quadratic_cost, x0, args=(cost_weight,)) + Optimization terminated successfully. + Current function value: 0.000000 + Iterations: 7 # may vary + Function evaluations: 24 # may vary + Gradient evaluations: 8 # may vary + array([ 2.85169950e-06, -4.61820139e-07]) + + >>> def quadratic_cost_grad(x, Q): + ... return 2 * Q @ x + ... + >>> fmin_bfgs(quadratic_cost, x0, quadratic_cost_grad, args=(cost_weight,)) + Optimization terminated successfully. + Current function value: 0.000000 + Iterations: 7 + Function evaluations: 8 + Gradient evaluations: 8 + array([ 2.85916637e-06, -4.54371951e-07]) + + """ + opts = {'gtol': gtol, + 'norm': norm, + 'eps': epsilon, + 'disp': disp, + 'maxiter': maxiter, + 'return_all': retall, + 'xrtol': xrtol, + 'c1': c1, + 'c2': c2, + 'hess_inv0': hess_inv0} + + callback = _wrap_callback(callback) + res = _minimize_bfgs(f, x0, args, fprime, callback=callback, **opts) + + if full_output: + retlist = (res['x'], res['fun'], res['jac'], res['hess_inv'], + res['nfev'], res['njev'], res['status']) + if retall: + retlist += (res['allvecs'], ) + return retlist + else: + if retall: + return res['x'], res['allvecs'] + else: + return res['x'] + + +def _minimize_bfgs(fun, x0, args=(), jac=None, callback=None, + gtol=1e-5, norm=np.inf, eps=_epsilon, maxiter=None, + disp=False, return_all=False, finite_diff_rel_step=None, + xrtol=0, c1=1e-4, c2=0.9, + hess_inv0=None, **unknown_options): + """ + Minimization of scalar function of one or more variables using the + BFGS algorithm. + + Options + ------- + disp : bool + Set to True to print convergence messages. + maxiter : int + Maximum number of iterations to perform. + gtol : float + Terminate successfully if gradient norm is less than `gtol`. + norm : float + Order of norm (Inf is max, -Inf is min). + eps : float or ndarray + If `jac is None` the absolute step size used for numerical + approximation of the jacobian via forward differences. + return_all : bool, optional + Set to True to return a list of the best solution at each of the + iterations. + finite_diff_rel_step : None or array_like, optional + If `jac in ['2-point', '3-point', 'cs']` the relative step size to + use for numerical approximation of the jacobian. The absolute step + size is computed as ``h = rel_step * sign(x) * max(1, abs(x))``, + possibly adjusted to fit into the bounds. For ``jac='3-point'`` + the sign of `h` is ignored. If None (default) then step is selected + automatically. + xrtol : float, default: 0 + Relative tolerance for `x`. Terminate successfully if step size is + less than ``xk * xrtol`` where ``xk`` is the current parameter vector. + c1 : float, default: 1e-4 + Parameter for Armijo condition rule. + c2 : float, default: 0.9 + Parameter for curvature condition rule. + hess_inv0 : None or ndarray, optional + Initial inverse hessian estimate, shape (n, n). If None (default) then + the identity matrix is used. + + Notes + ----- + Parameters `c1` and `c2` must satisfy ``0 < c1 < c2 < 1``. + + If minimization doesn't complete successfully, with an error message of + ``Desired error not necessarily achieved due to precision loss``, then + consider setting `gtol` to a higher value. This precision loss typically + occurs when the (finite difference) numerical differentiation cannot provide + sufficient precision to satisfy the `gtol` termination criterion. 
+ This can happen when working in single precision and a callable jac is not + provided. For single precision problems a `gtol` of 1e-3 seems to work. + """ + _check_unknown_options(unknown_options) + _check_positive_definite(hess_inv0) + retall = return_all + + x0 = asarray(x0).flatten() + if x0.ndim == 0: + x0.shape = (1,) + if maxiter is None: + maxiter = len(x0) * 200 + + sf = _prepare_scalar_function(fun, x0, jac, args=args, epsilon=eps, + finite_diff_rel_step=finite_diff_rel_step) + + f = sf.fun + myfprime = sf.grad + + old_fval = f(x0) + gfk = myfprime(x0) + + k = 0 + N = len(x0) + I = np.eye(N, dtype=int) + Hk = I if hess_inv0 is None else hess_inv0 + + # Sets the initial step guess to dx ~ 1 + old_old_fval = old_fval + np.linalg.norm(gfk) / 2 + + xk = x0 + if retall: + allvecs = [x0] + warnflag = 0 + gnorm = vecnorm(gfk, ord=norm) + while (gnorm > gtol) and (k < maxiter): + pk = -np.dot(Hk, gfk) + try: + alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ + _line_search_wolfe12(f, myfprime, xk, pk, gfk, + old_fval, old_old_fval, amin=1e-100, + amax=1e100, c1=c1, c2=c2) + except _LineSearchError: + # Line search failed to find a better solution. + warnflag = 2 + break + + sk = alpha_k * pk + xkp1 = xk + sk + + if retall: + allvecs.append(xkp1) + xk = xkp1 + if gfkp1 is None: + gfkp1 = myfprime(xkp1) + + yk = gfkp1 - gfk + gfk = gfkp1 + k += 1 + intermediate_result = OptimizeResult(x=xk, fun=old_fval) + if _call_callback_maybe_halt(callback, intermediate_result): + break + gnorm = vecnorm(gfk, ord=norm) + if (gnorm <= gtol): + break + + # See Chapter 5 in P.E. Frandsen, K. Jonasson, H.B. Nielsen, + # O. Tingleff: "Unconstrained Optimization", IMM, DTU. 1999. + # These notes are available here: + # http://www2.imm.dtu.dk/documents/ftp/publlec.html + if (alpha_k*vecnorm(pk) <= xrtol*(xrtol + vecnorm(xk))): + break + + if not np.isfinite(old_fval): + # We correctly found +-Inf as optimal value, or something went + # wrong. + warnflag = 2 + break + + rhok_inv = np.dot(yk, sk) + # this was handled in numeric, let it remains for more safety + # Cryptic comment above is preserved for posterity. Future reader: + # consider change to condition below proposed in gh-1261/gh-17345. + if rhok_inv == 0.: + rhok = 1000.0 + if disp: + msg = "Divide-by-zero encountered: rhok assumed large" + _print_success_message_or_warn(True, msg) + else: + rhok = 1. 
/ rhok_inv + + A1 = I - sk[:, np.newaxis] * yk[np.newaxis, :] * rhok + A2 = I - yk[:, np.newaxis] * sk[np.newaxis, :] * rhok + Hk = np.dot(A1, np.dot(Hk, A2)) + (rhok * sk[:, np.newaxis] * + sk[np.newaxis, :]) + + fval = old_fval + + if warnflag == 2: + msg = _status_message['pr_loss'] + elif k >= maxiter: + warnflag = 1 + msg = _status_message['maxiter'] + elif np.isnan(gnorm) or np.isnan(fval) or np.isnan(xk).any(): + warnflag = 3 + msg = _status_message['nan'] + else: + msg = _status_message['success'] + + if disp: + _print_success_message_or_warn(warnflag, msg) + print(" Current function value: %f" % fval) + print(" Iterations: %d" % k) + print(" Function evaluations: %d" % sf.nfev) + print(" Gradient evaluations: %d" % sf.ngev) + + result = OptimizeResult(fun=fval, jac=gfk, hess_inv=Hk, nfev=sf.nfev, + njev=sf.ngev, status=warnflag, + success=(warnflag == 0), message=msg, x=xk, + nit=k) + if retall: + result['allvecs'] = allvecs + return result + + +def _print_success_message_or_warn(warnflag, message, warntype=None): + if not warnflag: + print(message) + else: + warnings.warn(message, warntype or OptimizeWarning, stacklevel=3) + + +def fmin_cg(f, x0, fprime=None, args=(), gtol=1e-5, norm=np.inf, + epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0, + callback=None, c1=1e-4, c2=0.4): + """ + Minimize a function using a nonlinear conjugate gradient algorithm. + + Parameters + ---------- + f : callable, ``f(x, *args)`` + Objective function to be minimized. Here `x` must be a 1-D array of + the variables that are to be changed in the search for a minimum, and + `args` are the other (fixed) parameters of `f`. + x0 : ndarray + A user-supplied initial estimate of `xopt`, the optimal value of `x`. + It must be a 1-D array of values. + fprime : callable, ``fprime(x, *args)``, optional + A function that returns the gradient of `f` at `x`. Here `x` and `args` + are as described above for `f`. The returned value must be a 1-D array. + Defaults to None, in which case the gradient is approximated + numerically (see `epsilon`, below). + args : tuple, optional + Parameter values passed to `f` and `fprime`. Must be supplied whenever + additional fixed parameters are needed to completely specify the + functions `f` and `fprime`. + gtol : float, optional + Stop when the norm of the gradient is less than `gtol`. + norm : float, optional + Order to use for the norm of the gradient + (``-np.inf`` is min, ``np.inf`` is max). + epsilon : float or ndarray, optional + Step size(s) to use when `fprime` is approximated numerically. Can be a + scalar or a 1-D array. Defaults to ``sqrt(eps)``, with eps the + floating point machine precision. Usually ``sqrt(eps)`` is about + 1.5e-8. + maxiter : int, optional + Maximum number of iterations to perform. Default is ``200 * len(x0)``. + full_output : bool, optional + If True, return `fopt`, `func_calls`, `grad_calls`, and `warnflag` in + addition to `xopt`. See the Returns section below for additional + information on optional return values. + disp : bool, optional + If True, return a convergence message, followed by `xopt`. + retall : bool, optional + If True, add to the returned values the results of each iteration. + callback : callable, optional + An optional user-supplied function, called after each iteration. + Called as ``callback(xk)``, where ``xk`` is the current value of `x0`. + c1 : float, default: 1e-4 + Parameter for Armijo condition rule. + c2 : float, default: 0.4 + Parameter for curvature condition rule. 
+ + Returns + ------- + xopt : ndarray + Parameters which minimize f, i.e., ``f(xopt) == fopt``. + fopt : float, optional + Minimum value found, f(xopt). Only returned if `full_output` is True. + func_calls : int, optional + The number of function_calls made. Only returned if `full_output` + is True. + grad_calls : int, optional + The number of gradient calls made. Only returned if `full_output` is + True. + warnflag : int, optional + Integer value with warning status, only returned if `full_output` is + True. + + 0 : Success. + + 1 : The maximum number of iterations was exceeded. + + 2 : Gradient and/or function calls were not changing. May indicate + that precision was lost, i.e., the routine did not converge. + + 3 : NaN result encountered. + + allvecs : list of ndarray, optional + List of arrays, containing the results at each iteration. + Only returned if `retall` is True. + + See Also + -------- + minimize : common interface to all `scipy.optimize` algorithms for + unconstrained and constrained minimization of multivariate + functions. It provides an alternative way to call + ``fmin_cg``, by specifying ``method='CG'``. + + Notes + ----- + This conjugate gradient algorithm is based on that of Polak and Ribiere + [1]_. + + Conjugate gradient methods tend to work better when: + + 1. `f` has a unique global minimizing point, and no local minima or + other stationary points, + 2. `f` is, at least locally, reasonably well approximated by a + quadratic function of the variables, + 3. `f` is continuous and has a continuous gradient, + 4. `fprime` is not too large, e.g., has a norm less than 1000, + 5. The initial guess, `x0`, is reasonably close to `f` 's global + minimizing point, `xopt`. + + Parameters `c1` and `c2` must satisfy ``0 < c1 < c2 < 1``. + + References + ---------- + .. [1] Wright & Nocedal, "Numerical Optimization", 1999, pp. 120-122. + + Examples + -------- + Example 1: seek the minimum value of the expression + ``a*u**2 + b*u*v + c*v**2 + d*u + e*v + f`` for given values + of the parameters and an initial guess ``(u, v) = (0, 0)``. + + >>> import numpy as np + >>> args = (2, 3, 7, 8, 9, 10) # parameter values + >>> def f(x, *args): + ... u, v = x + ... a, b, c, d, e, f = args + ... return a*u**2 + b*u*v + c*v**2 + d*u + e*v + f + >>> def gradf(x, *args): + ... u, v = x + ... a, b, c, d, e, f = args + ... gu = 2*a*u + b*v + d # u-component of the gradient + ... gv = b*u + 2*c*v + e # v-component of the gradient + ... return np.asarray((gu, gv)) + >>> x0 = np.asarray((0, 0)) # Initial guess. + >>> from scipy import optimize + >>> res1 = optimize.fmin_cg(f, x0, fprime=gradf, args=args) + Optimization terminated successfully. + Current function value: 1.617021 + Iterations: 4 + Function evaluations: 8 + Gradient evaluations: 8 + >>> res1 + array([-1.80851064, -0.25531915]) + + Example 2: solve the same problem using the `minimize` function. + (This `myopts` dictionary shows all of the available options, + although in practice only non-default values would be needed. + The returned value will be a dictionary.) + + >>> opts = {'maxiter' : None, # default value. + ... 'disp' : True, # non-default value. + ... 'gtol' : 1e-5, # default value. + ... 'norm' : np.inf, # default value. + ... 'eps' : 1.4901161193847656e-08} # default value. + >>> res2 = optimize.minimize(f, x0, jac=gradf, args=args, + ... method='CG', options=opts) + Optimization terminated successfully. 
+ Current function value: 1.617021 + Iterations: 4 + Function evaluations: 8 + Gradient evaluations: 8 + >>> res2.x # minimum found + array([-1.80851064, -0.25531915]) + + """ + opts = {'gtol': gtol, + 'norm': norm, + 'eps': epsilon, + 'disp': disp, + 'maxiter': maxiter, + 'return_all': retall} + + callback = _wrap_callback(callback) + res = _minimize_cg(f, x0, args, fprime, callback=callback, c1=c1, c2=c2, + **opts) + + if full_output: + retlist = res['x'], res['fun'], res['nfev'], res['njev'], res['status'] + if retall: + retlist += (res['allvecs'], ) + return retlist + else: + if retall: + return res['x'], res['allvecs'] + else: + return res['x'] + + +def _minimize_cg(fun, x0, args=(), jac=None, callback=None, + gtol=1e-5, norm=np.inf, eps=_epsilon, maxiter=None, + disp=False, return_all=False, finite_diff_rel_step=None, + c1=1e-4, c2=0.4, **unknown_options): + """ + Minimization of scalar function of one or more variables using the + conjugate gradient algorithm. + + Options + ------- + disp : bool + Set to True to print convergence messages. + maxiter : int + Maximum number of iterations to perform. + gtol : float + Gradient norm must be less than `gtol` before successful + termination. + norm : float + Order of norm (Inf is max, -Inf is min). + eps : float or ndarray + If `jac is None` the absolute step size used for numerical + approximation of the jacobian via forward differences. + return_all : bool, optional + Set to True to return a list of the best solution at each of the + iterations. + finite_diff_rel_step : None or array_like, optional + If `jac in ['2-point', '3-point', 'cs']` the relative step size to + use for numerical approximation of the jacobian. The absolute step + size is computed as ``h = rel_step * sign(x) * max(1, abs(x))``, + possibly adjusted to fit into the bounds. For ``jac='3-point'`` + the sign of `h` is ignored. If None (default) then step is selected + automatically. + c1 : float, default: 1e-4 + Parameter for Armijo condition rule. + c2 : float, default: 0.4 + Parameter for curvature condition rule. + + Notes + ----- + Parameters `c1` and `c2` must satisfy ``0 < c1 < c2 < 1``. + """ + _check_unknown_options(unknown_options) + + retall = return_all + + x0 = asarray(x0).flatten() + if maxiter is None: + maxiter = len(x0) * 200 + + sf = _prepare_scalar_function(fun, x0, jac=jac, args=args, epsilon=eps, + finite_diff_rel_step=finite_diff_rel_step) + + f = sf.fun + myfprime = sf.grad + + old_fval = f(x0) + gfk = myfprime(x0) + + k = 0 + xk = x0 + # Sets the initial step guess to dx ~ 1 + old_old_fval = old_fval + np.linalg.norm(gfk) / 2 + + if retall: + allvecs = [xk] + warnflag = 0 + pk = -gfk + gnorm = vecnorm(gfk, ord=norm) + + sigma_3 = 0.01 + + while (gnorm > gtol) and (k < maxiter): + deltak = np.dot(gfk, gfk) + + cached_step = [None] + + def polak_ribiere_powell_step(alpha, gfkp1=None): + xkp1 = xk + alpha * pk + if gfkp1 is None: + gfkp1 = myfprime(xkp1) + yk = gfkp1 - gfk + beta_k = max(0, np.dot(yk, gfkp1) / deltak) + pkp1 = -gfkp1 + beta_k * pk + gnorm = vecnorm(gfkp1, ord=norm) + return (alpha, xkp1, pkp1, gfkp1, gnorm) + + def descent_condition(alpha, xkp1, fp1, gfkp1): + # Polak-Ribiere+ needs an explicit check of a sufficient + # descent condition, which is not guaranteed by strong Wolfe. + # + # See Gilbert & Nocedal, "Global convergence properties of + # conjugate gradient methods for optimization", + # SIAM J. Optimization 2, 21 (1992). 
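+            # Compute the candidate PR+ step once and cache it; the main loop
+            # reuses the cached result when the line search returns this same
+            # alpha, instead of recomputing the step.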
+ cached_step[:] = polak_ribiere_powell_step(alpha, gfkp1) + alpha, xk, pk, gfk, gnorm = cached_step + + # Accept step if it leads to convergence. + if gnorm <= gtol: + return True + + # Accept step if sufficient descent condition applies. + return np.dot(pk, gfk) <= -sigma_3 * np.dot(gfk, gfk) + + try: + alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ + _line_search_wolfe12(f, myfprime, xk, pk, gfk, old_fval, + old_old_fval, c1=c1, c2=c2, amin=1e-100, + amax=1e100, extra_condition=descent_condition) + except _LineSearchError: + # Line search failed to find a better solution. + warnflag = 2 + break + + # Reuse already computed results if possible + if alpha_k == cached_step[0]: + alpha_k, xk, pk, gfk, gnorm = cached_step + else: + alpha_k, xk, pk, gfk, gnorm = polak_ribiere_powell_step(alpha_k, gfkp1) + + if retall: + allvecs.append(xk) + k += 1 + intermediate_result = OptimizeResult(x=xk, fun=old_fval) + if _call_callback_maybe_halt(callback, intermediate_result): + break + + fval = old_fval + if warnflag == 2: + msg = _status_message['pr_loss'] + elif k >= maxiter: + warnflag = 1 + msg = _status_message['maxiter'] + elif np.isnan(gnorm) or np.isnan(fval) or np.isnan(xk).any(): + warnflag = 3 + msg = _status_message['nan'] + else: + msg = _status_message['success'] + + if disp: + _print_success_message_or_warn(warnflag, msg) + print(" Current function value: %f" % fval) + print(" Iterations: %d" % k) + print(" Function evaluations: %d" % sf.nfev) + print(" Gradient evaluations: %d" % sf.ngev) + + result = OptimizeResult(fun=fval, jac=gfk, nfev=sf.nfev, + njev=sf.ngev, status=warnflag, + success=(warnflag == 0), message=msg, x=xk, + nit=k) + if retall: + result['allvecs'] = allvecs + return result + + +def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5, + epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0, + callback=None, c1=1e-4, c2=0.9): + """ + Unconstrained minimization of a function using the Newton-CG method. + + Parameters + ---------- + f : callable ``f(x, *args)`` + Objective function to be minimized. + x0 : ndarray + Initial guess. + fprime : callable ``f'(x, *args)`` + Gradient of f. + fhess_p : callable ``fhess_p(x, p, *args)``, optional + Function which computes the Hessian of f times an + arbitrary vector, p. + fhess : callable ``fhess(x, *args)``, optional + Function to compute the Hessian matrix of f. + args : tuple, optional + Extra arguments passed to f, fprime, fhess_p, and fhess + (the same set of extra arguments is supplied to all of + these functions). + epsilon : float or ndarray, optional + If fhess is approximated, use this value for the step size. + callback : callable, optional + An optional user-supplied function which is called after + each iteration. Called as callback(xk), where xk is the + current parameter vector. + avextol : float, optional + Convergence is assumed when the average relative error in + the minimizer falls below this amount. + maxiter : int, optional + Maximum number of iterations to perform. + full_output : bool, optional + If True, return the optional outputs. + disp : bool, optional + If True, print convergence message. + retall : bool, optional + If True, return a list of results at each iteration. + c1 : float, default: 1e-4 + Parameter for Armijo condition rule. + c2 : float, default: 0.9 + Parameter for curvature condition rule + + Returns + ------- + xopt : ndarray + Parameters which minimize f, i.e., ``f(xopt) == fopt``. 
+ fopt : float + Value of the function at xopt, i.e., ``fopt = f(xopt)``. + fcalls : int + Number of function calls made. + gcalls : int + Number of gradient calls made. + hcalls : int + Number of Hessian calls made. + warnflag : int + Warnings generated by the algorithm. + 1 : Maximum number of iterations exceeded. + 2 : Line search failure (precision loss). + 3 : NaN result encountered. + allvecs : list + The result at each iteration, if retall is True (see below). + + See also + -------- + minimize: Interface to minimization algorithms for multivariate + functions. See the 'Newton-CG' `method` in particular. + + Notes + ----- + Only one of `fhess_p` or `fhess` need to be given. If `fhess` + is provided, then `fhess_p` will be ignored. If neither `fhess` + nor `fhess_p` is provided, then the hessian product will be + approximated using finite differences on `fprime`. `fhess_p` + must compute the hessian times an arbitrary vector. If it is not + given, finite-differences on `fprime` are used to compute + it. + + Newton-CG methods are also called truncated Newton methods. This + function differs from scipy.optimize.fmin_tnc because + + 1. scipy.optimize.fmin_ncg is written purely in Python using NumPy + and scipy while scipy.optimize.fmin_tnc calls a C function. + 2. scipy.optimize.fmin_ncg is only for unconstrained minimization + while scipy.optimize.fmin_tnc is for unconstrained minimization + or box constrained minimization. (Box constraints give + lower and upper bounds for each variable separately.) + + Parameters `c1` and `c2` must satisfy ``0 < c1 < c2 < 1``. + + References + ---------- + Wright & Nocedal, 'Numerical Optimization', 1999, p. 140. + + """ + opts = {'xtol': avextol, + 'eps': epsilon, + 'maxiter': maxiter, + 'disp': disp, + 'return_all': retall} + + callback = _wrap_callback(callback) + res = _minimize_newtoncg(f, x0, args, fprime, fhess, fhess_p, + callback=callback, c1=c1, c2=c2, **opts) + + if full_output: + retlist = (res['x'], res['fun'], res['nfev'], res['njev'], + res['nhev'], res['status']) + if retall: + retlist += (res['allvecs'], ) + return retlist + else: + if retall: + return res['x'], res['allvecs'] + else: + return res['x'] + + +def _minimize_newtoncg(fun, x0, args=(), jac=None, hess=None, hessp=None, + callback=None, xtol=1e-5, eps=_epsilon, maxiter=None, + disp=False, return_all=False, c1=1e-4, c2=0.9, + **unknown_options): + """ + Minimization of scalar function of one or more variables using the + Newton-CG algorithm. + + Note that the `jac` parameter (Jacobian) is required. + + Options + ------- + disp : bool + Set to True to print convergence messages. + xtol : float + Average relative error in solution `xopt` acceptable for + convergence. + maxiter : int + Maximum number of iterations to perform. + eps : float or ndarray + If `hessp` is approximated, use this value for the step size. + return_all : bool, optional + Set to True to return a list of the best solution at each of the + iterations. + c1 : float, default: 1e-4 + Parameter for Armijo condition rule. + c2 : float, default: 0.9 + Parameter for curvature condition rule. + + Notes + ----- + Parameters `c1` and `c2` must satisfy ``0 < c1 < c2 < 1``. + """ + _check_unknown_options(unknown_options) + if jac is None: + raise ValueError('Jacobian is required for Newton-CG method') + fhess_p = hessp + fhess = hess + avextol = xtol + epsilon = eps + retall = return_all + + x0 = asarray(x0).flatten() + # TODO: add hessp (callable or FD) to ScalarFunction? 
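+    # _prepare_scalar_function wraps fun/jac/hess in a ScalarFunction object
+    # that caches evaluations and tracks the nfev/ngev counters reported in
+    # the final OptimizeResult.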
+ sf = _prepare_scalar_function( + fun, x0, jac, args=args, epsilon=eps, hess=hess + ) + f = sf.fun + fprime = sf.grad + _h = sf.hess(x0) + + # Logic for hess/hessp + # - If a callable(hess) is provided, then use that + # - If hess is a FD_METHOD, or the output from hess(x) is a LinearOperator + # then create a hessp function using those. + # - If hess is None but you have callable(hessp) then use the hessp. + # - If hess and hessp are None then approximate hessp using the grad/jac. + + if (hess in FD_METHODS or isinstance(_h, LinearOperator)): + fhess = None + + def _hessp(x, p, *args): + return sf.hess(x).dot(p) + + fhess_p = _hessp + + def terminate(warnflag, msg): + if disp: + _print_success_message_or_warn(warnflag, msg) + print(" Current function value: %f" % old_fval) + print(" Iterations: %d" % k) + print(" Function evaluations: %d" % sf.nfev) + print(" Gradient evaluations: %d" % sf.ngev) + print(" Hessian evaluations: %d" % hcalls) + fval = old_fval + result = OptimizeResult(fun=fval, jac=gfk, nfev=sf.nfev, + njev=sf.ngev, nhev=hcalls, status=warnflag, + success=(warnflag == 0), message=msg, x=xk, + nit=k) + if retall: + result['allvecs'] = allvecs + return result + + hcalls = 0 + if maxiter is None: + maxiter = len(x0)*200 + cg_maxiter = 20*len(x0) + + xtol = len(x0) * avextol + # Make sure we enter the while loop. + update_l1norm = np.finfo(float).max + xk = np.copy(x0) + if retall: + allvecs = [xk] + k = 0 + gfk = None + old_fval = f(x0) + old_old_fval = None + float64eps = np.finfo(np.float64).eps + while update_l1norm > xtol: + if k >= maxiter: + msg = "Warning: " + _status_message['maxiter'] + return terminate(1, msg) + # Compute a search direction pk by applying the CG method to + # del2 f(xk) p = - grad f(xk) starting from 0. + b = -fprime(xk) + maggrad = np.linalg.norm(b, ord=1) + eta = min(0.5, math.sqrt(maggrad)) + termcond = eta * maggrad + xsupi = zeros(len(x0), dtype=x0.dtype) + ri = -b + psupi = -ri + i = 0 + dri0 = np.dot(ri, ri) + + if fhess is not None: # you want to compute hessian once. + A = sf.hess(xk) + hcalls += 1 + + for k2 in range(cg_maxiter): + if np.add.reduce(np.abs(ri)) <= termcond: + break + if fhess is None: + if fhess_p is None: + Ap = approx_fhess_p(xk, psupi, fprime, epsilon) + else: + Ap = fhess_p(xk, psupi, *args) + hcalls += 1 + else: + # hess was supplied as a callable or hessian update strategy, so + # A is a dense numpy array or sparse matrix + Ap = A.dot(psupi) + # check curvature + Ap = asarray(Ap).squeeze() # get rid of matrices... + curv = np.dot(psupi, Ap) + if 0 <= curv <= 3 * float64eps: + break + elif curv < 0: + if (i > 0): + break + else: + # fall back to steepest descent direction + xsupi = dri0 / (-curv) * b + break + alphai = dri0 / curv + xsupi += alphai * psupi + ri += alphai * Ap + dri1 = np.dot(ri, ri) + betai = dri1 / dri0 + psupi = -ri + betai * psupi + i += 1 + dri0 = dri1 # update np.dot(ri,ri) for next time. + else: + # curvature keeps increasing, bail out + msg = ("Warning: CG iterations didn't converge. The Hessian is not " + "positive definite.") + return terminate(3, msg) + + pk = xsupi # search direction is solution to system. + gfk = -b # gradient at xk + + try: + alphak, fc, gc, old_fval, old_old_fval, gfkp1 = \ + _line_search_wolfe12(f, fprime, xk, pk, gfk, + old_fval, old_old_fval, c1=c1, c2=c2) + except _LineSearchError: + # Line search failed to find a better solution. 
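+            # Report the failure as precision loss (warnflag 2) and return the
+            # current iterate via terminate().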
+ msg = "Warning: " + _status_message['pr_loss'] + return terminate(2, msg) + + update = alphak * pk + xk += update # upcast if necessary + if retall: + allvecs.append(xk) + k += 1 + intermediate_result = OptimizeResult(x=xk, fun=old_fval) + if _call_callback_maybe_halt(callback, intermediate_result): + return terminate(5, "") + update_l1norm = np.linalg.norm(update, ord=1) + + else: + if np.isnan(old_fval) or np.isnan(update_l1norm): + return terminate(3, _status_message['nan']) + + msg = _status_message['success'] + return terminate(0, msg) + + +def fminbound(func, x1, x2, args=(), xtol=1e-5, maxfun=500, + full_output=0, disp=1): + """Bounded minimization for scalar functions. + + Parameters + ---------- + func : callable f(x,*args) + Objective function to be minimized (must accept and return scalars). + x1, x2 : float or array scalar + Finite optimization bounds. + args : tuple, optional + Extra arguments passed to function. + xtol : float, optional + The convergence tolerance. + maxfun : int, optional + Maximum number of function evaluations allowed. + full_output : bool, optional + If True, return optional outputs. + disp : int, optional + If non-zero, print messages. + 0 : no message printing. + 1 : non-convergence notification messages only. + 2 : print a message on convergence too. + 3 : print iteration results. + + + Returns + ------- + xopt : ndarray + Parameters (over given interval) which minimize the + objective function. + fval : number + (Optional output) The function value evaluated at the minimizer. + ierr : int + (Optional output) An error flag (0 if converged, 1 if maximum number of + function calls reached). + numfunc : int + (Optional output) The number of function calls made. + + See also + -------- + minimize_scalar: Interface to minimization algorithms for scalar + univariate functions. See the 'Bounded' `method` in particular. + + Notes + ----- + Finds a local minimizer of the scalar function `func` in the + interval x1 < xopt < x2 using Brent's method. (See `brent` + for auto-bracketing.) + + References + ---------- + .. [1] Forsythe, G.E., M. A. Malcolm, and C. B. Moler. "Computer Methods + for Mathematical Computations." Prentice-Hall Series in Automatic + Computation 259 (1977). + .. [2] Brent, Richard P. Algorithms for Minimization Without Derivatives. + Courier Corporation, 2013. + + Examples + -------- + `fminbound` finds the minimizer of the function in the given range. + The following examples illustrate this. + + >>> from scipy import optimize + >>> def f(x): + ... return (x-1)**2 + >>> minimizer = optimize.fminbound(f, -4, 4) + >>> minimizer + 1.0 + >>> minimum = f(minimizer) + >>> minimum + 0.0 + >>> res = optimize.fminbound(f, 3, 4, full_output=True) + >>> minimizer, fval, ierr, numfunc = res + >>> minimizer + 3.000005960860986 + >>> minimum = f(minimizer) + >>> minimum, fval + (4.000023843479476, 4.000023843479476) + """ + options = {'xatol': xtol, + 'maxiter': maxfun, + 'disp': disp} + + res = _minimize_scalar_bounded(func, (x1, x2), args, **options) + if full_output: + return res['x'], res['fun'], res['status'], res['nfev'] + else: + return res['x'] + + +def _minimize_scalar_bounded(func, bounds, args=(), + xatol=1e-5, maxiter=500, disp=0, + **unknown_options): + """ + Options + ------- + maxiter : int + Maximum number of iterations to perform. + disp: int, optional + If non-zero, print messages. + 0 : no message printing. + 1 : non-convergence notification messages only. + 2 : print a message on convergence too. + 3 : print iteration results. 
+ xatol : float + Absolute error in solution `xopt` acceptable for convergence. + + """ + _check_unknown_options(unknown_options) + maxfun = maxiter + # Test bounds are of correct form + if len(bounds) != 2: + raise ValueError('bounds must have two elements.') + x1, x2 = bounds + + if not (is_finite_scalar(x1) and is_finite_scalar(x2)): + raise ValueError("Optimization bounds must be finite scalars.") + + if x1 > x2: + raise ValueError("The lower bound exceeds the upper bound.") + + flag = 0 + header = ' Func-count x f(x) Procedure' + step = ' initial' + + sqrt_eps = sqrt(2.2e-16) + golden_mean = 0.5 * (3.0 - sqrt(5.0)) + a, b = x1, x2 + fulc = a + golden_mean * (b - a) + nfc, xf = fulc, fulc + rat = e = 0.0 + x = xf + fx = func(x, *args) + num = 1 + fmin_data = (1, xf, fx) + fu = np.inf + + ffulc = fnfc = fx + xm = 0.5 * (a + b) + tol1 = sqrt_eps * np.abs(xf) + xatol / 3.0 + tol2 = 2.0 * tol1 + + if disp > 2: + print(" ") + print(header) + print("%5.0f %12.6g %12.6g %s" % (fmin_data + (step,))) + + while (np.abs(xf - xm) > (tol2 - 0.5 * (b - a))): + golden = 1 + # Check for parabolic fit + if np.abs(e) > tol1: + golden = 0 + r = (xf - nfc) * (fx - ffulc) + q = (xf - fulc) * (fx - fnfc) + p = (xf - fulc) * q - (xf - nfc) * r + q = 2.0 * (q - r) + if q > 0.0: + p = -p + q = np.abs(q) + r = e + e = rat + + # Check for acceptability of parabola + if ((np.abs(p) < np.abs(0.5*q*r)) and (p > q*(a - xf)) and + (p < q * (b - xf))): + rat = (p + 0.0) / q + x = xf + rat + step = ' parabolic' + + if ((x - a) < tol2) or ((b - x) < tol2): + si = np.sign(xm - xf) + ((xm - xf) == 0) + rat = tol1 * si + else: # do a golden-section step + golden = 1 + + if golden: # do a golden-section step + if xf >= xm: + e = a - xf + else: + e = b - xf + rat = golden_mean*e + step = ' golden' + + si = np.sign(rat) + (rat == 0) + x = xf + si * np.maximum(np.abs(rat), tol1) + fu = func(x, *args) + num += 1 + fmin_data = (num, x, fu) + if disp > 2: + print("%5.0f %12.6g %12.6g %s" % (fmin_data + (step,))) + + if fu <= fx: + if x >= xf: + a = xf + else: + b = xf + fulc, ffulc = nfc, fnfc + nfc, fnfc = xf, fx + xf, fx = x, fu + else: + if x < xf: + a = x + else: + b = x + if (fu <= fnfc) or (nfc == xf): + fulc, ffulc = nfc, fnfc + nfc, fnfc = x, fu + elif (fu <= ffulc) or (fulc == xf) or (fulc == nfc): + fulc, ffulc = x, fu + + xm = 0.5 * (a + b) + tol1 = sqrt_eps * np.abs(xf) + xatol / 3.0 + tol2 = 2.0 * tol1 + + if num >= maxfun: + flag = 1 + break + + if np.isnan(xf) or np.isnan(fx) or np.isnan(fu): + flag = 2 + + fval = fx + if disp > 0: + _endprint(x, flag, fval, maxfun, xatol, disp) + + result = OptimizeResult(fun=fval, status=flag, success=(flag == 0), + message={0: 'Solution found.', + 1: 'Maximum number of function calls ' + 'reached.', + 2: _status_message['nan']}.get(flag, ''), + x=xf, nfev=num, nit=num) + + return result + + +class Brent: + #need to rethink design of __init__ + def __init__(self, func, args=(), tol=1.48e-8, maxiter=500, + full_output=0, disp=0): + self.func = func + self.args = args + self.tol = tol + self.maxiter = maxiter + self._mintol = 1.0e-11 + self._cg = 0.3819660 + self.xmin = None + self.fval = None + self.iter = 0 + self.funcalls = 0 + self.disp = disp + + # need to rethink design of set_bracket (new options, etc.) 
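+    # set_bracket only records the user-supplied bracket; validation and, if
+    # necessary, automatic bracketing are deferred to get_bracket_info().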
+ def set_bracket(self, brack=None): + self.brack = brack + + def get_bracket_info(self): + #set up + func = self.func + args = self.args + brack = self.brack + ### BEGIN core bracket_info code ### + ### carefully DOCUMENT any CHANGES in core ## + if brack is None: + xa, xb, xc, fa, fb, fc, funcalls = bracket(func, args=args) + elif len(brack) == 2: + xa, xb, xc, fa, fb, fc, funcalls = bracket(func, xa=brack[0], + xb=brack[1], args=args) + elif len(brack) == 3: + xa, xb, xc = brack + if (xa > xc): # swap so xa < xc can be assumed + xc, xa = xa, xc + if not ((xa < xb) and (xb < xc)): + raise ValueError( + "Bracketing values (xa, xb, xc) do not" + " fulfill this requirement: (xa < xb) and (xb < xc)" + ) + fa = func(*((xa,) + args)) + fb = func(*((xb,) + args)) + fc = func(*((xc,) + args)) + if not ((fb < fa) and (fb < fc)): + raise ValueError( + "Bracketing values (xa, xb, xc) do not fulfill" + " this requirement: (f(xb) < f(xa)) and (f(xb) < f(xc))" + ) + + funcalls = 3 + else: + raise ValueError("Bracketing interval must be " + "length 2 or 3 sequence.") + ### END core bracket_info code ### + + return xa, xb, xc, fa, fb, fc, funcalls + + def optimize(self): + # set up for optimization + func = self.func + xa, xb, xc, fa, fb, fc, funcalls = self.get_bracket_info() + _mintol = self._mintol + _cg = self._cg + ################################# + #BEGIN CORE ALGORITHM + ################################# + x = w = v = xb + fw = fv = fx = fb + if (xa < xc): + a = xa + b = xc + else: + a = xc + b = xa + deltax = 0.0 + iter = 0 + + if self.disp > 2: + print(" ") + print(f"{'Func-count':^12} {'x':^12} {'f(x)': ^12}") + print(f"{funcalls:^12g} {x:^12.6g} {fx:^12.6g}") + + while (iter < self.maxiter): + tol1 = self.tol * np.abs(x) + _mintol + tol2 = 2.0 * tol1 + xmid = 0.5 * (a + b) + # check for convergence + if np.abs(x - xmid) < (tol2 - 0.5 * (b - a)): + break + # XXX In the first iteration, rat is only bound in the true case + # of this conditional. This used to cause an UnboundLocalError + # (gh-4140). It should be set before the if (but to what?). + if (np.abs(deltax) <= tol1): + if (x >= xmid): + deltax = a - x # do a golden section step + else: + deltax = b - x + rat = _cg * deltax + else: # do a parabolic step + tmp1 = (x - w) * (fx - fv) + tmp2 = (x - v) * (fx - fw) + p = (x - v) * tmp2 - (x - w) * tmp1 + tmp2 = 2.0 * (tmp2 - tmp1) + if (tmp2 > 0.0): + p = -p + tmp2 = np.abs(tmp2) + dx_temp = deltax + deltax = rat + # check parabolic fit + if ((p > tmp2 * (a - x)) and (p < tmp2 * (b - x)) and + (np.abs(p) < np.abs(0.5 * tmp2 * dx_temp))): + rat = p * 1.0 / tmp2 # if parabolic step is useful. 
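+                    # Tentatively accept the parabolic step; it is replaced by
+                    # a minimum move of +/- tol1 below if the trial point u
+                    # falls within tol2 of either bound.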
+ u = x + rat + if ((u - a) < tol2 or (b - u) < tol2): + if xmid - x >= 0: + rat = tol1 + else: + rat = -tol1 + else: + if (x >= xmid): + deltax = a - x # if it's not do a golden section step + else: + deltax = b - x + rat = _cg * deltax + + if (np.abs(rat) < tol1): # update by at least tol1 + if rat >= 0: + u = x + tol1 + else: + u = x - tol1 + else: + u = x + rat + fu = func(*((u,) + self.args)) # calculate new output value + funcalls += 1 + + if (fu > fx): # if it's bigger than current + if (u < x): + a = u + else: + b = u + if (fu <= fw) or (w == x): + v = w + w = u + fv = fw + fw = fu + elif (fu <= fv) or (v == x) or (v == w): + v = u + fv = fu + else: + if (u >= x): + a = x + else: + b = x + v = w + w = x + x = u + fv = fw + fw = fx + fx = fu + + if self.disp > 2: + print(f"{funcalls:^12g} {x:^12.6g} {fx:^12.6g}") + + iter += 1 + ################################# + #END CORE ALGORITHM + ################################# + + self.xmin = x + self.fval = fx + self.iter = iter + self.funcalls = funcalls + + def get_result(self, full_output=False): + if full_output: + return self.xmin, self.fval, self.iter, self.funcalls + else: + return self.xmin + + +def brent(func, args=(), brack=None, tol=1.48e-8, full_output=0, maxiter=500): + """ + Given a function of one variable and a possible bracket, return + a local minimizer of the function isolated to a fractional precision + of tol. + + Parameters + ---------- + func : callable f(x,*args) + Objective function. + args : tuple, optional + Additional arguments (if present). + brack : tuple, optional + Either a triple ``(xa, xb, xc)`` satisfying ``xa < xb < xc`` and + ``func(xb) < func(xa) and func(xb) < func(xc)``, or a pair + ``(xa, xb)`` to be used as initial points for a downhill bracket search + (see `scipy.optimize.bracket`). + The minimizer ``x`` will not necessarily satisfy ``xa <= x <= xb``. + tol : float, optional + Relative error in solution `xopt` acceptable for convergence. + full_output : bool, optional + If True, return all output args (xmin, fval, iter, + funcalls). + maxiter : int, optional + Maximum number of iterations in solution. + + Returns + ------- + xmin : ndarray + Optimum point. + fval : float + (Optional output) Optimum function value. + iter : int + (Optional output) Number of iterations. + funcalls : int + (Optional output) Number of objective function evaluations made. + + See also + -------- + minimize_scalar: Interface to minimization algorithms for scalar + univariate functions. See the 'Brent' `method` in particular. + + Notes + ----- + Uses inverse parabolic interpolation when possible to speed up + convergence of golden section method. + + Does not ensure that the minimum lies in the range specified by + `brack`. See `scipy.optimize.fminbound`. + + Examples + -------- + We illustrate the behaviour of the function when `brack` is of + size 2 and 3 respectively. In the case where `brack` is of the + form ``(xa, xb)``, we can see for the given values, the output does + not necessarily lie in the range ``(xa, xb)``. + + >>> def f(x): + ... 
return (x-1)**2 + + >>> from scipy import optimize + + >>> minimizer = optimize.brent(f, brack=(1, 2)) + >>> minimizer + 1 + >>> res = optimize.brent(f, brack=(-1, 0.5, 2), full_output=True) + >>> xmin, fval, iter, funcalls = res + >>> f(xmin), fval + (0.0, 0.0) + + """ + options = {'xtol': tol, + 'maxiter': maxiter} + res = _minimize_scalar_brent(func, brack, args, **options) + if full_output: + return res['x'], res['fun'], res['nit'], res['nfev'] + else: + return res['x'] + + +def _minimize_scalar_brent(func, brack=None, args=(), xtol=1.48e-8, + maxiter=500, disp=0, + **unknown_options): + """ + Options + ------- + maxiter : int + Maximum number of iterations to perform. + xtol : float + Relative error in solution `xopt` acceptable for convergence. + disp: int, optional + If non-zero, print messages. + 0 : no message printing. + 1 : non-convergence notification messages only. + 2 : print a message on convergence too. + 3 : print iteration results. + Notes + ----- + Uses inverse parabolic interpolation when possible to speed up + convergence of golden section method. + + """ + _check_unknown_options(unknown_options) + tol = xtol + if tol < 0: + raise ValueError('tolerance should be >= 0, got %r' % tol) + + brent = Brent(func=func, args=args, tol=tol, + full_output=True, maxiter=maxiter, disp=disp) + brent.set_bracket(brack) + brent.optimize() + x, fval, nit, nfev = brent.get_result(full_output=True) + + success = nit < maxiter and not (np.isnan(x) or np.isnan(fval)) + + if success: + message = ("\nOptimization terminated successfully;\n" + "The returned value satisfies the termination criteria\n" + f"(using xtol = {xtol} )") + else: + if nit >= maxiter: + message = "\nMaximum number of iterations exceeded" + if np.isnan(x) or np.isnan(fval): + message = f"{_status_message['nan']}" + + if disp: + _print_success_message_or_warn(not success, message) + + return OptimizeResult(fun=fval, x=x, nit=nit, nfev=nfev, + success=success, message=message) + + +def golden(func, args=(), brack=None, tol=_epsilon, + full_output=0, maxiter=5000): + """ + Return the minimizer of a function of one variable using the golden section + method. + + Given a function of one variable and a possible bracketing interval, + return a minimizer of the function isolated to a fractional precision of + tol. + + Parameters + ---------- + func : callable func(x,*args) + Objective function to minimize. + args : tuple, optional + Additional arguments (if present), passed to func. + brack : tuple, optional + Either a triple ``(xa, xb, xc)`` where ``xa < xb < xc`` and + ``func(xb) < func(xa) and func(xb) < func(xc)``, or a pair (xa, xb) + to be used as initial points for a downhill bracket search (see + `scipy.optimize.bracket`). + The minimizer ``x`` will not necessarily satisfy ``xa <= x <= xb``. + tol : float, optional + x tolerance stop criterion + full_output : bool, optional + If True, return optional outputs. + maxiter : int + Maximum number of iterations to perform. + + Returns + ------- + xmin : ndarray + Optimum point. + fval : float + (Optional output) Optimum function value. + funcalls : int + (Optional output) Number of objective function evaluations made. + + See also + -------- + minimize_scalar: Interface to minimization algorithms for scalar + univariate functions. See the 'Golden' `method` in particular. + + Notes + ----- + Uses analog of bisection method to decrease the bracketed + interval. + + Examples + -------- + We illustrate the behaviour of the function when `brack` is of + size 2 and 3, respectively. 
In the case where `brack` is of the + form (xa,xb), we can see for the given values, the output need + not necessarily lie in the range ``(xa, xb)``. + + >>> def f(x): + ... return (x-1)**2 + + >>> from scipy import optimize + + >>> minimizer = optimize.golden(f, brack=(1, 2)) + >>> minimizer + 1 + >>> res = optimize.golden(f, brack=(-1, 0.5, 2), full_output=True) + >>> xmin, fval, funcalls = res + >>> f(xmin), fval + (9.925165290385052e-18, 9.925165290385052e-18) + + """ + options = {'xtol': tol, 'maxiter': maxiter} + res = _minimize_scalar_golden(func, brack, args, **options) + if full_output: + return res['x'], res['fun'], res['nfev'] + else: + return res['x'] + + +def _minimize_scalar_golden(func, brack=None, args=(), + xtol=_epsilon, maxiter=5000, disp=0, + **unknown_options): + """ + Options + ------- + xtol : float + Relative error in solution `xopt` acceptable for convergence. + maxiter : int + Maximum number of iterations to perform. + disp: int, optional + If non-zero, print messages. + 0 : no message printing. + 1 : non-convergence notification messages only. + 2 : print a message on convergence too. + 3 : print iteration results. + """ + _check_unknown_options(unknown_options) + tol = xtol + if brack is None: + xa, xb, xc, fa, fb, fc, funcalls = bracket(func, args=args) + elif len(brack) == 2: + xa, xb, xc, fa, fb, fc, funcalls = bracket(func, xa=brack[0], + xb=brack[1], args=args) + elif len(brack) == 3: + xa, xb, xc = brack + if (xa > xc): # swap so xa < xc can be assumed + xc, xa = xa, xc + if not ((xa < xb) and (xb < xc)): + raise ValueError( + "Bracketing values (xa, xb, xc) do not" + " fulfill this requirement: (xa < xb) and (xb < xc)" + ) + fa = func(*((xa,) + args)) + fb = func(*((xb,) + args)) + fc = func(*((xc,) + args)) + if not ((fb < fa) and (fb < fc)): + raise ValueError( + "Bracketing values (xa, xb, xc) do not fulfill" + " this requirement: (f(xb) < f(xa)) and (f(xb) < f(xc))" + ) + funcalls = 3 + else: + raise ValueError("Bracketing interval must be length 2 or 3 sequence.") + + _gR = 0.61803399 # golden ratio conjugate: 2.0/(1.0+sqrt(5.0)) + _gC = 1.0 - _gR + x3 = xc + x0 = xa + if (np.abs(xc - xb) > np.abs(xb - xa)): + x1 = xb + x2 = xb + _gC * (xc - xb) + else: + x2 = xb + x1 = xb - _gC * (xb - xa) + f1 = func(*((x1,) + args)) + f2 = func(*((x2,) + args)) + funcalls += 2 + nit = 0 + + if disp > 2: + print(" ") + print(f"{'Func-count':^12} {'x':^12} {'f(x)': ^12}") + + for i in range(maxiter): + if np.abs(x3 - x0) <= tol * (np.abs(x1) + np.abs(x2)): + break + if (f2 < f1): + x0 = x1 + x1 = x2 + x2 = _gR * x1 + _gC * x3 + f1 = f2 + f2 = func(*((x2,) + args)) + else: + x3 = x2 + x2 = x1 + x1 = _gR * x2 + _gC * x0 + f2 = f1 + f1 = func(*((x1,) + args)) + funcalls += 1 + if disp > 2: + if (f1 < f2): + xmin, fval = x1, f1 + else: + xmin, fval = x2, f2 + print(f"{funcalls:^12g} {xmin:^12.6g} {fval:^12.6g}") + + nit += 1 + # end of iteration loop + + if (f1 < f2): + xmin = x1 + fval = f1 + else: + xmin = x2 + fval = f2 + + success = nit < maxiter and not (np.isnan(fval) or np.isnan(xmin)) + + if success: + message = ("\nOptimization terminated successfully;\n" + "The returned value satisfies the termination criteria\n" + f"(using xtol = {xtol} )") + else: + if nit >= maxiter: + message = "\nMaximum number of iterations exceeded" + if np.isnan(xmin) or np.isnan(fval): + message = f"{_status_message['nan']}" + + if disp: + _print_success_message_or_warn(not success, message) + + return OptimizeResult(fun=fval, nfev=funcalls, x=xmin, nit=nit, + success=success, 
message=message) + + +def bracket(func, xa=0.0, xb=1.0, args=(), grow_limit=110.0, maxiter=1000): + """ + Bracket the minimum of a function. + + Given a function and distinct initial points, search in the + downhill direction (as defined by the initial points) and return + three points that bracket the minimum of the function. + + Parameters + ---------- + func : callable f(x,*args) + Objective function to minimize. + xa, xb : float, optional + Initial points. Defaults `xa` to 0.0, and `xb` to 1.0. + A local minimum need not be contained within this interval. + args : tuple, optional + Additional arguments (if present), passed to `func`. + grow_limit : float, optional + Maximum grow limit. Defaults to 110.0 + maxiter : int, optional + Maximum number of iterations to perform. Defaults to 1000. + + Returns + ------- + xa, xb, xc : float + Final points of the bracket. + fa, fb, fc : float + Objective function values at the bracket points. + funcalls : int + Number of function evaluations made. + + Raises + ------ + BracketError + If no valid bracket is found before the algorithm terminates. + See notes for conditions of a valid bracket. + + Notes + ----- + The algorithm attempts to find three strictly ordered points (i.e. + :math:`x_a < x_b < x_c` or :math:`x_c < x_b < x_a`) satisfying + :math:`f(x_b) ≤ f(x_a)` and :math:`f(x_b) ≤ f(x_c)`, where one of the + inequalities must be satistfied strictly and all :math:`x_i` must be + finite. + + Examples + -------- + This function can find a downward convex region of a function: + + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.optimize import bracket + >>> def f(x): + ... return 10*x**2 + 3*x + 5 + >>> x = np.linspace(-2, 2) + >>> y = f(x) + >>> init_xa, init_xb = 0.1, 1 + >>> xa, xb, xc, fa, fb, fc, funcalls = bracket(f, xa=init_xa, xb=init_xb) + >>> plt.axvline(x=init_xa, color="k", linestyle="--") + >>> plt.axvline(x=init_xb, color="k", linestyle="--") + >>> plt.plot(x, y, "-k") + >>> plt.plot(xa, fa, "bx") + >>> plt.plot(xb, fb, "rx") + >>> plt.plot(xc, fc, "bx") + >>> plt.show() + + Note that both initial points were to the right of the minimum, and the + third point was found in the "downhill" direction: the direction + in which the function appeared to be decreasing (to the left). + The final points are strictly ordered, and the function value + at the middle point is less than the function values at the endpoints; + it follows that a minimum must lie within the bracket. + + """ + _gold = 1.618034 # golden ratio: (1.0+sqrt(5.0))/2.0 + _verysmall_num = 1e-21 + # convert to numpy floats if not already + xa, xb = np.asarray([xa, xb]) + fa = func(*(xa,) + args) + fb = func(*(xb,) + args) + if (fa < fb): # Switch so fa > fb + xa, xb = xb, xa + fa, fb = fb, fa + xc = xb + _gold * (xb - xa) + fc = func(*((xc,) + args)) + funcalls = 3 + iter = 0 + while (fc < fb): + tmp1 = (xb - xa) * (fb - fc) + tmp2 = (xb - xc) * (fb - fa) + val = tmp2 - tmp1 + if np.abs(val) < _verysmall_num: + denom = 2.0 * _verysmall_num + else: + denom = 2.0 * val + w = xb - ((xb - xc) * tmp2 - (xb - xa) * tmp1) / denom + wlim = xb + grow_limit * (xc - xb) + msg = ("No valid bracket was found before the iteration limit was " + "reached. 
Consider trying different initial points or " + "increasing `maxiter`.") + if iter > maxiter: + raise RuntimeError(msg) + iter += 1 + if (w - xc) * (xb - w) > 0.0: + fw = func(*((w,) + args)) + funcalls += 1 + if (fw < fc): + xa = xb + xb = w + fa = fb + fb = fw + break + elif (fw > fb): + xc = w + fc = fw + break + w = xc + _gold * (xc - xb) + fw = func(*((w,) + args)) + funcalls += 1 + elif (w - wlim)*(wlim - xc) >= 0.0: + w = wlim + fw = func(*((w,) + args)) + funcalls += 1 + elif (w - wlim)*(xc - w) > 0.0: + fw = func(*((w,) + args)) + funcalls += 1 + if (fw < fc): + xb = xc + xc = w + w = xc + _gold * (xc - xb) + fb = fc + fc = fw + fw = func(*((w,) + args)) + funcalls += 1 + else: + w = xc + _gold * (xc - xb) + fw = func(*((w,) + args)) + funcalls += 1 + xa = xb + xb = xc + xc = w + fa = fb + fb = fc + fc = fw + + # three conditions for a valid bracket + cond1 = (fb < fc and fb <= fa) or (fb < fa and fb <= fc) + cond2 = (xa < xb < xc or xc < xb < xa) + cond3 = np.isfinite(xa) and np.isfinite(xb) and np.isfinite(xc) + msg = ("The algorithm terminated without finding a valid bracket. " + "Consider trying different initial points.") + if not (cond1 and cond2 and cond3): + e = BracketError(msg) + e.data = (xa, xb, xc, fa, fb, fc, funcalls) + raise e + + return xa, xb, xc, fa, fb, fc, funcalls + + +class BracketError(RuntimeError): + pass + + +def _recover_from_bracket_error(solver, fun, bracket, args, **options): + # `bracket` was originally written without checking whether the resulting + # bracket is valid. `brent` and `golden` built on top of it without + # checking the returned bracket for validity, and their output can be + # incorrect without warning/error if the original bracket is invalid. + # gh-14858 noticed the problem, and the following is the desired + # behavior: + # - `scipy.optimize.bracket`, `scipy.optimize.brent`, and + # `scipy.optimize.golden` should raise an error if the bracket is + # invalid, as opposed to silently returning garbage + # - `scipy.optimize.minimize_scalar` should return with `success=False` + # and other information + # The changes that would be required to achieve this the traditional + # way (`return`ing all the required information from bracket all the way + # up to `minimizer_scalar`) are extensive and invasive. (See a6aa40d.) + # We can achieve the same thing by raising the error in `bracket`, but + # storing the information needed by `minimize_scalar` in the error object, + # and intercepting it here. + try: + res = solver(fun, bracket, args, **options) + except BracketError as e: + msg = str(e) + xa, xb, xc, fa, fb, fc, funcalls = e.data + xs, fs = [xa, xb, xc], [fa, fb, fc] + if np.any(np.isnan([xs, fs])): + x, fun = np.nan, np.nan + else: + imin = np.argmin(fs) + x, fun = xs[imin], fs[imin] + return OptimizeResult(fun=fun, nfev=funcalls, x=x, + nit=0, success=False, message=msg) + return res + + +def _line_for_search(x0, alpha, lower_bound, upper_bound): + """ + Given a parameter vector ``x0`` with length ``n`` and a direction + vector ``alpha`` with length ``n``, and lower and upper bounds on + each of the ``n`` parameters, what are the bounds on a scalar + ``l`` such that ``lower_bound <= x0 + alpha * l <= upper_bound``. + + + Parameters + ---------- + x0 : np.array. + The vector representing the current location. + Note ``np.shape(x0) == (n,)``. + alpha : np.array. + The vector representing the direction. + Note ``np.shape(alpha) == (n,)``. + lower_bound : np.array. + The lower bounds for each parameter in ``x0``. 
If the ``i``th + parameter in ``x0`` is unbounded below, then ``lower_bound[i]`` + should be ``-np.inf``. + Note ``np.shape(lower_bound) == (n,)``. + upper_bound : np.array. + The upper bounds for each parameter in ``x0``. If the ``i``th + parameter in ``x0`` is unbounded above, then ``upper_bound[i]`` + should be ``np.inf``. + Note ``np.shape(upper_bound) == (n,)``. + + Returns + ------- + res : tuple ``(lmin, lmax)`` + The bounds for ``l`` such that + ``lower_bound[i] <= x0[i] + alpha[i] * l <= upper_bound[i]`` + for all ``i``. + + """ + # get nonzero indices of alpha so we don't get any zero division errors. + # alpha will not be all zero, since it is called from _linesearch_powell + # where we have a check for this. + nonzero, = alpha.nonzero() + lower_bound, upper_bound = lower_bound[nonzero], upper_bound[nonzero] + x0, alpha = x0[nonzero], alpha[nonzero] + low = (lower_bound - x0) / alpha + high = (upper_bound - x0) / alpha + + # positive and negative indices + pos = alpha > 0 + + lmin_pos = np.where(pos, low, 0) + lmin_neg = np.where(pos, 0, high) + lmax_pos = np.where(pos, high, 0) + lmax_neg = np.where(pos, 0, low) + + lmin = np.max(lmin_pos + lmin_neg) + lmax = np.min(lmax_pos + lmax_neg) + + # if x0 is outside the bounds, then it is possible that there is + # no way to get back in the bounds for the parameters being updated + # with the current direction alpha. + # when this happens, lmax < lmin. + # If this is the case, then we can just return (0, 0) + return (lmin, lmax) if lmax >= lmin else (0, 0) + + +def _linesearch_powell(func, p, xi, tol=1e-3, + lower_bound=None, upper_bound=None, fval=None): + """Line-search algorithm using fminbound. + + Find the minimum of the function ``func(x0 + alpha*direc)``. + + lower_bound : np.array. + The lower bounds for each parameter in ``x0``. If the ``i``th + parameter in ``x0`` is unbounded below, then ``lower_bound[i]`` + should be ``-np.inf``. + Note ``np.shape(lower_bound) == (n,)``. + upper_bound : np.array. + The upper bounds for each parameter in ``x0``. If the ``i``th + parameter in ``x0`` is unbounded above, then ``upper_bound[i]`` + should be ``np.inf``. + Note ``np.shape(upper_bound) == (n,)``. + fval : number. + ``fval`` is equal to ``func(p)``, the idea is just to avoid + recomputing it so we can limit the ``fevals``. + + """ + def myfunc(alpha): + return func(p + alpha*xi) + + # if xi is zero, then don't optimize + if not np.any(xi): + return ((fval, p, xi) if fval is not None else (func(p), p, xi)) + elif lower_bound is None and upper_bound is None: + # non-bounded minimization + res = _recover_from_bracket_error(_minimize_scalar_brent, + myfunc, None, tuple(), xtol=tol) + alpha_min, fret = res.x, res.fun + xi = alpha_min * xi + return squeeze(fret), p + xi, xi + else: + bound = _line_for_search(p, xi, lower_bound, upper_bound) + if np.isneginf(bound[0]) and np.isposinf(bound[1]): + # equivalent to unbounded + return _linesearch_powell(func, p, xi, fval=fval, tol=tol) + elif not np.isneginf(bound[0]) and not np.isposinf(bound[1]): + # we can use a bounded scalar minimization + res = _minimize_scalar_bounded(myfunc, bound, xatol=tol / 100) + xi = res.x * xi + return squeeze(res.fun), p + xi, xi + else: + # only bounded on one side. use the tangent function to convert + # the infinity bound to a finite bound. The new bounded region + # is a subregion of the region bounded by -np.pi/2 and np.pi/2. 
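+            # arctan maps the finite bound into (-pi/2, pi/2) and the infinite
+            # bound to +/- pi/2; since tan is strictly increasing on that
+            # interval, minimizing myfunc(tan(x)) over the transformed bounds
+            # is equivalent to the original one-sided-bounded search, and the
+            # step along xi is recovered with np.tan(res.x) below.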
+ bound = np.arctan(bound[0]), np.arctan(bound[1]) + res = _minimize_scalar_bounded( + lambda x: myfunc(np.tan(x)), + bound, + xatol=tol / 100) + xi = np.tan(res.x) * xi + return squeeze(res.fun), p + xi, xi + + +def fmin_powell(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, + maxfun=None, full_output=0, disp=1, retall=0, callback=None, + direc=None): + """ + Minimize a function using modified Powell's method. + + This method only uses function values, not derivatives. + + Parameters + ---------- + func : callable f(x,*args) + Objective function to be minimized. + x0 : ndarray + Initial guess. + args : tuple, optional + Extra arguments passed to func. + xtol : float, optional + Line-search error tolerance. + ftol : float, optional + Relative error in ``func(xopt)`` acceptable for convergence. + maxiter : int, optional + Maximum number of iterations to perform. + maxfun : int, optional + Maximum number of function evaluations to make. + full_output : bool, optional + If True, ``fopt``, ``xi``, ``direc``, ``iter``, ``funcalls``, and + ``warnflag`` are returned. + disp : bool, optional + If True, print convergence messages. + retall : bool, optional + If True, return a list of the solution at each iteration. + callback : callable, optional + An optional user-supplied function, called after each + iteration. Called as ``callback(xk)``, where ``xk`` is the + current parameter vector. + direc : ndarray, optional + Initial fitting step and parameter order set as an (N, N) array, where N + is the number of fitting parameters in `x0`. Defaults to step size 1.0 + fitting all parameters simultaneously (``np.eye((N, N))``). To + prevent initial consideration of values in a step or to change initial + step size, set to 0 or desired step size in the Jth position in the Mth + block, where J is the position in `x0` and M is the desired evaluation + step, with steps being evaluated in index order. Step size and ordering + will change freely as minimization proceeds. + + Returns + ------- + xopt : ndarray + Parameter which minimizes `func`. + fopt : number + Value of function at minimum: ``fopt = func(xopt)``. + direc : ndarray + Current direction set. + iter : int + Number of iterations. + funcalls : int + Number of function calls made. + warnflag : int + Integer warning flag: + 1 : Maximum number of function evaluations. + 2 : Maximum number of iterations. + 3 : NaN result encountered. + 4 : The result is out of the provided bounds. + allvecs : list + List of solutions at each iteration. + + See also + -------- + minimize: Interface to unconstrained minimization algorithms for + multivariate functions. See the 'Powell' method in particular. + + Notes + ----- + Uses a modification of Powell's method to find the minimum of + a function of N variables. Powell's method is a conjugate + direction method. + + The algorithm has two loops. The outer loop merely iterates over the inner + loop. The inner loop minimizes over each current direction in the direction + set. At the end of the inner loop, if certain conditions are met, the + direction that gave the largest decrease is dropped and replaced with the + difference between the current estimated x and the estimated x from the + beginning of the inner-loop. + + The technical conditions for replacing the direction of greatest + increase amount to checking that + + 1. No further gain can be made along the direction of greatest increase + from that iteration. + 2. 
The direction of greatest increase accounted for a large sufficient + fraction of the decrease in the function value from that iteration of + the inner loop. + + References + ---------- + Powell M.J.D. (1964) An efficient method for finding the minimum of a + function of several variables without calculating derivatives, + Computer Journal, 7 (2):155-162. + + Press W., Teukolsky S.A., Vetterling W.T., and Flannery B.P.: + Numerical Recipes (any edition), Cambridge University Press + + Examples + -------- + >>> def f(x): + ... return x**2 + + >>> from scipy import optimize + + >>> minimum = optimize.fmin_powell(f, -1) + Optimization terminated successfully. + Current function value: 0.000000 + Iterations: 2 + Function evaluations: 16 + >>> minimum + array(0.0) + + """ + opts = {'xtol': xtol, + 'ftol': ftol, + 'maxiter': maxiter, + 'maxfev': maxfun, + 'disp': disp, + 'direc': direc, + 'return_all': retall} + + callback = _wrap_callback(callback) + res = _minimize_powell(func, x0, args, callback=callback, **opts) + + if full_output: + retlist = (res['x'], res['fun'], res['direc'], res['nit'], + res['nfev'], res['status']) + if retall: + retlist += (res['allvecs'], ) + return retlist + else: + if retall: + return res['x'], res['allvecs'] + else: + return res['x'] + + +def _minimize_powell(func, x0, args=(), callback=None, bounds=None, + xtol=1e-4, ftol=1e-4, maxiter=None, maxfev=None, + disp=False, direc=None, return_all=False, + **unknown_options): + """ + Minimization of scalar function of one or more variables using the + modified Powell algorithm. + + Parameters + ---------- + fun : callable + The objective function to be minimized. + + ``fun(x, *args) -> float`` + + where ``x`` is a 1-D array with shape (n,) and ``args`` + is a tuple of the fixed parameters needed to completely + specify the function. + x0 : ndarray, shape (n,) + Initial guess. Array of real elements of size (n,), + where ``n`` is the number of independent variables. + args : tuple, optional + Extra arguments passed to the objective function and its + derivatives (`fun`, `jac` and `hess` functions). + method : str or callable, optional + The present documentation is specific to ``method='powell'``, but other + options are available. See documentation for `scipy.optimize.minimize`. + bounds : sequence or `Bounds`, optional + Bounds on decision variables. There are two ways to specify the bounds: + + 1. Instance of `Bounds` class. + 2. Sequence of ``(min, max)`` pairs for each element in `x`. None + is used to specify no bound. + + If bounds are not provided, then an unbounded line search will be used. + If bounds are provided and the initial guess is within the bounds, then + every function evaluation throughout the minimization procedure will be + within the bounds. If bounds are provided, the initial guess is outside + the bounds, and `direc` is full rank (or left to default), then some + function evaluations during the first iteration may be outside the + bounds, but every function evaluation after the first iteration will be + within the bounds. If `direc` is not full rank, then some parameters + may not be optimized and the solution is not guaranteed to be within + the bounds. + + options : dict, optional + A dictionary of solver options. All methods accept the following + generic options: + + maxiter : int + Maximum number of iterations to perform. Depending on the + method each iteration may use several function evaluations. + disp : bool + Set to True to print convergence messages. 
+ + See method-specific options for ``method='powell'`` below. + callback : callable, optional + Called after each iteration. The signature is: + + ``callback(xk)`` + + where ``xk`` is the current parameter vector. + + Returns + ------- + res : OptimizeResult + The optimization result represented as a ``OptimizeResult`` object. + Important attributes are: ``x`` the solution array, ``success`` a + Boolean flag indicating if the optimizer exited successfully and + ``message`` which describes the cause of the termination. See + `OptimizeResult` for a description of other attributes. + + Options + ------- + disp : bool + Set to True to print convergence messages. + xtol : float + Relative error in solution `xopt` acceptable for convergence. + ftol : float + Relative error in ``fun(xopt)`` acceptable for convergence. + maxiter, maxfev : int + Maximum allowed number of iterations and function evaluations. + Will default to ``N*1000``, where ``N`` is the number of + variables, if neither `maxiter` or `maxfev` is set. If both + `maxiter` and `maxfev` are set, minimization will stop at the + first reached. + direc : ndarray + Initial set of direction vectors for the Powell method. + return_all : bool, optional + Set to True to return a list of the best solution at each of the + iterations. + """ + _check_unknown_options(unknown_options) + maxfun = maxfev + retall = return_all + + x = asarray(x0).flatten() + if retall: + allvecs = [x] + N = len(x) + # If neither are set, then set both to default + if maxiter is None and maxfun is None: + maxiter = N * 1000 + maxfun = N * 1000 + elif maxiter is None: + # Convert remaining Nones, to np.inf, unless the other is np.inf, in + # which case use the default to avoid unbounded iteration + if maxfun == np.inf: + maxiter = N * 1000 + else: + maxiter = np.inf + elif maxfun is None: + if maxiter == np.inf: + maxfun = N * 1000 + else: + maxfun = np.inf + + # we need to use a mutable object here that we can update in the + # wrapper function + fcalls, func = _wrap_scalar_function_maxfun_validation(func, args, maxfun) + + if direc is None: + direc = eye(N, dtype=float) + else: + direc = asarray(direc, dtype=float) + if np.linalg.matrix_rank(direc) != direc.shape[0]: + warnings.warn("direc input is not full rank, some parameters may " + "not be optimized", + OptimizeWarning, stacklevel=3) + + if bounds is None: + # don't make these arrays of all +/- inf. because + # _linesearch_powell will do an unnecessary check of all the elements. + # just keep them None, _linesearch_powell will not have to check + # all the elements. + lower_bound, upper_bound = None, None + else: + # bounds is standardized in _minimize.py. 
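+        # i.e. `bounds` arrives here as a `Bounds` instance, so its `lb` and
+        # `ub` attributes are used directly as the per-parameter bound arrays.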
+ lower_bound, upper_bound = bounds.lb, bounds.ub + if np.any(lower_bound > x0) or np.any(x0 > upper_bound): + warnings.warn("Initial guess is not within the specified bounds", + OptimizeWarning, stacklevel=3) + + fval = squeeze(func(x)) + x1 = x.copy() + iter = 0 + while True: + try: + fx = fval + bigind = 0 + delta = 0.0 + for i in range(N): + direc1 = direc[i] + fx2 = fval + fval, x, direc1 = _linesearch_powell(func, x, direc1, + tol=xtol * 100, + lower_bound=lower_bound, + upper_bound=upper_bound, + fval=fval) + if (fx2 - fval) > delta: + delta = fx2 - fval + bigind = i + iter += 1 + if retall: + allvecs.append(x) + intermediate_result = OptimizeResult(x=x, fun=fval) + if _call_callback_maybe_halt(callback, intermediate_result): + break + bnd = ftol * (np.abs(fx) + np.abs(fval)) + 1e-20 + if 2.0 * (fx - fval) <= bnd: + break + if fcalls[0] >= maxfun: + break + if iter >= maxiter: + break + if np.isnan(fx) and np.isnan(fval): + # Ended up in a nan-region: bail out + break + + # Construct the extrapolated point + direc1 = x - x1 + x1 = x.copy() + # make sure that we don't go outside the bounds when extrapolating + if lower_bound is None and upper_bound is None: + lmax = 1 + else: + _, lmax = _line_for_search(x, direc1, lower_bound, upper_bound) + x2 = x + min(lmax, 1) * direc1 + fx2 = squeeze(func(x2)) + + if (fx > fx2): + t = 2.0*(fx + fx2 - 2.0*fval) + temp = (fx - fval - delta) + t *= temp*temp + temp = fx - fx2 + t -= delta*temp*temp + if t < 0.0: + fval, x, direc1 = _linesearch_powell( + func, x, direc1, + tol=xtol * 100, + lower_bound=lower_bound, + upper_bound=upper_bound, + fval=fval + ) + if np.any(direc1): + direc[bigind] = direc[-1] + direc[-1] = direc1 + except _MaxFuncCallError: + break + + warnflag = 0 + msg = _status_message['success'] + # out of bounds is more urgent than exceeding function evals or iters, + # but I don't want to cause inconsistencies by changing the + # established warning flags for maxfev and maxiter, so the out of bounds + # warning flag becomes 3, but is checked for first. + if bounds and (np.any(lower_bound > x) or np.any(x > upper_bound)): + warnflag = 4 + msg = _status_message['out_of_bounds'] + elif fcalls[0] >= maxfun: + warnflag = 1 + msg = _status_message['maxfev'] + elif iter >= maxiter: + warnflag = 2 + msg = _status_message['maxiter'] + elif np.isnan(fval) or np.isnan(x).any(): + warnflag = 3 + msg = _status_message['nan'] + + if disp: + _print_success_message_or_warn(warnflag, msg, RuntimeWarning) + print(" Current function value: %f" % fval) + print(" Iterations: %d" % iter) + print(" Function evaluations: %d" % fcalls[0]) + + result = OptimizeResult(fun=fval, direc=direc, nit=iter, nfev=fcalls[0], + status=warnflag, success=(warnflag == 0), + message=msg, x=x) + if retall: + result['allvecs'] = allvecs + return result + + +def _endprint(x, flag, fval, maxfun, xtol, disp): + if flag == 0: + if disp > 1: + print("\nOptimization terminated successfully;\n" + "The returned value satisfies the termination criteria\n" + "(using xtol = ", xtol, ")") + return + + if flag == 1: + msg = ("\nMaximum number of function evaluations exceeded --- " + "increase maxfun argument.\n") + elif flag == 2: + msg = "\n{}".format(_status_message['nan']) + + _print_success_message_or_warn(flag, msg) + return + + +def brute(func, ranges, args=(), Ns=20, full_output=0, finish=fmin, + disp=False, workers=1): + """Minimize a function over a given range by brute force. 
+ + Uses the "brute force" method, i.e., computes the function's value + at each point of a multidimensional grid of points, to find the global + minimum of the function. + + The function is evaluated everywhere in the range with the datatype of the + first call to the function, as enforced by the ``vectorize`` NumPy + function. The value and type of the function evaluation returned when + ``full_output=True`` are affected in addition by the ``finish`` argument + (see Notes). + + The brute force approach is inefficient because the number of grid points + increases exponentially - the number of grid points to evaluate is + ``Ns ** len(x)``. Consequently, even with coarse grid spacing, even + moderately sized problems can take a long time to run, and/or run into + memory limitations. + + Parameters + ---------- + func : callable + The objective function to be minimized. Must be in the + form ``f(x, *args)``, where ``x`` is the argument in + the form of a 1-D array and ``args`` is a tuple of any + additional fixed parameters needed to completely specify + the function. + ranges : tuple + Each component of the `ranges` tuple must be either a + "slice object" or a range tuple of the form ``(low, high)``. + The program uses these to create the grid of points on which + the objective function will be computed. See `Note 2` for + more detail. + args : tuple, optional + Any additional fixed parameters needed to completely specify + the function. + Ns : int, optional + Number of grid points along the axes, if not otherwise + specified. See `Note2`. + full_output : bool, optional + If True, return the evaluation grid and the objective function's + values on it. + finish : callable, optional + An optimization function that is called with the result of brute force + minimization as initial guess. `finish` should take `func` and + the initial guess as positional arguments, and take `args` as + keyword arguments. It may additionally take `full_output` + and/or `disp` as keyword arguments. Use None if no "polishing" + function is to be used. See Notes for more details. + disp : bool, optional + Set to True to print convergence messages from the `finish` callable. + workers : int or map-like callable, optional + If `workers` is an int the grid is subdivided into `workers` + sections and evaluated in parallel (uses + `multiprocessing.Pool `). + Supply `-1` to use all cores available to the Process. + Alternatively supply a map-like callable, such as + `multiprocessing.Pool.map` for evaluating the grid in parallel. + This evaluation is carried out as ``workers(func, iterable)``. + Requires that `func` be pickleable. + + .. versionadded:: 1.3.0 + + Returns + ------- + x0 : ndarray + A 1-D array containing the coordinates of a point at which the + objective function had its minimum value. (See `Note 1` for + which point is returned.) + fval : float + Function value at the point `x0`. (Returned when `full_output` is + True.) + grid : tuple + Representation of the evaluation grid. It has the same + length as `x0`. (Returned when `full_output` is True.) + Jout : ndarray + Function values at each point of the evaluation + grid, i.e., ``Jout = func(*grid)``. (Returned + when `full_output` is True.) + + See Also + -------- + basinhopping, differential_evolution + + Notes + ----- + *Note 1*: The program finds the gridpoint at which the lowest value + of the objective function occurs. If `finish` is None, that is the + point returned. 
When the global minimum occurs within (or not very far + outside) the grid's boundaries, and the grid is fine enough, that + point will be in the neighborhood of the global minimum. + + However, users often employ some other optimization program to + "polish" the gridpoint values, i.e., to seek a more precise + (local) minimum near `brute's` best gridpoint. + The `brute` function's `finish` option provides a convenient way to do + that. Any polishing program used must take `brute's` output as its + initial guess as a positional argument, and take `brute's` input values + for `args` as keyword arguments, otherwise an error will be raised. + It may additionally take `full_output` and/or `disp` as keyword arguments. + + `brute` assumes that the `finish` function returns either an + `OptimizeResult` object or a tuple in the form: + ``(xmin, Jmin, ... , statuscode)``, where ``xmin`` is the minimizing + value of the argument, ``Jmin`` is the minimum value of the objective + function, "..." may be some other returned values (which are not used + by `brute`), and ``statuscode`` is the status code of the `finish` program. + + Note that when `finish` is not None, the values returned are those + of the `finish` program, *not* the gridpoint ones. Consequently, + while `brute` confines its search to the input grid points, + the `finish` program's results usually will not coincide with any + gridpoint, and may fall outside the grid's boundary. Thus, if a + minimum only needs to be found over the provided grid points, make + sure to pass in `finish=None`. + + *Note 2*: The grid of points is a `numpy.mgrid` object. + For `brute` the `ranges` and `Ns` inputs have the following effect. + Each component of the `ranges` tuple can be either a slice object or a + two-tuple giving a range of values, such as (0, 5). If the component is a + slice object, `brute` uses it directly. If the component is a two-tuple + range, `brute` internally converts it to a slice object that interpolates + `Ns` points from its low-value to its high-value, inclusive. + + Examples + -------- + We illustrate the use of `brute` to seek the global minimum of a function + of two variables that is given as the sum of a positive-definite + quadratic and two deep "Gaussian-shaped" craters. Specifically, define + the objective function `f` as the sum of three other functions, + ``f = f1 + f2 + f3``. We suppose each of these has a signature + ``(z, *params)``, where ``z = (x, y)``, and ``params`` and the functions + are as defined below. + + >>> import numpy as np + >>> params = (2, 3, 7, 8, 9, 10, 44, -1, 2, 26, 1, -2, 0.5) + >>> def f1(z, *params): + ... x, y = z + ... a, b, c, d, e, f, g, h, i, j, k, l, scale = params + ... return (a * x**2 + b * x * y + c * y**2 + d*x + e*y + f) + + >>> def f2(z, *params): + ... x, y = z + ... a, b, c, d, e, f, g, h, i, j, k, l, scale = params + ... return (-g*np.exp(-((x-h)**2 + (y-i)**2) / scale)) + + >>> def f3(z, *params): + ... x, y = z + ... a, b, c, d, e, f, g, h, i, j, k, l, scale = params + ... return (-j*np.exp(-((x-k)**2 + (y-l)**2) / scale)) + + >>> def f(z, *params): + ... return f1(z, *params) + f2(z, *params) + f3(z, *params) + + Thus, the objective function may have local minima near the minimum + of each of the three functions of which it is composed. 
To + use `fmin` to polish its gridpoint result, we may then continue as + follows: + + >>> rranges = (slice(-4, 4, 0.25), slice(-4, 4, 0.25)) + >>> from scipy import optimize + >>> resbrute = optimize.brute(f, rranges, args=params, full_output=True, + ... finish=optimize.fmin) + >>> resbrute[0] # global minimum + array([-1.05665192, 1.80834843]) + >>> resbrute[1] # function value at global minimum + -3.4085818767 + + Note that if `finish` had been set to None, we would have gotten the + gridpoint [-1.0 1.75] where the rounded function value is -2.892. + + """ + N = len(ranges) + if N > 40: + raise ValueError("Brute Force not possible with more " + "than 40 variables.") + lrange = list(ranges) + for k in range(N): + if not isinstance(lrange[k], slice): + if len(lrange[k]) < 3: + lrange[k] = tuple(lrange[k]) + (complex(Ns),) + lrange[k] = slice(*lrange[k]) + if (N == 1): + lrange = lrange[0] + + grid = np.mgrid[lrange] + + # obtain an array of parameters that is iterable by a map-like callable + inpt_shape = grid.shape + if (N > 1): + grid = np.reshape(grid, (inpt_shape[0], np.prod(inpt_shape[1:]))).T + + if not np.iterable(args): + args = (args,) + + wrapped_func = _Brute_Wrapper(func, args) + + # iterate over input arrays, possibly in parallel + with MapWrapper(pool=workers) as mapper: + Jout = np.array(list(mapper(wrapped_func, grid))) + if (N == 1): + grid = (grid,) + Jout = np.squeeze(Jout) + elif (N > 1): + Jout = np.reshape(Jout, inpt_shape[1:]) + grid = np.reshape(grid.T, inpt_shape) + + Nshape = shape(Jout) + + indx = argmin(Jout.ravel(), axis=-1) + Nindx = np.empty(N, int) + xmin = np.empty(N, float) + for k in range(N - 1, -1, -1): + thisN = Nshape[k] + Nindx[k] = indx % Nshape[k] + indx = indx // thisN + for k in range(N): + xmin[k] = grid[k][tuple(Nindx)] + + Jmin = Jout[tuple(Nindx)] + if (N == 1): + grid = grid[0] + xmin = xmin[0] + + if callable(finish): + # set up kwargs for `finish` function + finish_args = _getfullargspec(finish).args + finish_kwargs = dict() + if 'full_output' in finish_args: + finish_kwargs['full_output'] = 1 + if 'disp' in finish_args: + finish_kwargs['disp'] = disp + elif 'options' in finish_args: + # pass 'disp' as `options` + # (e.g., if `finish` is `minimize`) + finish_kwargs['options'] = {'disp': disp} + + # run minimizer + res = finish(func, xmin, args=args, **finish_kwargs) + + if isinstance(res, OptimizeResult): + xmin = res.x + Jmin = res.fun + success = res.success + else: + xmin = res[0] + Jmin = res[1] + success = res[-1] == 0 + if not success: + if disp: + warnings.warn("Either final optimization did not succeed or `finish` " + "does not return `statuscode` as its last argument.", + RuntimeWarning, stacklevel=2) + + if full_output: + return xmin, Jmin, grid, Jout + else: + return xmin + + +class _Brute_Wrapper: + """ + Object to wrap user cost function for optimize.brute, allowing picklability + """ + + def __init__(self, f, args): + self.f = f + self.args = [] if args is None else args + + def __call__(self, x): + # flatten needed for one dimensional case. + return self.f(np.asarray(x).flatten(), *self.args) + + +def show_options(solver=None, method=None, disp=True): + """ + Show documentation for additional options of optimization solvers. + + These are method-specific options that can be supplied through the + ``options`` dict. + + Parameters + ---------- + solver : str + Type of optimization solver. One of 'minimize', 'minimize_scalar', + 'root', 'root_scalar', 'linprog', or 'quadratic_assignment'. 
+ method : str, optional + If not given, shows all methods of the specified solver. Otherwise, + show only the options for the specified method. Valid values + corresponds to methods' names of respective solver (e.g., 'BFGS' for + 'minimize'). + disp : bool, optional + Whether to print the result rather than returning it. + + Returns + ------- + text + Either None (for disp=True) or the text string (disp=False) + + Notes + ----- + The solver-specific methods are: + + `scipy.optimize.minimize` + + - :ref:`Nelder-Mead ` + - :ref:`Powell ` + - :ref:`CG ` + - :ref:`BFGS ` + - :ref:`Newton-CG ` + - :ref:`L-BFGS-B ` + - :ref:`TNC ` + - :ref:`COBYLA ` + - :ref:`SLSQP ` + - :ref:`dogleg ` + - :ref:`trust-ncg ` + + `scipy.optimize.root` + + - :ref:`hybr ` + - :ref:`lm ` + - :ref:`broyden1 ` + - :ref:`broyden2 ` + - :ref:`anderson ` + - :ref:`linearmixing ` + - :ref:`diagbroyden ` + - :ref:`excitingmixing ` + - :ref:`krylov ` + - :ref:`df-sane ` + + `scipy.optimize.minimize_scalar` + + - :ref:`brent ` + - :ref:`golden ` + - :ref:`bounded ` + + `scipy.optimize.root_scalar` + + - :ref:`bisect ` + - :ref:`brentq ` + - :ref:`brenth ` + - :ref:`ridder ` + - :ref:`toms748 ` + - :ref:`newton ` + - :ref:`secant ` + - :ref:`halley ` + + `scipy.optimize.linprog` + + - :ref:`simplex ` + - :ref:`interior-point ` + - :ref:`revised simplex ` + - :ref:`highs ` + - :ref:`highs-ds ` + - :ref:`highs-ipm ` + + `scipy.optimize.quadratic_assignment` + + - :ref:`faq ` + - :ref:`2opt ` + + Examples + -------- + We can print documentations of a solver in stdout: + + >>> from scipy.optimize import show_options + >>> show_options(solver="minimize") + ... + + Specifying a method is possible: + + >>> show_options(solver="minimize", method="Nelder-Mead") + ... + + We can also get the documentations as a string: + + >>> show_options(solver="minimize", method="Nelder-Mead", disp=False) + Minimization of scalar function of one or more variables using the ... + + """ + import textwrap + + doc_routines = { + 'minimize': ( + ('bfgs', 'scipy.optimize._optimize._minimize_bfgs'), + ('cg', 'scipy.optimize._optimize._minimize_cg'), + ('cobyla', 'scipy.optimize._cobyla_py._minimize_cobyla'), + ('dogleg', 'scipy.optimize._trustregion_dogleg._minimize_dogleg'), + ('l-bfgs-b', 'scipy.optimize._lbfgsb_py._minimize_lbfgsb'), + ('nelder-mead', 'scipy.optimize._optimize._minimize_neldermead'), + ('newton-cg', 'scipy.optimize._optimize._minimize_newtoncg'), + ('powell', 'scipy.optimize._optimize._minimize_powell'), + ('slsqp', 'scipy.optimize._slsqp_py._minimize_slsqp'), + ('tnc', 'scipy.optimize._tnc._minimize_tnc'), + ('trust-ncg', + 'scipy.optimize._trustregion_ncg._minimize_trust_ncg'), + ('trust-constr', + 'scipy.optimize._trustregion_constr.' 
+ '_minimize_trustregion_constr'), + ('trust-exact', + 'scipy.optimize._trustregion_exact._minimize_trustregion_exact'), + ('trust-krylov', + 'scipy.optimize._trustregion_krylov._minimize_trust_krylov'), + ), + 'root': ( + ('hybr', 'scipy.optimize._minpack_py._root_hybr'), + ('lm', 'scipy.optimize._root._root_leastsq'), + ('broyden1', 'scipy.optimize._root._root_broyden1_doc'), + ('broyden2', 'scipy.optimize._root._root_broyden2_doc'), + ('anderson', 'scipy.optimize._root._root_anderson_doc'), + ('diagbroyden', 'scipy.optimize._root._root_diagbroyden_doc'), + ('excitingmixing', 'scipy.optimize._root._root_excitingmixing_doc'), + ('linearmixing', 'scipy.optimize._root._root_linearmixing_doc'), + ('krylov', 'scipy.optimize._root._root_krylov_doc'), + ('df-sane', 'scipy.optimize._spectral._root_df_sane'), + ), + 'root_scalar': ( + ('bisect', 'scipy.optimize._root_scalar._root_scalar_bisect_doc'), + ('brentq', 'scipy.optimize._root_scalar._root_scalar_brentq_doc'), + ('brenth', 'scipy.optimize._root_scalar._root_scalar_brenth_doc'), + ('ridder', 'scipy.optimize._root_scalar._root_scalar_ridder_doc'), + ('toms748', 'scipy.optimize._root_scalar._root_scalar_toms748_doc'), + ('secant', 'scipy.optimize._root_scalar._root_scalar_secant_doc'), + ('newton', 'scipy.optimize._root_scalar._root_scalar_newton_doc'), + ('halley', 'scipy.optimize._root_scalar._root_scalar_halley_doc'), + ), + 'linprog': ( + ('simplex', 'scipy.optimize._linprog._linprog_simplex_doc'), + ('interior-point', 'scipy.optimize._linprog._linprog_ip_doc'), + ('revised simplex', 'scipy.optimize._linprog._linprog_rs_doc'), + ('highs-ipm', 'scipy.optimize._linprog._linprog_highs_ipm_doc'), + ('highs-ds', 'scipy.optimize._linprog._linprog_highs_ds_doc'), + ('highs', 'scipy.optimize._linprog._linprog_highs_doc'), + ), + 'quadratic_assignment': ( + ('faq', 'scipy.optimize._qap._quadratic_assignment_faq'), + ('2opt', 'scipy.optimize._qap._quadratic_assignment_2opt'), + ), + 'minimize_scalar': ( + ('brent', 'scipy.optimize._optimize._minimize_scalar_brent'), + ('bounded', 'scipy.optimize._optimize._minimize_scalar_bounded'), + ('golden', 'scipy.optimize._optimize._minimize_scalar_golden'), + ), + } + + if solver is None: + text = ["\n\n\n========\n", "minimize\n", "========\n"] + text.append(show_options('minimize', disp=False)) + text.extend(["\n\n===============\n", "minimize_scalar\n", + "===============\n"]) + text.append(show_options('minimize_scalar', disp=False)) + text.extend(["\n\n\n====\n", "root\n", + "====\n"]) + text.append(show_options('root', disp=False)) + text.extend(['\n\n\n=======\n', 'linprog\n', + '=======\n']) + text.append(show_options('linprog', disp=False)) + text = "".join(text) + else: + solver = solver.lower() + if solver not in doc_routines: + raise ValueError(f'Unknown solver {solver!r}') + + if method is None: + text = [] + for name, _ in doc_routines[solver]: + text.extend(["\n\n" + name, "\n" + "="*len(name) + "\n\n"]) + text.append(show_options(solver, name, disp=False)) + text = "".join(text) + else: + method = method.lower() + methods = dict(doc_routines[solver]) + if method not in methods: + raise ValueError(f"Unknown method {method!r}") + name = methods[method] + + # Import function object + parts = name.split('.') + mod_name = ".".join(parts[:-1]) + __import__(mod_name) + obj = getattr(sys.modules[mod_name], parts[-1]) + + # Get doc + doc = obj.__doc__ + if doc is not None: + text = textwrap.dedent(doc).strip() + else: + text = "" + + if disp: + print(text) + return + else: + return text diff --git 
a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_pava_pybind.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_pava_pybind.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..f28cf6012007f56f5e8d1c9d057f042b4465aec6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_pava_pybind.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_remove_redundancy.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_remove_redundancy.py new file mode 100644 index 0000000000000000000000000000000000000000..cb81ad1696b768d2304b2fc42a80cc9678cbde00 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_remove_redundancy.py @@ -0,0 +1,522 @@ +""" +Routines for removing redundant (linearly dependent) equations from linear +programming equality constraints. +""" +# Author: Matt Haberland + +import numpy as np +from scipy.linalg import svd +from scipy.linalg.interpolative import interp_decomp +import scipy +from scipy.linalg.blas import dtrsm + + +def _row_count(A): + """ + Counts the number of nonzeros in each row of input array A. + Nonzeros are defined as any element with absolute value greater than + tol = 1e-13. This value should probably be an input to the function. + + Parameters + ---------- + A : 2-D array + An array representing a matrix + + Returns + ------- + rowcount : 1-D array + Number of nonzeros in each row of A + + """ + tol = 1e-13 + return np.array((abs(A) > tol).sum(axis=1)).flatten() + + +def _get_densest(A, eligibleRows): + """ + Returns the index of the densest row of A. Ignores rows that are not + eligible for consideration. + + Parameters + ---------- + A : 2-D array + An array representing a matrix + eligibleRows : 1-D logical array + Values indicate whether the corresponding row of A is eligible + to be considered + + Returns + ------- + i_densest : int + Index of the densest row in A eligible for consideration + + """ + rowCounts = _row_count(A) + return np.argmax(rowCounts * eligibleRows) + + +def _remove_zero_rows(A, b): + """ + Eliminates trivial equations from system of equations defined by Ax = b + and identifies trivial infeasibilities + + Parameters + ---------- + A : 2-D array + An array representing the left-hand side of a system of equations + b : 1-D array + An array representing the right-hand side of a system of equations + + Returns + ------- + A : 2-D array + An array representing the left-hand side of a system of equations + b : 1-D array + An array representing the right-hand side of a system of equations + status: int + An integer indicating the status of the removal operation + 0: No infeasibility identified + 2: Trivially infeasible + message : str + A string descriptor of the exit status of the optimization. + + """ + status = 0 + message = "" + i_zero = _row_count(A) == 0 + A = A[np.logical_not(i_zero), :] + if not np.allclose(b[i_zero], 0): + status = 2 + message = "There is a zero row in A_eq with a nonzero corresponding " \ + "entry in b_eq. The problem is infeasible." 
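+    # drop the rhs entries of the removed all-zero rows so A and b stay aligned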
+ b = b[np.logical_not(i_zero)] + return A, b, status, message + + +def bg_update_dense(plu, perm_r, v, j): + LU, p = plu + + vperm = v[perm_r] + u = dtrsm(1, LU, vperm, lower=1, diag=1) + LU[:j+1, j] = u[:j+1] + l = u[j+1:] + piv = LU[j, j] + LU[j+1:, j] += (l/piv) + return LU, p + + +def _remove_redundancy_pivot_dense(A, rhs, true_rank=None): + """ + Eliminates redundant equations from system of equations defined by Ax = b + and identifies infeasibilities. + + Parameters + ---------- + A : 2-D sparse matrix + An matrix representing the left-hand side of a system of equations + rhs : 1-D array + An array representing the right-hand side of a system of equations + + Returns + ------- + A : 2-D sparse matrix + A matrix representing the left-hand side of a system of equations + rhs : 1-D array + An array representing the right-hand side of a system of equations + status: int + An integer indicating the status of the system + 0: No infeasibility identified + 2: Trivially infeasible + message : str + A string descriptor of the exit status of the optimization. + + References + ---------- + .. [2] Andersen, Erling D. "Finding all linearly dependent rows in + large-scale linear programming." Optimization Methods and Software + 6.3 (1995): 219-227. + + """ + tolapiv = 1e-8 + tolprimal = 1e-8 + status = 0 + message = "" + inconsistent = ("There is a linear combination of rows of A_eq that " + "results in zero, suggesting a redundant constraint. " + "However the same linear combination of b_eq is " + "nonzero, suggesting that the constraints conflict " + "and the problem is infeasible.") + A, rhs, status, message = _remove_zero_rows(A, rhs) + + if status != 0: + return A, rhs, status, message + + m, n = A.shape + + v = list(range(m)) # Artificial column indices. + b = list(v) # Basis column indices. + # This is better as a list than a set because column order of basis matrix + # needs to be consistent. + d = [] # Indices of dependent rows + perm_r = None + + A_orig = A + A = np.zeros((m, m + n), order='F') + np.fill_diagonal(A, 1) + A[:, m:] = A_orig + e = np.zeros(m) + + js_candidates = np.arange(m, m+n, dtype=int) # candidate columns for basis + # manual masking was faster than masked array + js_mask = np.ones(js_candidates.shape, dtype=bool) + + # Implements basic algorithm from [2] + # Uses some of the suggested improvements (removing zero rows and + # Bartels-Golub update idea). + # Removing column singletons would be easy, but it is not as important + # because the procedure is performed only on the equality constraint + # matrix from the original problem - not on the canonical form matrix, + # which would have many more column singletons due to slack variables + # from the inequality constraints. + # The thoughts on "crashing" the initial basis are only really useful if + # the matrix is sparse. 
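+    # Outline of the loop below: for each row i, solve B^T pi = e_i for the
+    # current basis B and scan the structural columns for one whose dot
+    # product with pi exceeds tolapiv; such a column replaces the i-th
+    # artificial column in the basis. If no such column exists, row i is a
+    # linear combination of the rows already processed: it is either redundant
+    # (collected in d and dropped at the end) or, if the corresponding
+    # combination of rhs entries is nonzero, the system is declared
+    # infeasible.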
+ + lu = np.eye(m, order='F'), np.arange(m) # initial LU is trivial + perm_r = lu[1] + for i in v: + + e[i] = 1 + if i > 0: + e[i-1] = 0 + + try: # fails for i==0 and any time it gets ill-conditioned + j = b[i-1] + lu = bg_update_dense(lu, perm_r, A[:, j], i-1) + except Exception: + lu = scipy.linalg.lu_factor(A[:, b]) + LU, p = lu + perm_r = list(range(m)) + for i1, i2 in enumerate(p): + perm_r[i1], perm_r[i2] = perm_r[i2], perm_r[i1] + + pi = scipy.linalg.lu_solve(lu, e, trans=1) + + js = js_candidates[js_mask] + batch = 50 + + # This is a tiny bit faster than looping over columns individually, + # like for j in js: if abs(A[:,j].transpose().dot(pi)) > tolapiv: + for j_index in range(0, len(js), batch): + j_indices = js[j_index: min(j_index+batch, len(js))] + + c = abs(A[:, j_indices].transpose().dot(pi)) + if (c > tolapiv).any(): + j = js[j_index + np.argmax(c)] # very independent column + b[i] = j + js_mask[j-m] = False + break + else: + bibar = pi.T.dot(rhs.reshape(-1, 1)) + bnorm = np.linalg.norm(rhs) + if abs(bibar)/(1+bnorm) > tolprimal: # inconsistent + status = 2 + message = inconsistent + return A_orig, rhs, status, message + else: # dependent + d.append(i) + if true_rank is not None and len(d) == m - true_rank: + break # found all redundancies + + keep = set(range(m)) + keep = list(keep - set(d)) + return A_orig[keep, :], rhs[keep], status, message + + +def _remove_redundancy_pivot_sparse(A, rhs): + """ + Eliminates redundant equations from system of equations defined by Ax = b + and identifies infeasibilities. + + Parameters + ---------- + A : 2-D sparse matrix + An matrix representing the left-hand side of a system of equations + rhs : 1-D array + An array representing the right-hand side of a system of equations + + Returns + ------- + A : 2-D sparse matrix + A matrix representing the left-hand side of a system of equations + rhs : 1-D array + An array representing the right-hand side of a system of equations + status: int + An integer indicating the status of the system + 0: No infeasibility identified + 2: Trivially infeasible + message : str + A string descriptor of the exit status of the optimization. + + References + ---------- + .. [2] Andersen, Erling D. "Finding all linearly dependent rows in + large-scale linear programming." Optimization Methods and Software + 6.3 (1995): 219-227. + + """ + + tolapiv = 1e-8 + tolprimal = 1e-8 + status = 0 + message = "" + inconsistent = ("There is a linear combination of rows of A_eq that " + "results in zero, suggesting a redundant constraint. " + "However the same linear combination of b_eq is " + "nonzero, suggesting that the constraints conflict " + "and the problem is infeasible.") + A, rhs, status, message = _remove_zero_rows(A, rhs) + + if status != 0: + return A, rhs, status, message + + m, n = A.shape + + v = list(range(m)) # Artificial column indices. + b = list(v) # Basis column indices. + # This is better as a list than a set because column order of basis matrix + # needs to be consistent. + k = set(range(m, m+n)) # Structural column indices. + d = [] # Indices of dependent rows + + A_orig = A + A = scipy.sparse.hstack((scipy.sparse.eye(m), A)).tocsc() + e = np.zeros(m) + + # Implements basic algorithm from [2] + # Uses only one of the suggested improvements (removing zero rows). 
+ # Removing column singletons would be easy, but it is not as important + # because the procedure is performed only on the equality constraint + # matrix from the original problem - not on the canonical form matrix, + # which would have many more column singletons due to slack variables + # from the inequality constraints. + # The thoughts on "crashing" the initial basis sound useful, but the + # description of the procedure seems to assume a lot of familiarity with + # the subject; it is not very explicit. I already went through enough + # trouble getting the basic algorithm working, so I was not interested in + # trying to decipher this, too. (Overall, the paper is fraught with + # mistakes and ambiguities - which is strange, because the rest of + # Andersen's papers are quite good.) + # I tried and tried and tried to improve performance using the + # Bartels-Golub update. It works, but it's only practical if the LU + # factorization can be specialized as described, and that is not possible + # until the SciPy SuperLU interface permits control over column + # permutation - see issue #7700. + + for i in v: + B = A[:, b] + + e[i] = 1 + if i > 0: + e[i-1] = 0 + + pi = scipy.sparse.linalg.spsolve(B.transpose(), e).reshape(-1, 1) + + js = list(k-set(b)) # not efficient, but this is not the time sink... + + # Due to overhead, it tends to be faster (for problems tested) to + # compute the full matrix-vector product rather than individual + # vector-vector products (with the chance of terminating as soon + # as any are nonzero). For very large matrices, it might be worth + # it to compute, say, 100 or 1000 at a time and stop when a nonzero + # is found. + + c = (np.abs(A[:, js].transpose().dot(pi)) > tolapiv).nonzero()[0] + if len(c) > 0: # independent + j = js[c[0]] + # in a previous commit, the previous line was changed to choose + # index j corresponding with the maximum dot product. + # While this avoided issues with almost + # singular matrices, it slowed the routine in most NETLIB tests. + # I think this is because these columns were denser than the + # first column with nonzero dot product (c[0]). + # It would be nice to have a heuristic that balances sparsity with + # high dot product, but I don't think it's worth the time to + # develop one right now. Bartels-Golub update is a much higher + # priority. + b[i] = j # replace artificial column + else: + bibar = pi.T.dot(rhs.reshape(-1, 1)) + bnorm = np.linalg.norm(rhs) + if abs(bibar)/(1 + bnorm) > tolprimal: + status = 2 + message = inconsistent + return A_orig, rhs, status, message + else: # dependent + d.append(i) + + keep = set(range(m)) + keep = list(keep - set(d)) + return A_orig[keep, :], rhs[keep], status, message + + +def _remove_redundancy_svd(A, b): + """ + Eliminates redundant equations from system of equations defined by Ax = b + and identifies infeasibilities. + + Parameters + ---------- + A : 2-D array + An array representing the left-hand side of a system of equations + b : 1-D array + An array representing the right-hand side of a system of equations + + Returns + ------- + A : 2-D array + An array representing the left-hand side of a system of equations + b : 1-D array + An array representing the right-hand side of a system of equations + status: int + An integer indicating the status of the system + 0: No infeasibility identified + 2: Trivially infeasible + message : str + A string descriptor of the exit status of the optimization. + + References + ---------- + .. [2] Andersen, Erling D. 
"Finding all linearly dependent rows in + large-scale linear programming." Optimization Methods and Software + 6.3 (1995): 219-227. + + """ + + A, b, status, message = _remove_zero_rows(A, b) + + if status != 0: + return A, b, status, message + + U, s, Vh = svd(A) + eps = np.finfo(float).eps + tol = s.max() * max(A.shape) * eps + + m, n = A.shape + s_min = s[-1] if m <= n else 0 + + # this algorithm is faster than that of [2] when the nullspace is small + # but it could probably be improvement by randomized algorithms and with + # a sparse implementation. + # it relies on repeated singular value decomposition to find linearly + # dependent rows (as identified by columns of U that correspond with zero + # singular values). Unfortunately, only one row can be removed per + # decomposition (I tried otherwise; doing so can cause problems.) + # It would be nice if we could do truncated SVD like sp.sparse.linalg.svds + # but that function is unreliable at finding singular values near zero. + # Finding max eigenvalue L of A A^T, then largest eigenvalue (and + # associated eigenvector) of -A A^T + L I (I is identity) via power + # iteration would also work in theory, but is only efficient if the + # smallest nonzero eigenvalue of A A^T is close to the largest nonzero + # eigenvalue. + + while abs(s_min) < tol: + v = U[:, -1] # TODO: return these so user can eliminate from problem? + # rows need to be represented in significant amount + eligibleRows = np.abs(v) > tol * 10e6 + if not np.any(eligibleRows) or np.any(np.abs(v.dot(A)) > tol): + status = 4 + message = ("Due to numerical issues, redundant equality " + "constraints could not be removed automatically. " + "Try providing your constraint matrices as sparse " + "matrices to activate sparse presolve, try turning " + "off redundancy removal, or try turning off presolve " + "altogether.") + break + if np.any(np.abs(v.dot(b)) > tol * 100): # factor of 100 to fix 10038 and 10349 + status = 2 + message = ("There is a linear combination of rows of A_eq that " + "results in zero, suggesting a redundant constraint. " + "However the same linear combination of b_eq is " + "nonzero, suggesting that the constraints conflict " + "and the problem is infeasible.") + break + + i_remove = _get_densest(A, eligibleRows) + A = np.delete(A, i_remove, axis=0) + b = np.delete(b, i_remove) + U, s, Vh = svd(A) + m, n = A.shape + s_min = s[-1] if m <= n else 0 + + return A, b, status, message + + +def _remove_redundancy_id(A, rhs, rank=None, randomized=True): + """Eliminates redundant equations from a system of equations. + + Eliminates redundant equations from system of equations defined by Ax = b + and identifies infeasibilities. + + Parameters + ---------- + A : 2-D array + An array representing the left-hand side of a system of equations + rhs : 1-D array + An array representing the right-hand side of a system of equations + rank : int, optional + The rank of A + randomized: bool, optional + True for randomized interpolative decomposition + + Returns + ------- + A : 2-D array + An array representing the left-hand side of a system of equations + rhs : 1-D array + An array representing the right-hand side of a system of equations + status: int + An integer indicating the status of the system + 0: No infeasibility identified + 2: Trivially infeasible + message : str + A string descriptor of the exit status of the optimization. 
+ + """ + + status = 0 + message = "" + inconsistent = ("There is a linear combination of rows of A_eq that " + "results in zero, suggesting a redundant constraint. " + "However the same linear combination of b_eq is " + "nonzero, suggesting that the constraints conflict " + "and the problem is infeasible.") + + A, rhs, status, message = _remove_zero_rows(A, rhs) + + if status != 0: + return A, rhs, status, message + + m, n = A.shape + + k = rank + if rank is None: + k = np.linalg.matrix_rank(A) + + idx, proj = interp_decomp(A.T, k, rand=randomized) + + # first k entries in idx are indices of the independent rows + # remaining entries are the indices of the m-k dependent rows + # proj provides a linear combinations of rows of A2 that form the + # remaining m-k (dependent) rows. The same linear combination of entries + # in rhs2 must give the remaining m-k entries. If not, the system is + # inconsistent, and the problem is infeasible. + if not np.allclose(rhs[idx[:k]] @ proj, rhs[idx[k:]]): + status = 2 + message = inconsistent + + # sort indices because the other redundancy removal routines leave rows + # in original order and tests were written with that in mind + idx = sorted(idx[:k]) + A2 = A[idx, :] + rhs2 = rhs[idx] + return A2, rhs2, status, message diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_root.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_root.py new file mode 100644 index 0000000000000000000000000000000000000000..613ccd82a32ef08f90a65b92ea61b597e8d8113f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_root.py @@ -0,0 +1,711 @@ +""" +Unified interfaces to root finding algorithms. + +Functions +--------- +- root : find a root of a vector function. +""" +__all__ = ['root'] + +import numpy as np + +from warnings import warn + +from ._optimize import MemoizeJac, OptimizeResult, _check_unknown_options +from ._minpack_py import _root_hybr, leastsq +from ._spectral import _root_df_sane +from . import _nonlin as nonlin + + +ROOT_METHODS = ['hybr', 'lm', 'broyden1', 'broyden2', 'anderson', + 'linearmixing', 'diagbroyden', 'excitingmixing', 'krylov', + 'df-sane'] + + +def root(fun, x0, args=(), method='hybr', jac=None, tol=None, callback=None, + options=None): + r""" + Find a root of a vector function. + + Parameters + ---------- + fun : callable + A vector function to find a root of. + x0 : ndarray + Initial guess. + args : tuple, optional + Extra arguments passed to the objective function and its Jacobian. + method : str, optional + Type of solver. Should be one of + + - 'hybr' :ref:`(see here) ` + - 'lm' :ref:`(see here) ` + - 'broyden1' :ref:`(see here) ` + - 'broyden2' :ref:`(see here) ` + - 'anderson' :ref:`(see here) ` + - 'linearmixing' :ref:`(see here) ` + - 'diagbroyden' :ref:`(see here) ` + - 'excitingmixing' :ref:`(see here) ` + - 'krylov' :ref:`(see here) ` + - 'df-sane' :ref:`(see here) ` + + jac : bool or callable, optional + If `jac` is a Boolean and is True, `fun` is assumed to return the + value of Jacobian along with the objective function. If False, the + Jacobian will be estimated numerically. + `jac` can also be a callable returning the Jacobian of `fun`. In + this case, it must accept the same arguments as `fun`. + tol : float, optional + Tolerance for termination. For detailed control, use solver-specific + options. + callback : function, optional + Optional callback function. 
It is called on every iteration as + ``callback(x, f)`` where `x` is the current solution and `f` + the corresponding residual. For all methods but 'hybr' and 'lm'. + options : dict, optional + A dictionary of solver options. E.g., `xtol` or `maxiter`, see + :obj:`show_options()` for details. + + Returns + ------- + sol : OptimizeResult + The solution represented as a ``OptimizeResult`` object. + Important attributes are: ``x`` the solution array, ``success`` a + Boolean flag indicating if the algorithm exited successfully and + ``message`` which describes the cause of the termination. See + `OptimizeResult` for a description of other attributes. + + See also + -------- + show_options : Additional options accepted by the solvers + + Notes + ----- + This section describes the available solvers that can be selected by the + 'method' parameter. The default method is *hybr*. + + Method *hybr* uses a modification of the Powell hybrid method as + implemented in MINPACK [1]_. + + Method *lm* solves the system of nonlinear equations in a least squares + sense using a modification of the Levenberg-Marquardt algorithm as + implemented in MINPACK [1]_. + + Method *df-sane* is a derivative-free spectral method. [3]_ + + Methods *broyden1*, *broyden2*, *anderson*, *linearmixing*, + *diagbroyden*, *excitingmixing*, *krylov* are inexact Newton methods, + with backtracking or full line searches [2]_. Each method corresponds + to a particular Jacobian approximations. + + - Method *broyden1* uses Broyden's first Jacobian approximation, it is + known as Broyden's good method. + - Method *broyden2* uses Broyden's second Jacobian approximation, it + is known as Broyden's bad method. + - Method *anderson* uses (extended) Anderson mixing. + - Method *Krylov* uses Krylov approximation for inverse Jacobian. It + is suitable for large-scale problem. + - Method *diagbroyden* uses diagonal Broyden Jacobian approximation. + - Method *linearmixing* uses a scalar Jacobian approximation. + - Method *excitingmixing* uses a tuned diagonal Jacobian + approximation. + + .. warning:: + + The algorithms implemented for methods *diagbroyden*, + *linearmixing* and *excitingmixing* may be useful for specific + problems, but whether they will work may depend strongly on the + problem. + + .. versionadded:: 0.11.0 + + References + ---------- + .. [1] More, Jorge J., Burton S. Garbow, and Kenneth E. Hillstrom. + 1980. User Guide for MINPACK-1. + .. [2] C. T. Kelley. 1995. Iterative Methods for Linear and Nonlinear + Equations. Society for Industrial and Applied Mathematics. + + .. [3] W. La Cruz, J.M. Martinez, M. Raydan. Math. Comp. 75, 1429 (2006). + + Examples + -------- + The following functions define a system of nonlinear equations and its + jacobian. + + >>> import numpy as np + >>> def fun(x): + ... return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0, + ... 0.5 * (x[1] - x[0])**3 + x[1]] + + >>> def jac(x): + ... return np.array([[1 + 1.5 * (x[0] - x[1])**2, + ... -1.5 * (x[0] - x[1])**2], + ... [-1.5 * (x[1] - x[0])**2, + ... 1 + 1.5 * (x[1] - x[0])**2]]) + + A solution can be obtained as follows. + + >>> from scipy import optimize + >>> sol = optimize.root(fun, [0, 0], jac=jac, method='hybr') + >>> sol.x + array([ 0.8411639, 0.1588361]) + + **Large problem** + + Suppose that we needed to solve the following integrodifferential + equation on the square :math:`[0,1]\times[0,1]`: + + .. 
math:: + + \nabla^2 P = 10 \left(\int_0^1\int_0^1\cosh(P)\,dx\,dy\right)^2 + + with :math:`P(x,1) = 1` and :math:`P=0` elsewhere on the boundary of + the square. + + The solution can be found using the ``method='krylov'`` solver: + + >>> from scipy import optimize + >>> # parameters + >>> nx, ny = 75, 75 + >>> hx, hy = 1./(nx-1), 1./(ny-1) + + >>> P_left, P_right = 0, 0 + >>> P_top, P_bottom = 1, 0 + + >>> def residual(P): + ... d2x = np.zeros_like(P) + ... d2y = np.zeros_like(P) + ... + ... d2x[1:-1] = (P[2:] - 2*P[1:-1] + P[:-2]) / hx/hx + ... d2x[0] = (P[1] - 2*P[0] + P_left)/hx/hx + ... d2x[-1] = (P_right - 2*P[-1] + P[-2])/hx/hx + ... + ... d2y[:,1:-1] = (P[:,2:] - 2*P[:,1:-1] + P[:,:-2])/hy/hy + ... d2y[:,0] = (P[:,1] - 2*P[:,0] + P_bottom)/hy/hy + ... d2y[:,-1] = (P_top - 2*P[:,-1] + P[:,-2])/hy/hy + ... + ... return d2x + d2y - 10*np.cosh(P).mean()**2 + + >>> guess = np.zeros((nx, ny), float) + >>> sol = optimize.root(residual, guess, method='krylov') + >>> print('Residual: %g' % abs(residual(sol.x)).max()) + Residual: 5.7972e-06 # may vary + + >>> import matplotlib.pyplot as plt + >>> x, y = np.mgrid[0:1:(nx*1j), 0:1:(ny*1j)] + >>> plt.pcolormesh(x, y, sol.x, shading='gouraud') + >>> plt.colorbar() + >>> plt.show() + + """ + if not isinstance(args, tuple): + args = (args,) + + meth = method.lower() + if options is None: + options = {} + + if callback is not None and meth in ('hybr', 'lm'): + warn('Method %s does not accept callback.' % method, + RuntimeWarning, stacklevel=2) + + # fun also returns the Jacobian + if not callable(jac) and meth in ('hybr', 'lm'): + if bool(jac): + fun = MemoizeJac(fun) + jac = fun.derivative + else: + jac = None + + # set default tolerances + if tol is not None: + options = dict(options) + if meth in ('hybr', 'lm'): + options.setdefault('xtol', tol) + elif meth in ('df-sane',): + options.setdefault('ftol', tol) + elif meth in ('broyden1', 'broyden2', 'anderson', 'linearmixing', + 'diagbroyden', 'excitingmixing', 'krylov'): + options.setdefault('xtol', tol) + options.setdefault('xatol', np.inf) + options.setdefault('ftol', np.inf) + options.setdefault('fatol', np.inf) + + if meth == 'hybr': + sol = _root_hybr(fun, x0, args=args, jac=jac, **options) + elif meth == 'lm': + sol = _root_leastsq(fun, x0, args=args, jac=jac, **options) + elif meth == 'df-sane': + _warn_jac_unused(jac, method) + sol = _root_df_sane(fun, x0, args=args, callback=callback, + **options) + elif meth in ('broyden1', 'broyden2', 'anderson', 'linearmixing', + 'diagbroyden', 'excitingmixing', 'krylov'): + _warn_jac_unused(jac, method) + sol = _root_nonlin_solve(fun, x0, args=args, jac=jac, + _method=meth, _callback=callback, + **options) + else: + raise ValueError('Unknown solver %s' % method) + + return sol + + +def _warn_jac_unused(jac, method): + if jac is not None: + warn(f'Method {method} does not use the jacobian (jac).', + RuntimeWarning, stacklevel=2) + + +def _root_leastsq(fun, x0, args=(), jac=None, + col_deriv=0, xtol=1.49012e-08, ftol=1.49012e-08, + gtol=0.0, maxiter=0, eps=0.0, factor=100, diag=None, + **unknown_options): + """ + Solve for least squares with Levenberg-Marquardt + + Options + ------- + col_deriv : bool + non-zero to specify that the Jacobian function computes derivatives + down the columns (faster, because there is no transpose operation). + ftol : float + Relative error desired in the sum of squares. + xtol : float + Relative error desired in the approximate solution. 
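Illustrative usage sketch (an assumed example reusing the small system from the docstring above): with jac=True a single callable returns both the residuals and the Jacobian, root() wraps it in MemoizeJac so each is evaluated once per point, and tol is forwarded to the solver-specific xtol as in the dispatch code above.

import numpy as np
from scipy import optimize

def fun_and_jac(x):
    f = [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
         0.5 * (x[1] - x[0])**3 + x[1]]
    jac = np.array([[1 + 1.5 * (x[0] - x[1])**2, -1.5 * (x[0] - x[1])**2],
                    [-1.5 * (x[1] - x[0])**2, 1 + 1.5 * (x[1] - x[0])**2]])
    return f, jac

sol = optimize.root(fun_and_jac, [0, 0], jac=True, method='hybr', tol=1e-10)
print(sol.x)                               # approximately [0.8411639, 0.1588361]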
+ gtol : float + Orthogonality desired between the function vector and the columns + of the Jacobian. + maxiter : int + The maximum number of calls to the function. If zero, then + 100*(N+1) is the maximum where N is the number of elements in x0. + epsfcn : float + A suitable step length for the forward-difference approximation of + the Jacobian (for Dfun=None). If epsfcn is less than the machine + precision, it is assumed that the relative errors in the functions + are of the order of the machine precision. + factor : float + A parameter determining the initial step bound + (``factor * || diag * x||``). Should be in interval ``(0.1, 100)``. + diag : sequence + N positive entries that serve as a scale factors for the variables. + """ + + _check_unknown_options(unknown_options) + x, cov_x, info, msg, ier = leastsq(fun, x0, args=args, Dfun=jac, + full_output=True, + col_deriv=col_deriv, xtol=xtol, + ftol=ftol, gtol=gtol, + maxfev=maxiter, epsfcn=eps, + factor=factor, diag=diag) + sol = OptimizeResult(x=x, message=msg, status=ier, + success=ier in (1, 2, 3, 4), cov_x=cov_x, + fun=info.pop('fvec'), method="lm") + sol.update(info) + return sol + + +def _root_nonlin_solve(fun, x0, args=(), jac=None, + _callback=None, _method=None, + nit=None, disp=False, maxiter=None, + ftol=None, fatol=None, xtol=None, xatol=None, + tol_norm=None, line_search='armijo', jac_options=None, + **unknown_options): + _check_unknown_options(unknown_options) + + f_tol = fatol + f_rtol = ftol + x_tol = xatol + x_rtol = xtol + verbose = disp + if jac_options is None: + jac_options = dict() + + jacobian = {'broyden1': nonlin.BroydenFirst, + 'broyden2': nonlin.BroydenSecond, + 'anderson': nonlin.Anderson, + 'linearmixing': nonlin.LinearMixing, + 'diagbroyden': nonlin.DiagBroyden, + 'excitingmixing': nonlin.ExcitingMixing, + 'krylov': nonlin.KrylovJacobian + }[_method] + + if args: + if jac is True: + def f(x): + return fun(x, *args)[0] + else: + def f(x): + return fun(x, *args) + else: + f = fun + + x, info = nonlin.nonlin_solve(f, x0, jacobian=jacobian(**jac_options), + iter=nit, verbose=verbose, + maxiter=maxiter, f_tol=f_tol, + f_rtol=f_rtol, x_tol=x_tol, + x_rtol=x_rtol, tol_norm=tol_norm, + line_search=line_search, + callback=_callback, full_output=True, + raise_exception=False) + sol = OptimizeResult(x=x, method=_method) + sol.update(info) + return sol + +def _root_broyden1_doc(): + """ + Options + ------- + nit : int, optional + Number of iterations to make. If omitted (default), make as many + as required to meet tolerances. + disp : bool, optional + Print status to stdout on every iteration. + maxiter : int, optional + Maximum number of iterations to make. + ftol : float, optional + Relative tolerance for the residual. If omitted, not used. + fatol : float, optional + Absolute tolerance (in max-norm) for the residual. + If omitted, default is 6e-6. + xtol : float, optional + Relative minimum step size. If omitted, not used. + xatol : float, optional + Absolute minimum step size, as determined from the Jacobian + approximation. If the step size is smaller than this, optimization + is terminated as successful. If omitted, not used. + tol_norm : function(vector) -> scalar, optional + Norm to use in convergence check. Default is the maximum norm. + line_search : {None, 'armijo' (default), 'wolfe'}, optional + Which type of a line search to use to determine the step size in + the direction given by the Jacobian approximation. Defaults to + 'armijo'. 
+ jac_options : dict, optional + Options for the respective Jacobian approximation. + alpha : float, optional + Initial guess for the Jacobian is (-1/alpha). + reduction_method : str or tuple, optional + Method used in ensuring that the rank of the Broyden + matrix stays low. Can either be a string giving the + name of the method, or a tuple of the form ``(method, + param1, param2, ...)`` that gives the name of the + method and values for additional parameters. + + Methods available: + + - ``restart`` + Drop all matrix columns. Has no + extra parameters. + - ``simple`` + Drop oldest matrix column. Has no + extra parameters. + - ``svd`` + Keep only the most significant SVD + components. + + Extra parameters: + + - ``to_retain`` + Number of SVD components to + retain when rank reduction is done. + Default is ``max_rank - 2``. + max_rank : int, optional + Maximum rank for the Broyden matrix. + Default is infinity (i.e., no rank reduction). + + Examples + -------- + >>> def func(x): + ... return np.cos(x) + x[::-1] - [1, 2, 3, 4] + ... + >>> from scipy import optimize + >>> res = optimize.root(func, [1, 1, 1, 1], method='broyden1', tol=1e-14) + >>> x = res.x + >>> x + array([4.04674914, 3.91158389, 2.71791677, 1.61756251]) + >>> np.cos(x) + x[::-1] + array([1., 2., 3., 4.]) + + """ + pass + +def _root_broyden2_doc(): + """ + Options + ------- + nit : int, optional + Number of iterations to make. If omitted (default), make as many + as required to meet tolerances. + disp : bool, optional + Print status to stdout on every iteration. + maxiter : int, optional + Maximum number of iterations to make. + ftol : float, optional + Relative tolerance for the residual. If omitted, not used. + fatol : float, optional + Absolute tolerance (in max-norm) for the residual. + If omitted, default is 6e-6. + xtol : float, optional + Relative minimum step size. If omitted, not used. + xatol : float, optional + Absolute minimum step size, as determined from the Jacobian + approximation. If the step size is smaller than this, optimization + is terminated as successful. If omitted, not used. + tol_norm : function(vector) -> scalar, optional + Norm to use in convergence check. Default is the maximum norm. + line_search : {None, 'armijo' (default), 'wolfe'}, optional + Which type of a line search to use to determine the step size in + the direction given by the Jacobian approximation. Defaults to + 'armijo'. + jac_options : dict, optional + Options for the respective Jacobian approximation. + + alpha : float, optional + Initial guess for the Jacobian is (-1/alpha). + reduction_method : str or tuple, optional + Method used in ensuring that the rank of the Broyden + matrix stays low. Can either be a string giving the + name of the method, or a tuple of the form ``(method, + param1, param2, ...)`` that gives the name of the + method and values for additional parameters. + + Methods available: + + - ``restart`` + Drop all matrix columns. Has no + extra parameters. + - ``simple`` + Drop oldest matrix column. Has no + extra parameters. + - ``svd`` + Keep only the most significant SVD + components. + + Extra parameters: + + - ``to_retain`` + Number of SVD components to + retain when rank reduction is done. + Default is ``max_rank - 2``. + max_rank : int, optional + Maximum rank for the Broyden matrix. + Default is infinity (i.e., no rank reduction). + """ + pass + +def _root_anderson_doc(): + """ + Options + ------- + nit : int, optional + Number of iterations to make. 
If omitted (default), make as many + as required to meet tolerances. + disp : bool, optional + Print status to stdout on every iteration. + maxiter : int, optional + Maximum number of iterations to make. + ftol : float, optional + Relative tolerance for the residual. If omitted, not used. + fatol : float, optional + Absolute tolerance (in max-norm) for the residual. + If omitted, default is 6e-6. + xtol : float, optional + Relative minimum step size. If omitted, not used. + xatol : float, optional + Absolute minimum step size, as determined from the Jacobian + approximation. If the step size is smaller than this, optimization + is terminated as successful. If omitted, not used. + tol_norm : function(vector) -> scalar, optional + Norm to use in convergence check. Default is the maximum norm. + line_search : {None, 'armijo' (default), 'wolfe'}, optional + Which type of a line search to use to determine the step size in + the direction given by the Jacobian approximation. Defaults to + 'armijo'. + jac_options : dict, optional + Options for the respective Jacobian approximation. + + alpha : float, optional + Initial guess for the Jacobian is (-1/alpha). + M : float, optional + Number of previous vectors to retain. Defaults to 5. + w0 : float, optional + Regularization parameter for numerical stability. + Compared to unity, good values of the order of 0.01. + """ + pass + +def _root_linearmixing_doc(): + """ + Options + ------- + nit : int, optional + Number of iterations to make. If omitted (default), make as many + as required to meet tolerances. + disp : bool, optional + Print status to stdout on every iteration. + maxiter : int, optional + Maximum number of iterations to make. + ftol : float, optional + Relative tolerance for the residual. If omitted, not used. + fatol : float, optional + Absolute tolerance (in max-norm) for the residual. + If omitted, default is 6e-6. + xtol : float, optional + Relative minimum step size. If omitted, not used. + xatol : float, optional + Absolute minimum step size, as determined from the Jacobian + approximation. If the step size is smaller than this, optimization + is terminated as successful. If omitted, not used. + tol_norm : function(vector) -> scalar, optional + Norm to use in convergence check. Default is the maximum norm. + line_search : {None, 'armijo' (default), 'wolfe'}, optional + Which type of a line search to use to determine the step size in + the direction given by the Jacobian approximation. Defaults to + 'armijo'. + jac_options : dict, optional + Options for the respective Jacobian approximation. + + alpha : float, optional + initial guess for the jacobian is (-1/alpha). + """ + pass + +def _root_diagbroyden_doc(): + """ + Options + ------- + nit : int, optional + Number of iterations to make. If omitted (default), make as many + as required to meet tolerances. + disp : bool, optional + Print status to stdout on every iteration. + maxiter : int, optional + Maximum number of iterations to make. + ftol : float, optional + Relative tolerance for the residual. If omitted, not used. + fatol : float, optional + Absolute tolerance (in max-norm) for the residual. + If omitted, default is 6e-6. + xtol : float, optional + Relative minimum step size. If omitted, not used. + xatol : float, optional + Absolute minimum step size, as determined from the Jacobian + approximation. If the step size is smaller than this, optimization + is terminated as successful. If omitted, not used. 
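Hedged sketch (an assumed example built on the broyden1 doctest above; the particular jac_options values are arbitrary, and convergence is expected but not guaranteed for every choice): the jac_options dict documented in these Options blocks travels through root()'s options into _root_nonlin_solve, where it is unpacked into the underlying Jacobian approximation (here BroydenFirst).

import numpy as np
from scipy import optimize

def func(x):
    return np.cos(x) + x[::-1] - [1, 2, 3, 4]

res = optimize.root(func, [1, 1, 1, 1], method='broyden1', tol=1e-10,
                    options={'maxiter': 500,
                             'jac_options': {'max_rank': 3,
                                             'reduction_method': 'simple'}})
print(res.success, res.x)                  # results may vary with the chosen jac_options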
+ tol_norm : function(vector) -> scalar, optional + Norm to use in convergence check. Default is the maximum norm. + line_search : {None, 'armijo' (default), 'wolfe'}, optional + Which type of a line search to use to determine the step size in + the direction given by the Jacobian approximation. Defaults to + 'armijo'. + jac_options : dict, optional + Options for the respective Jacobian approximation. + + alpha : float, optional + initial guess for the jacobian is (-1/alpha). + """ + pass + +def _root_excitingmixing_doc(): + """ + Options + ------- + nit : int, optional + Number of iterations to make. If omitted (default), make as many + as required to meet tolerances. + disp : bool, optional + Print status to stdout on every iteration. + maxiter : int, optional + Maximum number of iterations to make. + ftol : float, optional + Relative tolerance for the residual. If omitted, not used. + fatol : float, optional + Absolute tolerance (in max-norm) for the residual. + If omitted, default is 6e-6. + xtol : float, optional + Relative minimum step size. If omitted, not used. + xatol : float, optional + Absolute minimum step size, as determined from the Jacobian + approximation. If the step size is smaller than this, optimization + is terminated as successful. If omitted, not used. + tol_norm : function(vector) -> scalar, optional + Norm to use in convergence check. Default is the maximum norm. + line_search : {None, 'armijo' (default), 'wolfe'}, optional + Which type of a line search to use to determine the step size in + the direction given by the Jacobian approximation. Defaults to + 'armijo'. + jac_options : dict, optional + Options for the respective Jacobian approximation. + + alpha : float, optional + Initial Jacobian approximation is (-1/alpha). + alphamax : float, optional + The entries of the diagonal Jacobian are kept in the range + ``[alpha, alphamax]``. + """ + pass + +def _root_krylov_doc(): + """ + Options + ------- + nit : int, optional + Number of iterations to make. If omitted (default), make as many + as required to meet tolerances. + disp : bool, optional + Print status to stdout on every iteration. + maxiter : int, optional + Maximum number of iterations to make. + ftol : float, optional + Relative tolerance for the residual. If omitted, not used. + fatol : float, optional + Absolute tolerance (in max-norm) for the residual. + If omitted, default is 6e-6. + xtol : float, optional + Relative minimum step size. If omitted, not used. + xatol : float, optional + Absolute minimum step size, as determined from the Jacobian + approximation. If the step size is smaller than this, optimization + is terminated as successful. If omitted, not used. + tol_norm : function(vector) -> scalar, optional + Norm to use in convergence check. Default is the maximum norm. + line_search : {None, 'armijo' (default), 'wolfe'}, optional + Which type of a line search to use to determine the step size in + the direction given by the Jacobian approximation. Defaults to + 'armijo'. + jac_options : dict, optional + Options for the respective Jacobian approximation. + + rdiff : float, optional + Relative step size to use in numerical differentiation. + method : str or callable, optional + Krylov method to use to approximate the Jacobian. Can be a string, + or a function implementing the same interface as the iterative + solvers in `scipy.sparse.linalg`. If a string, needs to be one of: + ``'lgmres'``, ``'gmres'``, ``'bicgstab'``, ``'cgs'``, ``'minres'``, + ``'tfqmr'``. 
+ + The default is `scipy.sparse.linalg.lgmres`. + inner_M : LinearOperator or InverseJacobian + Preconditioner for the inner Krylov iteration. + Note that you can use also inverse Jacobians as (adaptive) + preconditioners. For example, + + >>> jac = BroydenFirst() + >>> kjac = KrylovJacobian(inner_M=jac.inverse). + + If the preconditioner has a method named 'update', it will + be called as ``update(x, f)`` after each nonlinear step, + with ``x`` giving the current point, and ``f`` the current + function value. + inner_tol, inner_maxiter, ... + Parameters to pass on to the "inner" Krylov solver. + See `scipy.sparse.linalg.gmres` for details. + outer_k : int, optional + Size of the subspace kept across LGMRES nonlinear + iterations. + + See `scipy.sparse.linalg.lgmres` for details. + """ + pass diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_root_scalar.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_root_scalar.py new file mode 100644 index 0000000000000000000000000000000000000000..550098bbe677825b34e19aec29e340143b3522cc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_root_scalar.py @@ -0,0 +1,525 @@ +""" +Unified interfaces to root finding algorithms for real or complex +scalar functions. + +Functions +--------- +- root : find a root of a scalar function. +""" +import numpy as np + +from . import _zeros_py as optzeros +from ._numdiff import approx_derivative + +__all__ = ['root_scalar'] + +ROOT_SCALAR_METHODS = ['bisect', 'brentq', 'brenth', 'ridder', 'toms748', + 'newton', 'secant', 'halley'] + + +class MemoizeDer: + """Decorator that caches the value and derivative(s) of function each + time it is called. + + This is a simplistic memoizer that calls and caches a single value + of `f(x, *args)`. + It assumes that `args` does not change between invocations. + It supports the use case of a root-finder where `args` is fixed, + `x` changes, and only rarely, if at all, does x assume the same value + more than once.""" + def __init__(self, fun): + self.fun = fun + self.vals = None + self.x = None + self.n_calls = 0 + + def __call__(self, x, *args): + r"""Calculate f or use cached value if available""" + # Derivative may be requested before the function itself, always check + if self.vals is None or x != self.x: + fg = self.fun(x, *args) + self.x = x + self.n_calls += 1 + self.vals = fg[:] + return self.vals[0] + + def fprime(self, x, *args): + r"""Calculate f' or use a cached value if available""" + if self.vals is None or x != self.x: + self(x, *args) + return self.vals[1] + + def fprime2(self, x, *args): + r"""Calculate f'' or use a cached value if available""" + if self.vals is None or x != self.x: + self(x, *args) + return self.vals[2] + + def ncalls(self): + return self.n_calls + + +def root_scalar(f, args=(), method=None, bracket=None, + fprime=None, fprime2=None, + x0=None, x1=None, + xtol=None, rtol=None, maxiter=None, + options=None): + """ + Find a root of a scalar function. + + Parameters + ---------- + f : callable + A function to find a root of. + args : tuple, optional + Extra arguments passed to the objective function and its derivative(s). + method : str, optional + Type of solver. 
Should be one of + + - 'bisect' :ref:`(see here) ` + - 'brentq' :ref:`(see here) ` + - 'brenth' :ref:`(see here) ` + - 'ridder' :ref:`(see here) ` + - 'toms748' :ref:`(see here) ` + - 'newton' :ref:`(see here) ` + - 'secant' :ref:`(see here) ` + - 'halley' :ref:`(see here) ` + + bracket: A sequence of 2 floats, optional + An interval bracketing a root. `f(x, *args)` must have different + signs at the two endpoints. + x0 : float, optional + Initial guess. + x1 : float, optional + A second guess. + fprime : bool or callable, optional + If `fprime` is a boolean and is True, `f` is assumed to return the + value of the objective function and of the derivative. + `fprime` can also be a callable returning the derivative of `f`. In + this case, it must accept the same arguments as `f`. + fprime2 : bool or callable, optional + If `fprime2` is a boolean and is True, `f` is assumed to return the + value of the objective function and of the + first and second derivatives. + `fprime2` can also be a callable returning the second derivative of `f`. + In this case, it must accept the same arguments as `f`. + xtol : float, optional + Tolerance (absolute) for termination. + rtol : float, optional + Tolerance (relative) for termination. + maxiter : int, optional + Maximum number of iterations. + options : dict, optional + A dictionary of solver options. E.g., ``k``, see + :obj:`show_options()` for details. + + Returns + ------- + sol : RootResults + The solution represented as a ``RootResults`` object. + Important attributes are: ``root`` the solution , ``converged`` a + boolean flag indicating if the algorithm exited successfully and + ``flag`` which describes the cause of the termination. See + `RootResults` for a description of other attributes. + + See also + -------- + show_options : Additional options accepted by the solvers + root : Find a root of a vector function. + + Notes + ----- + This section describes the available solvers that can be selected by the + 'method' parameter. + + The default is to use the best method available for the situation + presented. + If a bracket is provided, it may use one of the bracketing methods. + If a derivative and an initial value are specified, it may + select one of the derivative-based methods. + If no method is judged applicable, it will raise an Exception. + + Arguments for each method are as follows (x=required, o=optional). 
+ + +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+ + | method | f | args | bracket | x0 | x1 | fprime | fprime2 | xtol | rtol | maxiter | options | + +===============================================+===+======+=========+====+====+========+=========+======+======+=========+=========+ + | :ref:`bisect ` | x | o | x | | | | | o | o | o | o | + +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+ + | :ref:`brentq ` | x | o | x | | | | | o | o | o | o | + +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+ + | :ref:`brenth ` | x | o | x | | | | | o | o | o | o | + +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+ + | :ref:`ridder ` | x | o | x | | | | | o | o | o | o | + +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+ + | :ref:`toms748 ` | x | o | x | | | | | o | o | o | o | + +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+ + | :ref:`secant ` | x | o | | x | o | | | o | o | o | o | + +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+ + | :ref:`newton ` | x | o | | x | | o | | o | o | o | o | + +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+ + | :ref:`halley ` | x | o | | x | | x | x | o | o | o | o | + +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+ + + Examples + -------- + + Find the root of a simple cubic + + >>> from scipy import optimize + >>> def f(x): + ... return (x**3 - 1) # only one real root at x = 1 + + >>> def fprime(x): + ... return 3*x**2 + + The `brentq` method takes as input a bracket + + >>> sol = optimize.root_scalar(f, bracket=[0, 3], method='brentq') + >>> sol.root, sol.iterations, sol.function_calls + (1.0, 10, 11) + + The `newton` method takes as input a single point and uses the + derivative(s). + + >>> sol = optimize.root_scalar(f, x0=0.2, fprime=fprime, method='newton') + >>> sol.root, sol.iterations, sol.function_calls + (1.0, 11, 22) + + The function can provide the value and derivative(s) in a single call. + + >>> def f_p_pp(x): + ... return (x**3 - 1), 3*x**2, 6*x + + >>> sol = optimize.root_scalar( + ... f_p_pp, x0=0.2, fprime=True, method='newton' + ... ) + >>> sol.root, sol.iterations, sol.function_calls + (1.0, 11, 11) + + >>> sol = optimize.root_scalar( + ... f_p_pp, x0=0.2, fprime=True, fprime2=True, method='halley' + ... 
) + >>> sol.root, sol.iterations, sol.function_calls + (1.0, 7, 8) + + + """ # noqa: E501 + if not isinstance(args, tuple): + args = (args,) + + if options is None: + options = {} + + # fun also returns the derivative(s) + is_memoized = False + if fprime2 is not None and not callable(fprime2): + if bool(fprime2): + f = MemoizeDer(f) + is_memoized = True + fprime2 = f.fprime2 + fprime = f.fprime + else: + fprime2 = None + if fprime is not None and not callable(fprime): + if bool(fprime): + f = MemoizeDer(f) + is_memoized = True + fprime = f.fprime + else: + fprime = None + + # respect solver-specific default tolerances - only pass in if actually set + kwargs = {} + for k in ['xtol', 'rtol', 'maxiter']: + v = locals().get(k) + if v is not None: + kwargs[k] = v + + # Set any solver-specific options + if options: + kwargs.update(options) + # Always request full_output from the underlying method as _root_scalar + # always returns a RootResults object + kwargs.update(full_output=True, disp=False) + + # Pick a method if not specified. + # Use the "best" method available for the situation. + if not method: + if bracket: + method = 'brentq' + elif x0 is not None: + if fprime: + if fprime2: + method = 'halley' + else: + method = 'newton' + elif x1 is not None: + method = 'secant' + else: + method = 'newton' + if not method: + raise ValueError('Unable to select a solver as neither bracket ' + 'nor starting point provided.') + + meth = method.lower() + map2underlying = {'halley': 'newton', 'secant': 'newton'} + + try: + methodc = getattr(optzeros, map2underlying.get(meth, meth)) + except AttributeError as e: + raise ValueError('Unknown solver %s' % meth) from e + + if meth in ['bisect', 'ridder', 'brentq', 'brenth', 'toms748']: + if not isinstance(bracket, (list, tuple, np.ndarray)): + raise ValueError('Bracket needed for %s' % method) + + a, b = bracket[:2] + try: + r, sol = methodc(f, a, b, args=args, **kwargs) + except ValueError as e: + # gh-17622 fixed some bugs in low-level solvers by raising an error + # (rather than returning incorrect results) when the callable + # returns a NaN. It did so by wrapping the callable rather than + # modifying compiled code, so the iteration count is not available. + if hasattr(e, "_x"): + sol = optzeros.RootResults(root=e._x, + iterations=np.nan, + function_calls=e._function_calls, + flag=str(e), method=method) + else: + raise + + elif meth in ['secant']: + if x0 is None: + raise ValueError('x0 must not be None for %s' % method) + if 'xtol' in kwargs: + kwargs['tol'] = kwargs.pop('xtol') + r, sol = methodc(f, x0, args=args, fprime=None, fprime2=None, + x1=x1, **kwargs) + elif meth in ['newton']: + if x0 is None: + raise ValueError('x0 must not be None for %s' % method) + if not fprime: + # approximate fprime with finite differences + + def fprime(x, *args): + # `root_scalar` doesn't actually seem to support vectorized + # use of `newton`. In that case, `approx_derivative` will + # always get scalar input. Nonetheless, it always returns an + # array, so we extract the element to produce scalar output. 
+ return approx_derivative(f, x, method='2-point', args=args)[0] + + if 'xtol' in kwargs: + kwargs['tol'] = kwargs.pop('xtol') + r, sol = methodc(f, x0, args=args, fprime=fprime, fprime2=None, + **kwargs) + elif meth in ['halley']: + if x0 is None: + raise ValueError('x0 must not be None for %s' % method) + if not fprime: + raise ValueError('fprime must be specified for %s' % method) + if not fprime2: + raise ValueError('fprime2 must be specified for %s' % method) + if 'xtol' in kwargs: + kwargs['tol'] = kwargs.pop('xtol') + r, sol = methodc(f, x0, args=args, fprime=fprime, fprime2=fprime2, **kwargs) + else: + raise ValueError('Unknown solver %s' % method) + + if is_memoized: + # Replace the function_calls count with the memoized count. + # Avoids double and triple-counting. + n_calls = f.n_calls + sol.function_calls = n_calls + + return sol + + +def _root_scalar_brentq_doc(): + r""" + Options + ------- + args : tuple, optional + Extra arguments passed to the objective function. + bracket: A sequence of 2 floats, optional + An interval bracketing a root. `f(x, *args)` must have different + signs at the two endpoints. + xtol : float, optional + Tolerance (absolute) for termination. + rtol : float, optional + Tolerance (relative) for termination. + maxiter : int, optional + Maximum number of iterations. + options: dict, optional + Specifies any method-specific options not covered above + + """ + pass + + +def _root_scalar_brenth_doc(): + r""" + Options + ------- + args : tuple, optional + Extra arguments passed to the objective function. + bracket: A sequence of 2 floats, optional + An interval bracketing a root. `f(x, *args)` must have different + signs at the two endpoints. + xtol : float, optional + Tolerance (absolute) for termination. + rtol : float, optional + Tolerance (relative) for termination. + maxiter : int, optional + Maximum number of iterations. + options: dict, optional + Specifies any method-specific options not covered above. + + """ + pass + +def _root_scalar_toms748_doc(): + r""" + Options + ------- + args : tuple, optional + Extra arguments passed to the objective function. + bracket: A sequence of 2 floats, optional + An interval bracketing a root. `f(x, *args)` must have different + signs at the two endpoints. + xtol : float, optional + Tolerance (absolute) for termination. + rtol : float, optional + Tolerance (relative) for termination. + maxiter : int, optional + Maximum number of iterations. + options: dict, optional + Specifies any method-specific options not covered above. + + """ + pass + + +def _root_scalar_secant_doc(): + r""" + Options + ------- + args : tuple, optional + Extra arguments passed to the objective function. + xtol : float, optional + Tolerance (absolute) for termination. + rtol : float, optional + Tolerance (relative) for termination. + maxiter : int, optional + Maximum number of iterations. + x0 : float, required + Initial guess. + x1 : float, required + A second guess. + options: dict, optional + Specifies any method-specific options not covered above. + + """ + pass + + +def _root_scalar_newton_doc(): + r""" + Options + ------- + args : tuple, optional + Extra arguments passed to the objective function and its derivative. + xtol : float, optional + Tolerance (absolute) for termination. + rtol : float, optional + Tolerance (relative) for termination. + maxiter : int, optional + Maximum number of iterations. + x0 : float, required + Initial guess. 
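Two small usage sketches (assumed examples) of the behaviour described above: automatic method selection when only a bracket is supplied (brentq is chosen), and the finite-difference fallback used when 'newton' is requested without fprime.

from scipy.optimize import root_scalar

def f(x):
    return x**3 - 1.0                      # single real root at x = 1

sol = root_scalar(f, bracket=[0, 3])       # no method given: brentq is selected
print(sol.root)                            # ~1.0

sol = root_scalar(f, x0=0.2, method='newton')   # no fprime: 2-point finite difference
print(sol.root, sol.converged)             # ~1.0 True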
+ fprime : bool or callable, optional + If `fprime` is a boolean and is True, `f` is assumed to return the + value of derivative along with the objective function. + `fprime` can also be a callable returning the derivative of `f`. In + this case, it must accept the same arguments as `f`. + options: dict, optional + Specifies any method-specific options not covered above. + + """ + pass + + +def _root_scalar_halley_doc(): + r""" + Options + ------- + args : tuple, optional + Extra arguments passed to the objective function and its derivatives. + xtol : float, optional + Tolerance (absolute) for termination. + rtol : float, optional + Tolerance (relative) for termination. + maxiter : int, optional + Maximum number of iterations. + x0 : float, required + Initial guess. + fprime : bool or callable, required + If `fprime` is a boolean and is True, `f` is assumed to return the + value of derivative along with the objective function. + `fprime` can also be a callable returning the derivative of `f`. In + this case, it must accept the same arguments as `f`. + fprime2 : bool or callable, required + If `fprime2` is a boolean and is True, `f` is assumed to return the + value of 1st and 2nd derivatives along with the objective function. + `fprime2` can also be a callable returning the 2nd derivative of `f`. + In this case, it must accept the same arguments as `f`. + options: dict, optional + Specifies any method-specific options not covered above. + + """ + pass + + +def _root_scalar_ridder_doc(): + r""" + Options + ------- + args : tuple, optional + Extra arguments passed to the objective function. + bracket: A sequence of 2 floats, optional + An interval bracketing a root. `f(x, *args)` must have different + signs at the two endpoints. + xtol : float, optional + Tolerance (absolute) for termination. + rtol : float, optional + Tolerance (relative) for termination. + maxiter : int, optional + Maximum number of iterations. + options: dict, optional + Specifies any method-specific options not covered above. + + """ + pass + + +def _root_scalar_bisect_doc(): + r""" + Options + ------- + args : tuple, optional + Extra arguments passed to the objective function. + bracket: A sequence of 2 floats, optional + An interval bracketing a root. `f(x, *args)` must have different + signs at the two endpoints. + xtol : float, optional + Tolerance (absolute) for termination. + rtol : float, optional + Tolerance (relative) for termination. + maxiter : int, optional + Maximum number of iterations. + options: dict, optional + Specifies any method-specific options not covered above. 
+ + """ + pass diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_shgo.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_shgo.py new file mode 100644 index 0000000000000000000000000000000000000000..61d686d02afdffc01e0d66d538c16de9cb02a0a5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_shgo.py @@ -0,0 +1,1595 @@ +"""shgo: The simplicial homology global optimisation algorithm.""" +from collections import namedtuple +import time +import logging +import warnings +import sys + +import numpy as np + +from scipy import spatial +from scipy.optimize import OptimizeResult, minimize, Bounds +from scipy.optimize._optimize import MemoizeJac +from scipy.optimize._constraints import new_bounds_to_old +from scipy.optimize._minimize import standardize_constraints +from scipy._lib._util import _FunctionWrapper + +from scipy.optimize._shgo_lib._complex import Complex + +__all__ = ['shgo'] + + +def shgo( + func, bounds, args=(), constraints=None, n=100, iters=1, callback=None, + minimizer_kwargs=None, options=None, sampling_method='simplicial', *, + workers=1 +): + """ + Finds the global minimum of a function using SHG optimization. + + SHGO stands for "simplicial homology global optimization". + + Parameters + ---------- + func : callable + The objective function to be minimized. Must be in the form + ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array + and ``args`` is a tuple of any additional fixed parameters needed to + completely specify the function. + bounds : sequence or `Bounds` + Bounds for variables. There are two ways to specify the bounds: + + 1. Instance of `Bounds` class. + 2. Sequence of ``(min, max)`` pairs for each element in `x`. + + args : tuple, optional + Any additional fixed parameters needed to completely specify the + objective function. + constraints : {Constraint, dict} or List of {Constraint, dict}, optional + Constraints definition. Only for COBYLA, SLSQP and trust-constr. + See the tutorial [5]_ for further details on specifying constraints. + + .. note:: + + Only COBYLA, SLSQP, and trust-constr local minimize methods + currently support constraint arguments. If the ``constraints`` + sequence used in the local optimization problem is not defined in + ``minimizer_kwargs`` and a constrained method is used then the + global ``constraints`` will be used. + (Defining a ``constraints`` sequence in ``minimizer_kwargs`` + means that ``constraints`` will not be added so if equality + constraints and so forth need to be added then the inequality + functions in ``constraints`` need to be added to + ``minimizer_kwargs`` too). + COBYLA only supports inequality constraints. + + .. versionchanged:: 1.11.0 + + ``constraints`` accepts `NonlinearConstraint`, `LinearConstraint`. + + n : int, optional + Number of sampling points used in the construction of the simplicial + complex. For the default ``simplicial`` sampling method 2**dim + 1 + sampling points are generated instead of the default `n=100`. For all + other specified values `n` sampling points are generated. For + ``sobol``, ``halton`` and other arbitrary `sampling_methods` `n=100` or + another specified number of sampling points are generated. + iters : int, optional + Number of iterations used in the construction of the simplicial + complex. Default is 1. + callback : callable, optional + Called after each iteration, as ``callback(xk)``, where ``xk`` is the + current parameter vector. 
+ minimizer_kwargs : dict, optional + Extra keyword arguments to be passed to the minimizer + ``scipy.optimize.minimize`` Some important options could be: + + * method : str + The minimization method. If not given, chosen to be one of + BFGS, L-BFGS-B, SLSQP, depending on whether or not the + problem has constraints or bounds. + * args : tuple + Extra arguments passed to the objective function (``func``) and + its derivatives (Jacobian, Hessian). + * options : dict, optional + Note that by default the tolerance is specified as + ``{ftol: 1e-12}`` + + options : dict, optional + A dictionary of solver options. Many of the options specified for the + global routine are also passed to the scipy.optimize.minimize routine. + The options that are also passed to the local routine are marked with + "(L)". + + Stopping criteria, the algorithm will terminate if any of the specified + criteria are met. However, the default algorithm does not require any + to be specified: + + * maxfev : int (L) + Maximum number of function evaluations in the feasible domain. + (Note only methods that support this option will terminate + the routine at precisely exact specified value. Otherwise the + criterion will only terminate during a global iteration) + * f_min + Specify the minimum objective function value, if it is known. + * f_tol : float + Precision goal for the value of f in the stopping + criterion. Note that the global routine will also + terminate if a sampling point in the global routine is + within this tolerance. + * maxiter : int + Maximum number of iterations to perform. + * maxev : int + Maximum number of sampling evaluations to perform (includes + searching in infeasible points). + * maxtime : float + Maximum processing runtime allowed + * minhgrd : int + Minimum homology group rank differential. The homology group of the + objective function is calculated (approximately) during every + iteration. The rank of this group has a one-to-one correspondence + with the number of locally convex subdomains in the objective + function (after adequate sampling points each of these subdomains + contain a unique global minimum). If the difference in the hgr is 0 + between iterations for ``maxhgrd`` specified iterations the + algorithm will terminate. + + Objective function knowledge: + + * symmetry : list or bool + Specify if the objective function contains symmetric variables. + The search space (and therefore performance) is decreased by up to + O(n!) times in the fully symmetric case. If `True` is specified + then all variables will be set symmetric to the first variable. + Default + is set to False. + + E.g. f(x) = (x_1 + x_2 + x_3) + (x_4)**2 + (x_5)**2 + (x_6)**2 + + In this equation x_2 and x_3 are symmetric to x_1, while x_5 and + x_6 are symmetric to x_4, this can be specified to the solver as: + + symmetry = [0, # Variable 1 + 0, # symmetric to variable 1 + 0, # symmetric to variable 1 + 3, # Variable 4 + 3, # symmetric to variable 4 + 3, # symmetric to variable 4 + ] + + * jac : bool or callable, optional + Jacobian (gradient) of objective function. Only for CG, BFGS, + Newton-CG, L-BFGS-B, TNC, SLSQP, dogleg, trust-ncg. If ``jac`` is a + boolean and is True, ``fun`` is assumed to return the gradient + along with the objective function. If False, the gradient will be + estimated numerically. ``jac`` can also be a callable returning the + gradient of the objective. In this case, it must accept the same + arguments as ``fun``. 
(Passed to `scipy.optimize.minimize` + automatically) + + * hess, hessp : callable, optional + Hessian (matrix of second-order derivatives) of objective function + or Hessian of objective function times an arbitrary vector p. + Only for Newton-CG, dogleg, trust-ncg. Only one of ``hessp`` or + ``hess`` needs to be given. If ``hess`` is provided, then + ``hessp`` will be ignored. If neither ``hess`` nor ``hessp`` is + provided, then the Hessian product will be approximated using + finite differences on ``jac``. ``hessp`` must compute the Hessian + times an arbitrary vector. (Passed to `scipy.optimize.minimize` + automatically) + + Algorithm settings: + + * minimize_every_iter : bool + If True then promising global sampling points will be passed to a + local minimization routine every iteration. If True then only the + final minimizer pool will be run. Defaults to True. + * local_iter : int + Only evaluate a few of the best minimizer pool candidates every + iteration. If False all potential points are passed to the local + minimization routine. + * infty_constraints : bool + If True then any sampling points generated which are outside will + the feasible domain will be saved and given an objective function + value of ``inf``. If False then these points will be discarded. + Using this functionality could lead to higher performance with + respect to function evaluations before the global minimum is found, + specifying False will use less memory at the cost of a slight + decrease in performance. Defaults to True. + + Feedback: + + * disp : bool (L) + Set to True to print convergence messages. + + sampling_method : str or function, optional + Current built in sampling method options are ``halton``, ``sobol`` and + ``simplicial``. The default ``simplicial`` provides + the theoretical guarantee of convergence to the global minimum in + finite time. ``halton`` and ``sobol`` method are faster in terms of + sampling point generation at the cost of the loss of + guaranteed convergence. It is more appropriate for most "easier" + problems where the convergence is relatively fast. + User defined sampling functions must accept two arguments of ``n`` + sampling points of dimension ``dim`` per call and output an array of + sampling points with shape `n x dim`. + + workers : int or map-like callable, optional + Sample and run the local serial minimizations in parallel. + Supply -1 to use all available CPU cores, or an int to use + that many Processes (uses `multiprocessing.Pool `). + + Alternatively supply a map-like callable, such as + `multiprocessing.Pool.map` for parallel evaluation. + This evaluation is carried out as ``workers(func, iterable)``. + Requires that `func` be pickleable. + + .. versionadded:: 1.11.0 + + Returns + ------- + res : OptimizeResult + The optimization result represented as a `OptimizeResult` object. + Important attributes are: + ``x`` the solution array corresponding to the global minimum, + ``fun`` the function output at the global solution, + ``xl`` an ordered list of local minima solutions, + ``funl`` the function output at the corresponding local solutions, + ``success`` a Boolean flag indicating if the optimizer exited + successfully, + ``message`` which describes the cause of the termination, + ``nfev`` the total number of objective function evaluations including + the sampling calls, + ``nlfev`` the total number of objective function evaluations + culminating from all local search optimizations, + ``nit`` number of iterations performed by the global routine. 
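Minimal usage sketch (an assumed example; the option names are the ones documented above, and iteration counts will vary): choosing the local solver through minimizer_kwargs and stopping early once the known minimum is approached via the f_min/f_tol options.

import numpy as np
from scipy.optimize import shgo

def sphere(x):
    return np.sum(x**2)

bounds = [(-5.0, 5.0)] * 3
res = shgo(sphere, bounds,
           minimizer_kwargs={'method': 'L-BFGS-B'},   # local minimizer choice
           options={'f_min': 0.0, 'f_tol': 1e-8})     # stop once close to the known optimum
print(res.success, res.x, res.fun)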
+ + Notes + ----- + Global optimization using simplicial homology global optimization [1]_. + Appropriate for solving general purpose NLP and blackbox optimization + problems to global optimality (low-dimensional problems). + + In general, the optimization problems are of the form:: + + minimize f(x) subject to + + g_i(x) >= 0, i = 1,...,m + h_j(x) = 0, j = 1,...,p + + where x is a vector of one or more variables. ``f(x)`` is the objective + function ``R^n -> R``, ``g_i(x)`` are the inequality constraints, and + ``h_j(x)`` are the equality constraints. + + Optionally, the lower and upper bounds for each element in x can also be + specified using the `bounds` argument. + + While most of the theoretical advantages of SHGO are only proven for when + ``f(x)`` is a Lipschitz smooth function, the algorithm is also proven to + converge to the global optimum for the more general case where ``f(x)`` is + non-continuous, non-convex and non-smooth, if the default sampling method + is used [1]_. + + The local search method may be specified using the ``minimizer_kwargs`` + parameter which is passed on to ``scipy.optimize.minimize``. By default, + the ``SLSQP`` method is used. In general, it is recommended to use the + ``SLSQP`` or ``COBYLA`` local minimization if inequality constraints + are defined for the problem since the other methods do not use constraints. + + The ``halton`` and ``sobol`` method points are generated using + `scipy.stats.qmc`. Any other QMC method could be used. + + References + ---------- + .. [1] Endres, SC, Sandrock, C, Focke, WW (2018) "A simplicial homology + algorithm for lipschitz optimisation", Journal of Global + Optimization. + .. [2] Joe, SW and Kuo, FY (2008) "Constructing Sobol' sequences with + better two-dimensional projections", SIAM J. Sci. Comput. 30, + 2635-2654. + .. [3] Hock, W and Schittkowski, K (1981) "Test examples for nonlinear + programming codes", Lecture Notes in Economics and Mathematical + Systems, 187. Springer-Verlag, New York. + http://www.ai7.uni-bayreuth.de/test_problem_coll.pdf + .. [4] Wales, DJ (2015) "Perspective: Insight into reaction coordinates and + dynamics from the potential energy landscape", + Journal of Chemical Physics, 142(13), 2015. + .. [5] https://docs.scipy.org/doc/scipy/tutorial/optimize.html#constrained-minimization-of-multivariate-scalar-functions-minimize + + Examples + -------- + First consider the problem of minimizing the Rosenbrock function, `rosen`: + + >>> from scipy.optimize import rosen, shgo + >>> bounds = [(0,2), (0, 2), (0, 2), (0, 2), (0, 2)] + >>> result = shgo(rosen, bounds) + >>> result.x, result.fun + (array([1., 1., 1., 1., 1.]), 2.920392374190081e-18) + + Note that bounds determine the dimensionality of the objective + function and is therefore a required input, however you can specify + empty bounds using ``None`` or objects like ``np.inf`` which will be + converted to large float numbers. + + >>> bounds = [(None, None), ]*4 + >>> result = shgo(rosen, bounds) + >>> result.x + array([0.99999851, 0.99999704, 0.99999411, 0.9999882 ]) + + Next, we consider the Eggholder function, a problem with several local + minima and one global minimum. We will demonstrate the use of arguments and + the capabilities of `shgo`. + (https://en.wikipedia.org/wiki/Test_functions_for_optimization) + + >>> import numpy as np + >>> def eggholder(x): + ... return (-(x[1] + 47.0) + ... * np.sin(np.sqrt(abs(x[0]/2.0 + (x[1] + 47.0)))) + ... - x[0] * np.sin(np.sqrt(abs(x[0] - (x[1] + 47.0)))) + ... ) + ... 
+ >>> bounds = [(-512, 512), (-512, 512)] + + `shgo` has built-in low discrepancy sampling sequences. First, we will + input 64 initial sampling points of the *Sobol'* sequence: + + >>> result = shgo(eggholder, bounds, n=64, sampling_method='sobol') + >>> result.x, result.fun + (array([512. , 404.23180824]), -959.6406627208397) + + `shgo` also has a return for any other local minima that was found, these + can be called using: + + >>> result.xl + array([[ 512. , 404.23180824], + [ 283.0759062 , -487.12565635], + [-294.66820039, -462.01964031], + [-105.87688911, 423.15323845], + [-242.97926 , 274.38030925], + [-506.25823477, 6.3131022 ], + [-408.71980731, -156.10116949], + [ 150.23207937, 301.31376595], + [ 91.00920901, -391.283763 ], + [ 202.89662724, -269.38043241], + [ 361.66623976, -106.96493868], + [-219.40612786, -244.06020508]]) + + >>> result.funl + array([-959.64066272, -718.16745962, -704.80659592, -565.99778097, + -559.78685655, -557.36868733, -507.87385942, -493.9605115 , + -426.48799655, -421.15571437, -419.31194957, -410.98477763]) + + These results are useful in applications where there are many global minima + and the values of other global minima are desired or where the local minima + can provide insight into the system (for example morphologies + in physical chemistry [4]_). + + If we want to find a larger number of local minima, we can increase the + number of sampling points or the number of iterations. We'll increase the + number of sampling points to 64 and the number of iterations from the + default of 1 to 3. Using ``simplicial`` this would have given us + 64 x 3 = 192 initial sampling points. + + >>> result_2 = shgo(eggholder, + ... bounds, n=64, iters=3, sampling_method='sobol') + >>> len(result.xl), len(result_2.xl) + (12, 23) + + Note the difference between, e.g., ``n=192, iters=1`` and ``n=64, + iters=3``. + In the first case the promising points contained in the minimiser pool + are processed only once. In the latter case it is processed every 64 + sampling points for a total of 3 times. + + To demonstrate solving problems with non-linear constraints consider the + following example from Hock and Schittkowski problem 73 (cattle-feed) + [3]_:: + + minimize: f = 24.55 * x_1 + 26.75 * x_2 + 39 * x_3 + 40.50 * x_4 + + subject to: 2.3 * x_1 + 5.6 * x_2 + 11.1 * x_3 + 1.3 * x_4 - 5 >= 0, + + 12 * x_1 + 11.9 * x_2 + 41.8 * x_3 + 52.1 * x_4 - 21 + -1.645 * sqrt(0.28 * x_1**2 + 0.19 * x_2**2 + + 20.5 * x_3**2 + 0.62 * x_4**2) >= 0, + + x_1 + x_2 + x_3 + x_4 - 1 == 0, + + 1 >= x_i >= 0 for all i + + The approximate answer given in [3]_ is:: + + f([0.6355216, -0.12e-11, 0.3127019, 0.05177655]) = 29.894378 + + >>> def f(x): # (cattle-feed) + ... return 24.55*x[0] + 26.75*x[1] + 39*x[2] + 40.50*x[3] + ... + >>> def g1(x): + ... return 2.3*x[0] + 5.6*x[1] + 11.1*x[2] + 1.3*x[3] - 5 # >=0 + ... + >>> def g2(x): + ... return (12*x[0] + 11.9*x[1] +41.8*x[2] + 52.1*x[3] - 21 + ... - 1.645 * np.sqrt(0.28*x[0]**2 + 0.19*x[1]**2 + ... + 20.5*x[2]**2 + 0.62*x[3]**2) + ... ) # >=0 + ... + >>> def h1(x): + ... return x[0] + x[1] + x[2] + x[3] - 1 # == 0 + ... + >>> cons = ({'type': 'ineq', 'fun': g1}, + ... {'type': 'ineq', 'fun': g2}, + ... {'type': 'eq', 'fun': h1}) + >>> bounds = [(0, 1.0),]*4 + >>> res = shgo(f, bounds, n=150, constraints=cons) + >>> res + message: Optimization terminated successfully. 
+ success: True + fun: 29.894378159142136 + funl: [ 2.989e+01] + x: [ 6.355e-01 1.137e-13 3.127e-01 5.178e-02] # may vary + xl: [[ 6.355e-01 1.137e-13 3.127e-01 5.178e-02]] # may vary + nit: 1 + nfev: 142 # may vary + nlfev: 35 # may vary + nljev: 5 + nlhev: 0 + + >>> g1(res.x), g2(res.x), h1(res.x) + (-5.062616992290714e-14, -2.9594104944408173e-12, 0.0) + + """ + # if necessary, convert bounds class to old bounds + if isinstance(bounds, Bounds): + bounds = new_bounds_to_old(bounds.lb, bounds.ub, len(bounds.lb)) + + # Initiate SHGO class + # use in context manager to make sure that any parallelization + # resources are freed. + with SHGO(func, bounds, args=args, constraints=constraints, n=n, + iters=iters, callback=callback, + minimizer_kwargs=minimizer_kwargs, + options=options, sampling_method=sampling_method, + workers=workers) as shc: + # Run the algorithm, process results and test success + shc.iterate_all() + + if not shc.break_routine: + if shc.disp: + logging.info("Successfully completed construction of complex.") + + # Test post iterations success + if len(shc.LMC.xl_maps) == 0: + # If sampling failed to find pool, return lowest sampled point + # with a warning + shc.find_lowest_vertex() + shc.break_routine = True + shc.fail_routine(mes="Failed to find a feasible minimizer point. " + f"Lowest sampling point = {shc.f_lowest}") + shc.res.fun = shc.f_lowest + shc.res.x = shc.x_lowest + shc.res.nfev = shc.fn + shc.res.tnev = shc.n_sampled + else: + # Test that the optimal solutions do not violate any constraints + pass # TODO + + # Confirm the routine ran successfully + if not shc.break_routine: + shc.res.message = 'Optimization terminated successfully.' + shc.res.success = True + + # Return the final results + return shc.res + + +class SHGO: + def __init__(self, func, bounds, args=(), constraints=None, n=None, + iters=None, callback=None, minimizer_kwargs=None, + options=None, sampling_method='simplicial', workers=1): + from scipy.stats import qmc + # Input checks + methods = ['halton', 'sobol', 'simplicial'] + if isinstance(sampling_method, str) and sampling_method not in methods: + raise ValueError(("Unknown sampling_method specified." + " Valid methods: {}").format(', '.join(methods))) + + # Split obj func if given with Jac + try: + if ((minimizer_kwargs['jac'] is True) and + (not callable(minimizer_kwargs['jac']))): + self.func = MemoizeJac(func) + jac = self.func.derivative + minimizer_kwargs['jac'] = jac + func = self.func # .fun + else: + self.func = func # Normal definition of objective function + except (TypeError, KeyError): + self.func = func # Normal definition of objective function + + # Initiate class + self.func = _FunctionWrapper(func, args) + self.bounds = bounds + self.args = args + self.callback = callback + + # Bounds + abound = np.array(bounds, float) + self.dim = np.shape(abound)[0] # Dimensionality of problem + + # Set none finite values to large floats + infind = ~np.isfinite(abound) + abound[infind[:, 0], 0] = -1e50 + abound[infind[:, 1], 1] = 1e50 + + # Check if bounds are correctly specified + bnderr = abound[:, 0] > abound[:, 1] + if bnderr.any(): + raise ValueError('Error: lb > ub in bounds {}.' 
+ .format(', '.join(str(b) for b in bnderr))) + + self.bounds = abound + + # Constraints + # Process constraint dict sequence: + self.constraints = constraints + if constraints is not None: + self.min_cons = constraints + self.g_cons = [] + self.g_args = [] + + # shgo internals deals with old-style constraints + # self.constraints is used to create Complex, so need + # to be stored internally in old-style. + # `minimize` takes care of normalising these constraints + # for slsqp/cobyla/trust-constr. + self.constraints = standardize_constraints( + constraints, + np.empty(self.dim, float), + 'old' + ) + for cons in self.constraints: + if cons['type'] in ('ineq'): + self.g_cons.append(cons['fun']) + try: + self.g_args.append(cons['args']) + except KeyError: + self.g_args.append(()) + self.g_cons = tuple(self.g_cons) + self.g_args = tuple(self.g_args) + else: + self.g_cons = None + self.g_args = None + + # Define local minimization keyword arguments + # Start with defaults + self.minimizer_kwargs = {'method': 'SLSQP', + 'bounds': self.bounds, + 'options': {}, + 'callback': self.callback + } + if minimizer_kwargs is not None: + # Overwrite with supplied values + self.minimizer_kwargs.update(minimizer_kwargs) + + else: + self.minimizer_kwargs['options'] = {'ftol': 1e-12} + + if ( + self.minimizer_kwargs['method'].lower() in ('slsqp', 'cobyla', + 'trust-constr') + and ( + minimizer_kwargs is not None and + 'constraints' not in minimizer_kwargs and + constraints is not None + ) or + (self.g_cons is not None) + ): + self.minimizer_kwargs['constraints'] = self.min_cons + + # Process options dict + if options is not None: + self.init_options(options) + else: # Default settings: + self.f_min_true = None + self.minimize_every_iter = True + + # Algorithm limits + self.maxiter = None + self.maxfev = None + self.maxev = None + self.maxtime = None + self.f_min_true = None + self.minhgrd = None + + # Objective function knowledge + self.symmetry = None + + # Algorithm functionality + self.infty_cons_sampl = True + self.local_iter = False + + # Feedback + self.disp = False + + # Remove unknown arguments in self.minimizer_kwargs + # Start with arguments all the solvers have in common + self.min_solver_args = ['fun', 'x0', 'args', + 'callback', 'options', 'method'] + # then add the ones unique to specific solvers + solver_args = { + '_custom': ['jac', 'hess', 'hessp', 'bounds', 'constraints'], + 'nelder-mead': [], + 'powell': [], + 'cg': ['jac'], + 'bfgs': ['jac'], + 'newton-cg': ['jac', 'hess', 'hessp'], + 'l-bfgs-b': ['jac', 'bounds'], + 'tnc': ['jac', 'bounds'], + 'cobyla': ['constraints', 'catol'], + 'slsqp': ['jac', 'bounds', 'constraints'], + 'dogleg': ['jac', 'hess'], + 'trust-ncg': ['jac', 'hess', 'hessp'], + 'trust-krylov': ['jac', 'hess', 'hessp'], + 'trust-exact': ['jac', 'hess'], + 'trust-constr': ['jac', 'hess', 'hessp', 'constraints'], + } + method = self.minimizer_kwargs['method'] + self.min_solver_args += solver_args[method.lower()] + + # Only retain the known arguments + def _restrict_to_keys(dictionary, goodkeys): + """Remove keys from dictionary if not in goodkeys - inplace""" + existingkeys = set(dictionary) + for key in existingkeys - set(goodkeys): + dictionary.pop(key, None) + + _restrict_to_keys(self.minimizer_kwargs, self.min_solver_args) + _restrict_to_keys(self.minimizer_kwargs['options'], + self.min_solver_args + ['ftol']) + + # Algorithm controls + # Global controls + self.stop_global = False # Used in the stopping_criteria method + self.break_routine = False # Break the algorithm 
globally + self.iters = iters # Iterations to be ran + self.iters_done = 0 # Iterations completed + self.n = n # Sampling points per iteration + self.nc = 0 # n # Sampling points to sample in current iteration + self.n_prc = 0 # Processed points (used to track Delaunay iters) + self.n_sampled = 0 # To track no. of sampling points already generated + self.fn = 0 # Number of feasible sampling points evaluations performed + self.hgr = 0 # Homology group rank + # Initially attempt to build the triangulation incrementally: + self.qhull_incremental = True + + # Default settings if no sampling criteria. + if (self.n is None) and (self.iters is None) \ + and (sampling_method == 'simplicial'): + self.n = 2 ** self.dim + 1 + self.nc = 0 # self.n + if self.iters is None: + self.iters = 1 + if (self.n is None) and not (sampling_method == 'simplicial'): + self.n = self.n = 100 + self.nc = 0 # self.n + if (self.n == 100) and (sampling_method == 'simplicial'): + self.n = 2 ** self.dim + 1 + + if not ((self.maxiter is None) and (self.maxfev is None) and ( + self.maxev is None) + and (self.minhgrd is None) and (self.f_min_true is None)): + self.iters = None + + # Set complex construction mode based on a provided stopping criteria: + # Initialise sampling Complex and function cache + # Note that sfield_args=() since args are already wrapped in self.func + # using the_FunctionWrapper class. + self.HC = Complex(dim=self.dim, domain=self.bounds, + sfield=self.func, sfield_args=(), + symmetry=self.symmetry, + constraints=self.constraints, + workers=workers) + + # Choose complex constructor + if sampling_method == 'simplicial': + self.iterate_complex = self.iterate_hypercube + self.sampling_method = sampling_method + + elif sampling_method in ['halton', 'sobol'] or \ + not isinstance(sampling_method, str): + self.iterate_complex = self.iterate_delaunay + # Sampling method used + if sampling_method in ['halton', 'sobol']: + if sampling_method == 'sobol': + self.n = int(2 ** np.ceil(np.log2(self.n))) + # self.n #TODO: Should always be self.n, this is + # unacceptable for shgo, check that nfev behaves as + # expected. + self.nc = 0 + self.sampling_method = 'sobol' + self.qmc_engine = qmc.Sobol(d=self.dim, scramble=False, + seed=0) + else: + self.sampling_method = 'halton' + self.qmc_engine = qmc.Halton(d=self.dim, scramble=True, + seed=0) + + def sampling_method(n, d): + return self.qmc_engine.random(n) + + else: + # A user defined sampling method: + self.sampling_method = 'custom' + + self.sampling = self.sampling_custom + self.sampling_function = sampling_method # F(n, d) + + # Local controls + self.stop_l_iter = False # Local minimisation iterations + self.stop_complex_iter = False # Sampling iterations + + # Initiate storage objects used in algorithm classes + self.minimizer_pool = [] + + # Cache of local minimizers mapped + self.LMC = LMapCache() + + # Initialize return object + self.res = OptimizeResult() # scipy.optimize.OptimizeResult object + self.res.nfev = 0 # Includes each sampling point as func evaluation + self.res.nlfev = 0 # Local function evals for all minimisers + self.res.nljev = 0 # Local Jacobian evals for all minimisers + self.res.nlhev = 0 # Local Hessian evals for all minimisers + + # Initiation aids + def init_options(self, options): + """ + Initiates the options. + + Can also be useful to change parameters after class initiation. 
+ + Parameters + ---------- + options : dict + + Returns + ------- + None + + """ + # Update 'options' dict passed to optimize.minimize + # Do this first so we don't mutate `options` below. + self.minimizer_kwargs['options'].update(options) + + # Ensure that 'jac', 'hess', and 'hessp' are passed directly to + # `minimize` as keywords, not as part of its 'options' dictionary. + for opt in ['jac', 'hess', 'hessp']: + if opt in self.minimizer_kwargs['options']: + self.minimizer_kwargs[opt] = ( + self.minimizer_kwargs['options'].pop(opt)) + + # Default settings: + self.minimize_every_iter = options.get('minimize_every_iter', True) + + # Algorithm limits + # Maximum number of iterations to perform. + self.maxiter = options.get('maxiter', None) + # Maximum number of function evaluations in the feasible domain + self.maxfev = options.get('maxfev', None) + # Maximum number of sampling evaluations (includes searching in + # infeasible points + self.maxev = options.get('maxev', None) + # Maximum processing runtime allowed + self.init = time.time() + self.maxtime = options.get('maxtime', None) + if 'f_min' in options: + # Specify the minimum objective function value, if it is known. + self.f_min_true = options['f_min'] + self.f_tol = options.get('f_tol', 1e-4) + else: + self.f_min_true = None + + self.minhgrd = options.get('minhgrd', None) + + # Objective function knowledge + self.symmetry = options.get('symmetry', False) + if self.symmetry: + self.symmetry = [0, ]*len(self.bounds) + else: + self.symmetry = None + # Algorithm functionality + # Only evaluate a few of the best candidates + self.local_iter = options.get('local_iter', False) + self.infty_cons_sampl = options.get('infty_constraints', True) + + # Feedback + self.disp = options.get('disp', False) + + def __enter__(self): + return self + + def __exit__(self, *args): + return self.HC.V._mapwrapper.__exit__(*args) + + # Iteration properties + # Main construction loop: + def iterate_all(self): + """ + Construct for `iters` iterations. + + If uniform sampling is used, every iteration adds 'n' sampling points. + + Iterations if a stopping criteria (e.g., sampling points or + processing time) has been met. + + """ + if self.disp: + logging.info('Splitting first generation') + + while not self.stop_global: + if self.break_routine: + break + # Iterate complex, process minimisers + self.iterate() + self.stopping_criteria() + + # Build minimiser pool + # Final iteration only needed if pools weren't minimised every + # iteration + if not self.minimize_every_iter: + if not self.break_routine: + self.find_minima() + + self.res.nit = self.iters_done # + 1 + self.fn = self.HC.V.nfev + + def find_minima(self): + """ + Construct the minimizer pool, map the minimizers to local minima + and sort the results into a global return object. 
+ """ + if self.disp: + logging.info('Searching for minimizer pool...') + + self.minimizers() + + if len(self.X_min) != 0: + # Minimize the pool of minimizers with local minimization methods + # Note that if Options['local_iter'] is an `int` instead of default + # value False then only that number of candidates will be minimized + self.minimise_pool(self.local_iter) + # Sort results and build the global return object + self.sort_result() + + # Lowest values used to report in case of failures + self.f_lowest = self.res.fun + self.x_lowest = self.res.x + else: + self.find_lowest_vertex() + + if self.disp: + logging.info(f"Minimiser pool = SHGO.X_min = {self.X_min}") + + def find_lowest_vertex(self): + # Find the lowest objective function value on one of + # the vertices of the simplicial complex + self.f_lowest = np.inf + for x in self.HC.V.cache: + if self.HC.V[x].f < self.f_lowest: + if self.disp: + logging.info(f'self.HC.V[x].f = {self.HC.V[x].f}') + self.f_lowest = self.HC.V[x].f + self.x_lowest = self.HC.V[x].x_a + for lmc in self.LMC.cache: + if self.LMC[lmc].f_min < self.f_lowest: + self.f_lowest = self.LMC[lmc].f_min + self.x_lowest = self.LMC[lmc].x_l + + if self.f_lowest == np.inf: # no feasible point + self.f_lowest = None + self.x_lowest = None + + # Stopping criteria functions: + def finite_iterations(self): + mi = min(x for x in [self.iters, self.maxiter] if x is not None) + if self.disp: + logging.info(f'Iterations done = {self.iters_done} / {mi}') + if self.iters is not None: + if self.iters_done >= (self.iters): + self.stop_global = True + + if self.maxiter is not None: # Stop for infeasible sampling + if self.iters_done >= (self.maxiter): + self.stop_global = True + return self.stop_global + + def finite_fev(self): + # Finite function evals in the feasible domain + if self.disp: + logging.info(f'Function evaluations done = {self.fn} / {self.maxfev}') + if self.fn >= self.maxfev: + self.stop_global = True + return self.stop_global + + def finite_ev(self): + # Finite evaluations including infeasible sampling points + if self.disp: + logging.info(f'Sampling evaluations done = {self.n_sampled} ' + f'/ {self.maxev}') + if self.n_sampled >= self.maxev: + self.stop_global = True + + def finite_time(self): + if self.disp: + logging.info(f'Time elapsed = {time.time() - self.init} ' + f'/ {self.maxtime}') + if (time.time() - self.init) >= self.maxtime: + self.stop_global = True + + def finite_precision(self): + """ + Stop the algorithm if the final function value is known + + Specify in options (with ``self.f_min_true = options['f_min']``) + and the tolerance with ``f_tol = options['f_tol']`` + """ + # If no minimizer has been found use the lowest sampling value + self.find_lowest_vertex() + if self.disp: + logging.info(f'Lowest function evaluation = {self.f_lowest}') + logging.info(f'Specified minimum = {self.f_min_true}') + # If no feasible point was return from test + if self.f_lowest is None: + return self.stop_global + + # Function to stop algorithm at specified percentage error: + if self.f_min_true == 0.0: + if self.f_lowest <= self.f_tol: + self.stop_global = True + else: + pe = (self.f_lowest - self.f_min_true) / abs(self.f_min_true) + if self.f_lowest <= self.f_min_true: + self.stop_global = True + # 2if (pe - self.f_tol) <= abs(1.0 / abs(self.f_min_true)): + if abs(pe) >= 2 * self.f_tol: + warnings.warn( + f"A much lower value than expected f* = {self.f_min_true} " + f"was found f_lowest = {self.f_lowest}", + stacklevel=3 + ) + if pe <= self.f_tol: + self.stop_global = 
True + + return self.stop_global + + def finite_homology_growth(self): + """ + Stop the algorithm if homology group rank did not grow in iteration. + """ + if self.LMC.size == 0: + return # pass on no reason to stop yet. + self.hgrd = self.LMC.size - self.hgr + + self.hgr = self.LMC.size + if self.hgrd <= self.minhgrd: + self.stop_global = True + if self.disp: + logging.info(f'Current homology growth = {self.hgrd} ' + f' (minimum growth = {self.minhgrd})') + return self.stop_global + + def stopping_criteria(self): + """ + Various stopping criteria ran every iteration + + Returns + ------- + stop : bool + """ + if self.maxiter is not None: + self.finite_iterations() + if self.iters is not None: + self.finite_iterations() + if self.maxfev is not None: + self.finite_fev() + if self.maxev is not None: + self.finite_ev() + if self.maxtime is not None: + self.finite_time() + if self.f_min_true is not None: + self.finite_precision() + if self.minhgrd is not None: + self.finite_homology_growth() + return self.stop_global + + def iterate(self): + self.iterate_complex() + + # Build minimizer pool + if self.minimize_every_iter: + if not self.break_routine: + self.find_minima() # Process minimizer pool + + # Algorithm updates + self.iters_done += 1 + + def iterate_hypercube(self): + """ + Iterate a subdivision of the complex + + Note: called with ``self.iterate_complex()`` after class initiation + """ + # Iterate the complex + if self.disp: + logging.info('Constructing and refining simplicial complex graph ' + 'structure') + if self.n is None: + self.HC.refine_all() + self.n_sampled = self.HC.V.size() # nevs counted + else: + self.HC.refine(self.n) + self.n_sampled += self.n + + if self.disp: + logging.info('Triangulation completed, evaluating all constraints ' + 'and objective function values.') + + # Re-add minimisers to complex + if len(self.LMC.xl_maps) > 0: + for xl in self.LMC.cache: + v = self.HC.V[xl] + v_near = v.star() + for v in v.nn: + v_near = v_near.union(v.nn) + # Reconnect vertices to complex + # if self.HC.connect_vertex_non_symm(tuple(self.LMC[xl].x_l), + # near=v_near): + # continue + # else: + # If failure to find in v_near, then search all vertices + # (very expensive operation: + # self.HC.connect_vertex_non_symm(tuple(self.LMC[xl].x_l) + # ) + + # Evaluate all constraints and functions + self.HC.V.process_pools() + if self.disp: + logging.info('Evaluations completed.') + + # feasible sampling points counted by the triangulation.py routines + self.fn = self.HC.V.nfev + return + + def iterate_delaunay(self): + """ + Build a complex of Delaunay triangulated points + + Note: called with ``self.iterate_complex()`` after class initiation + """ + self.nc += self.n + self.sampled_surface(infty_cons_sampl=self.infty_cons_sampl) + + # Add sampled points to a triangulation, construct self.Tri + if self.disp: + logging.info(f'self.n = {self.n}') + logging.info(f'self.nc = {self.nc}') + logging.info('Constructing and refining simplicial complex graph ' + 'structure from sampling points.') + + if self.dim < 2: + self.Ind_sorted = np.argsort(self.C, axis=0) + self.Ind_sorted = self.Ind_sorted.flatten() + tris = [] + for ind, ind_s in enumerate(self.Ind_sorted): + if ind > 0: + tris.append(self.Ind_sorted[ind - 1:ind + 1]) + + tris = np.array(tris) + # Store 1D triangulation: + self.Tri = namedtuple('Tri', ['points', 'simplices'])(self.C, tris) + self.points = {} + else: + if self.C.shape[0] > self.dim + 1: # Ensure a simplex can be built + self.delaunay_triangulation(n_prc=self.n_prc) + 
self.n_prc = self.C.shape[0] + + if self.disp: + logging.info('Triangulation completed, evaluating all ' + 'constraints and objective function values.') + + if hasattr(self, 'Tri'): + self.HC.vf_to_vv(self.Tri.points, self.Tri.simplices) + + # Process all pools + # Evaluate all constraints and functions + if self.disp: + logging.info('Triangulation completed, evaluating all constraints ' + 'and objective function values.') + + # Evaluate all constraints and functions + self.HC.V.process_pools() + if self.disp: + logging.info('Evaluations completed.') + + # feasible sampling points counted by the triangulation.py routines + self.fn = self.HC.V.nfev + self.n_sampled = self.nc # nevs counted in triangulation + return + + # Hypercube minimizers + def minimizers(self): + """ + Returns the indexes of all minimizers + """ + self.minimizer_pool = [] + # Note: Can implement parallelization here + for x in self.HC.V.cache: + in_LMC = False + if len(self.LMC.xl_maps) > 0: + for xlmi in self.LMC.xl_maps: + if np.all(np.array(x) == np.array(xlmi)): + in_LMC = True + if in_LMC: + continue + + if self.HC.V[x].minimiser(): + if self.disp: + logging.info('=' * 60) + logging.info(f'v.x = {self.HC.V[x].x_a} is minimizer') + logging.info(f'v.f = {self.HC.V[x].f} is minimizer') + logging.info('=' * 30) + + if self.HC.V[x] not in self.minimizer_pool: + self.minimizer_pool.append(self.HC.V[x]) + + if self.disp: + logging.info('Neighbors:') + logging.info('=' * 30) + for vn in self.HC.V[x].nn: + logging.info(f'x = {vn.x} || f = {vn.f}') + + logging.info('=' * 60) + self.minimizer_pool_F = [] + self.X_min = [] + # normalized tuple in the Vertex cache + self.X_min_cache = {} # Cache used in hypercube sampling + + for v in self.minimizer_pool: + self.X_min.append(v.x_a) + self.minimizer_pool_F.append(v.f) + self.X_min_cache[tuple(v.x_a)] = v.x + + self.minimizer_pool_F = np.array(self.minimizer_pool_F) + self.X_min = np.array(self.X_min) + + # TODO: Only do this if global mode + self.sort_min_pool() + + return self.X_min + + # Local minimisation + # Minimiser pool processing + def minimise_pool(self, force_iter=False): + """ + This processing method can optionally minimise only the best candidate + solutions in the minimiser pool + + Parameters + ---------- + force_iter : int + Number of starting minimizers to process (can be specified + globally or locally) + + """ + # Find first local minimum + # NOTE: Since we always minimize this value regardless it is a waste to + # build the topograph first before minimizing + lres_f_min = self.minimize(self.X_min[0], ind=self.minimizer_pool[0]) + + # Trim minimized point from current minimizer set + self.trim_min_pool(0) + + while not self.stop_l_iter: + # Global stopping criteria: + self.stopping_criteria() + + # Note first iteration is outside loop: + if force_iter: + force_iter -= 1 + if force_iter == 0: + self.stop_l_iter = True + break + + if np.shape(self.X_min)[0] == 0: + self.stop_l_iter = True + break + + # Construct topograph from current minimizer set + # (NOTE: This is a very small topograph using only the minizer pool + # , it might be worth using some graph theory tools instead. 
+ self.g_topograph(lres_f_min.x, self.X_min) + + # Find local minimum at the miniser with the greatest Euclidean + # distance from the current solution + ind_xmin_l = self.Z[:, -1] + lres_f_min = self.minimize(self.Ss[-1, :], self.minimizer_pool[-1]) + + # Trim minimised point from current minimizer set + self.trim_min_pool(ind_xmin_l) + + # Reset controls + self.stop_l_iter = False + return + + def sort_min_pool(self): + # Sort to find minimum func value in min_pool + self.ind_f_min = np.argsort(self.minimizer_pool_F) + self.minimizer_pool = np.array(self.minimizer_pool)[self.ind_f_min] + self.minimizer_pool_F = np.array(self.minimizer_pool_F)[ + self.ind_f_min] + return + + def trim_min_pool(self, trim_ind): + self.X_min = np.delete(self.X_min, trim_ind, axis=0) + self.minimizer_pool_F = np.delete(self.minimizer_pool_F, trim_ind) + self.minimizer_pool = np.delete(self.minimizer_pool, trim_ind) + return + + def g_topograph(self, x_min, X_min): + """ + Returns the topographical vector stemming from the specified value + ``x_min`` for the current feasible set ``X_min`` with True boolean + values indicating positive entries and False values indicating + negative entries. + + """ + x_min = np.array([x_min]) + self.Y = spatial.distance.cdist(x_min, X_min, 'euclidean') + # Find sorted indexes of spatial distances: + self.Z = np.argsort(self.Y, axis=-1) + + self.Ss = X_min[self.Z][0] + self.minimizer_pool = self.minimizer_pool[self.Z] + self.minimizer_pool = self.minimizer_pool[0] + return self.Ss + + # Local bound functions + def construct_lcb_simplicial(self, v_min): + """ + Construct locally (approximately) convex bounds + + Parameters + ---------- + v_min : Vertex object + The minimizer vertex + + Returns + ------- + cbounds : list of lists + List of size dimension with length-2 list of bounds for each + dimension. + + """ + cbounds = [[x_b_i[0], x_b_i[1]] for x_b_i in self.bounds] + # Loop over all bounds + for vn in v_min.nn: + for i, x_i in enumerate(vn.x_a): + # Lower bound + if (x_i < v_min.x_a[i]) and (x_i > cbounds[i][0]): + cbounds[i][0] = x_i + + # Upper bound + if (x_i > v_min.x_a[i]) and (x_i < cbounds[i][1]): + cbounds[i][1] = x_i + + if self.disp: + logging.info(f'cbounds found for v_min.x_a = {v_min.x_a}') + logging.info(f'cbounds = {cbounds}') + + return cbounds + + def construct_lcb_delaunay(self, v_min, ind=None): + """ + Construct locally (approximately) convex bounds + + Parameters + ---------- + v_min : Vertex object + The minimizer vertex + + Returns + ------- + cbounds : list of lists + List of size dimension with length-2 list of bounds for each + dimension. + """ + cbounds = [[x_b_i[0], x_b_i[1]] for x_b_i in self.bounds] + + return cbounds + + # Minimize a starting point locally + def minimize(self, x_min, ind=None): + """ + This function is used to calculate the local minima using the specified + sampling point as a starting value. + + Parameters + ---------- + x_min : vector of floats + Current starting point to minimize. + + Returns + ------- + lres : OptimizeResult + The local optimization result represented as a `OptimizeResult` + object. 
+ """ + # Use minima maps if vertex was already run + if self.disp: + logging.info(f'Vertex minimiser maps = {self.LMC.v_maps}') + + if self.LMC[x_min].lres is not None: + logging.info(f'Found self.LMC[x_min].lres = ' + f'{self.LMC[x_min].lres}') + return self.LMC[x_min].lres + + if self.callback is not None: + logging.info(f'Callback for minimizer starting at {x_min}:') + + if self.disp: + logging.info(f'Starting minimization at {x_min}...') + + if self.sampling_method == 'simplicial': + x_min_t = tuple(x_min) + # Find the normalized tuple in the Vertex cache: + x_min_t_norm = self.X_min_cache[tuple(x_min_t)] + x_min_t_norm = tuple(x_min_t_norm) + g_bounds = self.construct_lcb_simplicial(self.HC.V[x_min_t_norm]) + if 'bounds' in self.min_solver_args: + self.minimizer_kwargs['bounds'] = g_bounds + logging.info(self.minimizer_kwargs['bounds']) + + else: + g_bounds = self.construct_lcb_delaunay(x_min, ind=ind) + if 'bounds' in self.min_solver_args: + self.minimizer_kwargs['bounds'] = g_bounds + logging.info(self.minimizer_kwargs['bounds']) + + if self.disp and 'bounds' in self.minimizer_kwargs: + logging.info('bounds in kwarg:') + logging.info(self.minimizer_kwargs['bounds']) + + # Local minimization using scipy.optimize.minimize: + lres = minimize(self.func, x_min, **self.minimizer_kwargs) + + if self.disp: + logging.info(f'lres = {lres}') + + # Local function evals for all minimizers + self.res.nlfev += lres.nfev + if 'njev' in lres: + self.res.nljev += lres.njev + if 'nhev' in lres: + self.res.nlhev += lres.nhev + + try: # Needed because of the brain dead 1x1 NumPy arrays + lres.fun = lres.fun[0] + except (IndexError, TypeError): + lres.fun + + # Append minima maps + self.LMC[x_min] + self.LMC.add_res(x_min, lres, bounds=g_bounds) + + return lres + + # Post local minimization processing + def sort_result(self): + """ + Sort results and build the global return object + """ + # Sort results in local minima cache + results = self.LMC.sort_cache_result() + self.res.xl = results['xl'] + self.res.funl = results['funl'] + self.res.x = results['x'] + self.res.fun = results['fun'] + + # Add local func evals to sampling func evals + # Count the number of feasible vertices and add to local func evals: + self.res.nfev = self.fn + self.res.nlfev + return self.res + + # Algorithm controls + def fail_routine(self, mes=("Failed to converge")): + self.break_routine = True + self.res.success = False + self.X_min = [None] + self.res.message = mes + + def sampled_surface(self, infty_cons_sampl=False): + """ + Sample the function surface. + + There are 2 modes, if ``infty_cons_sampl`` is True then the sampled + points that are generated outside the feasible domain will be + assigned an ``inf`` value in accordance with SHGO rules. + This guarantees convergence and usually requires less objective + function evaluations at the computational costs of more Delaunay + triangulation points. + + If ``infty_cons_sampl`` is False, then the infeasible points are + discarded and only a subspace of the sampled points are used. This + comes at the cost of the loss of guaranteed convergence and usually + requires more objective function evaluations. 
+ """ + # Generate sampling points + if self.disp: + logging.info('Generating sampling points') + self.sampling(self.nc, self.dim) + if len(self.LMC.xl_maps) > 0: + self.C = np.vstack((self.C, np.array(self.LMC.xl_maps))) + if not infty_cons_sampl: + # Find subspace of feasible points + if self.g_cons is not None: + self.sampling_subspace() + + # Sort remaining samples + self.sorted_samples() + + # Find objective function references + self.n_sampled = self.nc + + def sampling_custom(self, n, dim): + """ + Generates uniform sampling points in a hypercube and scales the points + to the bound limits. + """ + # Generate sampling points. + # Generate uniform sample points in [0, 1]^m \subset R^m + if self.n_sampled == 0: + self.C = self.sampling_function(n, dim) + else: + self.C = self.sampling_function(n, dim) + # Distribute over bounds + for i in range(len(self.bounds)): + self.C[:, i] = (self.C[:, i] * + (self.bounds[i][1] - self.bounds[i][0]) + + self.bounds[i][0]) + return self.C + + def sampling_subspace(self): + """Find subspace of feasible points from g_func definition""" + # Subspace of feasible points. + for ind, g in enumerate(self.g_cons): + # C.shape = (Z, dim) where Z is the number of sampling points to + # evaluate and dim is the dimensionality of the problem. + # the constraint function may not be vectorised so have to step + # through each sampling point sequentially. + feasible = np.array( + [np.all(g(x_C, *self.g_args[ind]) >= 0.0) for x_C in self.C], + dtype=bool + ) + self.C = self.C[feasible] + + if self.C.size == 0: + self.res.message = ('No sampling point found within the ' + + 'feasible set. Increasing sampling ' + + 'size.') + # sampling correctly for both 1-D and >1-D cases + if self.disp: + logging.info(self.res.message) + + def sorted_samples(self): # Validated + """Find indexes of the sorted sampling points""" + self.Ind_sorted = np.argsort(self.C, axis=0) + self.Xs = self.C[self.Ind_sorted] + return self.Ind_sorted, self.Xs + + def delaunay_triangulation(self, n_prc=0): + if hasattr(self, 'Tri') and self.qhull_incremental: + # TODO: Uncertain if n_prc needs to add len(self.LMC.xl_maps) + # in self.sampled_surface + self.Tri.add_points(self.C[n_prc:, :]) + else: + try: + self.Tri = spatial.Delaunay(self.C, + incremental=self.qhull_incremental, + ) + except spatial.QhullError: + if str(sys.exc_info()[1])[:6] == 'QH6239': + logging.warning('QH6239 Qhull precision error detected, ' + 'this usually occurs when no bounds are ' + 'specified, Qhull can only run with ' + 'handling cocircular/cospherical points' + ' and in this case incremental mode is ' + 'switched off. 
The performance of shgo ' + 'will be reduced in this mode.') + self.qhull_incremental = False + self.Tri = spatial.Delaunay(self.C, + incremental= + self.qhull_incremental) + else: + raise + + return self.Tri + + +class LMap: + def __init__(self, v): + self.v = v + self.x_l = None + self.lres = None + self.f_min = None + self.lbounds = [] + + +class LMapCache: + def __init__(self): + self.cache = {} + + # Lists for search queries + self.v_maps = [] + self.xl_maps = [] + self.xl_maps_set = set() + self.f_maps = [] + self.lbound_maps = [] + self.size = 0 + + def __getitem__(self, v): + try: + v = np.ndarray.tolist(v) + except TypeError: + pass + v = tuple(v) + try: + return self.cache[v] + except KeyError: + xval = LMap(v) + self.cache[v] = xval + + return self.cache[v] + + def add_res(self, v, lres, bounds=None): + v = np.ndarray.tolist(v) + v = tuple(v) + self.cache[v].x_l = lres.x + self.cache[v].lres = lres + self.cache[v].f_min = lres.fun + self.cache[v].lbounds = bounds + + # Update cache size + self.size += 1 + + # Cache lists for search queries + self.v_maps.append(v) + self.xl_maps.append(lres.x) + self.xl_maps_set.add(tuple(lres.x)) + self.f_maps.append(lres.fun) + self.lbound_maps.append(bounds) + + def sort_cache_result(self): + """ + Sort results and build the global return object + """ + results = {} + # Sort results and save + self.xl_maps = np.array(self.xl_maps) + self.f_maps = np.array(self.f_maps) + + # Sorted indexes in Func_min + ind_sorted = np.argsort(self.f_maps) + + # Save ordered list of minima + results['xl'] = self.xl_maps[ind_sorted] # Ordered x vals + self.f_maps = np.array(self.f_maps) + results['funl'] = self.f_maps[ind_sorted] + results['funl'] = results['funl'].T + + # Find global of all minimizers + results['x'] = self.xl_maps[ind_sorted[0]] # Save global minima + results['fun'] = self.f_maps[ind_sorted[0]] # Save global fun value + + self.xl_maps = np.ndarray.tolist(self.xl_maps) + self.f_maps = np.ndarray.tolist(self.f_maps) + return results diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_slsqp_py.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_slsqp_py.py new file mode 100644 index 0000000000000000000000000000000000000000..8a6543bf1ccbeffea3876699dfe32cdb6c531a04 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_slsqp_py.py @@ -0,0 +1,513 @@ +""" +This module implements the Sequential Least Squares Programming optimization +algorithm (SLSQP), originally developed by Dieter Kraft. +See http://www.netlib.org/toms/733 + +Functions +--------- +.. autosummary:: + :toctree: generated/ + + approx_jacobian + fmin_slsqp + +""" + +__all__ = ['approx_jacobian', 'fmin_slsqp'] + +import numpy as np +from scipy.optimize._slsqp import slsqp +from numpy import (zeros, array, linalg, append, concatenate, finfo, + sqrt, vstack, isfinite, atleast_1d) +from ._optimize import (OptimizeResult, _check_unknown_options, + _prepare_scalar_function, _clip_x_for_func, + _check_clip_x) +from ._numdiff import approx_derivative +from ._constraints import old_bound_to_new, _arr_to_scalar +from scipy._lib._array_api import atleast_nd, array_namespace + +# deprecated imports to be removed in SciPy 1.13.0 +from numpy import exp, inf # noqa: F401 + + +__docformat__ = "restructuredtext en" + +_epsilon = sqrt(finfo(float).eps) + + +def approx_jacobian(x, func, epsilon, *args): + """ + Approximate the Jacobian matrix of a callable function. 
+ + Parameters + ---------- + x : array_like + The state vector at which to compute the Jacobian matrix. + func : callable f(x,*args) + The vector-valued function. + epsilon : float + The perturbation used to determine the partial derivatives. + args : sequence + Additional arguments passed to func. + + Returns + ------- + An array of dimensions ``(lenf, lenx)`` where ``lenf`` is the length + of the outputs of `func`, and ``lenx`` is the number of elements in + `x`. + + Notes + ----- + The approximation is done using forward differences. + + """ + # approx_derivative returns (m, n) == (lenf, lenx) + jac = approx_derivative(func, x, method='2-point', abs_step=epsilon, + args=args) + # if func returns a scalar jac.shape will be (lenx,). Make sure + # it's at least a 2D array. + return np.atleast_2d(jac) + + +def fmin_slsqp(func, x0, eqcons=(), f_eqcons=None, ieqcons=(), f_ieqcons=None, + bounds=(), fprime=None, fprime_eqcons=None, + fprime_ieqcons=None, args=(), iter=100, acc=1.0E-6, + iprint=1, disp=None, full_output=0, epsilon=_epsilon, + callback=None): + """ + Minimize a function using Sequential Least Squares Programming + + Python interface function for the SLSQP Optimization subroutine + originally implemented by Dieter Kraft. + + Parameters + ---------- + func : callable f(x,*args) + Objective function. Must return a scalar. + x0 : 1-D ndarray of float + Initial guess for the independent variable(s). + eqcons : list, optional + A list of functions of length n such that + eqcons[j](x,*args) == 0.0 in a successfully optimized + problem. + f_eqcons : callable f(x,*args), optional + Returns a 1-D array in which each element must equal 0.0 in a + successfully optimized problem. If f_eqcons is specified, + eqcons is ignored. + ieqcons : list, optional + A list of functions of length n such that + ieqcons[j](x,*args) >= 0.0 in a successfully optimized + problem. + f_ieqcons : callable f(x,*args), optional + Returns a 1-D ndarray in which each element must be greater or + equal to 0.0 in a successfully optimized problem. If + f_ieqcons is specified, ieqcons is ignored. + bounds : list, optional + A list of tuples specifying the lower and upper bound + for each independent variable [(xl0, xu0),(xl1, xu1),...] + Infinite values will be interpreted as large floating values. + fprime : callable `f(x,*args)`, optional + A function that evaluates the partial derivatives of func. + fprime_eqcons : callable `f(x,*args)`, optional + A function of the form `f(x, *args)` that returns the m by n + array of equality constraint normals. If not provided, + the normals will be approximated. The array returned by + fprime_eqcons should be sized as ( len(eqcons), len(x0) ). + fprime_ieqcons : callable `f(x,*args)`, optional + A function of the form `f(x, *args)` that returns the m by n + array of inequality constraint normals. If not provided, + the normals will be approximated. The array returned by + fprime_ieqcons should be sized as ( len(ieqcons), len(x0) ). + args : sequence, optional + Additional arguments passed to func and fprime. + iter : int, optional + The maximum number of iterations. + acc : float, optional + Requested accuracy. + iprint : int, optional + The verbosity of fmin_slsqp : + + * iprint <= 0 : Silent operation + * iprint == 1 : Print summary upon completion (default) + * iprint >= 2 : Print status of each iterate and summary + disp : int, optional + Overrides the iprint interface (preferred). + full_output : bool, optional + If False, return only the minimizer of func (default). 
+ Otherwise, output final objective function and summary + information. + epsilon : float, optional + The step size for finite-difference derivative estimates. + callback : callable, optional + Called after each iteration, as ``callback(x)``, where ``x`` is the + current parameter vector. + + Returns + ------- + out : ndarray of float + The final minimizer of func. + fx : ndarray of float, if full_output is true + The final value of the objective function. + its : int, if full_output is true + The number of iterations. + imode : int, if full_output is true + The exit mode from the optimizer (see below). + smode : string, if full_output is true + Message describing the exit mode from the optimizer. + + See also + -------- + minimize: Interface to minimization algorithms for multivariate + functions. See the 'SLSQP' `method` in particular. + + Notes + ----- + Exit modes are defined as follows :: + + -1 : Gradient evaluation required (g & a) + 0 : Optimization terminated successfully + 1 : Function evaluation required (f & c) + 2 : More equality constraints than independent variables + 3 : More than 3*n iterations in LSQ subproblem + 4 : Inequality constraints incompatible + 5 : Singular matrix E in LSQ subproblem + 6 : Singular matrix C in LSQ subproblem + 7 : Rank-deficient equality constraint subproblem HFTI + 8 : Positive directional derivative for linesearch + 9 : Iteration limit reached + + Examples + -------- + Examples are given :ref:`in the tutorial `. + + """ + if disp is not None: + iprint = disp + + opts = {'maxiter': iter, + 'ftol': acc, + 'iprint': iprint, + 'disp': iprint != 0, + 'eps': epsilon, + 'callback': callback} + + # Build the constraints as a tuple of dictionaries + cons = () + # 1. constraints of the 1st kind (eqcons, ieqcons); no Jacobian; take + # the same extra arguments as the objective function. + cons += tuple({'type': 'eq', 'fun': c, 'args': args} for c in eqcons) + cons += tuple({'type': 'ineq', 'fun': c, 'args': args} for c in ieqcons) + # 2. constraints of the 2nd kind (f_eqcons, f_ieqcons) and their Jacobian + # (fprime_eqcons, fprime_ieqcons); also take the same extra arguments + # as the objective function. + if f_eqcons: + cons += ({'type': 'eq', 'fun': f_eqcons, 'jac': fprime_eqcons, + 'args': args}, ) + if f_ieqcons: + cons += ({'type': 'ineq', 'fun': f_ieqcons, 'jac': fprime_ieqcons, + 'args': args}, ) + + res = _minimize_slsqp(func, x0, args, jac=fprime, bounds=bounds, + constraints=cons, **opts) + if full_output: + return res['x'], res['fun'], res['nit'], res['status'], res['message'] + else: + return res['x'] + + +def _minimize_slsqp(func, x0, args=(), jac=None, bounds=None, + constraints=(), + maxiter=100, ftol=1.0E-6, iprint=1, disp=False, + eps=_epsilon, callback=None, finite_diff_rel_step=None, + **unknown_options): + """ + Minimize a scalar function of one or more variables using Sequential + Least Squares Programming (SLSQP). + + Options + ------- + ftol : float + Precision goal for the value of f in the stopping criterion. + eps : float + Step size used for numerical approximation of the Jacobian. + disp : bool + Set to True to print convergence messages. If False, + `verbosity` is ignored and set to 0. + maxiter : int + Maximum number of iterations. + finite_diff_rel_step : None or array_like, optional + If `jac in ['2-point', '3-point', 'cs']` the relative step size to + use for numerical approximation of `jac`. The absolute step + size is computed as ``h = rel_step * sign(x) * max(1, abs(x))``, + possibly adjusted to fit into the bounds. 
For ``method='3-point'`` + the sign of `h` is ignored. If None (default) then step is selected + automatically. + """ + _check_unknown_options(unknown_options) + iter = maxiter - 1 + acc = ftol + epsilon = eps + + if not disp: + iprint = 0 + + # Transform x0 into an array. + xp = array_namespace(x0) + x0 = atleast_nd(x0, ndim=1, xp=xp) + dtype = xp.float64 + if xp.isdtype(x0.dtype, "real floating"): + dtype = x0.dtype + x = xp.reshape(xp.astype(x0, dtype), -1) + + # SLSQP is sent 'old-style' bounds, 'new-style' bounds are required by + # ScalarFunction + if bounds is None or len(bounds) == 0: + new_bounds = (-np.inf, np.inf) + else: + new_bounds = old_bound_to_new(bounds) + + # clip the initial guess to bounds, otherwise ScalarFunction doesn't work + x = np.clip(x, new_bounds[0], new_bounds[1]) + + # Constraints are triaged per type into a dictionary of tuples + if isinstance(constraints, dict): + constraints = (constraints, ) + + cons = {'eq': (), 'ineq': ()} + for ic, con in enumerate(constraints): + # check type + try: + ctype = con['type'].lower() + except KeyError as e: + raise KeyError('Constraint %d has no type defined.' % ic) from e + except TypeError as e: + raise TypeError('Constraints must be defined using a ' + 'dictionary.') from e + except AttributeError as e: + raise TypeError("Constraint's type must be a string.") from e + else: + if ctype not in ['eq', 'ineq']: + raise ValueError("Unknown constraint type '%s'." % con['type']) + + # check function + if 'fun' not in con: + raise ValueError('Constraint %d has no function defined.' % ic) + + # check Jacobian + cjac = con.get('jac') + if cjac is None: + # approximate Jacobian function. The factory function is needed + # to keep a reference to `fun`, see gh-4240. + def cjac_factory(fun): + def cjac(x, *args): + x = _check_clip_x(x, new_bounds) + + if jac in ['2-point', '3-point', 'cs']: + return approx_derivative(fun, x, method=jac, args=args, + rel_step=finite_diff_rel_step, + bounds=new_bounds) + else: + return approx_derivative(fun, x, method='2-point', + abs_step=epsilon, args=args, + bounds=new_bounds) + + return cjac + cjac = cjac_factory(con['fun']) + + # update constraints' dictionary + cons[ctype] += ({'fun': con['fun'], + 'jac': cjac, + 'args': con.get('args', ())}, ) + + exit_modes = {-1: "Gradient evaluation required (g & a)", + 0: "Optimization terminated successfully", + 1: "Function evaluation required (f & c)", + 2: "More equality constraints than independent variables", + 3: "More than 3*n iterations in LSQ subproblem", + 4: "Inequality constraints incompatible", + 5: "Singular matrix E in LSQ subproblem", + 6: "Singular matrix C in LSQ subproblem", + 7: "Rank-deficient equality constraint subproblem HFTI", + 8: "Positive directional derivative for linesearch", + 9: "Iteration limit reached"} + + # Set the parameters that SLSQP will need + # meq, mieq: number of equality and inequality constraints + meq = sum(map(len, [atleast_1d(c['fun'](x, *c['args'])) + for c in cons['eq']])) + mieq = sum(map(len, [atleast_1d(c['fun'](x, *c['args'])) + for c in cons['ineq']])) + # m = The total number of constraints + m = meq + mieq + # la = The number of constraints, or 1 if there are no constraints + la = array([1, m]).max() + # n = The number of independent variables + n = len(x) + + # Define the workspaces for SLSQP + n1 = n + 1 + mineq = m - meq + n1 + n1 + len_w = (3*n1+m)*(n1+1)+(n1-meq+1)*(mineq+2) + 2*mineq+(n1+mineq)*(n1-meq) \ + + 2*meq + n1 + ((n+1)*n)//2 + 2*m + 3*n + 3*n1 + 1 + len_jw = mineq + w = zeros(len_w) 
+ jw = zeros(len_jw) + + # Decompose bounds into xl and xu + if bounds is None or len(bounds) == 0: + xl = np.empty(n, dtype=float) + xu = np.empty(n, dtype=float) + xl.fill(np.nan) + xu.fill(np.nan) + else: + bnds = array([(_arr_to_scalar(l), _arr_to_scalar(u)) + for (l, u) in bounds], float) + if bnds.shape[0] != n: + raise IndexError('SLSQP Error: the length of bounds is not ' + 'compatible with that of x0.') + + with np.errstate(invalid='ignore'): + bnderr = bnds[:, 0] > bnds[:, 1] + + if bnderr.any(): + raise ValueError('SLSQP Error: lb > ub in bounds %s.' % + ', '.join(str(b) for b in bnderr)) + xl, xu = bnds[:, 0], bnds[:, 1] + + # Mark infinite bounds with nans; the Fortran code understands this + infbnd = ~isfinite(bnds) + xl[infbnd[:, 0]] = np.nan + xu[infbnd[:, 1]] = np.nan + + # ScalarFunction provides function and gradient evaluation + sf = _prepare_scalar_function(func, x, jac=jac, args=args, epsilon=eps, + finite_diff_rel_step=finite_diff_rel_step, + bounds=new_bounds) + # gh11403 SLSQP sometimes exceeds bounds by 1 or 2 ULP, make sure this + # doesn't get sent to the func/grad evaluator. + wrapped_fun = _clip_x_for_func(sf.fun, new_bounds) + wrapped_grad = _clip_x_for_func(sf.grad, new_bounds) + + # Initialize the iteration counter and the mode value + mode = array(0, int) + acc = array(acc, float) + majiter = array(iter, int) + majiter_prev = 0 + + # Initialize internal SLSQP state variables + alpha = array(0, float) + f0 = array(0, float) + gs = array(0, float) + h1 = array(0, float) + h2 = array(0, float) + h3 = array(0, float) + h4 = array(0, float) + t = array(0, float) + t0 = array(0, float) + tol = array(0, float) + iexact = array(0, int) + incons = array(0, int) + ireset = array(0, int) + itermx = array(0, int) + line = array(0, int) + n1 = array(0, int) + n2 = array(0, int) + n3 = array(0, int) + + # Print the header if iprint >= 2 + if iprint >= 2: + print("%5s %5s %16s %16s" % ("NIT", "FC", "OBJFUN", "GNORM")) + + # mode is zero on entry, so call objective, constraints and gradients + # there should be no func evaluations here because it's cached from + # ScalarFunction + fx = wrapped_fun(x) + g = append(wrapped_grad(x), 0.0) + c = _eval_constraint(x, cons) + a = _eval_con_normals(x, cons, la, n, m, meq, mieq) + + while 1: + # Call SLSQP + slsqp(m, meq, x, xl, xu, fx, c, g, a, acc, majiter, mode, w, jw, + alpha, f0, gs, h1, h2, h3, h4, t, t0, tol, + iexact, incons, ireset, itermx, line, + n1, n2, n3) + + if mode == 1: # objective and constraint evaluation required + fx = wrapped_fun(x) + c = _eval_constraint(x, cons) + + if mode == -1: # gradient evaluation required + g = append(wrapped_grad(x), 0.0) + a = _eval_con_normals(x, cons, la, n, m, meq, mieq) + + if majiter > majiter_prev: + # call callback if major iteration has incremented + if callback is not None: + callback(np.copy(x)) + + # Print the status of the current iterate if iprint > 2 + if iprint >= 2: + print("%5i %5i % 16.6E % 16.6E" % (majiter, sf.nfev, + fx, linalg.norm(g))) + + # If exit mode is not -1 or 1, slsqp has completed + if abs(mode) != 1: + break + + majiter_prev = int(majiter) + + # Optimization loop complete. 
Print status if requested + if iprint >= 1: + print(exit_modes[int(mode)] + " (Exit mode " + str(mode) + ')') + print(" Current function value:", fx) + print(" Iterations:", majiter) + print(" Function evaluations:", sf.nfev) + print(" Gradient evaluations:", sf.ngev) + + return OptimizeResult(x=x, fun=fx, jac=g[:-1], nit=int(majiter), + nfev=sf.nfev, njev=sf.ngev, status=int(mode), + message=exit_modes[int(mode)], success=(mode == 0)) + + +def _eval_constraint(x, cons): + # Compute constraints + if cons['eq']: + c_eq = concatenate([atleast_1d(con['fun'](x, *con['args'])) + for con in cons['eq']]) + else: + c_eq = zeros(0) + + if cons['ineq']: + c_ieq = concatenate([atleast_1d(con['fun'](x, *con['args'])) + for con in cons['ineq']]) + else: + c_ieq = zeros(0) + + # Now combine c_eq and c_ieq into a single matrix + c = concatenate((c_eq, c_ieq)) + return c + + +def _eval_con_normals(x, cons, la, n, m, meq, mieq): + # Compute the normals of the constraints + if cons['eq']: + a_eq = vstack([con['jac'](x, *con['args']) + for con in cons['eq']]) + else: # no equality constraint + a_eq = zeros((meq, n)) + + if cons['ineq']: + a_ieq = vstack([con['jac'](x, *con['args']) + for con in cons['ineq']]) + else: # no inequality constraint + a_ieq = zeros((mieq, n)) + + # Now combine a_eq and a_ieq into a single a matrix + if m == 0: # no constraints + a = zeros((la, n)) + else: + a = vstack((a_eq, a_ieq)) + a = concatenate((a, zeros([la, 1])), 1) + + return a diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_spectral.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_spectral.py new file mode 100644 index 0000000000000000000000000000000000000000..5ff5bef0283b2d6b6c018c1c8b98cd46a335d7cb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_spectral.py @@ -0,0 +1,260 @@ +""" +Spectral Algorithm for Nonlinear Equations +""" +import collections + +import numpy as np +from scipy.optimize import OptimizeResult +from scipy.optimize._optimize import _check_unknown_options +from ._linesearch import _nonmonotone_line_search_cruz, _nonmonotone_line_search_cheng + +class _NoConvergence(Exception): + pass + + +def _root_df_sane(func, x0, args=(), ftol=1e-8, fatol=1e-300, maxfev=1000, + fnorm=None, callback=None, disp=False, M=10, eta_strategy=None, + sigma_eps=1e-10, sigma_0=1.0, line_search='cruz', **unknown_options): + r""" + Solve nonlinear equation with the DF-SANE method + + Options + ------- + ftol : float, optional + Relative norm tolerance. + fatol : float, optional + Absolute norm tolerance. + Algorithm terminates when ``||func(x)|| < fatol + ftol ||func(x_0)||``. + fnorm : callable, optional + Norm to use in the convergence check. If None, 2-norm is used. + maxfev : int, optional + Maximum number of function evaluations. + disp : bool, optional + Whether to print convergence process to stdout. + eta_strategy : callable, optional + Choice of the ``eta_k`` parameter, which gives slack for growth + of ``||F||**2``. Called as ``eta_k = eta_strategy(k, x, F)`` with + `k` the iteration number, `x` the current iterate and `F` the current + residual. Should satisfy ``eta_k > 0`` and ``sum(eta, k=0..inf) < inf``. + Default: ``||F||**2 / (1 + k)**2``. + sigma_eps : float, optional + The spectral coefficient is constrained to ``sigma_eps < sigma < 1/sigma_eps``. + Default: 1e-10 + sigma_0 : float, optional + Initial spectral coefficient. + Default: 1.0 + M : int, optional + Number of iterates to include in the nonmonotonic line search. 
+ Default: 10 + line_search : {'cruz', 'cheng'} + Type of line search to employ. 'cruz' is the original one defined in + [Martinez & Raydan. Math. Comp. 75, 1429 (2006)], 'cheng' is + a modified search defined in [Cheng & Li. IMA J. Numer. Anal. 29, 814 (2009)]. + Default: 'cruz' + + References + ---------- + .. [1] "Spectral residual method without gradient information for solving + large-scale nonlinear systems of equations." W. La Cruz, + J.M. Martinez, M. Raydan. Math. Comp. **75**, 1429 (2006). + .. [2] W. La Cruz, Opt. Meth. Software, 29, 24 (2014). + .. [3] W. Cheng, D.-H. Li. IMA J. Numer. Anal. **29**, 814 (2009). + + """ + _check_unknown_options(unknown_options) + + if line_search not in ('cheng', 'cruz'): + raise ValueError(f"Invalid value {line_search!r} for 'line_search'") + + nexp = 2 + + if eta_strategy is None: + # Different choice from [1], as their eta is not invariant + # vs. scaling of F. + def eta_strategy(k, x, F): + # Obtain squared 2-norm of the initial residual from the outer scope + return f_0 / (1 + k)**2 + + if fnorm is None: + def fnorm(F): + # Obtain squared 2-norm of the current residual from the outer scope + return f_k**(1.0/nexp) + + def fmerit(F): + return np.linalg.norm(F)**nexp + + nfev = [0] + f, x_k, x_shape, f_k, F_k, is_complex = _wrap_func(func, x0, fmerit, + nfev, maxfev, args) + + k = 0 + f_0 = f_k + sigma_k = sigma_0 + + F_0_norm = fnorm(F_k) + + # For the 'cruz' line search + prev_fs = collections.deque([f_k], M) + + # For the 'cheng' line search + Q = 1.0 + C = f_0 + + converged = False + message = "too many function evaluations required" + + while True: + F_k_norm = fnorm(F_k) + + if disp: + print("iter %d: ||F|| = %g, sigma = %g" % (k, F_k_norm, sigma_k)) + + if callback is not None: + callback(x_k, F_k) + + if F_k_norm < ftol * F_0_norm + fatol: + # Converged! + message = "successful convergence" + converged = True + break + + # Control spectral parameter, from [2] + if abs(sigma_k) > 1/sigma_eps: + sigma_k = 1/sigma_eps * np.sign(sigma_k) + elif abs(sigma_k) < sigma_eps: + sigma_k = sigma_eps + + # Line search direction + d = -sigma_k * F_k + + # Nonmonotone line search + eta = eta_strategy(k, x_k, F_k) + try: + if line_search == 'cruz': + alpha, xp, fp, Fp = _nonmonotone_line_search_cruz(f, x_k, d, prev_fs, + eta=eta) + elif line_search == 'cheng': + alpha, xp, fp, Fp, C, Q = _nonmonotone_line_search_cheng(f, x_k, d, f_k, + C, Q, eta=eta) + except _NoConvergence: + break + + # Update spectral parameter + s_k = xp - x_k + y_k = Fp - F_k + sigma_k = np.vdot(s_k, s_k) / np.vdot(s_k, y_k) + + # Take step + x_k = xp + F_k = Fp + f_k = fp + + # Store function value + if line_search == 'cruz': + prev_fs.append(fp) + + k += 1 + + x = _wrap_result(x_k, is_complex, shape=x_shape) + F = _wrap_result(F_k, is_complex) + + result = OptimizeResult(x=x, success=converged, + message=message, + fun=F, nfev=nfev[0], nit=k, method="df-sane") + + return result + + +def _wrap_func(func, x0, fmerit, nfev_list, maxfev, args=()): + """ + Wrap a function and an initial value so that (i) complex values + are wrapped to reals, and (ii) value for a merit function + fmerit(x, f) is computed at the same time, (iii) iteration count + is maintained and an exception is raised if it is exceeded. + + Parameters + ---------- + func : callable + Function to wrap + x0 : ndarray + Initial value + fmerit : callable + Merit function fmerit(f) for computing merit value from residual. + nfev_list : list + List to store number of evaluations in. Should be [0] in the beginning. 
+ maxfev : int + Maximum number of evaluations before _NoConvergence is raised. + args : tuple + Extra arguments to func + + Returns + ------- + wrap_func : callable + Wrapped function, to be called as + ``F, fp = wrap_func(x0)`` + x0_wrap : ndarray of float + Wrapped initial value; raveled to 1-D and complex + values mapped to reals. + x0_shape : tuple + Shape of the initial value array + f : float + Merit function at F + F : ndarray of float + Residual at x0_wrap + is_complex : bool + Whether complex values were mapped to reals + + """ + x0 = np.asarray(x0) + x0_shape = x0.shape + F = np.asarray(func(x0, *args)).ravel() + is_complex = np.iscomplexobj(x0) or np.iscomplexobj(F) + x0 = x0.ravel() + + nfev_list[0] = 1 + + if is_complex: + def wrap_func(x): + if nfev_list[0] >= maxfev: + raise _NoConvergence() + nfev_list[0] += 1 + z = _real2complex(x).reshape(x0_shape) + v = np.asarray(func(z, *args)).ravel() + F = _complex2real(v) + f = fmerit(F) + return f, F + + x0 = _complex2real(x0) + F = _complex2real(F) + else: + def wrap_func(x): + if nfev_list[0] >= maxfev: + raise _NoConvergence() + nfev_list[0] += 1 + x = x.reshape(x0_shape) + F = np.asarray(func(x, *args)).ravel() + f = fmerit(F) + return f, F + + return wrap_func, x0, x0_shape, fmerit(F), F, is_complex + + +def _wrap_result(result, is_complex, shape=None): + """ + Convert from real to complex and reshape result arrays. + """ + if is_complex: + z = _real2complex(result) + else: + z = result + if shape is not None: + z = z.reshape(shape) + return z + + +def _real2complex(x): + return np.ascontiguousarray(x, dtype=float).view(np.complex128) + + +def _complex2real(z): + return np.ascontiguousarray(z, dtype=complex).view(np.float64) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_tnc.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_tnc.py new file mode 100644 index 0000000000000000000000000000000000000000..0f0b3be740368eb759d608b541930dbb88ec042b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_tnc.py @@ -0,0 +1,430 @@ +# TNC Python interface +# @(#) $Jeannot: tnc.py,v 1.11 2005/01/28 18:27:31 js Exp $ + +# Copyright (c) 2004-2005, Jean-Sebastien Roy (js@jeannot.org) + +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: + +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +""" +TNC: A Python interface to the TNC non-linear optimizer + +TNC is a non-linear optimizer. To use it, you must provide a function to +minimize. 
The function must take one argument: the list of coordinates where to +evaluate the function; and it must return either a tuple, whose first element is the +value of the function, and whose second argument is the gradient of the function +(as a list of values); or None, to abort the minimization. +""" + +from scipy.optimize import _moduleTNC as moduleTNC +from ._optimize import (MemoizeJac, OptimizeResult, _check_unknown_options, + _prepare_scalar_function) +from ._constraints import old_bound_to_new +from scipy._lib._array_api import atleast_nd, array_namespace + +from numpy import inf, array, zeros + +__all__ = ['fmin_tnc'] + + +MSG_NONE = 0 # No messages +MSG_ITER = 1 # One line per iteration +MSG_INFO = 2 # Informational messages +MSG_VERS = 4 # Version info +MSG_EXIT = 8 # Exit reasons +MSG_ALL = MSG_ITER + MSG_INFO + MSG_VERS + MSG_EXIT + +MSGS = { + MSG_NONE: "No messages", + MSG_ITER: "One line per iteration", + MSG_INFO: "Informational messages", + MSG_VERS: "Version info", + MSG_EXIT: "Exit reasons", + MSG_ALL: "All messages" +} + +INFEASIBLE = -1 # Infeasible (lower bound > upper bound) +LOCALMINIMUM = 0 # Local minimum reached (|pg| ~= 0) +FCONVERGED = 1 # Converged (|f_n-f_(n-1)| ~= 0) +XCONVERGED = 2 # Converged (|x_n-x_(n-1)| ~= 0) +MAXFUN = 3 # Max. number of function evaluations reached +LSFAIL = 4 # Linear search failed +CONSTANT = 5 # All lower bounds are equal to the upper bounds +NOPROGRESS = 6 # Unable to progress +USERABORT = 7 # User requested end of minimization + +RCSTRINGS = { + INFEASIBLE: "Infeasible (lower bound > upper bound)", + LOCALMINIMUM: "Local minimum reached (|pg| ~= 0)", + FCONVERGED: "Converged (|f_n-f_(n-1)| ~= 0)", + XCONVERGED: "Converged (|x_n-x_(n-1)| ~= 0)", + MAXFUN: "Max. number of function evaluations reached", + LSFAIL: "Linear search failed", + CONSTANT: "All lower bounds are equal to the upper bounds", + NOPROGRESS: "Unable to progress", + USERABORT: "User requested end of minimization" +} + +# Changes to interface made by Travis Oliphant, Apr. 2004 for inclusion in +# SciPy + + +def fmin_tnc(func, x0, fprime=None, args=(), approx_grad=0, + bounds=None, epsilon=1e-8, scale=None, offset=None, + messages=MSG_ALL, maxCGit=-1, maxfun=None, eta=-1, + stepmx=0, accuracy=0, fmin=0, ftol=-1, xtol=-1, pgtol=-1, + rescale=-1, disp=None, callback=None): + """ + Minimize a function with variables subject to bounds, using + gradient information in a truncated Newton algorithm. This + method wraps a C implementation of the algorithm. + + Parameters + ---------- + func : callable ``func(x, *args)`` + Function to minimize. Must do one of: + + 1. Return f and g, where f is the value of the function and g its + gradient (a list of floats). + + 2. Return the function value but supply gradient function + separately as `fprime`. + + 3. Return the function value and set ``approx_grad=True``. + + If the function returns None, the minimization + is aborted. + x0 : array_like + Initial estimate of minimum. + fprime : callable ``fprime(x, *args)``, optional + Gradient of `func`. If None, then either `func` must return the + function value and the gradient (``f,g = func(x, *args)``) + or `approx_grad` must be True. + args : tuple, optional + Arguments to pass to function. + approx_grad : bool, optional + If true, approximate the gradient numerically. + bounds : list, optional + (min, max) pairs for each element in x0, defining the + bounds on that parameter. Use None or +/-inf for one of + min or max when there is no bound in that direction. 
+ epsilon : float, optional + Used if approx_grad is True. The stepsize in a finite + difference approximation for fprime. + scale : array_like, optional + Scaling factors to apply to each variable. If None, the + factors are up-low for interval bounded variables and + 1+|x| for the others. Defaults to None. + offset : array_like, optional + Value to subtract from each variable. If None, the + offsets are (up+low)/2 for interval bounded variables + and x for the others. + messages : int, optional + Bit mask used to select messages display during + minimization values defined in the MSGS dict. Defaults to + MGS_ALL. + disp : int, optional + Integer interface to messages. 0 = no message, 5 = all messages + maxCGit : int, optional + Maximum number of hessian*vector evaluations per main + iteration. If maxCGit == 0, the direction chosen is + -gradient if maxCGit < 0, maxCGit is set to + max(1,min(50,n/2)). Defaults to -1. + maxfun : int, optional + Maximum number of function evaluation. If None, maxfun is + set to max(100, 10*len(x0)). Defaults to None. Note that this function + may violate the limit because of evaluating gradients by numerical + differentiation. + eta : float, optional + Severity of the line search. If < 0 or > 1, set to 0.25. + Defaults to -1. + stepmx : float, optional + Maximum step for the line search. May be increased during + call. If too small, it will be set to 10.0. Defaults to 0. + accuracy : float, optional + Relative precision for finite difference calculations. If + <= machine_precision, set to sqrt(machine_precision). + Defaults to 0. + fmin : float, optional + Minimum function value estimate. Defaults to 0. + ftol : float, optional + Precision goal for the value of f in the stopping criterion. + If ftol < 0.0, ftol is set to 0.0 defaults to -1. + xtol : float, optional + Precision goal for the value of x in the stopping + criterion (after applying x scaling factors). If xtol < + 0.0, xtol is set to sqrt(machine_precision). Defaults to + -1. + pgtol : float, optional + Precision goal for the value of the projected gradient in + the stopping criterion (after applying x scaling factors). + If pgtol < 0.0, pgtol is set to 1e-2 * sqrt(accuracy). + Setting it to 0.0 is not recommended. Defaults to -1. + rescale : float, optional + Scaling factor (in log10) used to trigger f value + rescaling. If 0, rescale at each iteration. If a large + value, never rescale. If < 0, rescale is set to 1.3. + callback : callable, optional + Called after each iteration, as callback(xk), where xk is the + current parameter vector. + + Returns + ------- + x : ndarray + The solution. + nfeval : int + The number of function evaluations. + rc : int + Return code, see below + + See also + -------- + minimize: Interface to minimization algorithms for multivariate + functions. See the 'TNC' `method` in particular. + + Notes + ----- + The underlying algorithm is truncated Newton, also called + Newton Conjugate-Gradient. This method differs from + scipy.optimize.fmin_ncg in that + + 1. it wraps a C implementation of the algorithm + 2. it allows each variable to be given an upper and lower bound. + + The algorithm incorporates the bound constraints by determining + the descent direction as in an unconstrained truncated Newton, + but never taking a step-size large enough to leave the space + of feasible x's. The algorithm keeps track of a set of + currently active constraints, and ignores them when computing + the minimum allowable step size. 
(The x's associated with the + active constraint are kept fixed.) If the maximum allowable + step size is zero then a new constraint is added. At the end + of each iteration one of the constraints may be deemed no + longer active and removed. A constraint is considered + no longer active is if it is currently active + but the gradient for that variable points inward from the + constraint. The specific constraint removed is the one + associated with the variable of largest index whose + constraint is no longer active. + + Return codes are defined as follows:: + + -1 : Infeasible (lower bound > upper bound) + 0 : Local minimum reached (|pg| ~= 0) + 1 : Converged (|f_n-f_(n-1)| ~= 0) + 2 : Converged (|x_n-x_(n-1)| ~= 0) + 3 : Max. number of function evaluations reached + 4 : Linear search failed + 5 : All lower bounds are equal to the upper bounds + 6 : Unable to progress + 7 : User requested end of minimization + + References + ---------- + Wright S., Nocedal J. (2006), 'Numerical Optimization' + + Nash S.G. (1984), "Newton-Type Minimization Via the Lanczos Method", + SIAM Journal of Numerical Analysis 21, pp. 770-778 + + """ + # handle fprime/approx_grad + if approx_grad: + fun = func + jac = None + elif fprime is None: + fun = MemoizeJac(func) + jac = fun.derivative + else: + fun = func + jac = fprime + + if disp is not None: # disp takes precedence over messages + mesg_num = disp + else: + mesg_num = {0:MSG_NONE, 1:MSG_ITER, 2:MSG_INFO, 3:MSG_VERS, + 4:MSG_EXIT, 5:MSG_ALL}.get(messages, MSG_ALL) + # build options + opts = {'eps': epsilon, + 'scale': scale, + 'offset': offset, + 'mesg_num': mesg_num, + 'maxCGit': maxCGit, + 'maxfun': maxfun, + 'eta': eta, + 'stepmx': stepmx, + 'accuracy': accuracy, + 'minfev': fmin, + 'ftol': ftol, + 'xtol': xtol, + 'gtol': pgtol, + 'rescale': rescale, + 'disp': False} + + res = _minimize_tnc(fun, x0, args, jac, bounds, callback=callback, **opts) + + return res['x'], res['nfev'], res['status'] + + +def _minimize_tnc(fun, x0, args=(), jac=None, bounds=None, + eps=1e-8, scale=None, offset=None, mesg_num=None, + maxCGit=-1, eta=-1, stepmx=0, accuracy=0, + minfev=0, ftol=-1, xtol=-1, gtol=-1, rescale=-1, disp=False, + callback=None, finite_diff_rel_step=None, maxfun=None, + **unknown_options): + """ + Minimize a scalar function of one or more variables using a truncated + Newton (TNC) algorithm. + + Options + ------- + eps : float or ndarray + If `jac is None` the absolute step size used for numerical + approximation of the jacobian via forward differences. + scale : list of floats + Scaling factors to apply to each variable. If None, the + factors are up-low for interval bounded variables and + 1+|x] for the others. Defaults to None. + offset : float + Value to subtract from each variable. If None, the + offsets are (up+low)/2 for interval bounded variables + and x for the others. + disp : bool + Set to True to print convergence messages. + maxCGit : int + Maximum number of hessian*vector evaluations per main + iteration. If maxCGit == 0, the direction chosen is + -gradient if maxCGit < 0, maxCGit is set to + max(1,min(50,n/2)). Defaults to -1. + eta : float + Severity of the line search. If < 0 or > 1, set to 0.25. + Defaults to -1. + stepmx : float + Maximum step for the line search. May be increased during + call. If too small, it will be set to 10.0. Defaults to 0. + accuracy : float + Relative precision for finite difference calculations. If + <= machine_precision, set to sqrt(machine_precision). + Defaults to 0. 
+ minfev : float + Minimum function value estimate. Defaults to 0. + ftol : float + Precision goal for the value of f in the stopping criterion. + If ftol < 0.0, ftol is set to 0.0 defaults to -1. + xtol : float + Precision goal for the value of x in the stopping + criterion (after applying x scaling factors). If xtol < + 0.0, xtol is set to sqrt(machine_precision). Defaults to + -1. + gtol : float + Precision goal for the value of the projected gradient in + the stopping criterion (after applying x scaling factors). + If gtol < 0.0, gtol is set to 1e-2 * sqrt(accuracy). + Setting it to 0.0 is not recommended. Defaults to -1. + rescale : float + Scaling factor (in log10) used to trigger f value + rescaling. If 0, rescale at each iteration. If a large + value, never rescale. If < 0, rescale is set to 1.3. + finite_diff_rel_step : None or array_like, optional + If `jac in ['2-point', '3-point', 'cs']` the relative step size to + use for numerical approximation of the jacobian. The absolute step + size is computed as ``h = rel_step * sign(x) * max(1, abs(x))``, + possibly adjusted to fit into the bounds. For ``method='3-point'`` + the sign of `h` is ignored. If None (default) then step is selected + automatically. + maxfun : int + Maximum number of function evaluations. If None, `maxfun` is + set to max(100, 10*len(x0)). Defaults to None. + """ + _check_unknown_options(unknown_options) + fmin = minfev + pgtol = gtol + + xp = array_namespace(x0) + x0 = atleast_nd(x0, ndim=1, xp=xp) + dtype = xp.float64 + if xp.isdtype(x0.dtype, "real floating"): + dtype = x0.dtype + x0 = xp.reshape(xp.astype(x0, dtype), -1) + + n = len(x0) + + if bounds is None: + bounds = [(None,None)] * n + if len(bounds) != n: + raise ValueError('length of x0 != length of bounds') + new_bounds = old_bound_to_new(bounds) + + if mesg_num is not None: + messages = {0:MSG_NONE, 1:MSG_ITER, 2:MSG_INFO, 3:MSG_VERS, + 4:MSG_EXIT, 5:MSG_ALL}.get(mesg_num, MSG_ALL) + elif disp: + messages = MSG_ALL + else: + messages = MSG_NONE + + sf = _prepare_scalar_function(fun, x0, jac=jac, args=args, epsilon=eps, + finite_diff_rel_step=finite_diff_rel_step, + bounds=new_bounds) + func_and_grad = sf.fun_and_grad + + """ + low, up : the bounds (lists of floats) + if low is None, the lower bounds are removed. + if up is None, the upper bounds are removed. + low and up defaults to None + """ + low = zeros(n) + up = zeros(n) + for i in range(n): + if bounds[i] is None: + l, u = -inf, inf + else: + l,u = bounds[i] + if l is None: + low[i] = -inf + else: + low[i] = l + if u is None: + up[i] = inf + else: + up[i] = u + + if scale is None: + scale = array([]) + + if offset is None: + offset = array([]) + + if maxfun is None: + maxfun = max(100, 10*len(x0)) + + rc, nf, nit, x, funv, jacv = moduleTNC.tnc_minimize( + func_and_grad, x0, low, up, scale, + offset, messages, maxCGit, maxfun, + eta, stepmx, accuracy, fmin, ftol, + xtol, pgtol, rescale, callback + ) + # the TNC documentation states: "On output, x, f and g may be very + # slightly out of sync because of scaling". Therefore re-evaluate + # func_and_grad so they are synced. 
+ funv, jacv = func_and_grad(x) + + return OptimizeResult(x=x, fun=funv, jac=jacv, nfev=sf.nfev, + nit=nit, status=rc, message=RCSTRINGS[rc], + success=(-1 < rc < 3)) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion.py new file mode 100644 index 0000000000000000000000000000000000000000..f2355cf68ac8e1cac7e2688a9b91364ff2b2dcee --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion.py @@ -0,0 +1,304 @@ +"""Trust-region optimization.""" +import math +import warnings + +import numpy as np +import scipy.linalg +from ._optimize import (_check_unknown_options, _status_message, + OptimizeResult, _prepare_scalar_function, + _call_callback_maybe_halt) +from scipy.optimize._hessian_update_strategy import HessianUpdateStrategy +from scipy.optimize._differentiable_functions import FD_METHODS +__all__ = [] + + +def _wrap_function(function, args): + # wraps a minimizer function to count number of evaluations + # and to easily provide an args kwd. + ncalls = [0] + if function is None: + return ncalls, None + + def function_wrapper(x, *wrapper_args): + ncalls[0] += 1 + # A copy of x is sent to the user function (gh13740) + return function(np.copy(x), *(wrapper_args + args)) + + return ncalls, function_wrapper + + +class BaseQuadraticSubproblem: + """ + Base/abstract class defining the quadratic model for trust-region + minimization. Child classes must implement the ``solve`` method. + + Values of the objective function, Jacobian and Hessian (if provided) at + the current iterate ``x`` are evaluated on demand and then stored as + attributes ``fun``, ``jac``, ``hess``. + """ + + def __init__(self, x, fun, jac, hess=None, hessp=None): + self._x = x + self._f = None + self._g = None + self._h = None + self._g_mag = None + self._cauchy_point = None + self._newton_point = None + self._fun = fun + self._jac = jac + self._hess = hess + self._hessp = hessp + + def __call__(self, p): + return self.fun + np.dot(self.jac, p) + 0.5 * np.dot(p, self.hessp(p)) + + @property + def fun(self): + """Value of objective function at current iteration.""" + if self._f is None: + self._f = self._fun(self._x) + return self._f + + @property + def jac(self): + """Value of Jacobian of objective function at current iteration.""" + if self._g is None: + self._g = self._jac(self._x) + return self._g + + @property + def hess(self): + """Value of Hessian of objective function at current iteration.""" + if self._h is None: + self._h = self._hess(self._x) + return self._h + + def hessp(self, p): + if self._hessp is not None: + return self._hessp(self._x, p) + else: + return np.dot(self.hess, p) + + @property + def jac_mag(self): + """Magnitude of jacobian of objective function at current iteration.""" + if self._g_mag is None: + self._g_mag = scipy.linalg.norm(self.jac) + return self._g_mag + + def get_boundaries_intersections(self, z, d, trust_radius): + """ + Solve the scalar quadratic equation ``||z + t d|| == trust_radius``. + This is like a line-sphere intersection. + Return the two values of t, sorted from low to high. + """ + a = np.dot(d, d) + b = 2 * np.dot(z, d) + c = np.dot(z, z) - trust_radius**2 + sqrt_discriminant = math.sqrt(b*b - 4*a*c) + + # The following calculation is mathematically + # equivalent to: + # ta = (-b - sqrt_discriminant) / (2*a) + # tb = (-b + sqrt_discriminant) / (2*a) + # but produce smaller round off errors. 
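+        # (Added note, not from the original source: when b and the square
+        # root of the discriminant share the same sign, the textbook
+        # expression for the root nearer zero subtracts two nearly equal
+        # numbers; computing that root as -2*c / aux instead relies on the
+        # product of the roots, ta*tb == c/a, so no cancellation occurs.)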
+ # Look at Matrix Computation p.97 + # for a better justification. + aux = b + math.copysign(sqrt_discriminant, b) + ta = -aux / (2*a) + tb = -2*c / aux + return sorted([ta, tb]) + + def solve(self, trust_radius): + raise NotImplementedError('The solve method should be implemented by ' + 'the child class') + + +def _minimize_trust_region(fun, x0, args=(), jac=None, hess=None, hessp=None, + subproblem=None, initial_trust_radius=1.0, + max_trust_radius=1000.0, eta=0.15, gtol=1e-4, + maxiter=None, disp=False, return_all=False, + callback=None, inexact=True, **unknown_options): + """ + Minimization of scalar function of one or more variables using a + trust-region algorithm. + + Options for the trust-region algorithm are: + initial_trust_radius : float + Initial trust radius. + max_trust_radius : float + Never propose steps that are longer than this value. + eta : float + Trust region related acceptance stringency for proposed steps. + gtol : float + Gradient norm must be less than `gtol` + before successful termination. + maxiter : int + Maximum number of iterations to perform. + disp : bool + If True, print convergence message. + inexact : bool + Accuracy to solve subproblems. If True requires less nonlinear + iterations, but more vector products. Only effective for method + trust-krylov. + + This function is called by the `minimize` function. + It is not supposed to be called directly. + """ + _check_unknown_options(unknown_options) + + if jac is None: + raise ValueError('Jacobian is currently required for trust-region ' + 'methods') + if hess is None and hessp is None: + raise ValueError('Either the Hessian or the Hessian-vector product ' + 'is currently required for trust-region methods') + if subproblem is None: + raise ValueError('A subproblem solving strategy is required for ' + 'trust-region methods') + if not (0 <= eta < 0.25): + raise Exception('invalid acceptance stringency') + if max_trust_radius <= 0: + raise Exception('the max trust radius must be positive') + if initial_trust_radius <= 0: + raise ValueError('the initial trust radius must be positive') + if initial_trust_radius >= max_trust_radius: + raise ValueError('the initial trust radius must be less than the ' + 'max trust radius') + + # force the initial guess into a nice format + x0 = np.asarray(x0).flatten() + + # A ScalarFunction representing the problem. This caches calls to fun, jac, + # hess. + sf = _prepare_scalar_function(fun, x0, jac=jac, hess=hess, args=args) + fun = sf.fun + jac = sf.grad + if callable(hess): + hess = sf.hess + elif callable(hessp): + # this elif statement must come before examining whether hess + # is estimated by FD methods or a HessianUpdateStrategy + pass + elif (hess in FD_METHODS or isinstance(hess, HessianUpdateStrategy)): + # If the Hessian is being estimated by finite differences or a + # Hessian update strategy then ScalarFunction.hess returns a + # LinearOperator or a HessianUpdateStrategy. This enables the + # calculation/creation of a hessp. BUT you only want to do this + # if the user *hasn't* provided a callable(hessp) function. 
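+        # (Added clarification: ``hess`` is set to None below so that the
+        # quadratic subproblem falls back to the ``hessp`` closure defined
+        # here, and so that no dense Hessian is attached to the returned
+        # OptimizeResult.)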
+ hess = None + + def hessp(x, p, *args): + return sf.hess(x).dot(p) + else: + raise ValueError('Either the Hessian or the Hessian-vector product ' + 'is currently required for trust-region methods') + + # ScalarFunction doesn't represent hessp + nhessp, hessp = _wrap_function(hessp, args) + + # limit the number of iterations + if maxiter is None: + maxiter = len(x0)*200 + + # init the search status + warnflag = 0 + + # initialize the search + trust_radius = initial_trust_radius + x = x0 + if return_all: + allvecs = [x] + m = subproblem(x, fun, jac, hess, hessp) + k = 0 + + # search for the function min + # do not even start if the gradient is small enough + while m.jac_mag >= gtol: + + # Solve the sub-problem. + # This gives us the proposed step relative to the current position + # and it tells us whether the proposed step + # has reached the trust region boundary or not. + try: + p, hits_boundary = m.solve(trust_radius) + except np.linalg.LinAlgError: + warnflag = 3 + break + + # calculate the predicted value at the proposed point + predicted_value = m(p) + + # define the local approximation at the proposed point + x_proposed = x + p + m_proposed = subproblem(x_proposed, fun, jac, hess, hessp) + + # evaluate the ratio defined in equation (4.4) + actual_reduction = m.fun - m_proposed.fun + predicted_reduction = m.fun - predicted_value + if predicted_reduction <= 0: + warnflag = 2 + break + rho = actual_reduction / predicted_reduction + + # update the trust radius according to the actual/predicted ratio + if rho < 0.25: + trust_radius *= 0.25 + elif rho > 0.75 and hits_boundary: + trust_radius = min(2*trust_radius, max_trust_radius) + + # if the ratio is high enough then accept the proposed step + if rho > eta: + x = x_proposed + m = m_proposed + + # append the best guess, call back, increment the iteration count + if return_all: + allvecs.append(np.copy(x)) + k += 1 + + intermediate_result = OptimizeResult(x=x, fun=m.fun) + if _call_callback_maybe_halt(callback, intermediate_result): + break + + # check if the gradient is small enough to stop + if m.jac_mag < gtol: + warnflag = 0 + break + + # check if we have looked at enough iterations + if k >= maxiter: + warnflag = 1 + break + + # print some stuff if requested + status_messages = ( + _status_message['success'], + _status_message['maxiter'], + 'A bad approximation caused failure to predict improvement.', + 'A linalg error occurred, such as a non-psd Hessian.', + ) + if disp: + if warnflag == 0: + print(status_messages[warnflag]) + else: + warnings.warn(status_messages[warnflag], RuntimeWarning, stacklevel=3) + print(" Current function value: %f" % m.fun) + print(" Iterations: %d" % k) + print(" Function evaluations: %d" % sf.nfev) + print(" Gradient evaluations: %d" % sf.ngev) + print(" Hessian evaluations: %d" % (sf.nhev + nhessp[0])) + + result = OptimizeResult(x=x, success=(warnflag == 0), status=warnflag, + fun=m.fun, jac=m.jac, nfev=sf.nfev, njev=sf.ngev, + nhev=sf.nhev + nhessp[0], nit=k, + message=status_messages[warnflag]) + + if hess is not None: + result['hess'] = m.hess + + if return_all: + result['allvecs'] = allvecs + + return result diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_dogleg.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_dogleg.py new file mode 100644 index 0000000000000000000000000000000000000000..a54abd60c703408d6c87cb5020d6781fdf0213c7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_dogleg.py @@ -0,0 +1,122 
@@ +"""Dog-leg trust-region optimization.""" +import numpy as np +import scipy.linalg +from ._trustregion import (_minimize_trust_region, BaseQuadraticSubproblem) + +__all__ = [] + + +def _minimize_dogleg(fun, x0, args=(), jac=None, hess=None, + **trust_region_options): + """ + Minimization of scalar function of one or more variables using + the dog-leg trust-region algorithm. + + Options + ------- + initial_trust_radius : float + Initial trust-region radius. + max_trust_radius : float + Maximum value of the trust-region radius. No steps that are longer + than this value will be proposed. + eta : float + Trust region related acceptance stringency for proposed steps. + gtol : float + Gradient norm must be less than `gtol` before successful + termination. + + """ + if jac is None: + raise ValueError('Jacobian is required for dogleg minimization') + if not callable(hess): + raise ValueError('Hessian is required for dogleg minimization') + return _minimize_trust_region(fun, x0, args=args, jac=jac, hess=hess, + subproblem=DoglegSubproblem, + **trust_region_options) + + +class DoglegSubproblem(BaseQuadraticSubproblem): + """Quadratic subproblem solved by the dogleg method""" + + def cauchy_point(self): + """ + The Cauchy point is minimal along the direction of steepest descent. + """ + if self._cauchy_point is None: + g = self.jac + Bg = self.hessp(g) + self._cauchy_point = -(np.dot(g, g) / np.dot(g, Bg)) * g + return self._cauchy_point + + def newton_point(self): + """ + The Newton point is a global minimum of the approximate function. + """ + if self._newton_point is None: + g = self.jac + B = self.hess + cho_info = scipy.linalg.cho_factor(B) + self._newton_point = -scipy.linalg.cho_solve(cho_info, g) + return self._newton_point + + def solve(self, trust_radius): + """ + Minimize a function using the dog-leg trust-region algorithm. + + This algorithm requires function values and first and second derivatives. + It also performs a costly Hessian decomposition for most iterations, + and the Hessian is required to be positive definite. + + Parameters + ---------- + trust_radius : float + We are allowed to wander only this far away from the origin. + + Returns + ------- + p : ndarray + The proposed step. + hits_boundary : bool + True if the proposed step is on the boundary of the trust region. + + Notes + ----- + The Hessian is required to be positive definite. + + References + ---------- + .. [1] Jorge Nocedal and Stephen Wright, + Numerical Optimization, second edition, + Springer-Verlag, 2006, page 73. + """ + + # Compute the Newton point. + # This is the optimum for the quadratic model function. + # If it is inside the trust radius then return this point. + p_best = self.newton_point() + if scipy.linalg.norm(p_best) < trust_radius: + hits_boundary = False + return p_best, hits_boundary + + # Compute the Cauchy point. + # This is the predicted optimum along the direction of steepest descent. + p_u = self.cauchy_point() + + # If the Cauchy point is outside the trust region, + # then return the point where the path intersects the boundary. + p_u_norm = scipy.linalg.norm(p_u) + if p_u_norm >= trust_radius: + p_boundary = p_u * (trust_radius / p_u_norm) + hits_boundary = True + return p_boundary, hits_boundary + + # Compute the intersection of the trust region boundary + # and the line segment connecting the Cauchy and Newton points. + # This requires solving a quadratic equation. + # ||p_u + t*(p_best - p_u)||**2 == trust_radius**2 + # Solve this for positive time t using the quadratic formula. 
+ _, tb = self.get_boundaries_intersections(p_u, p_best - p_u, + trust_radius) + p_boundary = p_u + tb * (p_best - p_u) + hits_boundary = True + return p_boundary, hits_boundary diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_exact.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_exact.py new file mode 100644 index 0000000000000000000000000000000000000000..21fc3d5609d2b41eb5b5ad840ef464522565054c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_exact.py @@ -0,0 +1,438 @@ +"""Nearly exact trust-region optimization subproblem.""" +import numpy as np +from scipy.linalg import (norm, get_lapack_funcs, solve_triangular, + cho_solve) +from ._trustregion import (_minimize_trust_region, BaseQuadraticSubproblem) + +__all__ = ['_minimize_trustregion_exact', + 'estimate_smallest_singular_value', + 'singular_leading_submatrix', + 'IterativeSubproblem'] + + +def _minimize_trustregion_exact(fun, x0, args=(), jac=None, hess=None, + **trust_region_options): + """ + Minimization of scalar function of one or more variables using + a nearly exact trust-region algorithm. + + Options + ------- + initial_trust_radius : float + Initial trust-region radius. + max_trust_radius : float + Maximum value of the trust-region radius. No steps that are longer + than this value will be proposed. + eta : float + Trust region related acceptance stringency for proposed steps. + gtol : float + Gradient norm must be less than ``gtol`` before successful + termination. + """ + + if jac is None: + raise ValueError('Jacobian is required for trust region ' + 'exact minimization.') + if not callable(hess): + raise ValueError('Hessian matrix is required for trust region ' + 'exact minimization.') + return _minimize_trust_region(fun, x0, args=args, jac=jac, hess=hess, + subproblem=IterativeSubproblem, + **trust_region_options) + + +def estimate_smallest_singular_value(U): + """Given upper triangular matrix ``U`` estimate the smallest singular + value and the correspondent right singular vector in O(n**2) operations. + + Parameters + ---------- + U : ndarray + Square upper triangular matrix. + + Returns + ------- + s_min : float + Estimated smallest singular value of the provided matrix. + z_min : ndarray + Estimatied right singular vector. + + Notes + ----- + The procedure is based on [1]_ and is done in two steps. First, it finds + a vector ``e`` with components selected from {+1, -1} such that the + solution ``w`` from the system ``U.T w = e`` is as large as possible. + Next it estimate ``U v = w``. The smallest singular value is close + to ``norm(w)/norm(v)`` and the right singular vector is close + to ``v/norm(v)``. + + The estimation will be better more ill-conditioned is the matrix. + + References + ---------- + .. [1] Cline, A. K., Moler, C. B., Stewart, G. W., Wilkinson, J. H. + An estimate for the condition number of a matrix. 1979. + SIAM Journal on Numerical Analysis, 16(2), 368-375. + """ + + U = np.atleast_2d(U) + m, n = U.shape + + if m != n: + raise ValueError("A square triangular matrix should be provided.") + + # A vector `e` with components selected from {+1, -1} + # is selected so that the solution `w` to the system + # `U.T w = e` is as large as possible. Implementation + # based on algorithm 3.5.1, p. 142, from reference [2] + # adapted for lower triangular matrix. + + p = np.zeros(n) + w = np.empty(n) + + # Implemented according to: Golub, G. H., Van Loan, C. F. (2013). + # "Matrix computations". Forth Edition. 
JHU press. pp. 140-142. + for k in range(n): + wp = (1-p[k]) / U.T[k, k] + wm = (-1-p[k]) / U.T[k, k] + pp = p[k+1:] + U.T[k+1:, k]*wp + pm = p[k+1:] + U.T[k+1:, k]*wm + + if abs(wp) + norm(pp, 1) >= abs(wm) + norm(pm, 1): + w[k] = wp + p[k+1:] = pp + else: + w[k] = wm + p[k+1:] = pm + + # The system `U v = w` is solved using backward substitution. + v = solve_triangular(U, w) + + v_norm = norm(v) + w_norm = norm(w) + + # Smallest singular value + s_min = w_norm / v_norm + + # Associated vector + z_min = v / v_norm + + return s_min, z_min + + +def gershgorin_bounds(H): + """ + Given a square matrix ``H`` compute upper + and lower bounds for its eigenvalues (Gregoshgorin Bounds). + Defined ref. [1]. + + References + ---------- + .. [1] Conn, A. R., Gould, N. I., & Toint, P. L. + Trust region methods. 2000. Siam. pp. 19. + """ + + H_diag = np.diag(H) + H_diag_abs = np.abs(H_diag) + H_row_sums = np.sum(np.abs(H), axis=1) + lb = np.min(H_diag + H_diag_abs - H_row_sums) + ub = np.max(H_diag - H_diag_abs + H_row_sums) + + return lb, ub + + +def singular_leading_submatrix(A, U, k): + """ + Compute term that makes the leading ``k`` by ``k`` + submatrix from ``A`` singular. + + Parameters + ---------- + A : ndarray + Symmetric matrix that is not positive definite. + U : ndarray + Upper triangular matrix resulting of an incomplete + Cholesky decomposition of matrix ``A``. + k : int + Positive integer such that the leading k by k submatrix from + `A` is the first non-positive definite leading submatrix. + + Returns + ------- + delta : float + Amount that should be added to the element (k, k) of the + leading k by k submatrix of ``A`` to make it singular. + v : ndarray + A vector such that ``v.T B v = 0``. Where B is the matrix A after + ``delta`` is added to its element (k, k). + """ + + # Compute delta + delta = np.sum(U[:k-1, k-1]**2) - A[k-1, k-1] + + n = len(A) + + # Inicialize v + v = np.zeros(n) + v[k-1] = 1 + + # Compute the remaining values of v by solving a triangular system. + if k != 1: + v[:k-1] = solve_triangular(U[:k-1, :k-1], -U[:k-1, k-1]) + + return delta, v + + +class IterativeSubproblem(BaseQuadraticSubproblem): + """Quadratic subproblem solved by nearly exact iterative method. + + Notes + ----- + This subproblem solver was based on [1]_, [2]_ and [3]_, + which implement similar algorithms. The algorithm is basically + that of [1]_ but ideas from [2]_ and [3]_ were also used. + + References + ---------- + .. [1] A.R. Conn, N.I. Gould, and P.L. Toint, "Trust region methods", + Siam, pp. 169-200, 2000. + .. [2] J. Nocedal and S. Wright, "Numerical optimization", + Springer Science & Business Media. pp. 83-91, 2006. + .. [3] J.J. More and D.C. Sorensen, "Computing a trust region step", + SIAM Journal on Scientific and Statistical Computing, vol. 4(3), + pp. 553-572, 1983. + """ + + # UPDATE_COEFF appears in reference [1]_ + # in formula 7.3.14 (p. 190) named as "theta". + # As recommended there it value is fixed in 0.01. + UPDATE_COEFF = 0.01 + + EPS = np.finfo(float).eps + + def __init__(self, x, fun, jac, hess, hessp=None, + k_easy=0.1, k_hard=0.2): + + super().__init__(x, fun, jac, hess) + + # When the trust-region shrinks in two consecutive + # calculations (``tr_radius < previous_tr_radius``) + # the lower bound ``lambda_lb`` may be reused, + # facilitating the convergence. To indicate no + # previous value is known at first ``previous_tr_radius`` + # is set to -1 and ``lambda_lb`` to None. 
+ self.previous_tr_radius = -1 + self.lambda_lb = None + + self.niter = 0 + + # ``k_easy`` and ``k_hard`` are parameters used + # to determine the stop criteria to the iterative + # subproblem solver. Take a look at pp. 194-197 + # from reference _[1] for a more detailed description. + self.k_easy = k_easy + self.k_hard = k_hard + + # Get Lapack function for cholesky decomposition. + # The implemented SciPy wrapper does not return + # the incomplete factorization needed by the method. + self.cholesky, = get_lapack_funcs(('potrf',), (self.hess,)) + + # Get info about Hessian + self.dimension = len(self.hess) + self.hess_gershgorin_lb,\ + self.hess_gershgorin_ub = gershgorin_bounds(self.hess) + self.hess_inf = norm(self.hess, np.inf) + self.hess_fro = norm(self.hess, 'fro') + + # A constant such that for vectors smaller than that + # backward substituition is not reliable. It was stabilished + # based on Golub, G. H., Van Loan, C. F. (2013). + # "Matrix computations". Forth Edition. JHU press., p.165. + self.CLOSE_TO_ZERO = self.dimension * self.EPS * self.hess_inf + + def _initial_values(self, tr_radius): + """Given a trust radius, return a good initial guess for + the damping factor, the lower bound and the upper bound. + The values were chosen accordingly to the guidelines on + section 7.3.8 (p. 192) from [1]_. + """ + + # Upper bound for the damping factor + lambda_ub = max(0, self.jac_mag/tr_radius + min(-self.hess_gershgorin_lb, + self.hess_fro, + self.hess_inf)) + + # Lower bound for the damping factor + lambda_lb = max(0, -min(self.hess.diagonal()), + self.jac_mag/tr_radius - min(self.hess_gershgorin_ub, + self.hess_fro, + self.hess_inf)) + + # Improve bounds with previous info + if tr_radius < self.previous_tr_radius: + lambda_lb = max(self.lambda_lb, lambda_lb) + + # Initial guess for the damping factor + if lambda_lb == 0: + lambda_initial = 0 + else: + lambda_initial = max(np.sqrt(lambda_lb * lambda_ub), + lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb)) + + return lambda_initial, lambda_lb, lambda_ub + + def solve(self, tr_radius): + """Solve quadratic subproblem""" + + lambda_current, lambda_lb, lambda_ub = self._initial_values(tr_radius) + n = self.dimension + hits_boundary = True + already_factorized = False + self.niter = 0 + + while True: + + # Compute Cholesky factorization + if already_factorized: + already_factorized = False + else: + H = self.hess+lambda_current*np.eye(n) + U, info = self.cholesky(H, lower=False, + overwrite_a=False, + clean=True) + + self.niter += 1 + + # Check if factorization succeeded + if info == 0 and self.jac_mag > self.CLOSE_TO_ZERO: + # Successful factorization + + # Solve `U.T U p = s` + p = cho_solve((U, False), -self.jac) + + p_norm = norm(p) + + # Check for interior convergence + if p_norm <= tr_radius and lambda_current == 0: + hits_boundary = False + break + + # Solve `U.T w = p` + w = solve_triangular(U, p, trans='T') + + w_norm = norm(w) + + # Compute Newton step accordingly to + # formula (4.44) p.87 from ref [2]_. + delta_lambda = (p_norm/w_norm)**2 * (p_norm-tr_radius)/tr_radius + lambda_new = lambda_current + delta_lambda + + if p_norm < tr_radius: # Inside boundary + s_min, z_min = estimate_smallest_singular_value(U) + + ta, tb = self.get_boundaries_intersections(p, z_min, + tr_radius) + + # Choose `step_len` with the smallest magnitude. + # The reason for this choice is explained at + # ref [3]_, p. 6 (Immediately before the formula + # for `tau`). 
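+                    # (Added note: this is the "hard case" of the nearly
+                    # exact method; the candidate step lies strictly inside
+                    # the boundary, so a multiple of the estimated smallest
+                    # singular vector z_min is added to move it onto the
+                    # trust-region boundary.)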
+ step_len = min([ta, tb], key=abs) + + # Compute the quadratic term (p.T*H*p) + quadratic_term = np.dot(p, np.dot(H, p)) + + # Check stop criteria + relative_error = ((step_len**2 * s_min**2) + / (quadratic_term + lambda_current*tr_radius**2)) + if relative_error <= self.k_hard: + p += step_len * z_min + break + + # Update uncertanty bounds + lambda_ub = lambda_current + lambda_lb = max(lambda_lb, lambda_current - s_min**2) + + # Compute Cholesky factorization + H = self.hess + lambda_new*np.eye(n) + c, info = self.cholesky(H, lower=False, + overwrite_a=False, + clean=True) + + # Check if the factorization have succeeded + # + if info == 0: # Successful factorization + # Update damping factor + lambda_current = lambda_new + already_factorized = True + else: # Unsuccessful factorization + # Update uncertanty bounds + lambda_lb = max(lambda_lb, lambda_new) + + # Update damping factor + lambda_current = max( + np.sqrt(lambda_lb * lambda_ub), + lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb) + ) + + else: # Outside boundary + # Check stop criteria + relative_error = abs(p_norm - tr_radius) / tr_radius + if relative_error <= self.k_easy: + break + + # Update uncertanty bounds + lambda_lb = lambda_current + + # Update damping factor + lambda_current = lambda_new + + elif info == 0 and self.jac_mag <= self.CLOSE_TO_ZERO: + # jac_mag very close to zero + + # Check for interior convergence + if lambda_current == 0: + p = np.zeros(n) + hits_boundary = False + break + + s_min, z_min = estimate_smallest_singular_value(U) + step_len = tr_radius + + # Check stop criteria + if (step_len**2 * s_min**2 + <= self.k_hard * lambda_current * tr_radius**2): + p = step_len * z_min + break + + # Update uncertanty bounds + lambda_ub = lambda_current + lambda_lb = max(lambda_lb, lambda_current - s_min**2) + + # Update damping factor + lambda_current = max( + np.sqrt(lambda_lb * lambda_ub), + lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb) + ) + + else: # Unsuccessful factorization + + # Compute auxiliary terms + delta, v = singular_leading_submatrix(H, U, info) + v_norm = norm(v) + + # Update uncertanty interval + lambda_lb = max(lambda_lb, lambda_current + delta/v_norm**2) + + # Update damping factor + lambda_current = max( + np.sqrt(lambda_lb * lambda_ub), + lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb) + ) + + self.lambda_lb = lambda_lb + self.lambda_current = lambda_current + self.previous_tr_radius = tr_radius + + return p, hits_boundary diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_ncg.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_ncg.py new file mode 100644 index 0000000000000000000000000000000000000000..fed17ff8b84eaf019c0ad69a03f260ca674477ad --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_ncg.py @@ -0,0 +1,126 @@ +"""Newton-CG trust-region optimization.""" +import math + +import numpy as np +import scipy.linalg +from ._trustregion import (_minimize_trust_region, BaseQuadraticSubproblem) + +__all__ = [] + + +def _minimize_trust_ncg(fun, x0, args=(), jac=None, hess=None, hessp=None, + **trust_region_options): + """ + Minimization of scalar function of one or more variables using + the Newton conjugate gradient trust-region algorithm. + + Options + ------- + initial_trust_radius : float + Initial trust-region radius. + max_trust_radius : float + Maximum value of the trust-region radius. No steps that are longer + than this value will be proposed. 
+ eta : float + Trust region related acceptance stringency for proposed steps. + gtol : float + Gradient norm must be less than `gtol` before successful + termination. + + """ + if jac is None: + raise ValueError('Jacobian is required for Newton-CG trust-region ' + 'minimization') + if hess is None and hessp is None: + raise ValueError('Either the Hessian or the Hessian-vector product ' + 'is required for Newton-CG trust-region minimization') + return _minimize_trust_region(fun, x0, args=args, jac=jac, hess=hess, + hessp=hessp, subproblem=CGSteihaugSubproblem, + **trust_region_options) + + +class CGSteihaugSubproblem(BaseQuadraticSubproblem): + """Quadratic subproblem solved by a conjugate gradient method""" + def solve(self, trust_radius): + """ + Solve the subproblem using a conjugate gradient method. + + Parameters + ---------- + trust_radius : float + We are allowed to wander only this far away from the origin. + + Returns + ------- + p : ndarray + The proposed step. + hits_boundary : bool + True if the proposed step is on the boundary of the trust region. + + Notes + ----- + This is algorithm (7.2) of Nocedal and Wright 2nd edition. + Only the function that computes the Hessian-vector product is required. + The Hessian itself is not required, and the Hessian does + not need to be positive semidefinite. + """ + + # get the norm of jacobian and define the origin + p_origin = np.zeros_like(self.jac) + + # define a default tolerance + tolerance = min(0.5, math.sqrt(self.jac_mag)) * self.jac_mag + + # Stop the method if the search direction + # is a direction of nonpositive curvature. + if self.jac_mag < tolerance: + hits_boundary = False + return p_origin, hits_boundary + + # init the state for the first iteration + z = p_origin + r = self.jac + d = -r + + # Search for the min of the approximation of the objective function. + while True: + + # do an iteration + Bd = self.hessp(d) + dBd = np.dot(d, Bd) + if dBd <= 0: + # Look at the two boundary points. + # Find both values of t to get the boundary points such that + # ||z + t d|| == trust_radius + # and then choose the one with the predicted min value. + ta, tb = self.get_boundaries_intersections(z, d, trust_radius) + pa = z + ta * d + pb = z + tb * d + if self(pa) < self(pb): + p_boundary = pa + else: + p_boundary = pb + hits_boundary = True + return p_boundary, hits_boundary + r_squared = np.dot(r, r) + alpha = r_squared / dBd + z_next = z + alpha * d + if scipy.linalg.norm(z_next) >= trust_radius: + # Find t >= 0 to get the boundary point such that + # ||z + t d|| == trust_radius + ta, tb = self.get_boundaries_intersections(z, d, trust_radius) + p_boundary = z + tb * d + hits_boundary = True + return p_boundary, hits_boundary + r_next = r + alpha * Bd + r_next_squared = np.dot(r_next, r_next) + if math.sqrt(r_next_squared) < tolerance: + hits_boundary = False + return z_next, hits_boundary + beta_next = r_next_squared / r_squared + d_next = -r_next + beta_next * d + + # update the state for the next iteration + z = z_next + r = r_next + d = d_next diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_tstutils.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_tstutils.py new file mode 100644 index 0000000000000000000000000000000000000000..344c6764a37471ed032ecd0b05f7d93e9f4a6536 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_tstutils.py @@ -0,0 +1,968 @@ +r""" +Parameters used in test and benchmark methods. 
+ +Collections of test cases suitable for testing 1-D root-finders + 'original': The original benchmarking functions. + Real-valued functions of real-valued inputs on an interval + with a zero. + f1, .., f3 are continuous and infinitely differentiable + f4 has a left- and right- discontinuity at the root + f5 has a root at 1 replacing a 1st order pole + f6 is randomly positive on one side of the root, + randomly negative on the other. + f4 - f6 are not continuous at the root. + + 'aps': The test problems in the 1995 paper + TOMS "Algorithm 748: Enclosing Zeros of Continuous Functions" + by Alefeld, Potra and Shi. Real-valued functions of + real-valued inputs on an interval with a zero. + Suitable for methods which start with an enclosing interval, and + derivatives up to 2nd order. + + 'complex': Some complex-valued functions of complex-valued inputs. + No enclosing bracket is provided. + Suitable for methods which use one or more starting values, and + derivatives up to 2nd order. + + The test cases are provided as a list of dictionaries. The dictionary + keys will be a subset of: + ["f", "fprime", "fprime2", "args", "bracket", "smoothness", + "a", "b", "x0", "x1", "root", "ID"] +""" + +# Sources: +# [1] Alefeld, G. E. and Potra, F. A. and Shi, Yixun, +# "Algorithm 748: Enclosing Zeros of Continuous Functions", +# ACM Trans. Math. Softw. Volume 221(1995) +# doi = {10.1145/210089.210111}, +# [2] Chandrupatla, Tirupathi R. "A new hybrid quadratic/bisection algorithm +# for finding the zero of a nonlinear function without using derivatives." +# Advances in Engineering Software 28.3 (1997): 145-149. + +from random import random + +import numpy as np + +from scipy.optimize import _zeros_py as cc + +# "description" refers to the original functions +description = """ +f2 is a symmetric parabola, x**2 - 1 +f3 is a quartic polynomial with large hump in interval +f4 is step function with a discontinuity at 1 +f5 is a hyperbola with vertical asymptote at 1 +f6 has random values positive to left of 1, negative to right + +Of course, these are not real problems. They just test how the +'good' solvers behave in bad circumstances where bisection is +really the best. A good solver should not be much worse than +bisection in such circumstance, while being faster for smooth +monotone sorts of functions. +""" + + +def f1(x): + r"""f1 is a quadratic with roots at 0 and 1""" + return x * (x - 1.) + + +def f1_fp(x): + return 2 * x - 1 + + +def f1_fpp(x): + return 2 + + +def f2(x): + r"""f2 is a symmetric parabola, x**2 - 1""" + return x**2 - 1 + + +def f2_fp(x): + return 2 * x + + +def f2_fpp(x): + return 2 + + +def f3(x): + r"""A quartic with roots at 0, 1, 2 and 3""" + return x * (x - 1.) * (x - 2.) * (x - 3.) # x**4 - 6x**3 + 11x**2 - 6x + + +def f3_fp(x): + return 4 * x**3 - 18 * x**2 + 22 * x - 6 + + +def f3_fpp(x): + return 12 * x**2 - 36 * x + 22 + + +def f4(x): + r"""Piecewise linear, left- and right- discontinuous at x=1, the root.""" + if x > 1: + return 1.0 + .1 * x + if x < 1: + return -1.0 + .1 * x + return 0 + + +def f5(x): + r""" + Hyperbola with a pole at x=1, but pole replaced with 0. Not continuous at root. + """ + if x != 1: + return 1.0 / (1. - x) + return 0 + + +# f6(x) returns random value. 
Without memoization, calling twice with the +# same x returns different values, hence a "random value", not a +# "function with random values" +_f6_cache = {} +def f6(x): + v = _f6_cache.get(x, None) + if v is None: + if x > 1: + v = random() + elif x < 1: + v = -random() + else: + v = 0 + _f6_cache[x] = v + return v + + +# Each Original test case has +# - a function and its two derivatives, +# - additional arguments, +# - a bracket enclosing a root, +# - the order of differentiability (smoothness) on this interval +# - a starting value for methods which don't require a bracket +# - the root (inside the bracket) +# - an Identifier of the test case + +_ORIGINAL_TESTS_KEYS = [ + "f", "fprime", "fprime2", "args", "bracket", "smoothness", "x0", "root", "ID" +] +_ORIGINAL_TESTS = [ + [f1, f1_fp, f1_fpp, (), [0.5, np.sqrt(3)], np.inf, 0.6, 1.0, "original.01.00"], + [f2, f2_fp, f2_fpp, (), [0.5, np.sqrt(3)], np.inf, 0.6, 1.0, "original.02.00"], + [f3, f3_fp, f3_fpp, (), [0.5, np.sqrt(3)], np.inf, 0.6, 1.0, "original.03.00"], + [f4, None, None, (), [0.5, np.sqrt(3)], -1, 0.6, 1.0, "original.04.00"], + [f5, None, None, (), [0.5, np.sqrt(3)], -1, 0.6, 1.0, "original.05.00"], + [f6, None, None, (), [0.5, np.sqrt(3)], -np.inf, 0.6, 1.0, "original.05.00"] +] + +_ORIGINAL_TESTS_DICTS = [ + dict(zip(_ORIGINAL_TESTS_KEYS, testcase)) for testcase in _ORIGINAL_TESTS +] + +# ################## +# "APS" test cases +# Functions and test cases that appear in [1] + + +def aps01_f(x): + r"""Straightforward sum of trigonometric function and polynomial""" + return np.sin(x) - x / 2 + + +def aps01_fp(x): + return np.cos(x) - 1.0 / 2 + + +def aps01_fpp(x): + return -np.sin(x) + + +def aps02_f(x): + r"""poles at x=n**2, 1st and 2nd derivatives at root are also close to 0""" + ii = np.arange(1, 21) + return -2 * np.sum((2 * ii - 5)**2 / (x - ii**2)**3) + + +def aps02_fp(x): + ii = np.arange(1, 21) + return 6 * np.sum((2 * ii - 5)**2 / (x - ii**2)**4) + + +def aps02_fpp(x): + ii = np.arange(1, 21) + return 24 * np.sum((2 * ii - 5)**2 / (x - ii**2)**5) + + +def aps03_f(x, a, b): + r"""Rapidly changing at the root""" + return a * x * np.exp(b * x) + + +def aps03_fp(x, a, b): + return a * (b * x + 1) * np.exp(b * x) + + +def aps03_fpp(x, a, b): + return a * (b * (b * x + 1) + b) * np.exp(b * x) + + +def aps04_f(x, n, a): + r"""Medium-degree polynomial""" + return x**n - a + + +def aps04_fp(x, n, a): + return n * x**(n - 1) + + +def aps04_fpp(x, n, a): + return n * (n - 1) * x**(n - 2) + + +def aps05_f(x): + r"""Simple Trigonometric function""" + return np.sin(x) - 1.0 / 2 + + +def aps05_fp(x): + return np.cos(x) + + +def aps05_fpp(x): + return -np.sin(x) + + +def aps06_f(x, n): + r"""Exponential rapidly changing from -1 to 1 at x=0""" + return 2 * x * np.exp(-n) - 2 * np.exp(-n * x) + 1 + + +def aps06_fp(x, n): + return 2 * np.exp(-n) + 2 * n * np.exp(-n * x) + + +def aps06_fpp(x, n): + return -2 * n * n * np.exp(-n * x) + + +def aps07_f(x, n): + r"""Upside down parabola with parametrizable height""" + return (1 + (1 - n)**2) * x - (1 - n * x)**2 + + +def aps07_fp(x, n): + return (1 + (1 - n)**2) + 2 * n * (1 - n * x) + + +def aps07_fpp(x, n): + return -2 * n * n + + +def aps08_f(x, n): + r"""Degree n polynomial""" + return x * x - (1 - x)**n + + +def aps08_fp(x, n): + return 2 * x + n * (1 - x)**(n - 1) + + +def aps08_fpp(x, n): + return 2 - n * (n - 1) * (1 - x)**(n - 2) + + +def aps09_f(x, n): + r"""Upside down quartic with parametrizable height""" + return (1 + (1 - n)**4) * x - (1 - n * x)**4 + + +def aps09_fp(x, n): 
+ return (1 + (1 - n)**4) + 4 * n * (1 - n * x)**3 + + +def aps09_fpp(x, n): + return -12 * n * (1 - n * x)**2 + + +def aps10_f(x, n): + r"""Exponential plus a polynomial""" + return np.exp(-n * x) * (x - 1) + x**n + + +def aps10_fp(x, n): + return np.exp(-n * x) * (-n * (x - 1) + 1) + n * x**(n - 1) + + +def aps10_fpp(x, n): + return (np.exp(-n * x) * (-n * (-n * (x - 1) + 1) + -n * x) + + n * (n - 1) * x**(n - 2)) + + +def aps11_f(x, n): + r"""Rational function with a zero at x=1/n and a pole at x=0""" + return (n * x - 1) / ((n - 1) * x) + + +def aps11_fp(x, n): + return 1 / (n - 1) / x**2 + + +def aps11_fpp(x, n): + return -2 / (n - 1) / x**3 + + +def aps12_f(x, n): + r"""nth root of x, with a zero at x=n""" + return np.power(x, 1.0 / n) - np.power(n, 1.0 / n) + + +def aps12_fp(x, n): + return np.power(x, (1.0 - n) / n) / n + + +def aps12_fpp(x, n): + return np.power(x, (1.0 - 2 * n) / n) * (1.0 / n) * (1.0 - n) / n + + +_MAX_EXPABLE = np.log(np.finfo(float).max) + + +def aps13_f(x): + r"""Function with *all* derivatives 0 at the root""" + if x == 0: + return 0 + # x2 = 1.0/x**2 + # if x2 > 708: + # return 0 + y = 1 / x**2 + if y > _MAX_EXPABLE: + return 0 + return x / np.exp(y) + + +def aps13_fp(x): + if x == 0: + return 0 + y = 1 / x**2 + if y > _MAX_EXPABLE: + return 0 + return (1 + 2 / x**2) / np.exp(y) + + +def aps13_fpp(x): + if x == 0: + return 0 + y = 1 / x**2 + if y > _MAX_EXPABLE: + return 0 + return 2 * (2 - x**2) / x**5 / np.exp(y) + + +def aps14_f(x, n): + r"""0 for negative x-values, trigonometric+linear for x positive""" + if x <= 0: + return -n / 20.0 + return n / 20.0 * (x / 1.5 + np.sin(x) - 1) + + +def aps14_fp(x, n): + if x <= 0: + return 0 + return n / 20.0 * (1.0 / 1.5 + np.cos(x)) + + +def aps14_fpp(x, n): + if x <= 0: + return 0 + return -n / 20.0 * (np.sin(x)) + + +def aps15_f(x, n): + r"""piecewise linear, constant outside of [0, 0.002/(1+n)]""" + if x < 0: + return -0.859 + if x > 2 * 1e-3 / (1 + n): + return np.e - 1.859 + return np.exp((n + 1) * x / 2 * 1000) - 1.859 + + +def aps15_fp(x, n): + if not 0 <= x <= 2 * 1e-3 / (1 + n): + return np.e - 1.859 + return np.exp((n + 1) * x / 2 * 1000) * (n + 1) / 2 * 1000 + + +def aps15_fpp(x, n): + if not 0 <= x <= 2 * 1e-3 / (1 + n): + return np.e - 1.859 + return np.exp((n + 1) * x / 2 * 1000) * (n + 1) / 2 * 1000 * (n + 1) / 2 * 1000 + + +# Each APS test case has +# - a function and its two derivatives, +# - additional arguments, +# - a bracket enclosing a root, +# - the order of differentiability of the function on this interval +# - a starting value for methods which don't require a bracket +# - the root (inside the bracket) +# - an Identifier of the test case +# +# Algorithm 748 is a bracketing algorithm so a bracketing interval was provided +# in [1] for each test case. Newton and Halley methods need a single +# starting point x0, which was chosen to be near the middle of the interval, +# unless that would have made the problem too easy. 
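+
+# A minimal usage sketch (illustrative only, not part of the original module),
+# assuming the dictionary layout of ``_ORIGINAL_TESTS_DICTS`` defined above.
+# A bracketing solver needs no derivatives, so only ``f``, ``args`` and
+# ``bracket`` are used:
+#
+#     from scipy.optimize import root_scalar
+#     case = _ORIGINAL_TESTS_DICTS[0]
+#     sol = root_scalar(case["f"], args=case["args"],
+#                       bracket=case["bracket"], method="brentq")
+#     # sol.root should agree with case["root"] (here 1.0) to solver tolerance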
+ +_APS_TESTS_KEYS = [ + "f", "fprime", "fprime2", "args", "bracket", "smoothness", "x0", "root", "ID" +] +_APS_TESTS = [ + [aps01_f, aps01_fp, aps01_fpp, (), [np.pi / 2, np.pi], np.inf, + 3, 1.89549426703398094e+00, "aps.01.00"], + [aps02_f, aps02_fp, aps02_fpp, (), [1 + 1e-9, 4 - 1e-9], np.inf, + 2, 3.02291534727305677e+00, "aps.02.00"], + [aps02_f, aps02_fp, aps02_fpp, (), [4 + 1e-9, 9 - 1e-9], np.inf, + 5, 6.68375356080807848e+00, "aps.02.01"], + [aps02_f, aps02_fp, aps02_fpp, (), [9 + 1e-9, 16 - 1e-9], np.inf, + 10, 1.12387016550022114e+01, "aps.02.02"], + [aps02_f, aps02_fp, aps02_fpp, (), [16 + 1e-9, 25 - 1e-9], np.inf, + 17, 1.96760000806234103e+01, "aps.02.03"], + [aps02_f, aps02_fp, aps02_fpp, (), [25 + 1e-9, 36 - 1e-9], np.inf, + 26, 2.98282273265047557e+01, "aps.02.04"], + [aps02_f, aps02_fp, aps02_fpp, (), [36 + 1e-9, 49 - 1e-9], np.inf, + 37, 4.19061161952894139e+01, "aps.02.05"], + [aps02_f, aps02_fp, aps02_fpp, (), [49 + 1e-9, 64 - 1e-9], np.inf, + 50, 5.59535958001430913e+01, "aps.02.06"], + [aps02_f, aps02_fp, aps02_fpp, (), [64 + 1e-9, 81 - 1e-9], np.inf, + 65, 7.19856655865877997e+01, "aps.02.07"], + [aps02_f, aps02_fp, aps02_fpp, (), [81 + 1e-9, 100 - 1e-9], np.inf, + 82, 9.00088685391666701e+01, "aps.02.08"], + [aps02_f, aps02_fp, aps02_fpp, (), [100 + 1e-9, 121 - 1e-9], np.inf, + 101, 1.10026532748330197e+02, "aps.02.09"], + [aps03_f, aps03_fp, aps03_fpp, (-40, -1), [-9, 31], np.inf, + -2, 0, "aps.03.00"], + [aps03_f, aps03_fp, aps03_fpp, (-100, -2), [-9, 31], np.inf, + -2, 0, "aps.03.01"], + [aps03_f, aps03_fp, aps03_fpp, (-200, -3), [-9, 31], np.inf, + -2, 0, "aps.03.02"], + [aps04_f, aps04_fp, aps04_fpp, (4, 0.2), [0, 5], np.inf, + 2.5, 6.68740304976422006e-01, "aps.04.00"], + [aps04_f, aps04_fp, aps04_fpp, (6, 0.2), [0, 5], np.inf, + 2.5, 7.64724491331730039e-01, "aps.04.01"], + [aps04_f, aps04_fp, aps04_fpp, (8, 0.2), [0, 5], np.inf, + 2.5, 8.17765433957942545e-01, "aps.04.02"], + [aps04_f, aps04_fp, aps04_fpp, (10, 0.2), [0, 5], np.inf, + 2.5, 8.51339922520784609e-01, "aps.04.03"], + [aps04_f, aps04_fp, aps04_fpp, (12, 0.2), [0, 5], np.inf, + 2.5, 8.74485272221167897e-01, "aps.04.04"], + [aps04_f, aps04_fp, aps04_fpp, (4, 1), [0, 5], np.inf, + 2.5, 1, "aps.04.05"], + [aps04_f, aps04_fp, aps04_fpp, (6, 1), [0, 5], np.inf, + 2.5, 1, "aps.04.06"], + [aps04_f, aps04_fp, aps04_fpp, (8, 1), [0, 5], np.inf, + 2.5, 1, "aps.04.07"], + [aps04_f, aps04_fp, aps04_fpp, (10, 1), [0, 5], np.inf, + 2.5, 1, "aps.04.08"], + [aps04_f, aps04_fp, aps04_fpp, (12, 1), [0, 5], np.inf, + 2.5, 1, "aps.04.09"], + [aps04_f, aps04_fp, aps04_fpp, (8, 1), [-0.95, 4.05], np.inf, + 1.5, 1, "aps.04.10"], + [aps04_f, aps04_fp, aps04_fpp, (10, 1), [-0.95, 4.05], np.inf, + 1.5, 1, "aps.04.11"], + [aps04_f, aps04_fp, aps04_fpp, (12, 1), [-0.95, 4.05], np.inf, + 1.5, 1, "aps.04.12"], + [aps04_f, aps04_fp, aps04_fpp, (14, 1), [-0.95, 4.05], np.inf, + 1.5, 1, "aps.04.13"], + [aps05_f, aps05_fp, aps05_fpp, (), [0, 1.5], np.inf, + 1.3, np.pi / 6, "aps.05.00"], + [aps06_f, aps06_fp, aps06_fpp, (1,), [0, 1], np.inf, + 0.5, 4.22477709641236709e-01, "aps.06.00"], + [aps06_f, aps06_fp, aps06_fpp, (2,), [0, 1], np.inf, + 0.5, 3.06699410483203705e-01, "aps.06.01"], + [aps06_f, aps06_fp, aps06_fpp, (3,), [0, 1], np.inf, + 0.5, 2.23705457654662959e-01, "aps.06.02"], + [aps06_f, aps06_fp, aps06_fpp, (4,), [0, 1], np.inf, + 0.5, 1.71719147519508369e-01, "aps.06.03"], + [aps06_f, aps06_fp, aps06_fpp, (5,), [0, 1], np.inf, + 0.4, 1.38257155056824066e-01, "aps.06.04"], + [aps06_f, aps06_fp, aps06_fpp, (20,), [0, 1], 
np.inf, + 0.1, 3.46573590208538521e-02, "aps.06.05"], + [aps06_f, aps06_fp, aps06_fpp, (40,), [0, 1], np.inf, + 5e-02, 1.73286795139986315e-02, "aps.06.06"], + [aps06_f, aps06_fp, aps06_fpp, (60,), [0, 1], np.inf, + 1.0 / 30, 1.15524530093324210e-02, "aps.06.07"], + [aps06_f, aps06_fp, aps06_fpp, (80,), [0, 1], np.inf, + 2.5e-02, 8.66433975699931573e-03, "aps.06.08"], + [aps06_f, aps06_fp, aps06_fpp, (100,), [0, 1], np.inf, + 2e-02, 6.93147180559945415e-03, "aps.06.09"], + [aps07_f, aps07_fp, aps07_fpp, (5,), [0, 1], np.inf, + 0.4, 3.84025518406218985e-02, "aps.07.00"], + [aps07_f, aps07_fp, aps07_fpp, (10,), [0, 1], np.inf, + 0.4, 9.90000999800049949e-03, "aps.07.01"], + [aps07_f, aps07_fp, aps07_fpp, (20,), [0, 1], np.inf, + 0.4, 2.49375003906201174e-03, "aps.07.02"], + [aps08_f, aps08_fp, aps08_fpp, (2,), [0, 1], np.inf, + 0.9, 0.5, "aps.08.00"], + [aps08_f, aps08_fp, aps08_fpp, (5,), [0, 1], np.inf, + 0.9, 3.45954815848242059e-01, "aps.08.01"], + [aps08_f, aps08_fp, aps08_fpp, (10,), [0, 1], np.inf, + 0.9, 2.45122333753307220e-01, "aps.08.02"], + [aps08_f, aps08_fp, aps08_fpp, (15,), [0, 1], np.inf, + 0.9, 1.95547623536565629e-01, "aps.08.03"], + [aps08_f, aps08_fp, aps08_fpp, (20,), [0, 1], np.inf, + 0.9, 1.64920957276440960e-01, "aps.08.04"], + [aps09_f, aps09_fp, aps09_fpp, (1,), [0, 1], np.inf, + 0.5, 2.75508040999484394e-01, "aps.09.00"], + [aps09_f, aps09_fp, aps09_fpp, (2,), [0, 1], np.inf, + 0.5, 1.37754020499742197e-01, "aps.09.01"], + [aps09_f, aps09_fp, aps09_fpp, (4,), [0, 1], np.inf, + 0.5, 1.03052837781564422e-02, "aps.09.02"], + [aps09_f, aps09_fp, aps09_fpp, (5,), [0, 1], np.inf, + 0.5, 3.61710817890406339e-03, "aps.09.03"], + [aps09_f, aps09_fp, aps09_fpp, (8,), [0, 1], np.inf, + 0.5, 4.10872918496395375e-04, "aps.09.04"], + [aps09_f, aps09_fp, aps09_fpp, (15,), [0, 1], np.inf, + 0.5, 2.59895758929076292e-05, "aps.09.05"], + [aps09_f, aps09_fp, aps09_fpp, (20,), [0, 1], np.inf, + 0.5, 7.66859512218533719e-06, "aps.09.06"], + [aps10_f, aps10_fp, aps10_fpp, (1,), [0, 1], np.inf, + 0.9, 4.01058137541547011e-01, "aps.10.00"], + [aps10_f, aps10_fp, aps10_fpp, (5,), [0, 1], np.inf, + 0.9, 5.16153518757933583e-01, "aps.10.01"], + [aps10_f, aps10_fp, aps10_fpp, (10,), [0, 1], np.inf, + 0.9, 5.39522226908415781e-01, "aps.10.02"], + [aps10_f, aps10_fp, aps10_fpp, (15,), [0, 1], np.inf, + 0.9, 5.48182294340655241e-01, "aps.10.03"], + [aps10_f, aps10_fp, aps10_fpp, (20,), [0, 1], np.inf, + 0.9, 5.52704666678487833e-01, "aps.10.04"], + [aps11_f, aps11_fp, aps11_fpp, (2,), [0.01, 1], np.inf, + 1e-02, 1.0 / 2, "aps.11.00"], + [aps11_f, aps11_fp, aps11_fpp, (5,), [0.01, 1], np.inf, + 1e-02, 1.0 / 5, "aps.11.01"], + [aps11_f, aps11_fp, aps11_fpp, (15,), [0.01, 1], np.inf, + 1e-02, 1.0 / 15, "aps.11.02"], + [aps11_f, aps11_fp, aps11_fpp, (20,), [0.01, 1], np.inf, + 1e-02, 1.0 / 20, "aps.11.03"], + [aps12_f, aps12_fp, aps12_fpp, (2,), [1, 100], np.inf, + 1.1, 2, "aps.12.00"], + [aps12_f, aps12_fp, aps12_fpp, (3,), [1, 100], np.inf, + 1.1, 3, "aps.12.01"], + [aps12_f, aps12_fp, aps12_fpp, (4,), [1, 100], np.inf, + 1.1, 4, "aps.12.02"], + [aps12_f, aps12_fp, aps12_fpp, (5,), [1, 100], np.inf, + 1.1, 5, "aps.12.03"], + [aps12_f, aps12_fp, aps12_fpp, (6,), [1, 100], np.inf, + 1.1, 6, "aps.12.04"], + [aps12_f, aps12_fp, aps12_fpp, (7,), [1, 100], np.inf, + 1.1, 7, "aps.12.05"], + [aps12_f, aps12_fp, aps12_fpp, (9,), [1, 100], np.inf, + 1.1, 9, "aps.12.06"], + [aps12_f, aps12_fp, aps12_fpp, (11,), [1, 100], np.inf, + 1.1, 11, "aps.12.07"], + [aps12_f, aps12_fp, aps12_fpp, (13,), [1, 100], 
np.inf, + 1.1, 13, "aps.12.08"], + [aps12_f, aps12_fp, aps12_fpp, (15,), [1, 100], np.inf, + 1.1, 15, "aps.12.09"], + [aps12_f, aps12_fp, aps12_fpp, (17,), [1, 100], np.inf, + 1.1, 17, "aps.12.10"], + [aps12_f, aps12_fp, aps12_fpp, (19,), [1, 100], np.inf, + 1.1, 19, "aps.12.11"], + [aps12_f, aps12_fp, aps12_fpp, (21,), [1, 100], np.inf, + 1.1, 21, "aps.12.12"], + [aps12_f, aps12_fp, aps12_fpp, (23,), [1, 100], np.inf, + 1.1, 23, "aps.12.13"], + [aps12_f, aps12_fp, aps12_fpp, (25,), [1, 100], np.inf, + 1.1, 25, "aps.12.14"], + [aps12_f, aps12_fp, aps12_fpp, (27,), [1, 100], np.inf, + 1.1, 27, "aps.12.15"], + [aps12_f, aps12_fp, aps12_fpp, (29,), [1, 100], np.inf, + 1.1, 29, "aps.12.16"], + [aps12_f, aps12_fp, aps12_fpp, (31,), [1, 100], np.inf, + 1.1, 31, "aps.12.17"], + [aps12_f, aps12_fp, aps12_fpp, (33,), [1, 100], np.inf, + 1.1, 33, "aps.12.18"], + [aps13_f, aps13_fp, aps13_fpp, (), [-1, 4], np.inf, + 1.5, 0, "aps.13.00"], + [aps14_f, aps14_fp, aps14_fpp, (1,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.00"], + [aps14_f, aps14_fp, aps14_fpp, (2,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.01"], + [aps14_f, aps14_fp, aps14_fpp, (3,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.02"], + [aps14_f, aps14_fp, aps14_fpp, (4,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.03"], + [aps14_f, aps14_fp, aps14_fpp, (5,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.04"], + [aps14_f, aps14_fp, aps14_fpp, (6,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.05"], + [aps14_f, aps14_fp, aps14_fpp, (7,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.06"], + [aps14_f, aps14_fp, aps14_fpp, (8,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.07"], + [aps14_f, aps14_fp, aps14_fpp, (9,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.08"], + [aps14_f, aps14_fp, aps14_fpp, (10,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.09"], + [aps14_f, aps14_fp, aps14_fpp, (11,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.10"], + [aps14_f, aps14_fp, aps14_fpp, (12,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.11"], + [aps14_f, aps14_fp, aps14_fpp, (13,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.12"], + [aps14_f, aps14_fp, aps14_fpp, (14,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.13"], + [aps14_f, aps14_fp, aps14_fpp, (15,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.14"], + [aps14_f, aps14_fp, aps14_fpp, (16,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.15"], + [aps14_f, aps14_fp, aps14_fpp, (17,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.16"], + [aps14_f, aps14_fp, aps14_fpp, (18,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.17"], + [aps14_f, aps14_fp, aps14_fpp, (19,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.18"], + [aps14_f, aps14_fp, aps14_fpp, (20,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.19"], + [aps14_f, aps14_fp, aps14_fpp, (21,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.20"], + [aps14_f, aps14_fp, aps14_fpp, (22,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.21"], + [aps14_f, aps14_fp, aps14_fpp, (23,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.22"], + [aps14_f, aps14_fp, aps14_fpp, (24,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.23"], + [aps14_f, aps14_fp, aps14_fpp, (25,), [-1000, np.pi 
/ 2], 0, + 1, 6.23806518961612433e-01, "aps.14.24"], + [aps14_f, aps14_fp, aps14_fpp, (26,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.25"], + [aps14_f, aps14_fp, aps14_fpp, (27,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.26"], + [aps14_f, aps14_fp, aps14_fpp, (28,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.27"], + [aps14_f, aps14_fp, aps14_fpp, (29,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.28"], + [aps14_f, aps14_fp, aps14_fpp, (30,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.29"], + [aps14_f, aps14_fp, aps14_fpp, (31,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.30"], + [aps14_f, aps14_fp, aps14_fpp, (32,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.31"], + [aps14_f, aps14_fp, aps14_fpp, (33,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.32"], + [aps14_f, aps14_fp, aps14_fpp, (34,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.33"], + [aps14_f, aps14_fp, aps14_fpp, (35,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.34"], + [aps14_f, aps14_fp, aps14_fpp, (36,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.35"], + [aps14_f, aps14_fp, aps14_fpp, (37,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.36"], + [aps14_f, aps14_fp, aps14_fpp, (38,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.37"], + [aps14_f, aps14_fp, aps14_fpp, (39,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.38"], + [aps14_f, aps14_fp, aps14_fpp, (40,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.39"], + [aps15_f, aps15_fp, aps15_fpp, (20,), [-1000, 1e-4], 0, + -2, 5.90513055942197166e-05, "aps.15.00"], + [aps15_f, aps15_fp, aps15_fpp, (21,), [-1000, 1e-4], 0, + -2, 5.63671553399369967e-05, "aps.15.01"], + [aps15_f, aps15_fp, aps15_fpp, (22,), [-1000, 1e-4], 0, + -2, 5.39164094555919196e-05, "aps.15.02"], + [aps15_f, aps15_fp, aps15_fpp, (23,), [-1000, 1e-4], 0, + -2, 5.16698923949422470e-05, "aps.15.03"], + [aps15_f, aps15_fp, aps15_fpp, (24,), [-1000, 1e-4], 0, + -2, 4.96030966991445609e-05, "aps.15.04"], + [aps15_f, aps15_fp, aps15_fpp, (25,), [-1000, 1e-4], 0, + -2, 4.76952852876389951e-05, "aps.15.05"], + [aps15_f, aps15_fp, aps15_fpp, (26,), [-1000, 1e-4], 0, + -2, 4.59287932399486662e-05, "aps.15.06"], + [aps15_f, aps15_fp, aps15_fpp, (27,), [-1000, 1e-4], 0, + -2, 4.42884791956647841e-05, "aps.15.07"], + [aps15_f, aps15_fp, aps15_fpp, (28,), [-1000, 1e-4], 0, + -2, 4.27612902578832391e-05, "aps.15.08"], + [aps15_f, aps15_fp, aps15_fpp, (29,), [-1000, 1e-4], 0, + -2, 4.13359139159538030e-05, "aps.15.09"], + [aps15_f, aps15_fp, aps15_fpp, (30,), [-1000, 1e-4], 0, + -2, 4.00024973380198076e-05, "aps.15.10"], + [aps15_f, aps15_fp, aps15_fpp, (31,), [-1000, 1e-4], 0, + -2, 3.87524192962066869e-05, "aps.15.11"], + [aps15_f, aps15_fp, aps15_fpp, (32,), [-1000, 1e-4], 0, + -2, 3.75781035599579910e-05, "aps.15.12"], + [aps15_f, aps15_fp, aps15_fpp, (33,), [-1000, 1e-4], 0, + -2, 3.64728652199592355e-05, "aps.15.13"], + [aps15_f, aps15_fp, aps15_fpp, (34,), [-1000, 1e-4], 0, + -2, 3.54307833565318273e-05, "aps.15.14"], + [aps15_f, aps15_fp, aps15_fpp, (35,), [-1000, 1e-4], 0, + -2, 3.44465949299614980e-05, "aps.15.15"], + [aps15_f, aps15_fp, aps15_fpp, (36,), [-1000, 1e-4], 0, + -2, 3.35156058778003705e-05, "aps.15.16"], + [aps15_f, aps15_fp, aps15_fpp, (37,), [-1000, 1e-4], 0, + -2, 3.26336162494372125e-05, "aps.15.17"], + [aps15_f, aps15_fp, aps15_fpp, (38,), 
[-1000, 1e-4], 0, + -2, 3.17968568584260013e-05, "aps.15.18"], + [aps15_f, aps15_fp, aps15_fpp, (39,), [-1000, 1e-4], 0, + -2, 3.10019354369653455e-05, "aps.15.19"], + [aps15_f, aps15_fp, aps15_fpp, (40,), [-1000, 1e-4], 0, + -2, 3.02457906702100968e-05, "aps.15.20"], + [aps15_f, aps15_fp, aps15_fpp, (100,), [-1000, 1e-4], 0, + -2, 1.22779942324615231e-05, "aps.15.21"], + [aps15_f, aps15_fp, aps15_fpp, (200,), [-1000, 1e-4], 0, + -2, 6.16953939044086617e-06, "aps.15.22"], + [aps15_f, aps15_fp, aps15_fpp, (300,), [-1000, 1e-4], 0, + -2, 4.11985852982928163e-06, "aps.15.23"], + [aps15_f, aps15_fp, aps15_fpp, (400,), [-1000, 1e-4], 0, + -2, 3.09246238772721682e-06, "aps.15.24"], + [aps15_f, aps15_fp, aps15_fpp, (500,), [-1000, 1e-4], 0, + -2, 2.47520442610501789e-06, "aps.15.25"], + [aps15_f, aps15_fp, aps15_fpp, (600,), [-1000, 1e-4], 0, + -2, 2.06335676785127107e-06, "aps.15.26"], + [aps15_f, aps15_fp, aps15_fpp, (700,), [-1000, 1e-4], 0, + -2, 1.76901200781542651e-06, "aps.15.27"], + [aps15_f, aps15_fp, aps15_fpp, (800,), [-1000, 1e-4], 0, + -2, 1.54816156988591016e-06, "aps.15.28"], + [aps15_f, aps15_fp, aps15_fpp, (900,), [-1000, 1e-4], 0, + -2, 1.37633453660223511e-06, "aps.15.29"], + [aps15_f, aps15_fp, aps15_fpp, (1000,), [-1000, 1e-4], 0, + -2, 1.23883857889971403e-06, "aps.15.30"] +] + +_APS_TESTS_DICTS = [dict(zip(_APS_TESTS_KEYS, testcase)) for testcase in _APS_TESTS] + + +# ################## +# "complex" test cases +# A few simple, complex-valued, functions, defined on the complex plane. + + +def cplx01_f(z, n, a): + r"""z**n-a: Use to find the nth root of a""" + return z**n - a + + +def cplx01_fp(z, n, a): + return n * z**(n - 1) + + +def cplx01_fpp(z, n, a): + return n * (n - 1) * z**(n - 2) + + +def cplx02_f(z, a): + r"""e**z - a: Use to find the log of a""" + return np.exp(z) - a + + +def cplx02_fp(z, a): + return np.exp(z) + + +def cplx02_fpp(z, a): + return np.exp(z) + + +# Each "complex" test case has +# - a function and its two derivatives, +# - additional arguments, +# - the order of differentiability of the function on this interval +# - two starting values x0 and x1 +# - the root +# - an Identifier of the test case +# +# Algorithm 748 is a bracketing algorithm so a bracketing interval was provided +# in [1] for each test case. Newton and Halley need a single starting point +# x0, which was chosen to be near the middle of the interval, unless that +# would make the problem too easy. 
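+# Illustrative sketch only (not part of the SciPy test data): the complex
+# cases are intended for derivative-based solvers started from one or two
+# complex points, e.g. Newton's method on cplx01_f. The helper name
+# `_demo_complex_newton` is hypothetical and unused elsewhere.
+def _demo_complex_newton():
+    """Recover a square root of -1 from a complex starting point."""
+    from scipy.optimize import newton
+    # f(z) = z**2 - (-1), f'(z) = 2*z; starting near 1+1j converges to 1j
+    return newton(cplx01_f, 1 + 1j, fprime=cplx01_fp, args=(2, -1))
+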
+ + +_COMPLEX_TESTS_KEYS = [ + "f", "fprime", "fprime2", "args", "smoothness", "x0", "x1", "root", "ID" +] +_COMPLEX_TESTS = [ + [cplx01_f, cplx01_fp, cplx01_fpp, (2, -1), np.inf, + (1 + 1j), (0.5 + 0.5j), 1j, "complex.01.00"], + [cplx01_f, cplx01_fp, cplx01_fpp, (3, 1), np.inf, + (-1 + 1j), (-0.5 + 2.0j), (-0.5 + np.sqrt(3) / 2 * 1.0j), + "complex.01.01"], + [cplx01_f, cplx01_fp, cplx01_fpp, (3, -1), np.inf, + 1j, (0.5 + 0.5j), (0.5 + np.sqrt(3) / 2 * 1.0j), + "complex.01.02"], + [cplx01_f, cplx01_fp, cplx01_fpp, (3, 8), np.inf, + 5, 4, 2, "complex.01.03"], + [cplx02_f, cplx02_fp, cplx02_fpp, (-1,), np.inf, + (1 + 2j), (0.5 + 0.5j), np.pi * 1.0j, "complex.02.00"], + [cplx02_f, cplx02_fp, cplx02_fpp, (1j,), np.inf, + (1 + 2j), (0.5 + 0.5j), np.pi * 0.5j, "complex.02.01"], +] + +_COMPLEX_TESTS_DICTS = [ + dict(zip(_COMPLEX_TESTS_KEYS, testcase)) for testcase in _COMPLEX_TESTS +] + + +def _add_a_b(tests): + r"""Add "a" and "b" keys to each test from the "bracket" value""" + for d in tests: + for k, v in zip(['a', 'b'], d.get('bracket', [])): + d[k] = v + + +_add_a_b(_ORIGINAL_TESTS_DICTS) +_add_a_b(_APS_TESTS_DICTS) +_add_a_b(_COMPLEX_TESTS_DICTS) + + +def get_tests(collection='original', smoothness=None): + r"""Return the requested collection of test cases, as an array of dicts with subset-specific keys + + Allowed values of collection: + 'original': The original benchmarking functions. + Real-valued functions of real-valued inputs on an interval with a zero. + f1, .., f3 are continuous and infinitely differentiable + f4 has a single discontinuity at the root + f5 has a root at 1 replacing a 1st order pole + f6 is randomly positive on one side of the root, randomly negative on the other + 'aps': The test problems in the TOMS "Algorithm 748: Enclosing Zeros of Continuous Functions" + paper by Alefeld, Potra and Shi. Real-valued functions of + real-valued inputs on an interval with a zero. + Suitable for methods which start with an enclosing interval, and + derivatives up to 2nd order. + 'complex': Some complex-valued functions of complex-valued inputs. + No enclosing bracket is provided. + Suitable for methods which use one or more starting values, and + derivatives up to 2nd order. 
+ + The dictionary keys will be a subset of + ["f", "fprime", "fprime2", "args", "bracket", "a", b", "smoothness", "x0", "x1", "root", "ID"] + """ # noqa: E501 + collection = collection or "original" + subsets = {"aps": _APS_TESTS_DICTS, + "complex": _COMPLEX_TESTS_DICTS, + "original": _ORIGINAL_TESTS_DICTS, + "chandrupatla": _CHANDRUPATLA_TESTS_DICTS} + tests = subsets.get(collection, []) + if smoothness is not None: + tests = [tc for tc in tests if tc['smoothness'] >= smoothness] + return tests + + +# Backwards compatibility +methods = [cc.bisect, cc.ridder, cc.brenth, cc.brentq] +mstrings = ['cc.bisect', 'cc.ridder', 'cc.brenth', 'cc.brentq'] +functions = [f2, f3, f4, f5, f6] +fstrings = ['f2', 'f3', 'f4', 'f5', 'f6'] + +# ################## +# "Chandrupatla" test cases +# Functions and test cases that appear in [2] + +def fun1(x): + return x**3 - 2*x - 5 +fun1.root = 2.0945514815423265 # additional precision using mpmath.findroot + + +def fun2(x): + return 1 - 1/x**2 +fun2.root = 1 + + +def fun3(x): + return (x-3)**3 +fun3.root = 3 + + +def fun4(x): + return 6*(x-2)**5 +fun4.root = 2 + + +def fun5(x): + return x**9 +fun5.root = 0 + + +def fun6(x): + return x**19 +fun6.root = 0 + + +def fun7(x): + return 0 if abs(x) < 3.8e-4 else x*np.exp(-x**(-2)) +fun7.root = 0 + + +def fun8(x): + xi = 0.61489 + return -(3062*(1-xi)*np.exp(-x))/(xi + (1-xi)*np.exp(-x)) - 1013 + 1628/x +fun8.root = 1.0375360332870405 + + +def fun9(x): + return np.exp(x) - 2 - 0.01/x**2 + .000002/x**3 +fun9.root = 0.7032048403631358 + +# Each "chandropatla" test case has +# - a function, +# - two starting values x0 and x1 +# - the root +# - the number of function evaluations required by Chandrupatla's algorithm +# - an Identifier of the test case +# +# Chandrupatla's is a bracketing algorithm, so a bracketing interval was +# provided in [2] for each test case. No special support for testing with +# secant/Newton/Halley is provided. 
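+# Illustrative sketch only (not part of the tabulated data below): the
+# `nfeval` field records how many function evaluations Chandrupatla's
+# algorithm needed in [2]; a comparable count for another bracketing solver
+# can be obtained by wrapping the function. `_demo_count_nfev` is a
+# hypothetical helper, unused elsewhere in this module.
+def _demo_count_nfev(f, bracket):
+    """Return (root, n_calls) for brentq on the given bracketing interval."""
+    from scipy.optimize import brentq
+    calls = [0]
+
+    def counted(x):
+        calls[0] += 1
+        return f(x)
+
+    root = brentq(counted, *bracket)
+    return root, calls[0]
+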
+ +_CHANDRUPATLA_TESTS_KEYS = ["f", "bracket", "root", "nfeval", "ID"] +_CHANDRUPATLA_TESTS = [ + [fun1, [2, 3], fun1.root, 7], + [fun1, [1, 10], fun1.root, 11], + [fun1, [1, 100], fun1.root, 14], + [fun1, [-1e4, 1e4], fun1.root, 23], + [fun1, [-1e10, 1e10], fun1.root, 43], + [fun2, [0.5, 1.51], fun2.root, 8], + [fun2, [1e-4, 1e4], fun2.root, 22], + [fun2, [1e-6, 1e6], fun2.root, 28], + [fun2, [1e-10, 1e10], fun2.root, 41], + [fun2, [1e-12, 1e12], fun2.root, 48], + [fun3, [0, 5], fun3.root, 21], + [fun3, [-10, 10], fun3.root, 23], + [fun3, [-1e4, 1e4], fun3.root, 36], + [fun3, [-1e6, 1e6], fun3.root, 45], + [fun3, [-1e10, 1e10], fun3.root, 55], + [fun4, [0, 5], fun4.root, 21], + [fun4, [-10, 10], fun4.root, 23], + [fun4, [-1e4, 1e4], fun4.root, 33], + [fun4, [-1e6, 1e6], fun4.root, 43], + [fun4, [-1e10, 1e10], fun4.root, 54], + [fun5, [-1, 4], fun5.root, 21], + [fun5, [-2, 5], fun5.root, 22], + [fun5, [-1, 10], fun5.root, 23], + [fun5, [-5, 50], fun5.root, 25], + [fun5, [-10, 100], fun5.root, 26], + [fun6, [-1., 4.], fun6.root, 21], + [fun6, [-2., 5.], fun6.root, 22], + [fun6, [-1., 10.], fun6.root, 23], + [fun6, [-5., 50.], fun6.root, 25], + [fun6, [-10., 100.], fun6.root, 26], + [fun7, [-1, 4], fun7.root, 8], + [fun7, [-2, 5], fun7.root, 8], + [fun7, [-1, 10], fun7.root, 11], + [fun7, [-5, 50], fun7.root, 18], + [fun7, [-10, 100], fun7.root, 19], + [fun8, [2e-4, 2], fun8.root, 9], + [fun8, [2e-4, 3], fun8.root, 10], + [fun8, [2e-4, 9], fun8.root, 11], + [fun8, [2e-4, 27], fun8.root, 12], + [fun8, [2e-4, 81], fun8.root, 14], + [fun9, [2e-4, 1], fun9.root, 7], + [fun9, [2e-4, 3], fun9.root, 8], + [fun9, [2e-4, 9], fun9.root, 10], + [fun9, [2e-4, 27], fun9.root, 11], + [fun9, [2e-4, 81], fun9.root, 13], +] +_CHANDRUPATLA_TESTS = [test + [f'{test[0].__name__}.{i%5+1}'] + for i, test in enumerate(_CHANDRUPATLA_TESTS)] + +_CHANDRUPATLA_TESTS_DICTS = [dict(zip(_CHANDRUPATLA_TESTS_KEYS, testcase)) + for testcase in _CHANDRUPATLA_TESTS] +_add_a_b(_CHANDRUPATLA_TESTS_DICTS) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_zeros.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_zeros.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..7af3971d3d8f44903d9da2be40e4e414cbf6463d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_zeros.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_zeros_py.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_zeros_py.py new file mode 100644 index 0000000000000000000000000000000000000000..986031920d69578c1c7c470b03deae5b3d24c309 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_zeros_py.py @@ -0,0 +1,1403 @@ +import warnings +from collections import namedtuple +import operator +from . import _zeros +from ._optimize import OptimizeResult +import numpy as np + + +_iter = 100 +_xtol = 2e-12 +_rtol = 4 * np.finfo(float).eps + +__all__ = ['newton', 'bisect', 'ridder', 'brentq', 'brenth', 'toms748', + 'RootResults'] + +# Must agree with CONVERGED, SIGNERR, CONVERR, ... 
in zeros.h +_ECONVERGED = 0 +_ESIGNERR = -1 # used in _chandrupatla +_ECONVERR = -2 +_EVALUEERR = -3 +_ECALLBACK = -4 +_EINPROGRESS = 1 + +CONVERGED = 'converged' +SIGNERR = 'sign error' +CONVERR = 'convergence error' +VALUEERR = 'value error' +INPROGRESS = 'No error' + + +flag_map = {_ECONVERGED: CONVERGED, _ESIGNERR: SIGNERR, _ECONVERR: CONVERR, + _EVALUEERR: VALUEERR, _EINPROGRESS: INPROGRESS} + + +class RootResults(OptimizeResult): + """Represents the root finding result. + + Attributes + ---------- + root : float + Estimated root location. + iterations : int + Number of iterations needed to find the root. + function_calls : int + Number of times the function was called. + converged : bool + True if the routine converged. + flag : str + Description of the cause of termination. + method : str + Root finding method used. + + """ + + def __init__(self, root, iterations, function_calls, flag, method): + self.root = root + self.iterations = iterations + self.function_calls = function_calls + self.converged = flag == _ECONVERGED + if flag in flag_map: + self.flag = flag_map[flag] + else: + self.flag = flag + self.method = method + + +def results_c(full_output, r, method): + if full_output: + x, funcalls, iterations, flag = r + results = RootResults(root=x, + iterations=iterations, + function_calls=funcalls, + flag=flag, method=method) + return x, results + else: + return r + + +def _results_select(full_output, r, method): + """Select from a tuple of (root, funccalls, iterations, flag)""" + x, funcalls, iterations, flag = r + if full_output: + results = RootResults(root=x, + iterations=iterations, + function_calls=funcalls, + flag=flag, method=method) + return x, results + return x + + +def _wrap_nan_raise(f): + + def f_raise(x, *args): + fx = f(x, *args) + f_raise._function_calls += 1 + if np.isnan(fx): + msg = (f'The function value at x={x} is NaN; ' + 'solver cannot continue.') + err = ValueError(msg) + err._x = x + err._function_calls = f_raise._function_calls + raise err + return fx + + f_raise._function_calls = 0 + return f_raise + + +def newton(func, x0, fprime=None, args=(), tol=1.48e-8, maxiter=50, + fprime2=None, x1=None, rtol=0.0, + full_output=False, disp=True): + """ + Find a root of a real or complex function using the Newton-Raphson + (or secant or Halley's) method. + + Find a root of the scalar-valued function `func` given a nearby scalar + starting point `x0`. + The Newton-Raphson method is used if the derivative `fprime` of `func` + is provided, otherwise the secant method is used. If the second order + derivative `fprime2` of `func` is also provided, then Halley's method is + used. + + If `x0` is a sequence with more than one item, `newton` returns an array: + the roots of the function from each (scalar) starting point in `x0`. + In this case, `func` must be vectorized to return a sequence or array of + the same shape as its first argument. If `fprime` (`fprime2`) is given, + then its return must also have the same shape: each element is the first + (second) derivative of `func` with respect to its only variable evaluated + at each element of its first argument. + + `newton` is for finding roots of a scalar-valued functions of a single + variable. For problems involving several variables, see `root`. + + Parameters + ---------- + func : callable + The function whose root is wanted. It must be a function of a + single variable of the form ``f(x,a,b,c...)``, where ``a,b,c...`` + are extra arguments that can be passed in the `args` parameter. 
+ x0 : float, sequence, or ndarray + An initial estimate of the root that should be somewhere near the + actual root. If not scalar, then `func` must be vectorized and return + a sequence or array of the same shape as its first argument. + fprime : callable, optional + The derivative of the function when available and convenient. If it + is None (default), then the secant method is used. + args : tuple, optional + Extra arguments to be used in the function call. + tol : float, optional + The allowable error of the root's value. If `func` is complex-valued, + a larger `tol` is recommended as both the real and imaginary parts + of `x` contribute to ``|x - x0|``. + maxiter : int, optional + Maximum number of iterations. + fprime2 : callable, optional + The second order derivative of the function when available and + convenient. If it is None (default), then the normal Newton-Raphson + or the secant method is used. If it is not None, then Halley's method + is used. + x1 : float, optional + Another estimate of the root that should be somewhere near the + actual root. Used if `fprime` is not provided. + rtol : float, optional + Tolerance (relative) for termination. + full_output : bool, optional + If `full_output` is False (default), the root is returned. + If True and `x0` is scalar, the return value is ``(x, r)``, where ``x`` + is the root and ``r`` is a `RootResults` object. + If True and `x0` is non-scalar, the return value is ``(x, converged, + zero_der)`` (see Returns section for details). + disp : bool, optional + If True, raise a RuntimeError if the algorithm didn't converge, with + the error message containing the number of iterations and current + function value. Otherwise, the convergence status is recorded in a + `RootResults` return object. + Ignored if `x0` is not scalar. + *Note: this has little to do with displaying, however, + the `disp` keyword cannot be renamed for backwards compatibility.* + + Returns + ------- + root : float, sequence, or ndarray + Estimated location where function is zero. + r : `RootResults`, optional + Present if ``full_output=True`` and `x0` is scalar. + Object containing information about the convergence. In particular, + ``r.converged`` is True if the routine converged. + converged : ndarray of bool, optional + Present if ``full_output=True`` and `x0` is non-scalar. + For vector functions, indicates which elements converged successfully. + zero_der : ndarray of bool, optional + Present if ``full_output=True`` and `x0` is non-scalar. + For vector functions, indicates which elements had a zero derivative. + + See Also + -------- + root_scalar : interface to root solvers for scalar functions + root : interface to root solvers for multi-input, multi-output functions + + Notes + ----- + The convergence rate of the Newton-Raphson method is quadratic, + the Halley method is cubic, and the secant method is + sub-quadratic. This means that if the function is well-behaved + the actual error in the estimated root after the nth iteration + is approximately the square (cube for Halley) of the error + after the (n-1)th step. However, the stopping criterion used + here is the step size and there is no guarantee that a root + has been found. Consequently, the result should be verified. + Safer algorithms are brentq, brenth, ridder, and bisect, + but they all require that the root first be bracketed in an + interval where the function changes sign. The brentq algorithm + is recommended for general use in one dimensional problems + when such an interval has been found. 
+ + When `newton` is used with arrays, it is best suited for the following + types of problems: + + * The initial guesses, `x0`, are all relatively the same distance from + the roots. + * Some or all of the extra arguments, `args`, are also arrays so that a + class of similar problems can be solved together. + * The size of the initial guesses, `x0`, is larger than O(100) elements. + Otherwise, a naive loop may perform as well or better than a vector. + + Examples + -------- + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy import optimize + + >>> def f(x): + ... return (x**3 - 1) # only one real root at x = 1 + + ``fprime`` is not provided, use the secant method: + + >>> root = optimize.newton(f, 1.5) + >>> root + 1.0000000000000016 + >>> root = optimize.newton(f, 1.5, fprime2=lambda x: 6 * x) + >>> root + 1.0000000000000016 + + Only ``fprime`` is provided, use the Newton-Raphson method: + + >>> root = optimize.newton(f, 1.5, fprime=lambda x: 3 * x**2) + >>> root + 1.0 + + Both ``fprime2`` and ``fprime`` are provided, use Halley's method: + + >>> root = optimize.newton(f, 1.5, fprime=lambda x: 3 * x**2, + ... fprime2=lambda x: 6 * x) + >>> root + 1.0 + + When we want to find roots for a set of related starting values and/or + function parameters, we can provide both of those as an array of inputs: + + >>> f = lambda x, a: x**3 - a + >>> fder = lambda x, a: 3 * x**2 + >>> rng = np.random.default_rng() + >>> x = rng.standard_normal(100) + >>> a = np.arange(-50, 50) + >>> vec_res = optimize.newton(f, x, fprime=fder, args=(a, ), maxiter=200) + + The above is the equivalent of solving for each value in ``(x, a)`` + separately in a for-loop, just faster: + + >>> loop_res = [optimize.newton(f, x0, fprime=fder, args=(a0,), + ... maxiter=200) + ... for x0, a0 in zip(x, a)] + >>> np.allclose(vec_res, loop_res) + True + + Plot the results found for all values of ``a``: + + >>> analytical_result = np.sign(a) * np.abs(a)**(1/3) + >>> fig, ax = plt.subplots() + >>> ax.plot(a, analytical_result, 'o') + >>> ax.plot(a, vec_res, '.') + >>> ax.set_xlabel('$a$') + >>> ax.set_ylabel('$x$ where $f(x, a)=0$') + >>> plt.show() + + """ + if tol <= 0: + raise ValueError("tol too small (%g <= 0)" % tol) + maxiter = operator.index(maxiter) + if maxiter < 1: + raise ValueError("maxiter must be greater than 0") + if np.size(x0) > 1: + return _array_newton(func, x0, fprime, args, tol, maxiter, fprime2, + full_output) + + # Convert to float (don't use float(x0); this works also for complex x0) + # Use np.asarray because we want x0 to be a numpy object, not a Python + # object. e.g. np.complex(1+1j) > 0 is possible, but (1 + 1j) > 0 raises + # a TypeError + x0 = np.asarray(x0)[()] * 1.0 + p0 = x0 + funcalls = 0 + if fprime is not None: + # Newton-Raphson method + method = "newton" + for itr in range(maxiter): + # first evaluate fval + fval = func(p0, *args) + funcalls += 1 + # If fval is 0, a root has been found, then terminate + if fval == 0: + return _results_select( + full_output, (p0, funcalls, itr, _ECONVERGED), method) + fder = fprime(p0, *args) + funcalls += 1 + if fder == 0: + msg = "Derivative was zero." + if disp: + msg += ( + " Failed to converge after %d iterations, value is %s." 
+ % (itr + 1, p0)) + raise RuntimeError(msg) + warnings.warn(msg, RuntimeWarning, stacklevel=2) + return _results_select( + full_output, (p0, funcalls, itr + 1, _ECONVERR), method) + newton_step = fval / fder + if fprime2: + fder2 = fprime2(p0, *args) + funcalls += 1 + method = "halley" + # Halley's method: + # newton_step /= (1.0 - 0.5 * newton_step * fder2 / fder) + # Only do it if denominator stays close enough to 1 + # Rationale: If 1-adj < 0, then Halley sends x in the + # opposite direction to Newton. Doesn't happen if x is close + # enough to root. + adj = newton_step * fder2 / fder / 2 + if np.abs(adj) < 1: + newton_step /= 1.0 - adj + p = p0 - newton_step + if np.isclose(p, p0, rtol=rtol, atol=tol): + return _results_select( + full_output, (p, funcalls, itr + 1, _ECONVERGED), method) + p0 = p + else: + # Secant method + method = "secant" + if x1 is not None: + if x1 == x0: + raise ValueError("x1 and x0 must be different") + p1 = x1 + else: + eps = 1e-4 + p1 = x0 * (1 + eps) + p1 += (eps if p1 >= 0 else -eps) + q0 = func(p0, *args) + funcalls += 1 + q1 = func(p1, *args) + funcalls += 1 + if abs(q1) < abs(q0): + p0, p1, q0, q1 = p1, p0, q1, q0 + for itr in range(maxiter): + if q1 == q0: + if p1 != p0: + msg = "Tolerance of %s reached." % (p1 - p0) + if disp: + msg += ( + " Failed to converge after %d iterations, value is %s." + % (itr + 1, p1)) + raise RuntimeError(msg) + warnings.warn(msg, RuntimeWarning, stacklevel=2) + p = (p1 + p0) / 2.0 + return _results_select( + full_output, (p, funcalls, itr + 1, _ECONVERR), method) + else: + if abs(q1) > abs(q0): + p = (-q0 / q1 * p1 + p0) / (1 - q0 / q1) + else: + p = (-q1 / q0 * p0 + p1) / (1 - q1 / q0) + if np.isclose(p, p1, rtol=rtol, atol=tol): + return _results_select( + full_output, (p, funcalls, itr + 1, _ECONVERGED), method) + p0, q0 = p1, q1 + p1 = p + q1 = func(p1, *args) + funcalls += 1 + + if disp: + msg = ("Failed to converge after %d iterations, value is %s." + % (itr + 1, p)) + raise RuntimeError(msg) + + return _results_select(full_output, (p, funcalls, itr + 1, _ECONVERR), method) + + +def _array_newton(func, x0, fprime, args, tol, maxiter, fprime2, full_output): + """ + A vectorized version of Newton, Halley, and secant methods for arrays. + + Do not use this method directly. This method is called from `newton` + when ``np.size(x0) > 1`` is ``True``. For docstring, see `newton`. + """ + # Explicitly copy `x0` as `p` will be modified inplace, but the + # user's array should not be altered. 
+ p = np.array(x0, copy=True) + + failures = np.ones_like(p, dtype=bool) + nz_der = np.ones_like(failures) + if fprime is not None: + # Newton-Raphson method + for iteration in range(maxiter): + # first evaluate fval + fval = np.asarray(func(p, *args)) + # If all fval are 0, all roots have been found, then terminate + if not fval.any(): + failures = fval.astype(bool) + break + fder = np.asarray(fprime(p, *args)) + nz_der = (fder != 0) + # stop iterating if all derivatives are zero + if not nz_der.any(): + break + # Newton step + dp = fval[nz_der] / fder[nz_der] + if fprime2 is not None: + fder2 = np.asarray(fprime2(p, *args)) + dp = dp / (1.0 - 0.5 * dp * fder2[nz_der] / fder[nz_der]) + # only update nonzero derivatives + p = np.asarray(p, dtype=np.result_type(p, dp, np.float64)) + p[nz_der] -= dp + failures[nz_der] = np.abs(dp) >= tol # items not yet converged + # stop iterating if there aren't any failures, not incl zero der + if not failures[nz_der].any(): + break + else: + # Secant method + dx = np.finfo(float).eps**0.33 + p1 = p * (1 + dx) + np.where(p >= 0, dx, -dx) + q0 = np.asarray(func(p, *args)) + q1 = np.asarray(func(p1, *args)) + active = np.ones_like(p, dtype=bool) + for iteration in range(maxiter): + nz_der = (q1 != q0) + # stop iterating if all derivatives are zero + if not nz_der.any(): + p = (p1 + p) / 2.0 + break + # Secant Step + dp = (q1 * (p1 - p))[nz_der] / (q1 - q0)[nz_der] + # only update nonzero derivatives + p = np.asarray(p, dtype=np.result_type(p, p1, dp, np.float64)) + p[nz_der] = p1[nz_der] - dp + active_zero_der = ~nz_der & active + p[active_zero_der] = (p1 + p)[active_zero_der] / 2.0 + active &= nz_der # don't assign zero derivatives again + failures[nz_der] = np.abs(dp) >= tol # not yet converged + # stop iterating if there aren't any failures, not incl zero der + if not failures[nz_der].any(): + break + p1, p = p, p1 + q0 = q1 + q1 = np.asarray(func(p1, *args)) + + zero_der = ~nz_der & failures # don't include converged with zero-ders + if zero_der.any(): + # Secant warnings + if fprime is None: + nonzero_dp = (p1 != p) + # non-zero dp, but infinite newton step + zero_der_nz_dp = (zero_der & nonzero_dp) + if zero_der_nz_dp.any(): + rms = np.sqrt( + sum((p1[zero_der_nz_dp] - p[zero_der_nz_dp]) ** 2) + ) + warnings.warn(f'RMS of {rms:g} reached', RuntimeWarning, stacklevel=3) + # Newton or Halley warnings + else: + all_or_some = 'all' if zero_der.all() else 'some' + msg = f'{all_or_some:s} derivatives were zero' + warnings.warn(msg, RuntimeWarning, stacklevel=3) + elif failures.any(): + all_or_some = 'all' if failures.all() else 'some' + msg = f'{all_or_some:s} failed to converge after {maxiter:d} iterations' + if failures.all(): + raise RuntimeError(msg) + warnings.warn(msg, RuntimeWarning, stacklevel=3) + + if full_output: + result = namedtuple('result', ('root', 'converged', 'zero_der')) + p = result(p, ~failures, zero_der) + + return p + + +def bisect(f, a, b, args=(), + xtol=_xtol, rtol=_rtol, maxiter=_iter, + full_output=False, disp=True): + """ + Find root of a function within an interval using bisection. + + Basic bisection routine to find a root of the function `f` between the + arguments `a` and `b`. `f(a)` and `f(b)` cannot have the same signs. + Slow but sure. + + Parameters + ---------- + f : function + Python function returning a number. `f` must be continuous, and + f(a) and f(b) must have opposite signs. + a : scalar + One end of the bracketing interval [a,b]. + b : scalar + The other end of the bracketing interval [a,b]. 
+ xtol : number, optional + The computed root ``x0`` will satisfy ``np.allclose(x, x0, + atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The + parameter must be positive. + rtol : number, optional + The computed root ``x0`` will satisfy ``np.allclose(x, x0, + atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The + parameter cannot be smaller than its default value of + ``4*np.finfo(float).eps``. + maxiter : int, optional + If convergence is not achieved in `maxiter` iterations, an error is + raised. Must be >= 0. + args : tuple, optional + Containing extra arguments for the function `f`. + `f` is called by ``apply(f, (x)+args)``. + full_output : bool, optional + If `full_output` is False, the root is returned. If `full_output` is + True, the return value is ``(x, r)``, where x is the root, and r is + a `RootResults` object. + disp : bool, optional + If True, raise RuntimeError if the algorithm didn't converge. + Otherwise, the convergence status is recorded in a `RootResults` + return object. + + Returns + ------- + root : float + Root of `f` between `a` and `b`. + r : `RootResults` (present if ``full_output = True``) + Object containing information about the convergence. In particular, + ``r.converged`` is True if the routine converged. + + Examples + -------- + + >>> def f(x): + ... return (x**2 - 1) + + >>> from scipy import optimize + + >>> root = optimize.bisect(f, 0, 2) + >>> root + 1.0 + + >>> root = optimize.bisect(f, -2, 0) + >>> root + -1.0 + + See Also + -------- + brentq, brenth, bisect, newton + fixed_point : scalar fixed-point finder + fsolve : n-dimensional root-finding + + """ + if not isinstance(args, tuple): + args = (args,) + maxiter = operator.index(maxiter) + if xtol <= 0: + raise ValueError("xtol too small (%g <= 0)" % xtol) + if rtol < _rtol: + raise ValueError(f"rtol too small ({rtol:g} < {_rtol:g})") + f = _wrap_nan_raise(f) + r = _zeros._bisect(f, a, b, xtol, rtol, maxiter, args, full_output, disp) + return results_c(full_output, r, "bisect") + + +def ridder(f, a, b, args=(), + xtol=_xtol, rtol=_rtol, maxiter=_iter, + full_output=False, disp=True): + """ + Find a root of a function in an interval using Ridder's method. + + Parameters + ---------- + f : function + Python function returning a number. f must be continuous, and f(a) and + f(b) must have opposite signs. + a : scalar + One end of the bracketing interval [a,b]. + b : scalar + The other end of the bracketing interval [a,b]. + xtol : number, optional + The computed root ``x0`` will satisfy ``np.allclose(x, x0, + atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The + parameter must be positive. + rtol : number, optional + The computed root ``x0`` will satisfy ``np.allclose(x, x0, + atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The + parameter cannot be smaller than its default value of + ``4*np.finfo(float).eps``. + maxiter : int, optional + If convergence is not achieved in `maxiter` iterations, an error is + raised. Must be >= 0. + args : tuple, optional + Containing extra arguments for the function `f`. + `f` is called by ``apply(f, (x)+args)``. + full_output : bool, optional + If `full_output` is False, the root is returned. If `full_output` is + True, the return value is ``(x, r)``, where `x` is the root, and `r` is + a `RootResults` object. + disp : bool, optional + If True, raise RuntimeError if the algorithm didn't converge. + Otherwise, the convergence status is recorded in any `RootResults` + return object. 
+ + Returns + ------- + root : float + Root of `f` between `a` and `b`. + r : `RootResults` (present if ``full_output = True``) + Object containing information about the convergence. + In particular, ``r.converged`` is True if the routine converged. + + See Also + -------- + brentq, brenth, bisect, newton : 1-D root-finding + fixed_point : scalar fixed-point finder + + Notes + ----- + Uses [Ridders1979]_ method to find a root of the function `f` between the + arguments `a` and `b`. Ridders' method is faster than bisection, but not + generally as fast as the Brent routines. [Ridders1979]_ provides the + classic description and source of the algorithm. A description can also be + found in any recent edition of Numerical Recipes. + + The routine used here diverges slightly from standard presentations in + order to be a bit more careful of tolerance. + + References + ---------- + .. [Ridders1979] + Ridders, C. F. J. "A New Algorithm for Computing a + Single Root of a Real Continuous Function." + IEEE Trans. Circuits Systems 26, 979-980, 1979. + + Examples + -------- + + >>> def f(x): + ... return (x**2 - 1) + + >>> from scipy import optimize + + >>> root = optimize.ridder(f, 0, 2) + >>> root + 1.0 + + >>> root = optimize.ridder(f, -2, 0) + >>> root + -1.0 + """ + if not isinstance(args, tuple): + args = (args,) + maxiter = operator.index(maxiter) + if xtol <= 0: + raise ValueError("xtol too small (%g <= 0)" % xtol) + if rtol < _rtol: + raise ValueError(f"rtol too small ({rtol:g} < {_rtol:g})") + f = _wrap_nan_raise(f) + r = _zeros._ridder(f, a, b, xtol, rtol, maxiter, args, full_output, disp) + return results_c(full_output, r, "ridder") + + +def brentq(f, a, b, args=(), + xtol=_xtol, rtol=_rtol, maxiter=_iter, + full_output=False, disp=True): + """ + Find a root of a function in a bracketing interval using Brent's method. + + Uses the classic Brent's method to find a root of the function `f` on + the sign changing interval [a , b]. Generally considered the best of the + rootfinding routines here. It is a safe version of the secant method that + uses inverse quadratic extrapolation. Brent's method combines root + bracketing, interval bisection, and inverse quadratic interpolation. It is + sometimes known as the van Wijngaarden-Dekker-Brent method. Brent (1973) + claims convergence is guaranteed for functions computable within [a,b]. + + [Brent1973]_ provides the classic description of the algorithm. Another + description can be found in a recent edition of Numerical Recipes, including + [PressEtal1992]_. A third description is at + http://mathworld.wolfram.com/BrentsMethod.html. It should be easy to + understand the algorithm just by reading our code. Our code diverges a bit + from standard presentations: we choose a different formula for the + extrapolation step. + + Parameters + ---------- + f : function + Python function returning a number. The function :math:`f` + must be continuous, and :math:`f(a)` and :math:`f(b)` must + have opposite signs. + a : scalar + One end of the bracketing interval :math:`[a, b]`. + b : scalar + The other end of the bracketing interval :math:`[a, b]`. + xtol : number, optional + The computed root ``x0`` will satisfy ``np.allclose(x, x0, + atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The + parameter must be positive. For nice functions, Brent's + method will often satisfy the above condition with ``xtol/2`` + and ``rtol/2``. 
[Brent1973]_ + rtol : number, optional + The computed root ``x0`` will satisfy ``np.allclose(x, x0, + atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The + parameter cannot be smaller than its default value of + ``4*np.finfo(float).eps``. For nice functions, Brent's + method will often satisfy the above condition with ``xtol/2`` + and ``rtol/2``. [Brent1973]_ + maxiter : int, optional + If convergence is not achieved in `maxiter` iterations, an error is + raised. Must be >= 0. + args : tuple, optional + Containing extra arguments for the function `f`. + `f` is called by ``apply(f, (x)+args)``. + full_output : bool, optional + If `full_output` is False, the root is returned. If `full_output` is + True, the return value is ``(x, r)``, where `x` is the root, and `r` is + a `RootResults` object. + disp : bool, optional + If True, raise RuntimeError if the algorithm didn't converge. + Otherwise, the convergence status is recorded in any `RootResults` + return object. + + Returns + ------- + root : float + Root of `f` between `a` and `b`. + r : `RootResults` (present if ``full_output = True``) + Object containing information about the convergence. In particular, + ``r.converged`` is True if the routine converged. + + Notes + ----- + `f` must be continuous. f(a) and f(b) must have opposite signs. + + Related functions fall into several classes: + + multivariate local optimizers + `fmin`, `fmin_powell`, `fmin_cg`, `fmin_bfgs`, `fmin_ncg` + nonlinear least squares minimizer + `leastsq` + constrained multivariate optimizers + `fmin_l_bfgs_b`, `fmin_tnc`, `fmin_cobyla` + global optimizers + `basinhopping`, `brute`, `differential_evolution` + local scalar minimizers + `fminbound`, `brent`, `golden`, `bracket` + N-D root-finding + `fsolve` + 1-D root-finding + `brenth`, `ridder`, `bisect`, `newton` + scalar fixed-point finder + `fixed_point` + + References + ---------- + .. [Brent1973] + Brent, R. P., + *Algorithms for Minimization Without Derivatives*. + Englewood Cliffs, NJ: Prentice-Hall, 1973. Ch. 3-4. + + .. [PressEtal1992] + Press, W. H.; Flannery, B. P.; Teukolsky, S. A.; and Vetterling, W. T. + *Numerical Recipes in FORTRAN: The Art of Scientific Computing*, 2nd ed. + Cambridge, England: Cambridge University Press, pp. 352-355, 1992. + Section 9.3: "Van Wijngaarden-Dekker-Brent Method." + + Examples + -------- + >>> def f(x): + ... return (x**2 - 1) + + >>> from scipy import optimize + + >>> root = optimize.brentq(f, -2, 0) + >>> root + -1.0 + + >>> root = optimize.brentq(f, 0, 2) + >>> root + 1.0 + """ + if not isinstance(args, tuple): + args = (args,) + maxiter = operator.index(maxiter) + if xtol <= 0: + raise ValueError("xtol too small (%g <= 0)" % xtol) + if rtol < _rtol: + raise ValueError(f"rtol too small ({rtol:g} < {_rtol:g})") + f = _wrap_nan_raise(f) + r = _zeros._brentq(f, a, b, xtol, rtol, maxiter, args, full_output, disp) + return results_c(full_output, r, "brentq") + + +def brenth(f, a, b, args=(), + xtol=_xtol, rtol=_rtol, maxiter=_iter, + full_output=False, disp=True): + """Find a root of a function in a bracketing interval using Brent's + method with hyperbolic extrapolation. + + A variation on the classic Brent routine to find a root of the function f + between the arguments a and b that uses hyperbolic extrapolation instead of + inverse quadratic extrapolation. Bus & Dekker (1975) guarantee convergence + for this method, claiming that the upper bound of function evaluations here + is 4 or 5 times that of bisection. + f(a) and f(b) cannot have the same signs. 
Generally, on a par with the + brent routine, but not as heavily tested. It is a safe version of the + secant method that uses hyperbolic extrapolation. + The version here is by Chuck Harris, and implements Algorithm M of + [BusAndDekker1975]_, where further details (convergence properties, + additional remarks and such) can be found + + Parameters + ---------- + f : function + Python function returning a number. f must be continuous, and f(a) and + f(b) must have opposite signs. + a : scalar + One end of the bracketing interval [a,b]. + b : scalar + The other end of the bracketing interval [a,b]. + xtol : number, optional + The computed root ``x0`` will satisfy ``np.allclose(x, x0, + atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The + parameter must be positive. As with `brentq`, for nice + functions the method will often satisfy the above condition + with ``xtol/2`` and ``rtol/2``. + rtol : number, optional + The computed root ``x0`` will satisfy ``np.allclose(x, x0, + atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The + parameter cannot be smaller than its default value of + ``4*np.finfo(float).eps``. As with `brentq`, for nice functions + the method will often satisfy the above condition with + ``xtol/2`` and ``rtol/2``. + maxiter : int, optional + If convergence is not achieved in `maxiter` iterations, an error is + raised. Must be >= 0. + args : tuple, optional + Containing extra arguments for the function `f`. + `f` is called by ``apply(f, (x)+args)``. + full_output : bool, optional + If `full_output` is False, the root is returned. If `full_output` is + True, the return value is ``(x, r)``, where `x` is the root, and `r` is + a `RootResults` object. + disp : bool, optional + If True, raise RuntimeError if the algorithm didn't converge. + Otherwise, the convergence status is recorded in any `RootResults` + return object. + + Returns + ------- + root : float + Root of `f` between `a` and `b`. + r : `RootResults` (present if ``full_output = True``) + Object containing information about the convergence. In particular, + ``r.converged`` is True if the routine converged. + + See Also + -------- + fmin, fmin_powell, fmin_cg, fmin_bfgs, fmin_ncg : multivariate local optimizers + leastsq : nonlinear least squares minimizer + fmin_l_bfgs_b, fmin_tnc, fmin_cobyla : constrained multivariate optimizers + basinhopping, differential_evolution, brute : global optimizers + fminbound, brent, golden, bracket : local scalar minimizers + fsolve : N-D root-finding + brentq, brenth, ridder, bisect, newton : 1-D root-finding + fixed_point : scalar fixed-point finder + + References + ---------- + .. [BusAndDekker1975] + Bus, J. C. P., Dekker, T. J., + "Two Efficient Algorithms with Guaranteed Convergence for Finding a Zero + of a Function", ACM Transactions on Mathematical Software, Vol. 1, Issue + 4, Dec. 1975, pp. 330-345. Section 3: "Algorithm M". + :doi:`10.1145/355656.355659` + + Examples + -------- + >>> def f(x): + ... 
return (x**2 - 1) + + >>> from scipy import optimize + + >>> root = optimize.brenth(f, -2, 0) + >>> root + -1.0 + + >>> root = optimize.brenth(f, 0, 2) + >>> root + 1.0 + + """ + if not isinstance(args, tuple): + args = (args,) + maxiter = operator.index(maxiter) + if xtol <= 0: + raise ValueError("xtol too small (%g <= 0)" % xtol) + if rtol < _rtol: + raise ValueError(f"rtol too small ({rtol:g} < {_rtol:g})") + f = _wrap_nan_raise(f) + r = _zeros._brenth(f, a, b, xtol, rtol, maxiter, args, full_output, disp) + return results_c(full_output, r, "brenth") + + +################################ +# TOMS "Algorithm 748: Enclosing Zeros of Continuous Functions", by +# Alefeld, G. E. and Potra, F. A. and Shi, Yixun, +# See [1] + + +def _notclose(fs, rtol=_rtol, atol=_xtol): + # Ensure not None, not 0, all finite, and not very close to each other + notclosefvals = ( + all(fs) and all(np.isfinite(fs)) and + not any(any(np.isclose(_f, fs[i + 1:], rtol=rtol, atol=atol)) + for i, _f in enumerate(fs[:-1]))) + return notclosefvals + + +def _secant(xvals, fvals): + """Perform a secant step, taking a little care""" + # Secant has many "mathematically" equivalent formulations + # x2 = x0 - (x1 - x0)/(f1 - f0) * f0 + # = x1 - (x1 - x0)/(f1 - f0) * f1 + # = (-x1 * f0 + x0 * f1) / (f1 - f0) + # = (-f0 / f1 * x1 + x0) / (1 - f0 / f1) + # = (-f1 / f0 * x0 + x1) / (1 - f1 / f0) + x0, x1 = xvals[:2] + f0, f1 = fvals[:2] + if f0 == f1: + return np.nan + if np.abs(f1) > np.abs(f0): + x2 = (-f0 / f1 * x1 + x0) / (1 - f0 / f1) + else: + x2 = (-f1 / f0 * x0 + x1) / (1 - f1 / f0) + return x2 + + +def _update_bracket(ab, fab, c, fc): + """Update a bracket given (c, fc), return the discarded endpoints.""" + fa, fb = fab + idx = (0 if np.sign(fa) * np.sign(fc) > 0 else 1) + rx, rfx = ab[idx], fab[idx] + fab[idx] = fc + ab[idx] = c + return rx, rfx + + +def _compute_divided_differences(xvals, fvals, N=None, full=True, + forward=True): + """Return a matrix of divided differences for the xvals, fvals pairs + + DD[i, j] = f[x_{i-j}, ..., x_i] for 0 <= j <= i + + If full is False, just return the main diagonal(or last row): + f[a], f[a, b] and f[a, b, c]. + If forward is False, return f[c], f[b, c], f[a, b, c].""" + if full: + if forward: + xvals = np.asarray(xvals) + else: + xvals = np.array(xvals)[::-1] + M = len(xvals) + N = M if N is None else min(N, M) + DD = np.zeros([M, N]) + DD[:, 0] = fvals[:] + for i in range(1, N): + DD[i:, i] = (np.diff(DD[i - 1:, i - 1]) / + (xvals[i:] - xvals[:M - i])) + return DD + + xvals = np.asarray(xvals) + dd = np.array(fvals) + row = np.array(fvals) + idx2Use = (0 if forward else -1) + dd[0] = fvals[idx2Use] + for i in range(1, len(xvals)): + denom = xvals[i:i + len(row) - 1] - xvals[:len(row) - 1] + row = np.diff(row)[:] / denom + dd[i] = row[idx2Use] + return dd + + +def _interpolated_poly(xvals, fvals, x): + """Compute p(x) for the polynomial passing through the specified locations. 
+ + Use Neville's algorithm to compute p(x) where p is the minimal degree + polynomial passing through the points xvals, fvals""" + xvals = np.asarray(xvals) + N = len(xvals) + Q = np.zeros([N, N]) + D = np.zeros([N, N]) + Q[:, 0] = fvals[:] + D[:, 0] = fvals[:] + for k in range(1, N): + alpha = D[k:, k - 1] - Q[k - 1:N - 1, k - 1] + diffik = xvals[0:N - k] - xvals[k:N] + Q[k:, k] = (xvals[k:] - x) / diffik * alpha + D[k:, k] = (xvals[:N - k] - x) / diffik * alpha + # Expect Q[-1, 1:] to be small relative to Q[-1, 0] as x approaches a root + return np.sum(Q[-1, 1:]) + Q[-1, 0] + + +def _inverse_poly_zero(a, b, c, d, fa, fb, fc, fd): + """Inverse cubic interpolation f-values -> x-values + + Given four points (fa, a), (fb, b), (fc, c), (fd, d) with + fa, fb, fc, fd all distinct, find poly IP(y) through the 4 points + and compute x=IP(0). + """ + return _interpolated_poly([fa, fb, fc, fd], [a, b, c, d], 0) + + +def _newton_quadratic(ab, fab, d, fd, k): + """Apply Newton-Raphson like steps, using divided differences to approximate f' + + ab is a real interval [a, b] containing a root, + fab holds the real values of f(a), f(b) + d is a real number outside [ab, b] + k is the number of steps to apply + """ + a, b = ab + fa, fb = fab + _, B, A = _compute_divided_differences([a, b, d], [fa, fb, fd], + forward=True, full=False) + + # _P is the quadratic polynomial through the 3 points + def _P(x): + # Horner evaluation of fa + B * (x - a) + A * (x - a) * (x - b) + return (A * (x - b) + B) * (x - a) + fa + + if A == 0: + r = a - fa / B + else: + r = (a if np.sign(A) * np.sign(fa) > 0 else b) + # Apply k Newton-Raphson steps to _P(x), starting from x=r + for i in range(k): + r1 = r - _P(r) / (B + A * (2 * r - a - b)) + if not (ab[0] < r1 < ab[1]): + if (ab[0] < r < ab[1]): + return r + r = sum(ab) / 2.0 + break + r = r1 + + return r + + +class TOMS748Solver: + """Solve f(x, *args) == 0 using Algorithm748 of Alefeld, Potro & Shi. + """ + _MU = 0.5 + _K_MIN = 1 + _K_MAX = 100 # A very high value for real usage. Expect 1, 2, maybe 3. 
+ + def __init__(self): + self.f = None + self.args = None + self.function_calls = 0 + self.iterations = 0 + self.k = 2 + # ab=[a,b] is a global interval containing a root + self.ab = [np.nan, np.nan] + # fab is function values at a, b + self.fab = [np.nan, np.nan] + self.d = None + self.fd = None + self.e = None + self.fe = None + self.disp = False + self.xtol = _xtol + self.rtol = _rtol + self.maxiter = _iter + + def configure(self, xtol, rtol, maxiter, disp, k): + self.disp = disp + self.xtol = xtol + self.rtol = rtol + self.maxiter = maxiter + # Silently replace a low value of k with 1 + self.k = max(k, self._K_MIN) + # Noisily replace a high value of k with self._K_MAX + if self.k > self._K_MAX: + msg = "toms748: Overriding k: ->%d" % self._K_MAX + warnings.warn(msg, RuntimeWarning, stacklevel=3) + self.k = self._K_MAX + + def _callf(self, x, error=True): + """Call the user-supplied function, update book-keeping""" + fx = self.f(x, *self.args) + self.function_calls += 1 + if not np.isfinite(fx) and error: + raise ValueError(f"Invalid function value: f({x:f}) -> {fx} ") + return fx + + def get_result(self, x, flag=_ECONVERGED): + r"""Package the result and statistics into a tuple.""" + return (x, self.function_calls, self.iterations, flag) + + def _update_bracket(self, c, fc): + return _update_bracket(self.ab, self.fab, c, fc) + + def start(self, f, a, b, args=()): + r"""Prepare for the iterations.""" + self.function_calls = 0 + self.iterations = 0 + + self.f = f + self.args = args + self.ab[:] = [a, b] + if not np.isfinite(a) or np.imag(a) != 0: + raise ValueError("Invalid x value: %s " % (a)) + if not np.isfinite(b) or np.imag(b) != 0: + raise ValueError("Invalid x value: %s " % (b)) + + fa = self._callf(a) + if not np.isfinite(fa) or np.imag(fa) != 0: + raise ValueError(f"Invalid function value: f({a:f}) -> {fa} ") + if fa == 0: + return _ECONVERGED, a + fb = self._callf(b) + if not np.isfinite(fb) or np.imag(fb) != 0: + raise ValueError(f"Invalid function value: f({b:f}) -> {fb} ") + if fb == 0: + return _ECONVERGED, b + + if np.sign(fb) * np.sign(fa) > 0: + raise ValueError("f(a) and f(b) must have different signs, but " + f"f({a:e})={fa:e}, f({b:e})={fb:e} ") + self.fab[:] = [fa, fb] + + return _EINPROGRESS, sum(self.ab) / 2.0 + + def get_status(self): + """Determine the current status.""" + a, b = self.ab[:2] + if np.isclose(a, b, rtol=self.rtol, atol=self.xtol): + return _ECONVERGED, sum(self.ab) / 2.0 + if self.iterations >= self.maxiter: + return _ECONVERR, sum(self.ab) / 2.0 + return _EINPROGRESS, sum(self.ab) / 2.0 + + def iterate(self): + """Perform one step in the algorithm. + + Implements Algorithm 4.1(k=1) or 4.2(k=2) in [APS1995] + """ + self.iterations += 1 + eps = np.finfo(float).eps + d, fd, e, fe = self.d, self.fd, self.e, self.fe + ab_width = self.ab[1] - self.ab[0] # Need the start width below + c = None + + for nsteps in range(2, self.k+2): + # If the f-values are sufficiently separated, perform an inverse + # polynomial interpolation step. Otherwise, nsteps repeats of + # an approximate Newton-Raphson step. 
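+            # Note: on the first call to iterate() (from solve()), self.e and
+            # self.fe are still None, so _notclose() evaluates to False and
+            # this sub-step falls back to the Newton-quadratic branch; the
+            # inverse cubic step requires four usable, well-separated f-values.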
+            if _notclose(self.fab + [fd, fe], rtol=0, atol=32*eps):
+                c0 = _inverse_poly_zero(self.ab[0], self.ab[1], d, e,
+                                        self.fab[0], self.fab[1], fd, fe)
+                if self.ab[0] < c0 < self.ab[1]:
+                    c = c0
+            if c is None:
+                c = _newton_quadratic(self.ab, self.fab, d, fd, nsteps)
+
+            fc = self._callf(c)
+            if fc == 0:
+                return _ECONVERGED, c
+
+            # re-bracket
+            e, fe = d, fd
+            d, fd = self._update_bracket(c, fc)
+
+        # u is the endpoint with the smallest f-value
+        uix = (0 if np.abs(self.fab[0]) < np.abs(self.fab[1]) else 1)
+        u, fu = self.ab[uix], self.fab[uix]
+
+        _, A = _compute_divided_differences(self.ab, self.fab,
+                                            forward=(uix == 0), full=False)
+        c = u - 2 * fu / A
+        if np.abs(c - u) > 0.5 * (self.ab[1] - self.ab[0]):
+            c = sum(self.ab) / 2.0
+        else:
+            if np.isclose(c, u, rtol=eps, atol=0):
+                # c didn't change (much).
+                # Either because the f-values at the endpoints have vastly
+                # differing magnitudes, or because the root is very close to
+                # that endpoint
+                frs = np.frexp(self.fab)[1]
+                if frs[uix] < frs[1 - uix] - 50:  # Differ by more than 2**50
+                    c = (31 * self.ab[uix] + self.ab[1 - uix]) / 32
+                else:
+                    # Make a bigger adjustment, about the
+                    # size of the requested tolerance.
+                    mm = (1 if uix == 0 else -1)
+                    adj = mm * np.abs(c) * self.rtol + mm * self.xtol
+                    c = u + adj
+                if not self.ab[0] < c < self.ab[1]:
+                    c = sum(self.ab) / 2.0
+
+        fc = self._callf(c)
+        if fc == 0:
+            return _ECONVERGED, c
+
+        e, fe = d, fd
+        d, fd = self._update_bracket(c, fc)
+
+        # If the width of the new interval did not decrease enough, bisect
+        if self.ab[1] - self.ab[0] > self._MU * ab_width:
+            e, fe = d, fd
+            z = sum(self.ab) / 2.0
+            fz = self._callf(z)
+            if fz == 0:
+                return _ECONVERGED, z
+            d, fd = self._update_bracket(z, fz)
+
+        # Record d and e for next iteration
+        self.d, self.fd = d, fd
+        self.e, self.fe = e, fe
+
+        status, xn = self.get_status()
+        return status, xn
+
+    def solve(self, f, a, b, args=(),
+              xtol=_xtol, rtol=_rtol, k=2, maxiter=_iter, disp=True):
+        r"""Solve f(x) = 0 given an interval containing a root."""
+        self.configure(xtol=xtol, rtol=rtol, maxiter=maxiter, disp=disp, k=k)
+        status, xn = self.start(f, a, b, args)
+        if status == _ECONVERGED:
+            return self.get_result(xn)
+
+        # The first step only has two x-values.
+        c = _secant(self.ab, self.fab)
+        if not self.ab[0] < c < self.ab[1]:
+            c = sum(self.ab) / 2.0
+        fc = self._callf(c)
+        if fc == 0:
+            return self.get_result(c)
+
+        self.d, self.fd = self._update_bracket(c, fc)
+        self.e, self.fe = None, None
+        self.iterations += 1
+
+        while True:
+            status, xn = self.iterate()
+            if status == _ECONVERGED:
+                return self.get_result(xn)
+            if status == _ECONVERR:
+                fmt = "Failed to converge after %d iterations, bracket is %s"
+                if disp:
+                    msg = fmt % (self.iterations + 1, self.ab)
+                    raise RuntimeError(msg)
+                return self.get_result(xn, _ECONVERR)
+
+
+def toms748(f, a, b, args=(), k=1,
+            xtol=_xtol, rtol=_rtol, maxiter=_iter,
+            full_output=False, disp=True):
+    """
+    Find a root using the TOMS Algorithm 748 method.
+
+    Implements the Algorithm 748 method of Alefeld, Potra and Shi to find a
+    root of the function `f` on the interval `[a, b]`, where `f(a)` and
+    `f(b)` must have opposite signs.
+
+    It uses a mixture of inverse cubic interpolation and
+    "Newton-quadratic" steps. [APS1995].
+
+    Parameters
+    ----------
+    f : function
+        Python function returning a scalar. The function :math:`f`
+        must be continuous, and :math:`f(a)` and :math:`f(b)`
+        have opposite signs.
+    a : scalar
+        lower boundary of the search interval
+    b : scalar
+        upper boundary of the search interval
+    args : tuple, optional
+        containing extra arguments for the function `f`.
+        `f` is called by ``f(x, *args)``.
+    k : int, optional
+        The number of Newton quadratic steps to perform each
+        iteration. ``k>=1``.
+    xtol : scalar, optional
+        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
+        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
+        parameter must be positive.
+    rtol : scalar, optional
+        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
+        atol=xtol, rtol=rtol)``, where ``x`` is the exact root.
+    maxiter : int, optional
+        If convergence is not achieved in `maxiter` iterations, an error is
+        raised. Must be >= 0.
+    full_output : bool, optional
+        If `full_output` is False, the root is returned. If `full_output` is
+        True, the return value is ``(x, r)``, where `x` is the root, and `r` is
+        a `RootResults` object.
+    disp : bool, optional
+        If True, raise RuntimeError if the algorithm didn't converge.
+        Otherwise, the convergence status is recorded in the `RootResults`
+        return object.
+
+    Returns
+    -------
+    root : float
+        Approximate root of `f`
+    r : `RootResults` (present if ``full_output = True``)
+        Object containing information about the convergence. In particular,
+        ``r.converged`` is True if the routine converged.
+
+    See Also
+    --------
+    brentq, brenth, ridder, bisect, newton
+    fsolve : find roots in N dimensions.
+
+    Notes
+    -----
+    `f` must be continuous.
+    Algorithm 748 with ``k=2`` is asymptotically the most efficient
+    algorithm known for finding roots of a four times continuously
+    differentiable function.
+    In contrast with Brent's algorithm, which may only decrease the length of
+    the enclosing bracket on the last step, Algorithm 748 decreases it each
+    iteration with the same asymptotic efficiency as it finds the root.
+
+    For easy statement of efficiency indices, assume that `f` has 4
+    continuous derivatives.
+    For ``k=1``, the convergence order is at least 2.7, and with about
+    asymptotically 2 function evaluations per iteration, the efficiency
+    index is approximately 1.65.
+    For ``k=2``, the order is about 4.6 with asymptotically 3 function
+    evaluations per iteration, and the efficiency index 1.66.
+    For higher values of `k`, the efficiency index approaches
+    the kth root of ``(3k-2)``, hence ``k=1`` or ``k=2`` are
+    usually appropriate.
+
+    References
+    ----------
+    .. [APS1995]
+       Alefeld, G. E. and Potra, F. A. and Shi, Yixun,
+       *Algorithm 748: Enclosing Zeros of Continuous Functions*,
+       ACM Trans. Math. Softw. Volume 21, Issue 3 (1995),
+       doi = {10.1145/210089.210111}
+
+    Examples
+    --------
+    >>> def f(x):
+    ...
return (x**3 - 1) # only one real root at x = 1 + + >>> from scipy import optimize + >>> root, results = optimize.toms748(f, 0, 2, full_output=True) + >>> root + 1.0 + >>> results + converged: True + flag: converged + function_calls: 11 + iterations: 5 + root: 1.0 + method: toms748 + """ + if xtol <= 0: + raise ValueError("xtol too small (%g <= 0)" % xtol) + if rtol < _rtol / 4: + raise ValueError(f"rtol too small ({rtol:g} < {_rtol/4:g})") + maxiter = operator.index(maxiter) + if maxiter < 1: + raise ValueError("maxiter must be greater than 0") + if not np.isfinite(a): + raise ValueError("a is not finite %s" % a) + if not np.isfinite(b): + raise ValueError("b is not finite %s" % b) + if a >= b: + raise ValueError(f"a and b are not an interval [{a}, {b}]") + if not k >= 1: + raise ValueError("k too small (%s < 1)" % k) + + if not isinstance(args, tuple): + args = (args,) + f = _wrap_nan_raise(f) + solver = TOMS748Solver() + result = solver.solve(f, a, b, args=args, k=k, xtol=xtol, rtol=rtol, + maxiter=maxiter, disp=disp) + x, function_calls, iterations, flag = result + return _results_select(full_output, (x, function_calls, iterations, flag), + "toms748") diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/cobyla.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/cobyla.py new file mode 100644 index 0000000000000000000000000000000000000000..10e2b6a101a386f318a39e00ab2a46eae5219066 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/cobyla.py @@ -0,0 +1,23 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.optimize` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'OptimizeResult', + 'RLock', + 'fmin_cobyla', + 'functools', + 'izip', + 'synchronized', +] + +def __dir__(): + return __all__ + +def __getattr__(name): + return _sub_module_deprecation(sub_package="optimize", module="cobyla", + private_modules=["_cobyla_py"], all=__all__, + attribute=name) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/cython_optimize.pxd b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/cython_optimize.pxd new file mode 100644 index 0000000000000000000000000000000000000000..d35f8da68b34d3a587f3a99326770d8550a2135c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/cython_optimize.pxd @@ -0,0 +1,11 @@ +# Public Cython API declarations +# +# See doc/source/dev/contributor/public_cython_api.rst for guidelines + + +# The following cimport statement provides legacy ABI +# support. Changing it causes an ABI forward-compatibility break +# (gh-11793), so we currently leave it as is (no further cimport +# statements should be used in this file). +from scipy.optimize.cython_optimize._zeros cimport ( + brentq, brenth, ridder, bisect, zeros_full_output) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/lbfgsb.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/lbfgsb.py new file mode 100644 index 0000000000000000000000000000000000000000..75b395d27396d22c8cbf50a229e5f04c40237171 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/lbfgsb.py @@ -0,0 +1,29 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.optimize` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'LbfgsInvHessProduct', + 'LinearOperator', + 'MemoizeJac', + 'OptimizeResult', + 'array', + 'asarray', + 'float64', + 'fmin_l_bfgs_b', + 'old_bound_to_new', + 'zeros', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="optimize", module="lbfgsb", + private_modules=["_lbfgsb_py"], all=__all__, + attribute=name) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/linesearch.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/linesearch.py new file mode 100644 index 0000000000000000000000000000000000000000..0d1a04d83ba603cf9162e2cae1a6b8853a538c5f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/linesearch.py @@ -0,0 +1,30 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.optimize` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'LineSearchWarning', + 'line_search', + 'line_search_BFGS', + 'line_search_armijo', + 'line_search_wolfe1', + 'line_search_wolfe2', + 'minpack2', + 'scalar_search_armijo', + 'scalar_search_wolfe1', + 'scalar_search_wolfe2', + 'warn', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="optimize", module="linesearch", + private_modules=["_linesearch"], all=__all__, + attribute=name) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/minpack.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/minpack.py new file mode 100644 index 0000000000000000000000000000000000000000..b815dec171af576a84cb5b334d118fda7b576210 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/minpack.py @@ -0,0 +1,52 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.optimize` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'LEASTSQ_FAILURE', + 'LEASTSQ_SUCCESS', + 'LinAlgError', + 'OptimizeResult', + 'OptimizeWarning', + 'asarray', + 'atleast_1d', + 'check_gradient', + 'cholesky', + 'curve_fit', + 'dot', + 'dtype', + 'error', + 'eye', + 'finfo', + 'fixed_point', + 'fsolve', + 'greater', + 'inexact', + 'inf', + 'inv', + 'issubdtype', + 'least_squares', + 'leastsq', + 'prepare_bounds', + 'prod', + 'shape', + 'solve_triangular', + 'svd', + 'take', + 'transpose', + 'triu', + 'zeros', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="optimize", module="minpack", + private_modules=["_minpack_py"], all=__all__, + attribute=name) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/minpack2.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/minpack2.py new file mode 100644 index 0000000000000000000000000000000000000000..6e961f42403a43c39fbf1670827ecd648b8b3987 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/minpack2.py @@ -0,0 +1,21 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.optimize` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'dcsrch', + 'dcstep', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="optimize", module="minpack2", + private_modules=["_minpack2"], all=__all__, + attribute=name) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/moduleTNC.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/moduleTNC.py new file mode 100644 index 0000000000000000000000000000000000000000..3fc5884ed5c39437b7681395419d641443a1fdb8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/moduleTNC.py @@ -0,0 +1,19 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.optimize` namespace for importing the functions +# included below. + + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="optimize", module="moduleTNC", + private_modules=["_moduleTNC"], all=__all__, + attribute=name) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/nonlin.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/nonlin.py new file mode 100644 index 0000000000000000000000000000000000000000..38c43c3d848e9f142bae2e7f55ace4482569998e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/nonlin.py @@ -0,0 +1,57 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.optimize` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'Anderson', + 'BroydenFirst', + 'BroydenSecond', + 'DiagBroyden', + 'ExcitingMixing', + 'GenericBroyden', + 'InverseJacobian', + 'Jacobian', + 'KrylovJacobian', + 'LinAlgError', + 'LinearMixing', + 'LowRankMatrix', + 'NoConvergence', + 'TerminationCondition', + 'anderson', + 'asarray', + 'asjacobian', + 'broyden1', + 'broyden2', + 'diagbroyden', + 'dot', + 'excitingmixing', + 'get_blas_funcs', + 'inspect', + 'inv', + 'linearmixing', + 'maxnorm', + 'newton_krylov', + 'nonlin_solve', + 'norm', + 'qr', + 'scalar_search_armijo', + 'scalar_search_wolfe1', + 'scipy', + 'solve', + 'svd', + 'sys', + 'vdot', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="optimize", module="nonlin", + private_modules=["_nonlin"], all=__all__, + attribute=name) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/optimize.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/optimize.py new file mode 100644 index 0000000000000000000000000000000000000000..81e78d097a752e4ab4db404ed2741495c395981c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/optimize.py @@ -0,0 +1,60 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.optimize` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'Brent', + 'FD_METHODS', + 'LineSearchWarning', + 'MapWrapper', + 'MemoizeJac', + 'OptimizeResult', + 'OptimizeWarning', + 'ScalarFunction', + 'approx_derivative', + 'approx_fhess_p', + 'approx_fprime', + 'argmin', + 'asarray', + 'atleast_1d', + 'bracket', + 'brent', + 'brute', + 'check_grad', + 'check_random_state', + 'eye', + 'fmin', + 'fmin_bfgs', + 'fmin_cg', + 'fmin_ncg', + 'fmin_powell', + 'fminbound', + 'golden', + 'line_search', + 'line_search_wolfe1', + 'line_search_wolfe2', + 'rosen', + 'rosen_der', + 'rosen_hess', + 'rosen_hess_prod', + 'shape', + 'show_options', + 'sqrt', + 'squeeze', + 'sys', + 'vecnorm', + 'zeros', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="optimize", module="optimize", + private_modules=["_optimize"], all=__all__, + attribute=name) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/slsqp.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/slsqp.py new file mode 100644 index 0000000000000000000000000000000000000000..c225c3cbef7e5bdb733fc9aef878507e957b2a9d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/slsqp.py @@ -0,0 +1,37 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.optimize` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'OptimizeResult', + 'append', + 'approx_derivative', + 'approx_jacobian', + 'array', + 'atleast_1d', + 'concatenate', + 'exp', + 'finfo', + 'fmin_slsqp', + 'inf', + 'isfinite', + 'linalg', + 'old_bound_to_new', + 'slsqp', + 'sqrt', + 'vstack', + 'zeros', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="optimize", module="slsqp", + private_modules=["_slsqp_py"], all=__all__, + attribute=name) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tnc.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tnc.py new file mode 100644 index 0000000000000000000000000000000000000000..92ff24432c681517aa82aa4ffed5e6c1eee10002 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tnc.py @@ -0,0 +1,44 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.optimize` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'CONSTANT', + 'FCONVERGED', + 'INFEASIBLE', + 'LOCALMINIMUM', + 'LSFAIL', + 'MAXFUN', + 'MSGS', + 'MSG_ALL', + 'MSG_EXIT', + 'MSG_INFO', + 'MSG_ITER', + 'MSG_NONE', + 'MSG_VERS', + 'MemoizeJac', + 'NOPROGRESS', + 'OptimizeResult', + 'RCSTRINGS', + 'USERABORT', + 'XCONVERGED', + 'array', + 'fmin_tnc', + 'inf', + 'moduleTNC', + 'old_bound_to_new', + 'zeros', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="optimize", module="tnc", + private_modules=["_tnc"], all=__all__, + attribute=name) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/zeros.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/zeros.py new file mode 100644 index 0000000000000000000000000000000000000000..0b8fc89eb5f9a93109f3fad31bc887df85998bae --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/zeros.py @@ -0,0 +1,36 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.optimize` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'CONVERGED', + 'CONVERR', + 'INPROGRESS', + 'RootResults', + 'SIGNERR', + 'TOMS748Solver', + 'VALUEERR', + 'bisect', + 'brenth', + 'brentq', + 'flag_map', + 'namedtuple', + 'newton', + 'operator', + 'results_c', + 'ridder', + 'toms748', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="optimize", module="zeros", + private_modules=["_zeros_py"], all=__all__, + attribute=name)
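A minimal usage sketch (not part of the vendored files above; it assumes a standard SciPy installation where these modules are importable): the deprecation shims only re-export names that now live in the public `scipy.optimize` namespace, so the TOMS 748 solver added here is meant to be driven through that namespace, while the legacy `scipy.optimize.zeros` path keeps working only via `_sub_module_deprecation`'s module-level `__getattr__`.

import warnings

from scipy import optimize


def f(x):
    return x**3 - 1.0  # single real root at x = 1.0


# Preferred: call the public entry point re-exported by scipy.optimize.
root, results = optimize.toms748(f, 0.0, 2.0, k=2, full_output=True)
print(root, results.converged)

# Legacy: the deprecated scipy.optimize.zeros module still resolves names
# through the shim's __getattr__, but warns that it will be removed in
# SciPy v2.0.0 (warning category assumed to be a DeprecationWarning).
with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    from scipy.optimize import zeros
    legacy_root = zeros.toms748(f, 0.0, 2.0)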