diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9fbb4e37d4a3ce1a2650eb4980a9c420fc3ec864 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/canonical_constraint.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/canonical_constraint.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..18811735c0cd2a4a3e2b1b9546d96ab65b4c0cb6 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/canonical_constraint.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/equality_constrained_sqp.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/equality_constrained_sqp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e37dc6a7db8ca02d86e9c0b412bc968258dd347 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/equality_constrained_sqp.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/minimize_trustregion_constr.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/minimize_trustregion_constr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..984bbf4a171b2656331fea43635603aed9bdbada Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/minimize_trustregion_constr.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/projections.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/projections.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e714e6b048284760264c6ff3f8f1900fea66bf09 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/projections.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/qp_subproblem.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/qp_subproblem.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7da66ee0ab5bddf26319c0042598deb403bec54a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/qp_subproblem.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/report.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/report.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2553eec6bc3af7c15be53c4ceaa07a64adea5980 Binary files /dev/null and 
b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/report.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/tr_interior_point.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/tr_interior_point.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2984cfe04dbbb9bde82bbeb514631cfd65bb3bfd Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/tr_interior_point.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__init__.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f4337aa445eb913dbde073b69cacc824f1abb9c2 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__pycache__/test_canonical_constraint.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__pycache__/test_canonical_constraint.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0443ce5f38bcdf8ba68dae15f8151e033e69ded2 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__pycache__/test_canonical_constraint.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__pycache__/test_projections.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__pycache__/test_projections.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2621e35cf333a99b2359650f6619fe6f2ddc55ea Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__pycache__/test_projections.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__pycache__/test_qp_subproblem.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__pycache__/test_qp_subproblem.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dda9e84d33cebe6a1db3af79cfe0cebf6e7cf03e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__pycache__/test_qp_subproblem.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__pycache__/test_report.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__pycache__/test_report.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..231f15bdeb8d1ee7a10f50f53879d32b821e339e Binary files /dev/null and 
b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__pycache__/test_report.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_projections.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_projections.py new file mode 100644 index 0000000000000000000000000000000000000000..6ff3c39d649d0ac663d9b71bb906f1daac021118 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_projections.py @@ -0,0 +1,214 @@ +import numpy as np +import scipy.linalg +from scipy.sparse import csc_matrix +from scipy.optimize._trustregion_constr.projections \ + import projections, orthogonality +from numpy.testing import (TestCase, assert_array_almost_equal, + assert_equal, assert_allclose) + +try: + from sksparse.cholmod import cholesky_AAt # noqa: F401 + sksparse_available = True + available_sparse_methods = ("NormalEquation", "AugmentedSystem") +except ImportError: + sksparse_available = False + available_sparse_methods = ("AugmentedSystem",) +available_dense_methods = ('QRFactorization', 'SVDFactorization') + + +class TestProjections(TestCase): + + def test_nullspace_and_least_squares_sparse(self): + A_dense = np.array([[1, 2, 3, 4, 0, 5, 0, 7], + [0, 8, 7, 0, 1, 5, 9, 0], + [1, 0, 0, 0, 0, 1, 2, 3]]) + At_dense = A_dense.T + A = csc_matrix(A_dense) + test_points = ([1, 2, 3, 4, 5, 6, 7, 8], + [1, 10, 3, 0, 1, 6, 7, 8], + [1.12, 10, 0, 0, 100000, 6, 0.7, 8]) + + for method in available_sparse_methods: + Z, LS, _ = projections(A, method) + for z in test_points: + # Test if x is in the null space + x = Z.matvec(z) + assert_array_almost_equal(A.dot(x), 0) + # Test orthogonality + assert_array_almost_equal(orthogonality(A, x), 0) + # Test if x is the least-squares solution + x = LS.matvec(z) + x2 = scipy.linalg.lstsq(At_dense, z)[0] + assert_array_almost_equal(x, x2) + + def test_iterative_refinements_sparse(self): + A_dense = np.array([[1, 2, 3, 4, 0, 5, 0, 7], + [0, 8, 7, 0, 1, 5, 9, 0], + [1, 0, 0, 0, 0, 1, 2, 3]]) + A = csc_matrix(A_dense) + test_points = ([1, 2, 3, 4, 5, 6, 7, 8], + [1, 10, 3, 0, 1, 6, 7, 8], + [1.12, 10, 0, 0, 100000, 6, 0.7, 8], + [1, 0, 0, 0, 0, 1, 2, 3+1e-10]) + + for method in available_sparse_methods: + Z, LS, _ = projections(A, method, orth_tol=1e-18, max_refin=100) + for z in test_points: + # Test if x is in the null space + x = Z.matvec(z) + atol = 1e-13 * abs(x).max() + assert_allclose(A.dot(x), 0, atol=atol) + # Test orthogonality + assert_allclose(orthogonality(A, x), 0, atol=1e-13) + + def test_rowspace_sparse(self): + A_dense = np.array([[1, 2, 3, 4, 0, 5, 0, 7], + [0, 8, 7, 0, 1, 5, 9, 0], + [1, 0, 0, 0, 0, 1, 2, 3]]) + A = csc_matrix(A_dense) + test_points = ([1, 2, 3], + [1, 10, 3], + [1.12, 10, 0]) + + for method in available_sparse_methods: + _, _, Y = projections(A, method) + for z in test_points: + # Test if x is a solution of A x = z + x = Y.matvec(z) + assert_array_almost_equal(A.dot(x), z) + # Test if x is in the row space of A + A_ext = np.vstack((A_dense, x)) + assert_equal(np.linalg.matrix_rank(A_dense), + np.linalg.matrix_rank(A_ext)) + + def test_nullspace_and_least_squares_dense(self): + A = np.array([[1, 2, 3, 4, 0, 5, 0, 7], + [0, 8, 7, 0, 1, 5, 9, 0], + [1, 0, 0, 0, 0, 1, 2, 3]]) + At = A.T + test_points = ([1, 2, 3, 4, 5, 6, 7, 8], + [1, 10, 3, 0, 1, 6, 7, 8], + [1.12, 10, 0, 0, 100000, 6, 0.7, 8]) + + for method in available_dense_methods: + Z, LS, _ =
projections(A, method) + for z in test_points: + # Test if x is in the null space + x = Z.matvec(z) + assert_array_almost_equal(A.dot(x), 0) + # Test orthogonality + assert_array_almost_equal(orthogonality(A, x), 0) + # Test if x is the least-squares solution + x = LS.matvec(z) + x2 = scipy.linalg.lstsq(At, z)[0] + assert_array_almost_equal(x, x2) + + def test_compare_dense_and_sparse(self): + D = np.diag(range(1, 101)) + A = np.hstack([D, D, D, D]) + A_sparse = csc_matrix(A) + np.random.seed(0) + + Z, LS, Y = projections(A) + Z_sparse, LS_sparse, Y_sparse = projections(A_sparse) + for k in range(20): + z = np.random.normal(size=(400,)) + assert_array_almost_equal(Z.dot(z), Z_sparse.dot(z)) + assert_array_almost_equal(LS.dot(z), LS_sparse.dot(z)) + x = np.random.normal(size=(100,)) + assert_array_almost_equal(Y.dot(x), Y_sparse.dot(x)) + + def test_compare_dense_and_sparse2(self): + D1 = np.diag([-1.7, 1, 0.5]) + D2 = np.diag([1, -0.6, -0.3]) + D3 = np.diag([-0.3, -1.5, 2]) + A = np.hstack([D1, D2, D3]) + A_sparse = csc_matrix(A) + np.random.seed(0) + + Z, LS, Y = projections(A) + Z_sparse, LS_sparse, Y_sparse = projections(A_sparse) + for k in range(1): + z = np.random.normal(size=(9,)) + assert_array_almost_equal(Z.dot(z), Z_sparse.dot(z)) + assert_array_almost_equal(LS.dot(z), LS_sparse.dot(z)) + x = np.random.normal(size=(3,)) + assert_array_almost_equal(Y.dot(x), Y_sparse.dot(x)) + + def test_iterative_refinements_dense(self): + A = np.array([[1, 2, 3, 4, 0, 5, 0, 7], + [0, 8, 7, 0, 1, 5, 9, 0], + [1, 0, 0, 0, 0, 1, 2, 3]]) + test_points = ([1, 2, 3, 4, 5, 6, 7, 8], + [1, 10, 3, 0, 1, 6, 7, 8], + [1, 0, 0, 0, 0, 1, 2, 3+1e-10]) + + for method in available_dense_methods: + Z, LS, _ = projections(A, method, orth_tol=1e-18, max_refin=10) + for z in test_points: + # Test if x is in the null space + x = Z.matvec(z) + assert_allclose(A.dot(x), 0, rtol=0, atol=2.5e-14) + # Test orthogonality + assert_allclose(orthogonality(A, x), 0, rtol=0, atol=5e-16) + + def test_rowspace_dense(self): + A = np.array([[1, 2, 3, 4, 0, 5, 0, 7], + [0, 8, 7, 0, 1, 5, 9, 0], + [1, 0, 0, 0, 0, 1, 2, 3]]) + test_points = ([1, 2, 3], + [1, 10, 3], + [1.12, 10, 0]) + + for method in available_dense_methods: + _, _, Y = projections(A, method) + for z in test_points: + # Test if x is a solution of A x = z + x = Y.matvec(z) + assert_array_almost_equal(A.dot(x), z) + # Test if x is in the row space of A + A_ext = np.vstack((A, x)) + assert_equal(np.linalg.matrix_rank(A), + np.linalg.matrix_rank(A_ext)) + + +class TestOrthogonality(TestCase): + + def test_dense_matrix(self): + A = np.array([[1, 2, 3, 4, 0, 5, 0, 7], + [0, 8, 7, 0, 1, 5, 9, 0], + [1, 0, 0, 0, 0, 1, 2, 3]]) + test_vectors = ([-1.98931144, -1.56363389, + -0.84115584, 2.2864762, + 5.599141, 0.09286976, + 1.37040802, -0.28145812], + [697.92794044, -4091.65114008, + -3327.42316335, 836.86906951, + 99434.98929065, -1285.37653682, + -4109.21503806, 2935.29289083]) + test_expected_orth = (0, 0) + + for i in range(len(test_vectors)): + x = test_vectors[i] + orth = test_expected_orth[i] + assert_array_almost_equal(orthogonality(A, x), orth) + + def test_sparse_matrix(self): + A = np.array([[1, 2, 3, 4, 0, 5, 0, 7], + [0, 8, 7, 0, 1, 5, 9, 0], + [1, 0, 0, 0, 0, 1, 2, 3]]) + A = csc_matrix(A) + test_vectors = ([-1.98931144, -1.56363389, + -0.84115584, 2.2864762, + 5.599141, 0.09286976, + 1.37040802, -0.28145812], + [697.92794044, -4091.65114008, + -3327.42316335, 836.86906951, + 99434.98929065, -1285.37653682, + -4109.21503806, 2935.29289083]) +
test_expected_orth = (0, 0) + + for i in range(len(test_vectors)): + x = test_vectors[i] + orth = test_expected_orth[i] + assert_array_almost_equal(orthogonality(A, x), orth) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_qp_subproblem.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_qp_subproblem.py new file mode 100644 index 0000000000000000000000000000000000000000..e0235caace6c19563efc31fdf4b8e41d9d81819b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_qp_subproblem.py @@ -0,0 +1,645 @@ +import numpy as np +from scipy.sparse import csc_matrix +from scipy.optimize._trustregion_constr.qp_subproblem \ + import (eqp_kktfact, + projected_cg, + box_intersections, + sphere_intersections, + box_sphere_intersections, + modified_dogleg) +from scipy.optimize._trustregion_constr.projections \ + import projections +from numpy.testing import TestCase, assert_array_almost_equal, assert_equal +import pytest + + +class TestEQPDirectFactorization(TestCase): + + # From Example 16.2 Nocedal/Wright "Numerical + # Optimization" p.452. + def test_nocedal_example(self): + H = csc_matrix([[6, 2, 1], + [2, 5, 2], + [1, 2, 4]]) + A = csc_matrix([[1, 0, 1], + [0, 1, 1]]) + c = np.array([-8, -3, -3]) + b = -np.array([3, 0]) + x, lagrange_multipliers = eqp_kktfact(H, c, A, b) + assert_array_almost_equal(x, [2, -1, 1]) + assert_array_almost_equal(lagrange_multipliers, [3, -2]) + + +class TestSphericalBoundariesIntersections(TestCase): + + def test_2d_sphere_constraints(self): + # Interior initial point + ta, tb, intersect = sphere_intersections([0, 0], + [1, 0], 0.5) + assert_array_almost_equal([ta, tb], [0, 0.5]) + assert_equal(intersect, True) + + # No intersection between line and circle + ta, tb, intersect = sphere_intersections([2, 0], + [0, 1], 1) + assert_equal(intersect, False) + + # Outside initial point pointing toward outside the circle + ta, tb, intersect = sphere_intersections([2, 0], + [1, 0], 1) + assert_equal(intersect, False) + + # Outside initial point pointing toward inside the circle + ta, tb, intersect = sphere_intersections([2, 0], + [-1, 0], 1.5) + assert_array_almost_equal([ta, tb], [0.5, 1]) + assert_equal(intersect, True) + + # Initial point on the boundary + ta, tb, intersect = sphere_intersections([2, 0], + [1, 0], 2) + assert_array_almost_equal([ta, tb], [0, 0]) + assert_equal(intersect, True) + + def test_2d_sphere_constraints_line_intersections(self): + # Interior initial point + ta, tb, intersect = sphere_intersections([0, 0], + [1, 0], 0.5, + entire_line=True) + assert_array_almost_equal([ta, tb], [-0.5, 0.5]) + assert_equal(intersect, True) + + # No intersection between line and circle + ta, tb, intersect = sphere_intersections([2, 0], + [0, 1], 1, + entire_line=True) + assert_equal(intersect, False) + + # Outside initial point pointing toward outside the circle + ta, tb, intersect = sphere_intersections([2, 0], + [1, 0], 1, + entire_line=True) + assert_array_almost_equal([ta, tb], [-3, -1]) + assert_equal(intersect, True) + + # Outside initial point pointing toward inside the circle + ta, tb, intersect = sphere_intersections([2, 0], + [-1, 0], 1.5, + entire_line=True) + assert_array_almost_equal([ta, tb], [0.5, 3.5]) + assert_equal(intersect, True) + + # Initial point on the boundary + ta, tb, intersect = sphere_intersections([2, 0], + [1, 0], 2, + entire_line=True) + assert_array_almost_equal([ta, tb], [-4, 0]) +
assert_equal(intersect, True) + + +class TestBoxBoundariesIntersections(TestCase): + + def test_2d_box_constraints(self): + # Box constraint in the direction of vector d + ta, tb, intersect = box_intersections([2, 0], [0, 2], + [1, 1], [3, 3]) + assert_array_almost_equal([ta, tb], [0.5, 1]) + assert_equal(intersect, True) + + # Negative direction + ta, tb, intersect = box_intersections([2, 0], [0, 2], + [1, -3], [3, -1]) + assert_equal(intersect, False) + + # Some constraints are absent (set to +/- inf) + ta, tb, intersect = box_intersections([2, 0], [0, 2], + [-np.inf, 1], + [np.inf, np.inf]) + assert_array_almost_equal([ta, tb], [0.5, 1]) + assert_equal(intersect, True) + + # Intersect on the face of the box + ta, tb, intersect = box_intersections([1, 0], [0, 1], + [1, 1], [3, 3]) + assert_array_almost_equal([ta, tb], [1, 1]) + assert_equal(intersect, True) + + # Interior initial point + ta, tb, intersect = box_intersections([0, 0], [4, 4], + [-2, -3], [3, 2]) + assert_array_almost_equal([ta, tb], [0, 0.5]) + assert_equal(intersect, True) + + # No intersection between line and box constraints + ta, tb, intersect = box_intersections([2, 0], [0, 2], + [-3, -3], [-1, -1]) + assert_equal(intersect, False) + ta, tb, intersect = box_intersections([2, 0], [0, 2], + [-3, 3], [-1, 1]) + assert_equal(intersect, False) + ta, tb, intersect = box_intersections([2, 0], [0, 2], + [-3, -np.inf], + [-1, np.inf]) + assert_equal(intersect, False) + ta, tb, intersect = box_intersections([0, 0], [1, 100], + [1, 1], [3, 3]) + assert_equal(intersect, False) + ta, tb, intersect = box_intersections([0.99, 0], [0, 2], + [1, 1], [3, 3]) + assert_equal(intersect, False) + + # Initial point on the boundary + ta, tb, intersect = box_intersections([2, 2], [0, 1], + [-2, -2], [2, 2]) + assert_array_almost_equal([ta, tb], [0, 0]) + assert_equal(intersect, True) + + def test_2d_box_constraints_entire_line(self): + # Box constraint in the direction of vector d + ta, tb, intersect = box_intersections([2, 0], [0, 2], + [1, 1], [3, 3], + entire_line=True) + assert_array_almost_equal([ta, tb], [0.5, 1.5]) + assert_equal(intersect, True) + + # Negative direction + ta, tb, intersect = box_intersections([2, 0], [0, 2], + [1, -3], [3, -1], + entire_line=True) + assert_array_almost_equal([ta, tb], [-1.5, -0.5]) + assert_equal(intersect, True) + + # Some constraints are absent (set to +/- inf) + ta, tb, intersect = box_intersections([2, 0], [0, 2], + [-np.inf, 1], + [np.inf, np.inf], + entire_line=True) + assert_array_almost_equal([ta, tb], [0.5, np.inf]) + assert_equal(intersect, True) + + # Intersect on the face of the box + ta, tb, intersect = box_intersections([1, 0], [0, 1], + [1, 1], [3, 3], + entire_line=True) + assert_array_almost_equal([ta, tb], [1, 3]) + assert_equal(intersect, True) + + # Interior initial point + ta, tb, intersect = box_intersections([0, 0], [4, 4], + [-2, -3], [3, 2], + entire_line=True) + assert_array_almost_equal([ta, tb], [-0.5, 0.5]) + assert_equal(intersect, True) + + # No intersection between line and box constraints + ta, tb, intersect = box_intersections([2, 0], [0, 2], + [-3, -3], [-1, -1], + entire_line=True) + assert_equal(intersect, False) + ta, tb, intersect = box_intersections([2, 0], [0, 2], + [-3, 3], [-1, 1], + entire_line=True) + assert_equal(intersect, False) + ta, tb, intersect = box_intersections([2, 0], [0, 2], + [-3, -np.inf], + [-1, np.inf], + entire_line=True) + assert_equal(intersect, False) + ta, tb, intersect = box_intersections([0, 0], [1, 100], + [1, 1], [3, 3], +
entire_line=True) + assert_equal(intersect, False) + ta, tb, intersect = box_intersections([0.99, 0], [0, 2], + [1, 1], [3, 3], + entire_line=True) + assert_equal(intersect, False) + + # Initial point on the boundary + ta, tb, intersect = box_intersections([2, 2], [0, 1], + [-2, -2], [2, 2], + entire_line=True) + assert_array_almost_equal([ta, tb], [-4, 0]) + assert_equal(intersect, True) + + def test_3d_box_constraints(self): + # Simple case + ta, tb, intersect = box_intersections([1, 1, 0], [0, 0, 1], + [1, 1, 1], [3, 3, 3]) + assert_array_almost_equal([ta, tb], [1, 1]) + assert_equal(intersect, True) + + # Negative direction + ta, tb, intersect = box_intersections([1, 1, 0], [0, 0, -1], + [1, 1, 1], [3, 3, 3]) + assert_equal(intersect, False) + + # Interior point + ta, tb, intersect = box_intersections([2, 2, 2], [0, -1, 1], + [1, 1, 1], [3, 3, 3]) + assert_array_almost_equal([ta, tb], [0, 1]) + assert_equal(intersect, True) + + def test_3d_box_constraints_entire_line(self): + # Simple case + ta, tb, intersect = box_intersections([1, 1, 0], [0, 0, 1], + [1, 1, 1], [3, 3, 3], + entire_line=True) + assert_array_almost_equal([ta, tb], [1, 3]) + assert_equal(intersect, True) + + # Negative direction + ta, tb, intersect = box_intersections([1, 1, 0], [0, 0, -1], + [1, 1, 1], [3, 3, 3], + entire_line=True) + assert_array_almost_equal([ta, tb], [-3, -1]) + assert_equal(intersect, True) + + # Interior point + ta, tb, intersect = box_intersections([2, 2, 2], [0, -1, 1], + [1, 1, 1], [3, 3, 3], + entire_line=True) + assert_array_almost_equal([ta, tb], [-1, 1]) + assert_equal(intersect, True) + + +class TestBoxSphereBoundariesIntersections(TestCase): + + def test_2d_box_constraints(self): + # Both constraints are active + ta, tb, intersect = box_sphere_intersections([1, 1], [-2, 2], + [-1, -2], [1, 2], 2, + entire_line=False) + assert_array_almost_equal([ta, tb], [0, 0.5]) + assert_equal(intersect, True) + + # None of the constraints are active + ta, tb, intersect = box_sphere_intersections([1, 1], [-1, 1], + [-1, -3], [1, 3], 10, + entire_line=False) + assert_array_almost_equal([ta, tb], [0, 1]) + assert_equal(intersect, True) + + # Box constraints are active + ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4], + [-1, -3], [1, 3], 10, + entire_line=False) + assert_array_almost_equal([ta, tb], [0, 0.5]) + assert_equal(intersect, True) + + # Spherical constraints are active + ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4], + [-1, -3], [1, 3], 2, + entire_line=False) + assert_array_almost_equal([ta, tb], [0, 0.25]) + assert_equal(intersect, True) + + # Infeasible problems + ta, tb, intersect = box_sphere_intersections([2, 2], [-4, 4], + [-1, -3], [1, 3], 2, + entire_line=False) + assert_equal(intersect, False) + ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4], + [2, 4], [2, 4], 2, + entire_line=False) + assert_equal(intersect, False) + + def test_2d_box_constraints_entire_line(self): + # Both constraints are active + ta, tb, intersect = box_sphere_intersections([1, 1], [-2, 2], + [-1, -2], [1, 2], 2, + entire_line=True) + assert_array_almost_equal([ta, tb], [0, 0.5]) + assert_equal(intersect, True) + + # None of the constraints are active + ta, tb, intersect = box_sphere_intersections([1, 1], [-1, 1], + [-1, -3], [1, 3], 10, + entire_line=True) + assert_array_almost_equal([ta, tb], [0, 2]) + assert_equal(intersect, True) + + # Box constraints are active + ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4], + [-1, -3], [1, 3], 10, + entire_line=True) + 
assert_array_almost_equal([ta, tb], [0, 0.5]) + assert_equal(intersect, True) + + # Spherical constraints are active + ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4], + [-1, -3], [1, 3], 2, + entire_line=True) + assert_array_almost_equal([ta, tb], [0, 0.25]) + assert_equal(intersect, True) + + # Infeasible problems + ta, tb, intersect = box_sphere_intersections([2, 2], [-4, 4], + [-1, -3], [1, 3], 2, + entire_line=True) + assert_equal(intersect, False) + ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4], + [2, 4], [2, 4], 2, + entire_line=True) + assert_equal(intersect, False) + + +class TestModifiedDogleg(TestCase): + + def test_cauchypoint_equalsto_newtonpoint(self): + A = np.array([[1, 8]]) + b = np.array([-16]) + _, _, Y = projections(A) + newton_point = np.array([0.24615385, 1.96923077]) + + # Newton point inside boundaries + x = modified_dogleg(A, Y, b, 2, [-np.inf, -np.inf], [np.inf, np.inf]) + assert_array_almost_equal(x, newton_point) + + # Spherical constraint active + x = modified_dogleg(A, Y, b, 1, [-np.inf, -np.inf], [np.inf, np.inf]) + assert_array_almost_equal(x, newton_point/np.linalg.norm(newton_point)) + + # Box constraints active + x = modified_dogleg(A, Y, b, 2, [-np.inf, -np.inf], [0.1, np.inf]) + assert_array_almost_equal(x, (newton_point/newton_point[0]) * 0.1) + + def test_3d_example(self): + A = np.array([[1, 8, 1], + [4, 2, 2]]) + b = np.array([-16, 2]) + Z, LS, Y = projections(A) + + newton_point = np.array([-1.37090909, 2.23272727, -0.49090909]) + cauchy_point = np.array([0.11165723, 1.73068711, 0.16748585]) + origin = np.zeros_like(newton_point) + + # newton_point inside boundaries + x = modified_dogleg(A, Y, b, 3, [-np.inf, -np.inf, -np.inf], + [np.inf, np.inf, np.inf]) + assert_array_almost_equal(x, newton_point) + + # line between cauchy_point and newton_point contains best point + # (spherical constraint is active). + x = modified_dogleg(A, Y, b, 2, [-np.inf, -np.inf, -np.inf], + [np.inf, np.inf, np.inf]) + z = cauchy_point + d = newton_point-cauchy_point + t = ((x-z)/(d)) + assert_array_almost_equal(t, np.full(3, 0.40807330)) + assert_array_almost_equal(np.linalg.norm(x), 2) + + # line between cauchy_point and newton_point contains best point + # (box constraint is active). + x = modified_dogleg(A, Y, b, 5, [-1, -np.inf, -np.inf], + [np.inf, np.inf, np.inf]) + z = cauchy_point + d = newton_point-cauchy_point + t = ((x-z)/(d)) + assert_array_almost_equal(t, np.full(3, 0.7498195)) + assert_array_almost_equal(x[0], -1) + + # line between origin and cauchy_point contains best point + # (spherical constraint is active). + x = modified_dogleg(A, Y, b, 1, [-np.inf, -np.inf, -np.inf], + [np.inf, np.inf, np.inf]) + z = origin + d = cauchy_point + t = ((x-z)/(d)) + assert_array_almost_equal(t, np.full(3, 0.573936265)) + assert_array_almost_equal(np.linalg.norm(x), 1) + + # line between origin and newton_point contains best point + # (box constraint is active). + x = modified_dogleg(A, Y, b, 2, [-np.inf, -np.inf, -np.inf], + [np.inf, 1, np.inf]) + z = origin + d = newton_point + t = ((x-z)/(d)) + assert_array_almost_equal(t, np.full(3, 0.4478827364)) + assert_array_almost_equal(x[1], 1) + + +class TestProjectCG(TestCase): + + # From Example 16.2 Nocedal/Wright "Numerical + # Optimization" p.452. 
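+ # Background sketch for this class (hedged; it follows eqp_kktfact's usual + # convention rather than anything stated in this diff): both eqp_kktfact and + # projected_cg solve min 0.5 x^T H x + c^T x subject to A x + b = 0, the + # direct method by factorizing the KKT system + # [H, A^T; A, 0] [x; lambda] = [-c; -b], + # which for the data below yields x = [2, -1, 1], lambda = [3, -2]. The + # stop_cond values asserted throughout this class are consistent with the + # codes 1 = iteration limit, 2 = trust-region/box boundary reached, + # 3 = negative curvature detected, 4 = tolerance satisfied.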
+ def test_nocedal_example(self): + H = csc_matrix([[6, 2, 1], + [2, 5, 2], + [1, 2, 4]]) + A = csc_matrix([[1, 0, 1], + [0, 1, 1]]) + c = np.array([-8, -3, -3]) + b = -np.array([3, 0]) + Z, _, Y = projections(A) + x, info = projected_cg(H, c, Z, Y, b) + assert_equal(info["stop_cond"], 4) + assert_equal(info["hits_boundary"], False) + assert_array_almost_equal(x, [2, -1, 1]) + + def test_compare_with_direct_fact(self): + H = csc_matrix([[6, 2, 1, 3], + [2, 5, 2, 4], + [1, 2, 4, 5], + [3, 4, 5, 7]]) + A = csc_matrix([[1, 0, 1, 0], + [0, 1, 1, 1]]) + c = np.array([-2, -3, -3, 1]) + b = -np.array([3, 0]) + Z, _, Y = projections(A) + x, info = projected_cg(H, c, Z, Y, b, tol=0) + x_kkt, _ = eqp_kktfact(H, c, A, b) + assert_equal(info["stop_cond"], 1) + assert_equal(info["hits_boundary"], False) + assert_array_almost_equal(x, x_kkt) + + def test_trust_region_infeasible(self): + H = csc_matrix([[6, 2, 1, 3], + [2, 5, 2, 4], + [1, 2, 4, 5], + [3, 4, 5, 7]]) + A = csc_matrix([[1, 0, 1, 0], + [0, 1, 1, 1]]) + c = np.array([-2, -3, -3, 1]) + b = -np.array([3, 0]) + trust_radius = 1 + Z, _, Y = projections(A) + with pytest.raises(ValueError): + projected_cg(H, c, Z, Y, b, trust_radius=trust_radius) + + def test_trust_region_barely_feasible(self): + H = csc_matrix([[6, 2, 1, 3], + [2, 5, 2, 4], + [1, 2, 4, 5], + [3, 4, 5, 7]]) + A = csc_matrix([[1, 0, 1, 0], + [0, 1, 1, 1]]) + c = np.array([-2, -3, -3, 1]) + b = -np.array([3, 0]) + trust_radius = 2.32379000772445021283 + Z, _, Y = projections(A) + x, info = projected_cg(H, c, Z, Y, b, + tol=0, + trust_radius=trust_radius) + assert_equal(info["stop_cond"], 2) + assert_equal(info["hits_boundary"], True) + assert_array_almost_equal(np.linalg.norm(x), trust_radius) + assert_array_almost_equal(x, -Y.dot(b)) + + def test_hits_boundary(self): + H = csc_matrix([[6, 2, 1, 3], + [2, 5, 2, 4], + [1, 2, 4, 5], + [3, 4, 5, 7]]) + A = csc_matrix([[1, 0, 1, 0], + [0, 1, 1, 1]]) + c = np.array([-2, -3, -3, 1]) + b = -np.array([3, 0]) + trust_radius = 3 + Z, _, Y = projections(A) + x, info = projected_cg(H, c, Z, Y, b, + tol=0, + trust_radius=trust_radius) + assert_equal(info["stop_cond"], 2) + assert_equal(info["hits_boundary"], True) + assert_array_almost_equal(np.linalg.norm(x), trust_radius) + + def test_negative_curvature_unconstrained(self): + H = csc_matrix([[1, 2, 1, 3], + [2, 0, 2, 4], + [1, 2, 0, 2], + [3, 4, 2, 0]]) + A = csc_matrix([[1, 0, 1, 0], + [0, 1, 0, 1]]) + c = np.array([-2, -3, -3, 1]) + b = -np.array([3, 0]) + Z, _, Y = projections(A) + with pytest.raises(ValueError): + projected_cg(H, c, Z, Y, b, tol=0) + + def test_negative_curvature(self): + H = csc_matrix([[1, 2, 1, 3], + [2, 0, 2, 4], + [1, 2, 0, 2], + [3, 4, 2, 0]]) + A = csc_matrix([[1, 0, 1, 0], + [0, 1, 0, 1]]) + c = np.array([-2, -3, -3, 1]) + b = -np.array([3, 0]) + Z, _, Y = projections(A) + trust_radius = 1000 + x, info = projected_cg(H, c, Z, Y, b, + tol=0, + trust_radius=trust_radius) + assert_equal(info["stop_cond"], 3) + assert_equal(info["hits_boundary"], True) + assert_array_almost_equal(np.linalg.norm(x), trust_radius) + + # The box constraints are inactive at the solution but + # are active during the iterations. 
+ def test_inactive_box_constraints(self): + H = csc_matrix([[6, 2, 1, 3], + [2, 5, 2, 4], + [1, 2, 4, 5], + [3, 4, 5, 7]]) + A = csc_matrix([[1, 0, 1, 0], + [0, 1, 1, 1]]) + c = np.array([-2, -3, -3, 1]) + b = -np.array([3, 0]) + Z, _, Y = projections(A) + x, info = projected_cg(H, c, Z, Y, b, + tol=0, + lb=[0.5, -np.inf, + -np.inf, -np.inf], + return_all=True) + x_kkt, _ = eqp_kktfact(H, c, A, b) + assert_equal(info["stop_cond"], 1) + assert_equal(info["hits_boundary"], False) + assert_array_almost_equal(x, x_kkt) + + # The box constraints are active and the termination is + # by maximum iterations (infeasible iteration). + def test_active_box_constraints_maximum_iterations_reached(self): + H = csc_matrix([[6, 2, 1, 3], + [2, 5, 2, 4], + [1, 2, 4, 5], + [3, 4, 5, 7]]) + A = csc_matrix([[1, 0, 1, 0], + [0, 1, 1, 1]]) + c = np.array([-2, -3, -3, 1]) + b = -np.array([3, 0]) + Z, _, Y = projections(A) + x, info = projected_cg(H, c, Z, Y, b, + tol=0, + lb=[0.8, -np.inf, + -np.inf, -np.inf], + return_all=True) + assert_equal(info["stop_cond"], 1) + assert_equal(info["hits_boundary"], True) + assert_array_almost_equal(A.dot(x), -b) + assert_array_almost_equal(x[0], 0.8) + + # The box constraints are active and the termination is + # because it hits boundary (without an infeasible iteration). + def test_active_box_constraints_hits_boundaries(self): + H = csc_matrix([[6, 2, 1, 3], + [2, 5, 2, 4], + [1, 2, 4, 5], + [3, 4, 5, 7]]) + A = csc_matrix([[1, 0, 1, 0], + [0, 1, 1, 1]]) + c = np.array([-2, -3, -3, 1]) + b = -np.array([3, 0]) + trust_radius = 3 + Z, _, Y = projections(A) + x, info = projected_cg(H, c, Z, Y, b, + tol=0, + ub=[np.inf, np.inf, 1.6, np.inf], + trust_radius=trust_radius, + return_all=True) + assert_equal(info["stop_cond"], 2) + assert_equal(info["hits_boundary"], True) + assert_array_almost_equal(x[2], 1.6) + + # The box constraints are active and the termination is + # because it hits boundary (infeasible iteration). + def test_active_box_constraints_hits_boundaries_infeasible_iter(self): + H = csc_matrix([[6, 2, 1, 3], + [2, 5, 2, 4], + [1, 2, 4, 5], + [3, 4, 5, 7]]) + A = csc_matrix([[1, 0, 1, 0], + [0, 1, 1, 1]]) + c = np.array([-2, -3, -3, 1]) + b = -np.array([3, 0]) + trust_radius = 4 + Z, _, Y = projections(A) + x, info = projected_cg(H, c, Z, Y, b, + tol=0, + ub=[np.inf, 0.1, np.inf, np.inf], + trust_radius=trust_radius, + return_all=True) + assert_equal(info["stop_cond"], 2) + assert_equal(info["hits_boundary"], True) + assert_array_almost_equal(x[1], 0.1) + + # The box constraints are active and the termination is + # because of negative curvature (no infeasible iteration).
+ def test_active_box_constraints_negative_curvature(self): + H = csc_matrix([[1, 2, 1, 3], + [2, 0, 2, 4], + [1, 2, 0, 2], + [3, 4, 2, 0]]) + A = csc_matrix([[1, 0, 1, 0], + [0, 1, 0, 1]]) + c = np.array([-2, -3, -3, 1]) + b = -np.array([3, 0]) + Z, _, Y = projections(A) + trust_radius = 1000 + x, info = projected_cg(H, c, Z, Y, b, + tol=0, + ub=[np.inf, np.inf, 100, np.inf], + trust_radius=trust_radius) + assert_equal(info["stop_cond"], 3) + assert_equal(info["hits_boundary"], True) + assert_array_almost_equal(x[2], 100) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_report.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_report.py new file mode 100644 index 0000000000000000000000000000000000000000..e08d3e8490044caceb1bf3e6ab2cd6c37c5c8f70 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_report.py @@ -0,0 +1,32 @@ +import numpy as np +from scipy.optimize import minimize, Bounds + +def test_gh10880(): + # checks that verbose reporting works with trust-constr for + # bound-constrained problems + bnds = Bounds(1, 2) + opts = {'maxiter': 1000, 'verbose': 2} + minimize(lambda x: x**2, x0=2., method='trust-constr', + bounds=bnds, options=opts) + + opts = {'maxiter': 1000, 'verbose': 3} + minimize(lambda x: x**2, x0=2., method='trust-constr', + bounds=bnds, options=opts) + +def test_gh12922(): + # checks that verbose reporting works with trust-constr for + # general constraints + def objective(x): + return np.array([(np.sum((x+1)**4))]) + + cons = {'type': 'ineq', 'fun': lambda x: -x[0]**2} + n = 25 + x0 = np.linspace(-5, 5, n) + + opts = {'maxiter': 1000, 'verbose': 2} + minimize(objective, x0=x0, method='trust-constr', + constraints=cons, options=opts) + + opts = {'maxiter': 1000, 'verbose': 3} + minimize(objective, x0=x0, method='trust-constr', + constraints=cons, options=opts) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/cython_optimize/_zeros.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/cython_optimize/_zeros.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..6a8c46ff0226b603cadc8c83e9082c9968e6e401 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/cython_optimize/_zeros.cpython-310-x86_64-linux-gnu.so differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/cython_optimize/_zeros.pxd b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/cython_optimize/_zeros.pxd new file mode 100644 index 0000000000000000000000000000000000000000..d3c9e98f0a24d80d15d1f7052f690d608f66dd80 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/cython_optimize/_zeros.pxd @@ -0,0 +1,33 @@ +# Legacy public Cython API declarations +# +# NOTE: due to the way Cython ABI compatibility works, **no changes +# should be made to this file** --- any API additions/changes should be +# done in `cython_optimize.pxd` (see gh-11793).
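+# +# Illustrative usage sketch (a hedged, comment-only note; `f` and `a` below +# are hypothetical): downstream Cython code would normally cimport the +# public interface rather than this legacy file, e.g. +# from scipy.optimize.cython_optimize cimport brentq +# cdef double f(double x, void *args) noexcept: +# return (<double *> args)[0] - x * x +# cdef double a = 2.0 +# # root = brentq(f, 0.0, 2.0, <void *> &a, 1e-9, 1e-9, 100, NULL)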
+ +ctypedef double (*callback_type)(double, void*) noexcept + +ctypedef struct zeros_parameters: + callback_type function + void* args + +ctypedef struct zeros_full_output: + int funcalls + int iterations + int error_num + double root + +cdef double bisect(callback_type f, double xa, double xb, void* args, + double xtol, double rtol, int iter, + zeros_full_output *full_output) noexcept nogil + +cdef double ridder(callback_type f, double xa, double xb, void* args, + double xtol, double rtol, int iter, + zeros_full_output *full_output) noexcept nogil + +cdef double brenth(callback_type f, double xa, double xb, void* args, + double xtol, double rtol, int iter, + zeros_full_output *full_output) noexcept nogil + +cdef double brentq(callback_type f, double xa, double xb, void* args, + double xtol, double rtol, int iter, + zeros_full_output *full_output) noexcept nogil diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/cython_optimize/c_zeros.pxd b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/cython_optimize/c_zeros.pxd new file mode 100644 index 0000000000000000000000000000000000000000..0d83c80eb886846ddbbd6927e37e05812911f856 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/cython_optimize/c_zeros.pxd @@ -0,0 +1,26 @@ +cdef extern from "../Zeros/zeros.h": + ctypedef double (*callback_type)(double, void*) noexcept + ctypedef struct scipy_zeros_info: + int funcalls + int iterations + int error_num + +cdef extern from "../Zeros/bisect.c" nogil: + double bisect(callback_type f, double xa, double xb, double xtol, + double rtol, int iter, void *func_data_param, + scipy_zeros_info *solver_stats) + +cdef extern from "../Zeros/ridder.c" nogil: + double ridder(callback_type f, double xa, double xb, double xtol, + double rtol, int iter, void *func_data_param, + scipy_zeros_info *solver_stats) + +cdef extern from "../Zeros/brenth.c" nogil: + double brenth(callback_type f, double xa, double xb, double xtol, + double rtol, int iter, void *func_data_param, + scipy_zeros_info *solver_stats) + +cdef extern from "../Zeros/brentq.c" nogil: + double brentq(callback_type f, double xa, double xb, double xtol, + double rtol, int iter, void *func_data_param, + scipy_zeros_info *solver_stats) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__init__.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__linprog_clean_inputs.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__linprog_clean_inputs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9009b953711324749b629d52ee148534277e739e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__linprog_clean_inputs.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__remove_redundancy.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__remove_redundancy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6a465b35bb571ce69c03da5f815ea92312719585 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__remove_redundancy.cpython-310.pyc differ diff 
--git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__root.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__root.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4c03651db1eeb67f954410fc8b409a409dc4a2fb Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__root.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__shgo.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__shgo.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..171dcd232812f85c492a7ad5200cbfc87495c5f9 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__shgo.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_constraint_conversion.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_constraint_conversion.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2bc86bf209b96f1d4244911cf4d25f8f2d98ed04 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_constraint_conversion.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_cython_optimize.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_cython_optimize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f7c25cb3f97549b103ff4a2f4ba9c7e12b6503ca Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_cython_optimize.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_direct.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_direct.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fa8e15734eb4e54ad966109d2fd736a9bf3cbe48 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_direct.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_lbfgsb_setulb.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_lbfgsb_setulb.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..59c103857145ea435493e003547b26a47d1010a2 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_lbfgsb_setulb.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_least_squares.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_least_squares.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c7fdce8e8a95667ea10ca2cc53bb480cbed6bf68 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_least_squares.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_linear_assignment.cpython-310.pyc 
b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_linear_assignment.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b461cf8377fb2effa15460d4cfd738fabc94921b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_linear_assignment.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_lsq_linear.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_lsq_linear.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae229746eaa101c82f99ae09967fba7ba1718783 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_lsq_linear.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_nnls.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_nnls.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..493acd004696497b21a42771d93d1f67857a62ca Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_nnls.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test__basinhopping.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test__basinhopping.py new file mode 100644 index 0000000000000000000000000000000000000000..d4230dc5ab49d66a7952bf4e61dc4da5838a1b85 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test__basinhopping.py @@ -0,0 +1,525 @@ +""" +Unit tests for the basin hopping global minimization algorithm. +""" +import copy + +from numpy.testing import (assert_almost_equal, assert_equal, assert_, + assert_allclose) +import pytest +from pytest import raises as assert_raises +import numpy as np +from numpy import cos, sin + +from scipy.optimize import basinhopping, OptimizeResult +from scipy.optimize._basinhopping import ( + Storage, RandomDisplacement, Metropolis, AdaptiveStepsize) + + +def func1d(x): + f = cos(14.5 * x - 0.3) + (x + 0.2) * x + df = np.array(-14.5 * sin(14.5 * x - 0.3) + 2. * x + 0.2) + return f, df + + +def func2d_nograd(x): + f = cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] + (x[0] + 0.2) * x[0] + return f + + +def func2d(x): + f = cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] + (x[0] + 0.2) * x[0] + df = np.zeros(2) + df[0] = -14.5 * sin(14.5 * x[0] - 0.3) + 2. * x[0] + 0.2 + df[1] = 2. 
* x[1] + 0.2 + return f, df + + +def func2d_easyderiv(x): + f = 2.0*x[0]**2 + 2.0*x[0]*x[1] + 2.0*x[1]**2 - 6.0*x[0] + df = np.zeros(2) + df[0] = 4.0*x[0] + 2.0*x[1] - 6.0 + df[1] = 2.0*x[0] + 4.0*x[1] + + return f, df + + +class MyTakeStep1(RandomDisplacement): + """use a copy of displace, but have it set a special parameter to + make sure it's actually being used.""" + def __init__(self): + self.been_called = False + super().__init__() + + def __call__(self, x): + self.been_called = True + return super().__call__(x) + + +def myTakeStep2(x): + """redo RandomDisplacement in function form without the attribute stepsize + to make sure everything still works ok + """ + s = 0.5 + x += np.random.uniform(-s, s, np.shape(x)) + return x + + +class MyAcceptTest: + """pass a custom accept test + + This does nothing but make sure it's being used and ensure all the + possible return values are accepted + """ + def __init__(self): + self.been_called = False + self.ncalls = 0 + self.testres = [False, 'force accept', True, np.bool_(True), + np.bool_(False), [], {}, 0, 1] + + def __call__(self, **kwargs): + self.been_called = True + self.ncalls += 1 + if self.ncalls - 1 < len(self.testres): + return self.testres[self.ncalls - 1] + else: + return True + + +class MyCallBack: + """pass a custom callback function + + This makes sure it's being used. It also returns True after 10 + steps to ensure that it's stopping early. + + """ + def __init__(self): + self.been_called = False + self.ncalls = 0 + + def __call__(self, x, f, accepted): + self.been_called = True + self.ncalls += 1 + if self.ncalls == 10: + return True + + +class TestBasinHopping: + + def setup_method(self): + """ Tests setup. + + Run tests based on the 1-D and 2-D functions described above. + """ + self.x0 = (1.0, [1.0, 1.0]) + self.sol = (-0.195, np.array([-0.195, -0.1])) + + self.tol = 3 # number of decimal places + + self.niter = 100 + self.disp = False + + # fix random seed + np.random.seed(1234) + + self.kwargs = {"method": "L-BFGS-B", "jac": True} + self.kwargs_nograd = {"method": "L-BFGS-B"} + + def test_TypeError(self): + # test the TypeErrors are raised on bad input + i = 1 + # if take_step is passed, it must be callable + assert_raises(TypeError, basinhopping, func2d, self.x0[i], + take_step=1) + # if accept_test is passed, it must be callable + assert_raises(TypeError, basinhopping, func2d, self.x0[i], + accept_test=1) + + def test_input_validation(self): + msg = 'target_accept_rate has to be in range \\(0, 1\\)' + with assert_raises(ValueError, match=msg): + basinhopping(func1d, self.x0[0], target_accept_rate=0.) + with assert_raises(ValueError, match=msg): + basinhopping(func1d, self.x0[0], target_accept_rate=1.) + + msg = 'stepwise_factor has to be in range \\(0, 1\\)' + with assert_raises(ValueError, match=msg): + basinhopping(func1d, self.x0[0], stepwise_factor=0.) + with assert_raises(ValueError, match=msg): + basinhopping(func1d, self.x0[0], stepwise_factor=1.) 
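+ + # Note on the fixtures above (an inference from the code, not from any + # upstream docs): because self.kwargs sets jac=True, the objectives func1d + # and func2d must return an (f, grad) tuple, while func2d_nograd returns a + # bare float for use with the jac-free self.kwargs_nograd.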
+ + def test_1d_grad(self): + # test 1-D minimizations with gradient + i = 0 + res = basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs, + niter=self.niter, disp=self.disp) + assert_almost_equal(res.x, self.sol[i], self.tol) + + def test_2d(self): + # test 2d minimizations with gradient + i = 1 + res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs, + niter=self.niter, disp=self.disp) + assert_almost_equal(res.x, self.sol[i], self.tol) + assert_(res.nfev > 0) + + def test_njev(self): + # test njev is returned correctly + i = 1 + minimizer_kwargs = self.kwargs.copy() + # L-BFGS-B doesn't use njev, but BFGS does + minimizer_kwargs["method"] = "BFGS" + res = basinhopping(func2d, self.x0[i], + minimizer_kwargs=minimizer_kwargs, niter=self.niter, + disp=self.disp) + assert_(res.nfev > 0) + assert_equal(res.nfev, res.njev) + + def test_jac(self): + # test Jacobian returned + minimizer_kwargs = self.kwargs.copy() + # BFGS returns a Jacobian + minimizer_kwargs["method"] = "BFGS" + + res = basinhopping(func2d_easyderiv, [0.0, 0.0], + minimizer_kwargs=minimizer_kwargs, niter=self.niter, + disp=self.disp) + + assert_(hasattr(res.lowest_optimization_result, "jac")) + + # in this case, the Jacobian is just [df/dx, df/dy] + _, jacobian = func2d_easyderiv(res.x) + assert_almost_equal(res.lowest_optimization_result.jac, jacobian, + self.tol) + + def test_2d_nograd(self): + # test 2-D minimizations without gradient + i = 1 + res = basinhopping(func2d_nograd, self.x0[i], + minimizer_kwargs=self.kwargs_nograd, + niter=self.niter, disp=self.disp) + assert_almost_equal(res.x, self.sol[i], self.tol) + + def test_all_minimizers(self): + # Test 2-D minimizations with gradient. Nelder-Mead, Powell, and COBYLA + # don't accept jac=True, so aren't included here. + i = 1 + methods = ['CG', 'BFGS', 'Newton-CG', 'L-BFGS-B', 'TNC', 'SLSQP'] + minimizer_kwargs = copy.copy(self.kwargs) + for method in methods: + minimizer_kwargs["method"] = method + res = basinhopping(func2d, self.x0[i], + minimizer_kwargs=minimizer_kwargs, + niter=self.niter, disp=self.disp) + assert_almost_equal(res.x, self.sol[i], self.tol) + + def test_all_nograd_minimizers(self): + # Test 2-D minimizations without gradient. Newton-CG requires jac=True, + # so not included here. 
+ i = 1 + methods = ['CG', 'BFGS', 'L-BFGS-B', 'TNC', 'SLSQP', + 'Nelder-Mead', 'Powell', 'COBYLA'] + minimizer_kwargs = copy.copy(self.kwargs_nograd) + for method in methods: + minimizer_kwargs["method"] = method + res = basinhopping(func2d_nograd, self.x0[i], + minimizer_kwargs=minimizer_kwargs, + niter=self.niter, disp=self.disp) + tol = self.tol + if method == 'COBYLA': + tol = 2 + assert_almost_equal(res.x, self.sol[i], decimal=tol) + + def test_pass_takestep(self): + # test that passing a custom takestep works + # also test that the stepsize is being adjusted + takestep = MyTakeStep1() + initial_step_size = takestep.stepsize + i = 1 + res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs, + niter=self.niter, disp=self.disp, + take_step=takestep) + assert_almost_equal(res.x, self.sol[i], self.tol) + assert_(takestep.been_called) + # make sure that the built-in adaptive step size has been used + assert_(initial_step_size != takestep.stepsize) + + def test_pass_simple_takestep(self): + # test that passing a custom takestep without the stepsize attribute works + takestep = myTakeStep2 + i = 1 + res = basinhopping(func2d_nograd, self.x0[i], + minimizer_kwargs=self.kwargs_nograd, + niter=self.niter, disp=self.disp, + take_step=takestep) + assert_almost_equal(res.x, self.sol[i], self.tol) + + def test_pass_accept_test(self): + # test passing a custom accept test + # makes sure it's being used and ensures all the possible return values + # are accepted. + accept_test = MyAcceptTest() + i = 1 + # there's no point in running it more than a few steps. + basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs, + niter=10, disp=self.disp, accept_test=accept_test) + assert_(accept_test.been_called) + + def test_pass_callback(self): + # test passing a custom callback function + # This makes sure it's being used. It also returns True after 10 steps + # to ensure that it's stopping early. + callback = MyCallBack() + i = 1 + # there's no point in running it more than a few steps. + res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs, + niter=30, disp=self.disp, callback=callback) + assert_(callback.been_called) + assert_("callback" in res.message[0]) + # One of the calls of MyCallBack is during BasinHoppingRunner + # construction, so there are only 9 remaining before MyCallBack stops + # the minimization.
+ assert_equal(res.nit, 9) + + def test_minimizer_fail(self): + # test if a minimizer fails + i = 1 + self.kwargs["options"] = dict(maxiter=0) + self.niter = 10 + res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs, + niter=self.niter, disp=self.disp) + # the number of failed minimizations should be the number of + # iterations + 1 + assert_equal(res.nit + 1, res.minimization_failures) + + def test_niter_zero(self): + # gh5915, what happens if you call basinhopping with niter=0 + i = 0 + basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs, + niter=0, disp=self.disp) + + def test_seed_reproducibility(self): + # seed should ensure reproducibility between runs + minimizer_kwargs = {"method": "L-BFGS-B", "jac": True} + + f_1 = [] + + def callback(x, f, accepted): + f_1.append(f) + + basinhopping(func2d, [1.0, 1.0], minimizer_kwargs=minimizer_kwargs, + niter=10, callback=callback, seed=10) + + f_2 = [] + + def callback2(x, f, accepted): + f_2.append(f) + + basinhopping(func2d, [1.0, 1.0], minimizer_kwargs=minimizer_kwargs, + niter=10, callback=callback2, seed=10) + assert_equal(np.array(f_1), np.array(f_2)) + + def test_random_gen(self): + # check that np.random.Generator can be used (numpy >= 1.17) + rng = np.random.default_rng(1) + + minimizer_kwargs = {"method": "L-BFGS-B", "jac": True} + + res1 = basinhopping(func2d, [1.0, 1.0], + minimizer_kwargs=minimizer_kwargs, + niter=10, seed=rng) + + rng = np.random.default_rng(1) + res2 = basinhopping(func2d, [1.0, 1.0], + minimizer_kwargs=minimizer_kwargs, + niter=10, seed=rng) + assert_equal(res1.x, res2.x) + + def test_monotonic_basin_hopping(self): + # test 1-D minimizations with gradient and T=0 + i = 0 + res = basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs, + niter=self.niter, disp=self.disp, T=0) + assert_almost_equal(res.x, self.sol[i], self.tol) + + +class Test_Storage: + def setup_method(self): + self.x0 = np.array(1) + self.f0 = 0 + + minres = OptimizeResult(success=True) + minres.x = self.x0 + minres.fun = self.f0 + + self.storage = Storage(minres) + + def test_higher_f_rejected(self): + new_minres = OptimizeResult(success=True) + new_minres.x = self.x0 + 1 + new_minres.fun = self.f0 + 1 + + ret = self.storage.update(new_minres) + minres = self.storage.get_lowest() + assert_equal(self.x0, minres.x) + assert_equal(self.f0, minres.fun) + assert_(not ret) + + @pytest.mark.parametrize('success', [True, False]) + def test_lower_f_accepted(self, success): + new_minres = OptimizeResult(success=success) + new_minres.x = self.x0 + 1 + new_minres.fun = self.f0 - 1 + + ret = self.storage.update(new_minres) + minres = self.storage.get_lowest() + assert (self.x0 != minres.x) == success # can't use `is` + assert (self.f0 != minres.fun) == success # left side is NumPy bool + assert ret is success + + +class Test_RandomDisplacement: + def setup_method(self): + self.stepsize = 1.0 + self.displace = RandomDisplacement(stepsize=self.stepsize) + self.N = 300000 + self.x0 = np.zeros([self.N]) + + def test_random(self): + # the mean should be 0 + # the variance should be (2*stepsize)**2 / 12 + # note these tests are random, they will fail from time to time + x = self.displace(self.x0) + v = (2. * self.stepsize) ** 2 / 12 + assert_almost_equal(np.mean(x), 0., 1) + assert_almost_equal(np.var(x), v, 1) + + +class Test_Metropolis: + def setup_method(self): + self.T = 2. + self.met = Metropolis(self.T) + self.res_new = OptimizeResult(success=True, fun=0.) + self.res_old = OptimizeResult(success=True, fun=1.) 
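+ + # Background for the tests below: the standard Metropolis criterion + # accepts a move from f_old to f_new with probability + # min(1, exp(-(f_new - f_old) / T)), so downhill moves are always accepted + # and uphill moves only sometimes; test_GH7495 checks that the exp() is + # guarded against overflow for large uphill differences.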
+
+    def test_boolean_return(self):
+        # the return must be a bool, else an error will be raised in
+        # basinhopping
+        ret = self.met(res_new=self.res_new, res_old=self.res_old)
+        assert isinstance(ret, bool)
+
+    def test_lower_f_accepted(self):
+        assert_(self.met(res_new=self.res_new, res_old=self.res_old))
+
+    def test_accept(self):
+        # test that steps are randomly accepted for f_new > f_old
+        one_accept = False
+        one_reject = False
+        for i in range(1000):
+            if one_accept and one_reject:
+                break
+            res_new = OptimizeResult(success=True, fun=1.)
+            res_old = OptimizeResult(success=True, fun=0.5)
+            ret = self.met(res_new=res_new, res_old=res_old)
+            if ret:
+                one_accept = True
+            else:
+                one_reject = True
+        assert_(one_accept)
+        assert_(one_reject)
+
+    def test_GH7495(self):
+        # an overflow in exp was producing a RuntimeWarning
+        # create our own object here in case someone changes self.T
+        met = Metropolis(2)
+        res_new = OptimizeResult(success=True, fun=0.)
+        res_old = OptimizeResult(success=True, fun=2000)
+        with np.errstate(over='raise'):
+            met.accept_reject(res_new=res_new, res_old=res_old)
+
+    def test_gh7799(self):
+        # gh-7799 reported a problem in which local search was successful but
+        # basinhopping returned an invalid solution. Show that this is fixed.
+        def func(x):
+            return (x**2-8)**2+(x+2)**2
+
+        x0 = -4
+        limit = 50  # Constrain to func value >= 50
+        con = {'type': 'ineq', 'fun': lambda x: func(x) - limit},
+        res = basinhopping(func, x0, 30, minimizer_kwargs={'constraints': con})
+        assert res.success
+        assert_allclose(res.fun, limit, rtol=1e-6)
+
+    def test_accept_gh7799(self):
+        # Metropolis should not accept the result of an unsuccessful new local
+        # search if the old local search was successful
+
+        met = Metropolis(0)  # monotonic basin hopping
+        res_new = OptimizeResult(success=True, fun=0.)
+        res_old = OptimizeResult(success=True, fun=1.)
+
+        # if new local search was successful and energy is lower, accept
+        assert met(res_new=res_new, res_old=res_old)
+        # if new res is unsuccessful, don't accept - even if energy is lower
+        res_new.success = False
+        assert not met(res_new=res_new, res_old=res_old)
+        # ...unless the old res was unsuccessful, too. In that case, why not?
+        res_old.success = False
+        assert met(res_new=res_new, res_old=res_old)
+
+    def test_reject_all_gh7799(self):
+        # Test the behavior when there is no feasible solution
+        def fun(x):
+            return x@x
+
+        def constraint(x):
+            return x + 1
+
+        kwargs = {'constraints': {'type': 'eq', 'fun': constraint},
+                  'bounds': [(0, 1), (0, 1)], 'method': 'slsqp'}
+        res = basinhopping(fun, x0=[2, 3], niter=10, minimizer_kwargs=kwargs)
+        assert not res.success
+
+
+class Test_AdaptiveStepsize:
+    def setup_method(self):
+        self.stepsize = 1.
+        self.ts = RandomDisplacement(stepsize=self.stepsize)
+        self.target_accept_rate = 0.5
+        self.takestep = AdaptiveStepsize(takestep=self.ts, verbose=False,
+                                         accept_rate=self.target_accept_rate)
+
+    def test_adaptive_increase(self):
+        # if few steps are rejected, the stepsize should increase
+        x = 0.
+        self.takestep(x)
+        self.takestep.report(False)
+        for i in range(self.takestep.interval):
+            self.takestep(x)
+            self.takestep.report(True)
+        assert_(self.ts.stepsize > self.stepsize)
+
+    def test_adaptive_decrease(self):
+        # if few steps are accepted, the stepsize should decrease
+        x = 0.
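+        # A sketch of the adjustment rule that the tests in this class pin
+        # down, written from the assertions rather than the implementation
+        # (naccept, nstep and factor denote assumed internal state, with
+        # 0 < factor < 1): every `interval` calls the observed acceptance
+        # rate is compared with the target `accept_rate`:
+        #
+        #     rate = naccept / nstep
+        #     if rate > target_accept_rate:
+        #         stepsize /= factor   # accepting a lot -> larger steps
+        #     else:
+        #         stepsize *= factor   # rejecting a lot -> smaller steps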
+ self.takestep(x) + self.takestep.report(True) + for i in range(self.takestep.interval): + self.takestep(x) + self.takestep.report(False) + assert_(self.ts.stepsize < self.stepsize) + + def test_all_accepted(self): + # test that everything works OK if all steps were accepted + x = 0. + for i in range(self.takestep.interval + 1): + self.takestep(x) + self.takestep.report(True) + assert_(self.ts.stepsize > self.stepsize) + + def test_all_rejected(self): + # test that everything works OK if all steps were rejected + x = 0. + for i in range(self.takestep.interval + 1): + self.takestep(x) + self.takestep.report(False) + assert_(self.ts.stepsize < self.stepsize) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test__differential_evolution.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test__differential_evolution.py new file mode 100644 index 0000000000000000000000000000000000000000..d5638a120aaa2f4cec8cec44d2ec8b86c74eebb5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test__differential_evolution.py @@ -0,0 +1,1677 @@ +""" +Unit tests for the differential global minimization algorithm. +""" +import multiprocessing +import platform + +from scipy.optimize._differentialevolution import (DifferentialEvolutionSolver, + _ConstraintWrapper) +from scipy.optimize import differential_evolution, OptimizeResult +from scipy.optimize._constraints import (Bounds, NonlinearConstraint, + LinearConstraint) +from scipy.optimize import rosen, minimize +from scipy.sparse import csr_matrix +from scipy import stats + +import numpy as np +from numpy.testing import (assert_equal, assert_allclose, assert_almost_equal, + assert_string_equal, assert_, suppress_warnings) +from pytest import raises as assert_raises, warns +import pytest + + +class TestDifferentialEvolutionSolver: + + def setup_method(self): + self.old_seterr = np.seterr(invalid='raise') + self.limits = np.array([[0., 0.], + [2., 2.]]) + self.bounds = [(0., 2.), (0., 2.)] + + self.dummy_solver = DifferentialEvolutionSolver(self.quadratic, + [(0, 100)]) + + # dummy_solver2 will be used to test mutation strategies + self.dummy_solver2 = DifferentialEvolutionSolver(self.quadratic, + [(0, 1)], + popsize=7, + mutation=0.5) + # create a population that's only 7 members long + # [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7] + population = np.atleast_2d(np.arange(0.1, 0.8, 0.1)).T + self.dummy_solver2.population = population + + def teardown_method(self): + np.seterr(**self.old_seterr) + + def quadratic(self, x): + return x[0]**2 + + def test__strategy_resolves(self): + # test that the correct mutation function is resolved by + # different requested strategy arguments + solver = DifferentialEvolutionSolver(rosen, + self.bounds, + strategy='best1exp') + assert_equal(solver.strategy, 'best1exp') + assert_equal(solver.mutation_func.__name__, '_best1') + + solver = DifferentialEvolutionSolver(rosen, + self.bounds, + strategy='best1bin') + assert_equal(solver.strategy, 'best1bin') + assert_equal(solver.mutation_func.__name__, '_best1') + + solver = DifferentialEvolutionSolver(rosen, + self.bounds, + strategy='rand1bin') + assert_equal(solver.strategy, 'rand1bin') + assert_equal(solver.mutation_func.__name__, '_rand1') + + solver = DifferentialEvolutionSolver(rosen, + self.bounds, + strategy='rand1exp') + assert_equal(solver.strategy, 'rand1exp') + assert_equal(solver.mutation_func.__name__, '_rand1') + + solver = DifferentialEvolutionSolver(rosen, + self.bounds, + strategy='rand2exp') + 
assert_equal(solver.strategy, 'rand2exp') + assert_equal(solver.mutation_func.__name__, '_rand2') + + solver = DifferentialEvolutionSolver(rosen, + self.bounds, + strategy='best2bin') + assert_equal(solver.strategy, 'best2bin') + assert_equal(solver.mutation_func.__name__, '_best2') + + solver = DifferentialEvolutionSolver(rosen, + self.bounds, + strategy='rand2bin') + assert_equal(solver.strategy, 'rand2bin') + assert_equal(solver.mutation_func.__name__, '_rand2') + + solver = DifferentialEvolutionSolver(rosen, + self.bounds, + strategy='rand2exp') + assert_equal(solver.strategy, 'rand2exp') + assert_equal(solver.mutation_func.__name__, '_rand2') + + solver = DifferentialEvolutionSolver(rosen, + self.bounds, + strategy='randtobest1bin') + assert_equal(solver.strategy, 'randtobest1bin') + assert_equal(solver.mutation_func.__name__, '_randtobest1') + + solver = DifferentialEvolutionSolver(rosen, + self.bounds, + strategy='randtobest1exp') + assert_equal(solver.strategy, 'randtobest1exp') + assert_equal(solver.mutation_func.__name__, '_randtobest1') + + solver = DifferentialEvolutionSolver(rosen, + self.bounds, + strategy='currenttobest1bin') + assert_equal(solver.strategy, 'currenttobest1bin') + assert_equal(solver.mutation_func.__name__, '_currenttobest1') + + solver = DifferentialEvolutionSolver(rosen, + self.bounds, + strategy='currenttobest1exp') + assert_equal(solver.strategy, 'currenttobest1exp') + assert_equal(solver.mutation_func.__name__, '_currenttobest1') + + def test__mutate1(self): + # strategies */1/*, i.e. rand/1/bin, best/1/exp, etc. + result = np.array([0.05]) + trial = self.dummy_solver2._best1((2, 3, 4, 5, 6)) + assert_allclose(trial, result) + + result = np.array([0.25]) + trial = self.dummy_solver2._rand1((2, 3, 4, 5, 6)) + assert_allclose(trial, result) + + def test__mutate2(self): + # strategies */2/*, i.e. rand/2/bin, best/2/exp, etc. 
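+        # The expected values of both mutation tests can be reproduced by
+        # hand from the population [0.1, 0.2, ..., 0.7] above, with scale
+        # F = 0.5 and samples (2, 3, 4, 5, 6) indexing the population
+        # ('best' is member 0):
+        #
+        #     best/1: 0.1 + 0.5 * (0.3 - 0.4)                 =  0.05
+        #     rand/1: 0.3 + 0.5 * (0.4 - 0.5)                 =  0.25
+        #     best/2: 0.1 + 0.5 * ((0.3 + 0.4) - (0.5 + 0.6)) = -0.1
+        #     rand/2: 0.3 + 0.5 * ((0.4 + 0.5) - (0.6 + 0.7)) =  0.1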
+ # [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7] + + result = np.array([-0.1]) + trial = self.dummy_solver2._best2((2, 3, 4, 5, 6)) + assert_allclose(trial, result) + + result = np.array([0.1]) + trial = self.dummy_solver2._rand2((2, 3, 4, 5, 6)) + assert_allclose(trial, result) + + def test__randtobest1(self): + # strategies randtobest/1/* + result = np.array([0.15]) + trial = self.dummy_solver2._randtobest1((2, 3, 4, 5, 6)) + assert_allclose(trial, result) + + def test__currenttobest1(self): + # strategies currenttobest/1/* + result = np.array([0.1]) + trial = self.dummy_solver2._currenttobest1(1, (2, 3, 4, 5, 6)) + assert_allclose(trial, result) + + def test_can_init_with_dithering(self): + mutation = (0.5, 1) + solver = DifferentialEvolutionSolver(self.quadratic, + self.bounds, + mutation=mutation) + + assert_equal(solver.dither, list(mutation)) + + def test_invalid_mutation_values_arent_accepted(self): + func = rosen + mutation = (0.5, 3) + assert_raises(ValueError, + DifferentialEvolutionSolver, + func, + self.bounds, + mutation=mutation) + + mutation = (-1, 1) + assert_raises(ValueError, + DifferentialEvolutionSolver, + func, + self.bounds, + mutation=mutation) + + mutation = (0.1, np.nan) + assert_raises(ValueError, + DifferentialEvolutionSolver, + func, + self.bounds, + mutation=mutation) + + mutation = 0.5 + solver = DifferentialEvolutionSolver(func, + self.bounds, + mutation=mutation) + assert_equal(0.5, solver.scale) + assert_equal(None, solver.dither) + + def test_invalid_functional(self): + def func(x): + return np.array([np.sum(x ** 2), np.sum(x)]) + + with assert_raises( + RuntimeError, + match=r"func\(x, \*args\) must return a scalar value"): + differential_evolution(func, [(-2, 2), (-2, 2)]) + + def test__scale_parameters(self): + trial = np.array([0.3]) + assert_equal(30, self.dummy_solver._scale_parameters(trial)) + + # it should also work with the limits reversed + self.dummy_solver.limits = np.array([[100], [0.]]) + assert_equal(30, self.dummy_solver._scale_parameters(trial)) + + def test__unscale_parameters(self): + trial = np.array([30]) + assert_equal(0.3, self.dummy_solver._unscale_parameters(trial)) + + # it should also work with the limits reversed + self.dummy_solver.limits = np.array([[100], [0.]]) + assert_equal(0.3, self.dummy_solver._unscale_parameters(trial)) + + def test_equal_bounds(self): + with np.errstate(invalid='raise'): + solver = DifferentialEvolutionSolver( + self.quadratic, + bounds=[(2.0, 2.0), (1.0, 3.0)] + ) + v = solver._unscale_parameters([2.0, 2.0]) + assert_allclose(v, 0.5) + + res = differential_evolution(self.quadratic, [(2.0, 2.0), (3.0, 3.0)]) + assert_equal(res.x, [2.0, 3.0]) + + def test__ensure_constraint(self): + trial = np.array([1.1, -100, 0.9, 2., 300., -0.00001]) + self.dummy_solver._ensure_constraint(trial) + + assert_equal(trial[2], 0.9) + assert_(np.logical_and(trial >= 0, trial <= 1).all()) + + def test_differential_evolution(self): + # test that the Jmin of DifferentialEvolutionSolver + # is the same as the function evaluation + solver = DifferentialEvolutionSolver( + self.quadratic, [(-2, 2)], maxiter=1, polish=False + ) + result = solver.solve() + assert_equal(result.fun, self.quadratic(result.x)) + + solver = DifferentialEvolutionSolver( + self.quadratic, [(-2, 2)], maxiter=1, polish=True + ) + result = solver.solve() + assert_equal(result.fun, self.quadratic(result.x)) + + def test_best_solution_retrieval(self): + # test that the getter property method for the best solution works. 
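+        # (x is a read-only property; a hedged sketch of what it evaluates
+        # to is self._scale_parameters(self.population[0]), i.e. the
+        # lowest-energy member mapped back from the unit hypercube.)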
+ solver = DifferentialEvolutionSolver(self.quadratic, [(-2, 2)]) + result = solver.solve() + assert_equal(result.x, solver.x) + + def test_intermediate_result(self): + # Check that intermediate result object passed into the callback + # function contains the expected information and that raising + # `StopIteration` causes the expected behavior. + maxiter = 10 + + def func(x): + val = rosen(x) + if val < func.val: + func.x = x + func.val = val + return val + func.x = None + func.val = np.inf + + def callback(intermediate_result): + callback.nit += 1 + callback.intermediate_result = intermediate_result + assert intermediate_result.population.ndim == 2 + assert intermediate_result.population.shape[1] == 2 + assert intermediate_result.nit == callback.nit + + # Check that `x` and `fun` attributes are the best found so far + assert_equal(intermediate_result.x, callback.func.x) + assert_equal(intermediate_result.fun, callback.func.val) + + # Check for consistency between `fun`, `population_energies`, + # `x`, and `population` + assert_equal(intermediate_result.fun, rosen(intermediate_result.x)) + for i in range(len(intermediate_result.population_energies)): + res = intermediate_result.population_energies[i] + ref = rosen(intermediate_result.population[i]) + assert_equal(res, ref) + assert_equal(intermediate_result.x, + intermediate_result.population[0]) + assert_equal(intermediate_result.fun, + intermediate_result.population_energies[0]) + + assert intermediate_result.message == 'in progress' + assert intermediate_result.success is True + assert isinstance(intermediate_result, OptimizeResult) + if callback.nit == maxiter: + raise StopIteration + callback.nit = 0 + callback.intermediate_result = None + callback.func = func + + bounds = [(0, 2), (0, 2)] + kwargs = dict(func=func, bounds=bounds, seed=838245, polish=False) + res = differential_evolution(**kwargs, callback=callback) + ref = differential_evolution(**kwargs, maxiter=maxiter) + + # Check that final `intermediate_result` is equivalent to returned + # result object and that terminating with callback `StopIteration` + # after `maxiter` iterations is equivalent to terminating with + # `maxiter` parameter. + assert res.success is ref.success is False + assert callback.nit == res.nit == maxiter + assert res.message == 'callback function requested stop early' + assert ref.message == 'Maximum number of iterations has been exceeded.' 
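+        # OptimizeResult is a dict subclass, so ref.items() below walks
+        # every field of the reference result for comparison.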
+ for field, val in ref.items(): + if field in {'message', 'success'}: # checked separately + continue + assert_equal(callback.intermediate_result[field], val) + assert_equal(res[field], val) + + # Check that polish occurs after `StopIteration` as advertised + callback.nit = 0 + func.val = np.inf + kwargs['polish'] = True + res = differential_evolution(**kwargs, callback=callback) + assert res.fun < ref.fun + + def test_callback_terminates(self): + # test that if the callback returns true, then the minimization halts + bounds = [(0, 2), (0, 2)] + expected_msg = 'callback function requested stop early' + def callback_python_true(param, convergence=0.): + return True + + result = differential_evolution( + rosen, bounds, callback=callback_python_true + ) + assert_string_equal(result.message, expected_msg) + + # if callback raises StopIteration then solve should be interrupted + def callback_stop(intermediate_result): + raise StopIteration + + result = differential_evolution(rosen, bounds, callback=callback_stop) + assert not result.success + + def callback_evaluates_true(param, convergence=0.): + # DE should stop if bool(self.callback) is True + return [10] + + result = differential_evolution(rosen, bounds, callback=callback_evaluates_true) + assert_string_equal(result.message, expected_msg) + assert not result.success + + def callback_evaluates_false(param, convergence=0.): + return [] + + result = differential_evolution(rosen, bounds, + callback=callback_evaluates_false) + assert result.success + + def test_args_tuple_is_passed(self): + # test that the args tuple is passed to the cost function properly. + bounds = [(-10, 10)] + args = (1., 2., 3.) + + def quadratic(x, *args): + if type(args) != tuple: + raise ValueError('args should be a tuple') + return args[0] + args[1] * x + args[2] * x**2. + + result = differential_evolution(quadratic, + bounds, + args=args, + polish=True) + assert_almost_equal(result.fun, 2 / 3.) + + def test_init_with_invalid_strategy(self): + # test that passing an invalid strategy raises ValueError + func = rosen + bounds = [(-3, 3)] + assert_raises(ValueError, + differential_evolution, + func, + bounds, + strategy='abc') + + def test_bounds_checking(self): + # test that the bounds checking works + func = rosen + bounds = [(-3)] + assert_raises(ValueError, + differential_evolution, + func, + bounds) + bounds = [(-3, 3), (3, 4, 5)] + assert_raises(ValueError, + differential_evolution, + func, + bounds) + + # test that we can use a new-type Bounds object + result = differential_evolution(rosen, Bounds([0, 0], [2, 2])) + assert_almost_equal(result.x, (1., 1.)) + + def test_select_samples(self): + # select_samples should return 5 separate random numbers. + limits = np.arange(12., dtype='float64').reshape(2, 6) + bounds = list(zip(limits[0, :], limits[1, :])) + solver = DifferentialEvolutionSolver(None, bounds, popsize=1) + candidate = 0 + r1, r2, r3, r4, r5 = solver._select_samples(candidate, 5) + assert_equal( + len(np.unique(np.array([candidate, r1, r2, r3, r4, r5]))), 6) + + def test_maxiter_stops_solve(self): + # test that if the maximum number of iterations is exceeded + # the solver stops. 
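+        # (As documented for differential_evolution, up to roughly
+        # (maxiter + 1) * popsize * N objective evaluations are performed
+        # without polishing, where N is the number of parameters, so
+        # maxiter=1 keeps this test cheap.)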
+ solver = DifferentialEvolutionSolver(rosen, self.bounds, maxiter=1) + result = solver.solve() + assert_equal(result.success, False) + assert_equal(result.message, + 'Maximum number of iterations has been exceeded.') + + def test_maxfun_stops_solve(self): + # test that if the maximum number of function evaluations is exceeded + # during initialisation the solver stops + solver = DifferentialEvolutionSolver(rosen, self.bounds, maxfun=1, + polish=False) + result = solver.solve() + + assert_equal(result.nfev, 2) + assert_equal(result.success, False) + assert_equal(result.message, + 'Maximum number of function evaluations has ' + 'been exceeded.') + + # test that if the maximum number of function evaluations is exceeded + # during the actual minimisation, then the solver stops. + # Have to turn polishing off, as this will still occur even if maxfun + # is reached. For popsize=5 and len(bounds)=2, then there are only 10 + # function evaluations during initialisation. + solver = DifferentialEvolutionSolver(rosen, + self.bounds, + popsize=5, + polish=False, + maxfun=40) + result = solver.solve() + + assert_equal(result.nfev, 41) + assert_equal(result.success, False) + assert_equal(result.message, + 'Maximum number of function evaluations has ' + 'been exceeded.') + + # now repeat for updating='deferred version + # 47 function evaluations is not a multiple of the population size, + # so maxfun is reached partway through a population evaluation. + solver = DifferentialEvolutionSolver(rosen, + self.bounds, + popsize=5, + polish=False, + maxfun=47, + updating='deferred') + result = solver.solve() + + assert_equal(result.nfev, 47) + assert_equal(result.success, False) + assert_equal(result.message, + 'Maximum number of function evaluations has ' + 'been reached.') + + def test_quadratic(self): + # test the quadratic function from object + solver = DifferentialEvolutionSolver(self.quadratic, + [(-100, 100)], + tol=0.02) + solver.solve() + assert_equal(np.argmin(solver.population_energies), 0) + + def test_quadratic_from_diff_ev(self): + # test the quadratic function from differential_evolution function + differential_evolution(self.quadratic, + [(-100, 100)], + tol=0.02) + + def test_seed_gives_repeatability(self): + result = differential_evolution(self.quadratic, + [(-100, 100)], + polish=False, + seed=1, + tol=0.5) + result2 = differential_evolution(self.quadratic, + [(-100, 100)], + polish=False, + seed=1, + tol=0.5) + assert_equal(result.x, result2.x) + assert_equal(result.nfev, result2.nfev) + + def test_random_generator(self): + # check that np.random.Generator can be used (numpy >= 1.17) + # obtain a np.random.Generator object + rng = np.random.default_rng() + + inits = ['random', 'latinhypercube', 'sobol', 'halton'] + for init in inits: + differential_evolution(self.quadratic, + [(-100, 100)], + polish=False, + seed=rng, + tol=0.5, + init=init) + + def test_exp_runs(self): + # test whether exponential mutation loop runs + solver = DifferentialEvolutionSolver(rosen, + self.bounds, + strategy='best1exp', + maxiter=1) + + solver.solve() + + def test_gh_4511_regression(self): + # This modification of the differential evolution docstring example + # uses a custom popsize that had triggered an off-by-one error. + # Because we do not care about solving the optimization problem in + # this test, we use maxiter=1 to reduce the testing time. 
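+        # As the comments in the body below note, the rounding came from
+        # np.arange; an illustrative check of the failure mode:
+        #
+        #     len(np.arange(0, 1, 1 / 49))   # may not be the intended 49
+        #     len(np.linspace(0, 1, 49))     # always exactly 49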
+ bounds = [(-5, 5), (-5, 5)] + # result = differential_evolution(rosen, bounds, popsize=1815, + # maxiter=1) + + # the original issue arose because of rounding error in arange, with + # linspace being a much better solution. 1815 is quite a large popsize + # to use and results in a long test time (~13s). I used the original + # issue to figure out the lowest number of samples that would cause + # this rounding error to occur, 49. + differential_evolution(rosen, bounds, popsize=49, maxiter=1) + + def test_calculate_population_energies(self): + # if popsize is 3, then the overall generation has size (6,) + solver = DifferentialEvolutionSolver(rosen, self.bounds, popsize=3) + solver._calculate_population_energies(solver.population) + solver._promote_lowest_energy() + assert_equal(np.argmin(solver.population_energies), 0) + + # initial calculation of the energies should require 6 nfev. + assert_equal(solver._nfev, 6) + + def test_iteration(self): + # test that DifferentialEvolutionSolver is iterable + # if popsize is 3, then the overall generation has size (6,) + solver = DifferentialEvolutionSolver(rosen, self.bounds, popsize=3, + maxfun=12) + x, fun = next(solver) + assert_equal(np.size(x, 0), 2) + + # 6 nfev are required for initial calculation of energies, 6 nfev are + # required for the evolution of the 6 population members. + assert_equal(solver._nfev, 12) + + # the next generation should halt because it exceeds maxfun + assert_raises(StopIteration, next, solver) + + # check a proper minimisation can be done by an iterable solver + solver = DifferentialEvolutionSolver(rosen, self.bounds) + _, fun_prev = next(solver) + for i, soln in enumerate(solver): + x_current, fun_current = soln + assert fun_prev >= fun_current + _, fun_prev = x_current, fun_current + # need to have this otherwise the solver would never stop. + if i == 50: + break + + def test_convergence(self): + solver = DifferentialEvolutionSolver(rosen, self.bounds, tol=0.2, + polish=False) + solver.solve() + assert_(solver.convergence < 0.2) + + def test_maxiter_none_GH5731(self): + # Pre 0.17 the previous default for maxiter and maxfun was None. + # the numerical defaults are now 1000 and np.inf. However, some scripts + # will still supply None for both of those, this will raise a TypeError + # in the solve method. 
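+        # i.e. before the gh-5731 fix the None values led to a TypeError
+        # inside solve(); the call below must now simply run to completion.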
+ solver = DifferentialEvolutionSolver(rosen, self.bounds, maxiter=None, + maxfun=None) + solver.solve() + + def test_population_initiation(self): + # test the different modes of population initiation + + # init must be either 'latinhypercube' or 'random' + # raising ValueError is something else is passed in + assert_raises(ValueError, + DifferentialEvolutionSolver, + *(rosen, self.bounds), + **{'init': 'rubbish'}) + + solver = DifferentialEvolutionSolver(rosen, self.bounds) + + # check that population initiation: + # 1) resets _nfev to 0 + # 2) all population energies are np.inf + solver.init_population_random() + assert_equal(solver._nfev, 0) + assert_(np.all(np.isinf(solver.population_energies))) + + solver.init_population_lhs() + assert_equal(solver._nfev, 0) + assert_(np.all(np.isinf(solver.population_energies))) + + solver.init_population_qmc(qmc_engine='halton') + assert_equal(solver._nfev, 0) + assert_(np.all(np.isinf(solver.population_energies))) + + solver = DifferentialEvolutionSolver(rosen, self.bounds, init='sobol') + solver.init_population_qmc(qmc_engine='sobol') + assert_equal(solver._nfev, 0) + assert_(np.all(np.isinf(solver.population_energies))) + + # we should be able to initialize with our own array + population = np.linspace(-1, 3, 10).reshape(5, 2) + solver = DifferentialEvolutionSolver(rosen, self.bounds, + init=population, + strategy='best2bin', + atol=0.01, seed=1, popsize=5) + + assert_equal(solver._nfev, 0) + assert_(np.all(np.isinf(solver.population_energies))) + assert_(solver.num_population_members == 5) + assert_(solver.population_shape == (5, 2)) + + # check that the population was initialized correctly + unscaled_population = np.clip(solver._unscale_parameters(population), + 0, 1) + assert_almost_equal(solver.population[:5], unscaled_population) + + # population values need to be clipped to bounds + assert_almost_equal(np.min(solver.population[:5]), 0) + assert_almost_equal(np.max(solver.population[:5]), 1) + + # shouldn't be able to initialize with an array if it's the wrong shape + # this would have too many parameters + population = np.linspace(-1, 3, 15).reshape(5, 3) + assert_raises(ValueError, + DifferentialEvolutionSolver, + *(rosen, self.bounds), + **{'init': population}) + + # provide an initial solution + # bounds are [(0, 2), (0, 2)] + x0 = np.random.uniform(low=0.0, high=2.0, size=2) + solver = DifferentialEvolutionSolver( + rosen, self.bounds, x0=x0 + ) + # parameters are scaled to unit interval + assert_allclose(solver.population[0], x0 / 2.0) + + def test_x0(self): + # smoke test that checks that x0 is usable. 
+ res = differential_evolution(rosen, self.bounds, x0=[0.2, 0.8]) + assert res.success + + # check what happens if some of the x0 lay outside the bounds + with assert_raises(ValueError): + differential_evolution(rosen, self.bounds, x0=[0.2, 2.1]) + + def test_infinite_objective_function(self): + # Test that there are no problems if the objective function + # returns inf on some runs + def sometimes_inf(x): + if x[0] < .5: + return np.inf + return x[1] + bounds = [(0, 1), (0, 1)] + differential_evolution(sometimes_inf, bounds=bounds, disp=False) + + def test_deferred_updating(self): + # check setting of deferred updating, with default workers + bounds = [(0., 2.), (0., 2.)] + solver = DifferentialEvolutionSolver(rosen, bounds, updating='deferred') + assert_(solver._updating == 'deferred') + assert_(solver._mapwrapper._mapfunc is map) + solver.solve() + + def test_immediate_updating(self): + # check setting of immediate updating, with default workers + bounds = [(0., 2.), (0., 2.)] + solver = DifferentialEvolutionSolver(rosen, bounds) + assert_(solver._updating == 'immediate') + + # Safely forking from a multithreaded process is + # problematic, and deprecated in Python 3.12, so + # we use a slower but portable alternative + # see gh-19848 + ctx = multiprocessing.get_context("spawn") + with ctx.Pool(2) as p: + # should raise a UserWarning because the updating='immediate' + # is being overridden by the workers keyword + with warns(UserWarning): + with DifferentialEvolutionSolver(rosen, bounds, workers=p.map) as s: + pass + assert s._updating == 'deferred' + + def test_parallel(self): + # smoke test for parallelization with deferred updating + bounds = [(0., 2.), (0., 2.)] + with multiprocessing.Pool(2) as p, DifferentialEvolutionSolver( + rosen, bounds, updating='deferred', workers=p.map) as solver: + assert_(solver._mapwrapper.pool is not None) + assert_(solver._updating == 'deferred') + solver.solve() + + with DifferentialEvolutionSolver(rosen, bounds, updating='deferred', + workers=2) as solver: + assert_(solver._mapwrapper.pool is not None) + assert_(solver._updating == 'deferred') + solver.solve() + + def test_converged(self): + solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)]) + solver.solve() + assert_(solver.converged()) + + def test_constraint_violation_fn(self): + def constr_f(x): + return [x[0] + x[1]] + + def constr_f2(x): + return np.array([x[0]**2 + x[1], x[0] - x[1]]) + + nlc = NonlinearConstraint(constr_f, -np.inf, 1.9) + + solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)], + constraints=(nlc)) + + cv = solver._constraint_violation_fn(np.array([1.0, 1.0])) + assert_almost_equal(cv, 0.1) + + nlc2 = NonlinearConstraint(constr_f2, -np.inf, 1.8) + solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)], + constraints=(nlc, nlc2)) + + # for multiple constraints the constraint violations should + # be concatenated. + xs = [(1.2, 1), (2.0, 2.0), (0.5, 0.5)] + vs = [(0.3, 0.64, 0.0), (2.1, 4.2, 0.0), (0, 0, 0)] + + for x, v in zip(xs, vs): + cv = solver._constraint_violation_fn(np.array(x)) + assert_allclose(cv, np.atleast_2d(v)) + + # vectorized calculation of a series of solutions + assert_allclose( + solver._constraint_violation_fn(np.array(xs)), np.array(vs) + ) + + # the following line is used in _calculate_population_feasibilities. + # _constraint_violation_fn returns an (1, M) array when + # x.shape == (N,), i.e. a single solution. Therefore this list + # comprehension should generate (S, 1, M) array. 
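+        # (Shape bookkeeping for the comment above: N is the number of
+        # parameters, S the number of trial solutions and M the number of
+        # constraint components; one (N,) solution maps to a (1, M)
+        # violation row, so stacking S of them gives (S, 1, M).)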
+ constraint_violation = np.array([solver._constraint_violation_fn(x) + for x in np.array(xs)]) + assert constraint_violation.shape == (3, 1, 3) + + # we need reasonable error messages if the constraint function doesn't + # return the right thing + def constr_f3(x): + # returns (S, M), rather than (M, S) + return constr_f2(x).T + + nlc2 = NonlinearConstraint(constr_f3, -np.inf, 1.8) + solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)], + constraints=(nlc, nlc2), + vectorized=False) + solver.vectorized = True + with pytest.raises( + RuntimeError, match="An array returned from a Constraint" + ): + solver._constraint_violation_fn(np.array(xs)) + + def test_constraint_population_feasibilities(self): + def constr_f(x): + return [x[0] + x[1]] + + def constr_f2(x): + return [x[0]**2 + x[1], x[0] - x[1]] + + nlc = NonlinearConstraint(constr_f, -np.inf, 1.9) + + solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)], + constraints=(nlc)) + + # are population feasibilities correct + # [0.5, 0.5] corresponds to scaled values of [1., 1.] + feas, cv = solver._calculate_population_feasibilities( + np.array([[0.5, 0.5], [1., 1.]])) + assert_equal(feas, [False, False]) + assert_almost_equal(cv, np.array([[0.1], [2.1]])) + assert cv.shape == (2, 1) + + nlc2 = NonlinearConstraint(constr_f2, -np.inf, 1.8) + + for vectorize in [False, True]: + solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)], + constraints=(nlc, nlc2), + vectorized=vectorize, + updating='deferred') + + feas, cv = solver._calculate_population_feasibilities( + np.array([[0.5, 0.5], [0.6, 0.5]])) + assert_equal(feas, [False, False]) + assert_almost_equal(cv, np.array([[0.1, 0.2, 0], [0.3, 0.64, 0]])) + + feas, cv = solver._calculate_population_feasibilities( + np.array([[0.5, 0.5], [1., 1.]])) + assert_equal(feas, [False, False]) + assert_almost_equal(cv, np.array([[0.1, 0.2, 0], [2.1, 4.2, 0]])) + assert cv.shape == (2, 3) + + feas, cv = solver._calculate_population_feasibilities( + np.array([[0.25, 0.25], [1., 1.]])) + assert_equal(feas, [True, False]) + assert_almost_equal(cv, np.array([[0.0, 0.0, 0.], [2.1, 4.2, 0]])) + assert cv.shape == (2, 3) + + def test_constraint_solve(self): + def constr_f(x): + return np.array([x[0] + x[1]]) + + nlc = NonlinearConstraint(constr_f, -np.inf, 1.9) + + solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)], + constraints=(nlc)) + + # trust-constr warns if the constraint function is linear + with warns(UserWarning): + res = solver.solve() + + assert constr_f(res.x) <= 1.9 + assert res.success + + def test_impossible_constraint(self): + def constr_f(x): + return np.array([x[0] + x[1]]) + + nlc = NonlinearConstraint(constr_f, -np.inf, -1) + + solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)], + constraints=(nlc), popsize=3, + seed=1) + + # a UserWarning is issued because the 'trust-constr' polishing is + # attempted on the least infeasible solution found. + with warns(UserWarning): + res = solver.solve() + + assert res.maxcv > 0 + assert not res.success + + # test _promote_lowest_energy works when none of the population is + # feasible. In this case, the solution with the lowest constraint + # violation should be promoted. 
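+        # (Hedged sketch of the promotion rule checked below: the feasible
+        # member with the lowest energy -- or, failing any feasible member,
+        # the one with the smallest total constraint violation -- is swapped
+        # into index 0 of the population.)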
+ solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)], + constraints=(nlc), polish=False) + next(solver) + assert not solver.feasible.all() + assert not np.isfinite(solver.population_energies).all() + + # now swap two of the entries in the population + l = 20 + cv = solver.constraint_violation[0] + + solver.population_energies[[0, l]] = solver.population_energies[[l, 0]] + solver.population[[0, l], :] = solver.population[[l, 0], :] + solver.constraint_violation[[0, l], :] = ( + solver.constraint_violation[[l, 0], :]) + + solver._promote_lowest_energy() + assert_equal(solver.constraint_violation[0], cv) + + def test_accept_trial(self): + # _accept_trial(self, energy_trial, feasible_trial, cv_trial, + # energy_orig, feasible_orig, cv_orig) + def constr_f(x): + return [x[0] + x[1]] + nlc = NonlinearConstraint(constr_f, -np.inf, 1.9) + solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)], + constraints=(nlc)) + fn = solver._accept_trial + # both solutions are feasible, select lower energy + assert fn(0.1, True, np.array([0.]), 1.0, True, np.array([0.])) + assert (fn(1.0, True, np.array([0.0]), 0.1, True, np.array([0.0])) is False) + assert fn(0.1, True, np.array([0.]), 0.1, True, np.array([0.])) + + # trial is feasible, original is not + assert fn(9.9, True, np.array([0.]), 1.0, False, np.array([1.])) + + # trial and original are infeasible + # cv_trial have to be <= cv_original to be better + assert (fn(0.1, False, np.array([0.5, 0.5]), + 1.0, False, np.array([1., 1.0]))) + assert (fn(0.1, False, np.array([0.5, 0.5]), + 1.0, False, np.array([1., 0.50]))) + assert not (fn(1.0, False, np.array([0.5, 0.5]), + 1.0, False, np.array([1.0, 0.4]))) + + def test_constraint_wrapper(self): + lb = np.array([0, 20, 30]) + ub = np.array([0.5, np.inf, 70]) + x0 = np.array([1, 2, 3]) + pc = _ConstraintWrapper(Bounds(lb, ub), x0) + assert (pc.violation(x0) > 0).any() + assert (pc.violation([0.25, 21, 31]) == 0).all() + + # check vectorized Bounds constraint + xs = np.arange(1, 16).reshape(5, 3) + violations = [] + for x in xs: + violations.append(pc.violation(x)) + np.testing.assert_allclose(pc.violation(xs.T), np.array(violations).T) + + x0 = np.array([1, 2, 3, 4]) + A = np.array([[1, 2, 3, 4], [5, 0, 0, 6], [7, 0, 8, 0]]) + pc = _ConstraintWrapper(LinearConstraint(A, -np.inf, 0), x0) + assert (pc.violation(x0) > 0).any() + assert (pc.violation([-10, 2, -10, 4]) == 0).all() + + # check vectorized LinearConstraint, for 7 lots of parameter vectors + # with each parameter vector being 4 long, with 3 constraints + # xs is the same shape as stored in the differential evolution + # population, but it's sent to the violation function as (len(x), M) + xs = np.arange(1, 29).reshape(7, 4) + violations = [] + for x in xs: + violations.append(pc.violation(x)) + np.testing.assert_allclose(pc.violation(xs.T), np.array(violations).T) + + pc = _ConstraintWrapper(LinearConstraint(csr_matrix(A), -np.inf, 0), + x0) + assert (pc.violation(x0) > 0).any() + assert (pc.violation([-10, 2, -10, 4]) == 0).all() + + def fun(x): + return A.dot(x) + + nonlinear = NonlinearConstraint(fun, -np.inf, 0) + pc = _ConstraintWrapper(nonlinear, [-10, 2, -10, 4]) + assert (pc.violation(x0) > 0).any() + assert (pc.violation([-10, 2, -10, 4]) == 0).all() + + def test_constraint_wrapper_violation(self): + def cons_f(x): + # written in vectorised form to accept an array of (N, S) + # returning (M, S) + # where N is the number of parameters, + # S is the number of solution vectors to be examined, + # and M is the number of constraint 
components + return np.array([x[0] ** 2 + x[1], + x[0] ** 2 - x[1]]) + + nlc = NonlinearConstraint(cons_f, [-1, -0.8500], [2, 2]) + pc = _ConstraintWrapper(nlc, [0.5, 1]) + assert np.size(pc.bounds[0]) == 2 + + xs = [(0.5, 1), (0.5, 1.2), (1.2, 1.2), (0.1, -1.2), (0.1, 2.0)] + vs = [(0, 0), (0, 0.1), (0.64, 0), (0.19, 0), (0.01, 1.14)] + + for x, v in zip(xs, vs): + assert_allclose(pc.violation(x), v) + + # now check that we can vectorize the constraint wrapper + assert_allclose(pc.violation(np.array(xs).T), + np.array(vs).T) + assert pc.fun(np.array(xs).T).shape == (2, len(xs)) + assert pc.violation(np.array(xs).T).shape == (2, len(xs)) + assert pc.num_constr == 2 + assert pc.parameter_count == 2 + + def test_matrix_linear_constraint(self): + # gh20041 supplying an np.matrix to construct a LinearConstraint caused + # _ConstraintWrapper to start returning constraint violations of the + # wrong shape. + with suppress_warnings() as sup: + sup.filter(PendingDeprecationWarning) + matrix = np.matrix([[1, 1, 1, 1.], + [2, 2, 2, 2.]]) + lc = LinearConstraint(matrix, 0, 1) + x0 = np.ones(4) + cw = _ConstraintWrapper(lc, x0) + # the shape of the constraint violation should be the same as the number + # of constraints applied. + assert cw.violation(x0).shape == (2,) + + # let's try a vectorised violation call. + xtrial = np.arange(4 * 5).reshape(4, 5) + assert cw.violation(xtrial).shape == (2, 5) + + + def test_L1(self): + # Lampinen ([5]) test problem 1 + + def f(x): + x = np.hstack(([0], x)) # 1-indexed to match reference + fun = np.sum(5*x[1:5]) - 5*x[1:5]@x[1:5] - np.sum(x[5:]) + return fun + + A = np.zeros((10, 14)) # 1-indexed to match reference + A[1, [1, 2, 10, 11]] = 2, 2, 1, 1 + A[2, [1, 10]] = -8, 1 + A[3, [4, 5, 10]] = -2, -1, 1 + A[4, [1, 3, 10, 11]] = 2, 2, 1, 1 + A[5, [2, 11]] = -8, 1 + A[6, [6, 7, 11]] = -2, -1, 1 + A[7, [2, 3, 11, 12]] = 2, 2, 1, 1 + A[8, [3, 12]] = -8, 1 + A[9, [8, 9, 12]] = -2, -1, 1 + A = A[1:, 1:] + + b = np.array([10, 0, 0, 10, 0, 0, 10, 0, 0]) + + L = LinearConstraint(A, -np.inf, b) + + bounds = [(0, 1)]*9 + [(0, 100)]*3 + [(0, 1)] + + # using a lower popsize to speed the test up + res = differential_evolution(f, bounds, strategy='best1bin', seed=1234, + constraints=(L), popsize=2) + + x_opt = (1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 1) + f_opt = -15 + + assert_allclose(f(x_opt), f_opt, atol=6e-4) + assert res.success + assert_allclose(res.x, x_opt, atol=6e-4) + assert_allclose(res.fun, f_opt, atol=5e-3) + assert_(np.all(A@res.x <= b)) + assert_(np.all(res.x >= np.array(bounds)[:, 0])) + assert_(np.all(res.x <= np.array(bounds)[:, 1])) + + # now repeat the same solve, using the same overall constraints, + # but using a sparse matrix for the LinearConstraint instead of an + # array + + L = LinearConstraint(csr_matrix(A), -np.inf, b) + + # using a lower popsize to speed the test up + res = differential_evolution(f, bounds, strategy='best1bin', seed=1234, + constraints=(L), popsize=2) + + assert_allclose(f(x_opt), f_opt) + assert res.success + assert_allclose(res.x, x_opt, atol=5e-4) + assert_allclose(res.fun, f_opt, atol=5e-3) + assert_(np.all(A@res.x <= b)) + assert_(np.all(res.x >= np.array(bounds)[:, 0])) + assert_(np.all(res.x <= np.array(bounds)[:, 1])) + + # now repeat the same solve, using the same overall constraints, + # but specify half the constraints in terms of LinearConstraint, + # and the other half by NonlinearConstraint + def c1(x): + x = np.hstack(([0], x)) + return [2*x[2] + 2*x[3] + x[11] + x[12], + -8*x[3] + x[12]] + + def c2(x): + x = 
np.hstack(([0], x)) + return -2*x[8] - x[9] + x[12] + + L = LinearConstraint(A[:5, :], -np.inf, b[:5]) + L2 = LinearConstraint(A[5:6, :], -np.inf, b[5:6]) + N = NonlinearConstraint(c1, -np.inf, b[6:8]) + N2 = NonlinearConstraint(c2, -np.inf, b[8:9]) + constraints = (L, N, L2, N2) + + with suppress_warnings() as sup: + sup.filter(UserWarning) + res = differential_evolution(f, bounds, strategy='rand1bin', + seed=1234, constraints=constraints, + popsize=2) + + assert_allclose(res.x, x_opt, atol=6e-4) + assert_allclose(res.fun, f_opt, atol=5e-3) + assert_(np.all(A@res.x <= b)) + assert_(np.all(res.x >= np.array(bounds)[:, 0])) + assert_(np.all(res.x <= np.array(bounds)[:, 1])) + + def test_L2(self): + # Lampinen ([5]) test problem 2 + + def f(x): + x = np.hstack(([0], x)) # 1-indexed to match reference + fun = ((x[1]-10)**2 + 5*(x[2]-12)**2 + x[3]**4 + 3*(x[4]-11)**2 + + 10*x[5]**6 + 7*x[6]**2 + x[7]**4 - 4*x[6]*x[7] - 10*x[6] - + 8*x[7]) + return fun + + def c1(x): + x = np.hstack(([0], x)) # 1-indexed to match reference + return [127 - 2*x[1]**2 - 3*x[2]**4 - x[3] - 4*x[4]**2 - 5*x[5], + 196 - 23*x[1] - x[2]**2 - 6*x[6]**2 + 8*x[7], + 282 - 7*x[1] - 3*x[2] - 10*x[3]**2 - x[4] + x[5], + -4*x[1]**2 - x[2]**2 + 3*x[1]*x[2] - 2*x[3]**2 - + 5*x[6] + 11*x[7]] + + N = NonlinearConstraint(c1, 0, np.inf) + bounds = [(-10, 10)]*7 + constraints = (N) + + with suppress_warnings() as sup: + sup.filter(UserWarning) + res = differential_evolution(f, bounds, strategy='rand1bin', + seed=1234, constraints=constraints) + + f_opt = 680.6300599487869 + x_opt = (2.330499, 1.951372, -0.4775414, 4.365726, + -0.6244870, 1.038131, 1.594227) + + assert_allclose(f(x_opt), f_opt) + assert_allclose(res.fun, f_opt) + assert_allclose(res.x, x_opt, atol=1e-5) + assert res.success + assert_(np.all(np.array(c1(res.x)) >= 0)) + assert_(np.all(res.x >= np.array(bounds)[:, 0])) + assert_(np.all(res.x <= np.array(bounds)[:, 1])) + + def test_L3(self): + # Lampinen ([5]) test problem 3 + + def f(x): + x = np.hstack(([0], x)) # 1-indexed to match reference + fun = (x[1]**2 + x[2]**2 + x[1]*x[2] - 14*x[1] - 16*x[2] + + (x[3]-10)**2 + 4*(x[4]-5)**2 + (x[5]-3)**2 + 2*(x[6]-1)**2 + + 5*x[7]**2 + 7*(x[8]-11)**2 + 2*(x[9]-10)**2 + + (x[10] - 7)**2 + 45 + ) + return fun # maximize + + A = np.zeros((4, 11)) + A[1, [1, 2, 7, 8]] = -4, -5, 3, -9 + A[2, [1, 2, 7, 8]] = -10, 8, 17, -2 + A[3, [1, 2, 9, 10]] = 8, -2, -5, 2 + A = A[1:, 1:] + b = np.array([-105, 0, -12]) + + def c1(x): + x = np.hstack(([0], x)) # 1-indexed to match reference + return [3*x[1] - 6*x[2] - 12*(x[9]-8)**2 + 7*x[10], + -3*(x[1]-2)**2 - 4*(x[2]-3)**2 - 2*x[3]**2 + 7*x[4] + 120, + -x[1]**2 - 2*(x[2]-2)**2 + 2*x[1]*x[2] - 14*x[5] + 6*x[6], + -5*x[1]**2 - 8*x[2] - (x[3]-6)**2 + 2*x[4] + 40, + -0.5*(x[1]-8)**2 - 2*(x[2]-4)**2 - 3*x[5]**2 + x[6] + 30] + + L = LinearConstraint(A, b, np.inf) + N = NonlinearConstraint(c1, 0, np.inf) + bounds = [(-10, 10)]*10 + constraints = (L, N) + + with suppress_warnings() as sup: + sup.filter(UserWarning) + res = differential_evolution(f, bounds, seed=1234, + constraints=constraints, popsize=3) + + x_opt = (2.171996, 2.363683, 8.773926, 5.095984, 0.9906548, + 1.430574, 1.321644, 9.828726, 8.280092, 8.375927) + f_opt = 24.3062091 + + assert_allclose(f(x_opt), f_opt, atol=1e-5) + assert_allclose(res.x, x_opt, atol=1e-6) + assert_allclose(res.fun, f_opt, atol=1e-5) + assert res.success + assert_(np.all(A @ res.x >= b)) + assert_(np.all(np.array(c1(res.x)) >= 0)) + assert_(np.all(res.x >= np.array(bounds)[:, 0])) + assert_(np.all(res.x <= 
np.array(bounds)[:, 1])) + + def test_L4(self): + # Lampinen ([5]) test problem 4 + def f(x): + return np.sum(x[:3]) + + A = np.zeros((4, 9)) + A[1, [4, 6]] = 0.0025, 0.0025 + A[2, [5, 7, 4]] = 0.0025, 0.0025, -0.0025 + A[3, [8, 5]] = 0.01, -0.01 + A = A[1:, 1:] + b = np.array([1, 1, 1]) + + def c1(x): + x = np.hstack(([0], x)) # 1-indexed to match reference + return [x[1]*x[6] - 833.33252*x[4] - 100*x[1] + 83333.333, + x[2]*x[7] - 1250*x[5] - x[2]*x[4] + 1250*x[4], + x[3]*x[8] - 1250000 - x[3]*x[5] + 2500*x[5]] + + L = LinearConstraint(A, -np.inf, 1) + N = NonlinearConstraint(c1, 0, np.inf) + + bounds = [(100, 10000)] + [(1000, 10000)]*2 + [(10, 1000)]*5 + constraints = (L, N) + + with suppress_warnings() as sup: + sup.filter(UserWarning) + res = differential_evolution(f, bounds, strategy='rand1bin', + seed=1234, constraints=constraints, + popsize=3) + + f_opt = 7049.248 + + x_opt = [579.306692, 1359.97063, 5109.9707, 182.0177, 295.601172, + 217.9823, 286.416528, 395.601172] + + assert_allclose(f(x_opt), f_opt, atol=0.001) + assert_allclose(res.fun, f_opt, atol=0.001) + + # use higher tol here for 32-bit Windows, see gh-11693 + if (platform.system() == 'Windows' and np.dtype(np.intp).itemsize < 8): + assert_allclose(res.x, x_opt, rtol=2.4e-6, atol=0.0035) + else: + # tolerance determined from macOS + MKL failure, see gh-12701 + assert_allclose(res.x, x_opt, rtol=5e-6, atol=0.0024) + + assert res.success + assert_(np.all(A @ res.x <= b)) + assert_(np.all(np.array(c1(res.x)) >= 0)) + assert_(np.all(res.x >= np.array(bounds)[:, 0])) + assert_(np.all(res.x <= np.array(bounds)[:, 1])) + + def test_L5(self): + # Lampinen ([5]) test problem 5 + + def f(x): + x = np.hstack(([0], x)) # 1-indexed to match reference + fun = (np.sin(2*np.pi*x[1])**3*np.sin(2*np.pi*x[2]) / + (x[1]**3*(x[1]+x[2]))) + return -fun # maximize + + def c1(x): + x = np.hstack(([0], x)) # 1-indexed to match reference + return [x[1]**2 - x[2] + 1, + 1 - x[1] + (x[2]-4)**2] + + N = NonlinearConstraint(c1, -np.inf, 0) + bounds = [(0, 10)]*2 + constraints = (N) + + res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234, + constraints=constraints) + + x_opt = (1.22797135, 4.24537337) + f_opt = -0.095825 + assert_allclose(f(x_opt), f_opt, atol=2e-5) + assert_allclose(res.fun, f_opt, atol=1e-4) + assert res.success + assert_(np.all(np.array(c1(res.x)) <= 0)) + assert_(np.all(res.x >= np.array(bounds)[:, 0])) + assert_(np.all(res.x <= np.array(bounds)[:, 1])) + + def test_L6(self): + # Lampinen ([5]) test problem 6 + def f(x): + x = np.hstack(([0], x)) # 1-indexed to match reference + fun = (x[1]-10)**3 + (x[2] - 20)**3 + return fun + + def c1(x): + x = np.hstack(([0], x)) # 1-indexed to match reference + return [(x[1]-5)**2 + (x[2] - 5)**2 - 100, + -(x[1]-6)**2 - (x[2] - 5)**2 + 82.81] + + N = NonlinearConstraint(c1, 0, np.inf) + bounds = [(13, 100), (0, 100)] + constraints = (N) + res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234, + constraints=constraints, tol=1e-7) + x_opt = (14.095, 0.84296) + f_opt = -6961.814744 + + assert_allclose(f(x_opt), f_opt, atol=1e-6) + assert_allclose(res.fun, f_opt, atol=0.001) + assert_allclose(res.x, x_opt, atol=1e-4) + assert res.success + assert_(np.all(np.array(c1(res.x)) >= 0)) + assert_(np.all(res.x >= np.array(bounds)[:, 0])) + assert_(np.all(res.x <= np.array(bounds)[:, 1])) + + def test_L7(self): + # Lampinen ([5]) test problem 7 + def f(x): + x = np.hstack(([0], x)) # 1-indexed to match reference + fun = (5.3578547*x[3]**2 + 0.8356891*x[1]*x[5] + + 
37.293239*x[1] - 40792.141) + return fun + + def c1(x): + x = np.hstack(([0], x)) # 1-indexed to match reference + return [ + 85.334407 + 0.0056858*x[2]*x[5] + 0.0006262*x[1]*x[4] - + 0.0022053*x[3]*x[5], + + 80.51249 + 0.0071317*x[2]*x[5] + 0.0029955*x[1]*x[2] + + 0.0021813*x[3]**2, + + 9.300961 + 0.0047026*x[3]*x[5] + 0.0012547*x[1]*x[3] + + 0.0019085*x[3]*x[4] + ] + + N = NonlinearConstraint(c1, [0, 90, 20], [92, 110, 25]) + + bounds = [(78, 102), (33, 45)] + [(27, 45)]*3 + constraints = (N) + + res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234, + constraints=constraints) + + # using our best solution, rather than Lampinen/Koziel. Koziel solution + # doesn't satisfy constraints, Lampinen f_opt just plain wrong. + x_opt = [78.00000686, 33.00000362, 29.99526064, 44.99999971, + 36.77579979] + + f_opt = -30665.537578 + + assert_allclose(f(x_opt), f_opt) + assert_allclose(res.x, x_opt, atol=1e-3) + assert_allclose(res.fun, f_opt, atol=1e-3) + + assert res.success + assert_(np.all(np.array(c1(res.x)) >= np.array([0, 90, 20]))) + assert_(np.all(np.array(c1(res.x)) <= np.array([92, 110, 25]))) + assert_(np.all(res.x >= np.array(bounds)[:, 0])) + assert_(np.all(res.x <= np.array(bounds)[:, 1])) + + @pytest.mark.slow + @pytest.mark.xfail(platform.machine() == 'ppc64le', + reason="fails on ppc64le") + def test_L8(self): + def f(x): + x = np.hstack(([0], x)) # 1-indexed to match reference + fun = 3*x[1] + 0.000001*x[1]**3 + 2*x[2] + 0.000002/3*x[2]**3 + return fun + + A = np.zeros((3, 5)) + A[1, [4, 3]] = 1, -1 + A[2, [3, 4]] = 1, -1 + A = A[1:, 1:] + b = np.array([-.55, -.55]) + + def c1(x): + x = np.hstack(([0], x)) # 1-indexed to match reference + return [ + 1000*np.sin(-x[3]-0.25) + 1000*np.sin(-x[4]-0.25) + + 894.8 - x[1], + 1000*np.sin(x[3]-0.25) + 1000*np.sin(x[3]-x[4]-0.25) + + 894.8 - x[2], + 1000*np.sin(x[4]-0.25) + 1000*np.sin(x[4]-x[3]-0.25) + + 1294.8 + ] + L = LinearConstraint(A, b, np.inf) + N = NonlinearConstraint(c1, np.full(3, -0.001), np.full(3, 0.001)) + + bounds = [(0, 1200)]*2+[(-.55, .55)]*2 + constraints = (L, N) + + with suppress_warnings() as sup: + sup.filter(UserWarning) + # original Lampinen test was with rand1bin, but that takes a + # huge amount of CPU time. 
Changing strategy to best1bin speeds + # things up a lot + res = differential_evolution(f, bounds, strategy='best1bin', + seed=1234, constraints=constraints, + maxiter=5000) + + x_opt = (679.9453, 1026.067, 0.1188764, -0.3962336) + f_opt = 5126.4981 + + assert_allclose(f(x_opt), f_opt, atol=1e-3) + assert_allclose(res.x[:2], x_opt[:2], atol=2e-3) + assert_allclose(res.x[2:], x_opt[2:], atol=2e-3) + assert_allclose(res.fun, f_opt, atol=2e-2) + assert res.success + assert_(np.all(A@res.x >= b)) + assert_(np.all(np.array(c1(res.x)) >= -0.001)) + assert_(np.all(np.array(c1(res.x)) <= 0.001)) + assert_(np.all(res.x >= np.array(bounds)[:, 0])) + assert_(np.all(res.x <= np.array(bounds)[:, 1])) + + def test_L9(self): + # Lampinen ([5]) test problem 9 + + def f(x): + x = np.hstack(([0], x)) # 1-indexed to match reference + return x[1]**2 + (x[2]-1)**2 + + def c1(x): + x = np.hstack(([0], x)) # 1-indexed to match reference + return [x[2] - x[1]**2] + + N = NonlinearConstraint(c1, [-.001], [0.001]) + + bounds = [(-1, 1)]*2 + constraints = (N) + res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234, + constraints=constraints) + + x_opt = [np.sqrt(2)/2, 0.5] + f_opt = 0.75 + + assert_allclose(f(x_opt), f_opt) + assert_allclose(np.abs(res.x), x_opt, atol=1e-3) + assert_allclose(res.fun, f_opt, atol=1e-3) + assert res.success + assert_(np.all(np.array(c1(res.x)) >= -0.001)) + assert_(np.all(np.array(c1(res.x)) <= 0.001)) + assert_(np.all(res.x >= np.array(bounds)[:, 0])) + assert_(np.all(res.x <= np.array(bounds)[:, 1])) + + def test_integrality(self): + # test fitting discrete distribution to data + rng = np.random.default_rng(6519843218105) + dist = stats.nbinom + shapes = (5, 0.5) + x = dist.rvs(*shapes, size=10000, random_state=rng) + + def func(p, *args): + dist, x = args + # negative log-likelihood function + ll = -np.log(dist.pmf(x, *p)).sum(axis=-1) + if np.isnan(ll): # occurs when x is outside of support + ll = np.inf # we don't want that + return ll + + integrality = [True, False] + bounds = [(1, 18), (0, 0.95)] + + res = differential_evolution(func, bounds, args=(dist, x), + integrality=integrality, polish=False, + seed=rng) + # tolerance has to be fairly relaxed for the second parameter + # because we're fitting a distribution to random variates. + assert res.x[0] == 5 + assert_allclose(res.x, shapes, rtol=0.025) + + # check that we can still use integrality constraints with polishing + res2 = differential_evolution(func, bounds, args=(dist, x), + integrality=integrality, polish=True, + seed=rng) + + def func2(p, *args): + n, dist, x = args + return func(np.array([n, p[0]]), dist, x) + + # compare the DE derived solution to an LBFGSB solution (that doesn't + # have to find the integral values). Note we're setting x0 to be the + # output from the first DE result, thereby making the polishing step + # and this minimisation pretty much equivalent. 
+ LBFGSB = minimize(func2, res2.x[1], args=(5, dist, x), + bounds=[(0, 0.95)]) + assert_allclose(res2.x[1], LBFGSB.x) + assert res2.fun <= res.fun + + def test_integrality_limits(self): + def f(x): + return x + + integrality = [True, False, True] + bounds = [(0.2, 1.1), (0.9, 2.2), (3.3, 4.9)] + + # no integrality constraints + solver = DifferentialEvolutionSolver(f, bounds=bounds, polish=False, + integrality=False) + assert_allclose(solver.limits[0], [0.2, 0.9, 3.3]) + assert_allclose(solver.limits[1], [1.1, 2.2, 4.9]) + + # with integrality constraints + solver = DifferentialEvolutionSolver(f, bounds=bounds, polish=False, + integrality=integrality) + assert_allclose(solver.limits[0], [0.5, 0.9, 3.5]) + assert_allclose(solver.limits[1], [1.5, 2.2, 4.5]) + assert_equal(solver.integrality, [True, False, True]) + assert solver.polish is False + + bounds = [(-1.2, -0.9), (0.9, 2.2), (-10.3, 4.1)] + solver = DifferentialEvolutionSolver(f, bounds=bounds, polish=False, + integrality=integrality) + assert_allclose(solver.limits[0], [-1.5, 0.9, -10.5]) + assert_allclose(solver.limits[1], [-0.5, 2.2, 4.5]) + + # A lower bound of -1.2 is converted to + # np.nextafter(np.ceil(-1.2) - 0.5, np.inf) + # with a similar process to the upper bound. Check that the + # conversions work + assert_allclose(np.round(solver.limits[0]), [-1.0, 1.0, -10.0]) + assert_allclose(np.round(solver.limits[1]), [-1.0, 2.0, 4.0]) + + bounds = [(-10.2, -8.1), (0.9, 2.2), (-10.9, -9.9999)] + solver = DifferentialEvolutionSolver(f, bounds=bounds, polish=False, + integrality=integrality) + assert_allclose(solver.limits[0], [-10.5, 0.9, -10.5]) + assert_allclose(solver.limits[1], [-8.5, 2.2, -9.5]) + + bounds = [(-10.2, -10.1), (0.9, 2.2), (-10.9, -9.9999)] + with pytest.raises(ValueError, match='One of the integrality'): + DifferentialEvolutionSolver(f, bounds=bounds, polish=False, + integrality=integrality) + + def test_vectorized(self): + def quadratic(x): + return np.sum(x**2) + + def quadratic_vec(x): + return np.sum(x**2, axis=0) + + # A vectorized function needs to accept (len(x), S) and return (S,) + with pytest.raises(RuntimeError, match='The vectorized function'): + differential_evolution(quadratic, self.bounds, + vectorized=True, updating='deferred') + + # vectorized overrides the updating keyword, check for warning + with warns(UserWarning, match="differential_evolution: the 'vector"): + differential_evolution(quadratic_vec, self.bounds, + vectorized=True) + + # vectorized defers to the workers keyword, check for warning + with warns(UserWarning, match="differential_evolution: the 'workers"): + differential_evolution(quadratic_vec, self.bounds, + vectorized=True, workers=map, + updating='deferred') + + ncalls = [0] + + def rosen_vec(x): + ncalls[0] += 1 + return rosen(x) + + bounds = [(0, 10), (0, 10)] + res1 = differential_evolution(rosen, bounds, updating='deferred', + seed=1) + res2 = differential_evolution(rosen_vec, bounds, vectorized=True, + updating='deferred', seed=1) + + # the two minimisation runs should be functionally equivalent + assert_allclose(res1.x, res2.x) + assert ncalls[0] == res2.nfev + assert res1.nit == res2.nit + + def test_vectorized_constraints(self): + def constr_f(x): + return np.array([x[0] + x[1]]) + + def constr_f2(x): + return np.array([x[0]**2 + x[1], x[0] - x[1]]) + + nlc1 = NonlinearConstraint(constr_f, -np.inf, 1.9) + nlc2 = NonlinearConstraint(constr_f2, (0.9, 0.5), (2.0, 2.0)) + + def rosen_vec(x): + # accept an (len(x0), S) array, returning a (S,) array + v = 100 * (x[1:] - 
x[:-1]**2.0)**2.0 + v += (1 - x[:-1])**2.0 + return np.squeeze(v) + + bounds = [(0, 10), (0, 10)] + + res1 = differential_evolution(rosen, bounds, updating='deferred', + seed=1, constraints=[nlc1, nlc2], + polish=False) + res2 = differential_evolution(rosen_vec, bounds, vectorized=True, + updating='deferred', seed=1, + constraints=[nlc1, nlc2], + polish=False) + # the two minimisation runs should be functionally equivalent + assert_allclose(res1.x, res2.x) + + def test_constraint_violation_error_message(self): + + def func(x): + return np.cos(x[0]) + np.sin(x[1]) + + # Intentionally infeasible constraints. + c0 = NonlinearConstraint(lambda x: x[1] - (x[0]-1)**2, 0, np.inf) + c1 = NonlinearConstraint(lambda x: x[1] + x[0]**2, -np.inf, 0) + + result = differential_evolution(func, + bounds=[(-1, 2), (-1, 1)], + constraints=[c0, c1], + maxiter=10, + polish=False, + seed=864197532) + assert result.success is False + # The numerical value in the error message might be sensitive to + # changes in the implementation. It can be updated if the code is + # changed. The essential part of the test is that there is a number + # after the '=', so if necessary, the text could be reduced to, say, + # "MAXCV = 0.". + assert "MAXCV = 0.414" in result.message + + def test_strategy_fn(self): + # examines ability to customize strategy by mimicking one of the + # in-built strategies and comparing to the actual in-built strategy. + parameter_count = 4 + popsize = 10 + bounds = [(0, 10.)] * parameter_count + total_popsize = parameter_count * popsize + mutation = 0.8 + recombination = 0.7 + + def custom_strategy_fn(candidate, population, rng=None): + trial = np.copy(population[candidate]) + fill_point = rng.choice(parameter_count) + + pool = np.arange(total_popsize) + rng.shuffle(pool) + + idxs = [] + while len(idxs) < 2 and len(pool) > 0: + idx = pool[0] + pool = pool[1:] + if idx != candidate: + idxs.append(idx) + + r0, r1 = idxs[:2] + + bprime = (population[0] + mutation * + (population[r0] - population[r1])) + + crossovers = rng.uniform(size=parameter_count) + crossovers = crossovers < recombination + crossovers[fill_point] = True + trial = np.where(crossovers, bprime, trial) + return trial + + solver = DifferentialEvolutionSolver( + rosen, + bounds, + popsize=popsize, + recombination=recombination, + mutation=mutation, + maxiter=2, + strategy=custom_strategy_fn, + seed=10, + polish=False + ) + assert solver.strategy is custom_strategy_fn + res = solver.solve() + + res2 = differential_evolution( + rosen, + bounds, + mutation=mutation, + popsize=popsize, + recombination=recombination, + maxiter=2, + strategy='best1bin', + polish=False, + seed=10 + ) + assert_allclose(res.population, res2.population) + assert_allclose(res.x, res2.x) + + def custom_strategy_fn(candidate, population, rng=None): + return np.array([1.0, 2.0]) + + with pytest.raises(RuntimeError, match="strategy*"): + differential_evolution( + rosen, + bounds, + strategy=custom_strategy_fn + ) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test__dual_annealing.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test__dual_annealing.py new file mode 100644 index 0000000000000000000000000000000000000000..9a44333d0a8199195a2b35af3f4badf615712b2a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test__dual_annealing.py @@ -0,0 +1,379 @@ +# Dual annealing unit tests implementation. +# Copyright (c) 2018 Sylvain Gubian , +# Yang Xiang +# Author: Sylvain Gubian, PMP S.A. 
+""" +Unit tests for the dual annealing global optimizer +""" +from scipy.optimize import dual_annealing, Bounds + +from scipy.optimize._dual_annealing import EnergyState +from scipy.optimize._dual_annealing import LocalSearchWrapper +from scipy.optimize._dual_annealing import ObjectiveFunWrapper +from scipy.optimize._dual_annealing import StrategyChain +from scipy.optimize._dual_annealing import VisitingDistribution +from scipy.optimize import rosen, rosen_der +import pytest +import numpy as np +from numpy.testing import assert_equal, assert_allclose, assert_array_less +from pytest import raises as assert_raises +from scipy._lib._util import check_random_state + + +class TestDualAnnealing: + + def setup_method(self): + # A function that returns always infinity for initialization tests + self.weirdfunc = lambda x: np.inf + # 2-D bounds for testing function + self.ld_bounds = [(-5.12, 5.12)] * 2 + # 4-D bounds for testing function + self.hd_bounds = self.ld_bounds * 4 + # Number of values to be generated for testing visit function + self.nbtestvalues = 5000 + self.high_temperature = 5230 + self.low_temperature = 0.1 + self.qv = 2.62 + self.seed = 1234 + self.rs = check_random_state(self.seed) + self.nb_fun_call = 0 + self.ngev = 0 + + def callback(self, x, f, context): + # For testing callback mechanism. Should stop for e <= 1 as + # the callback function returns True + if f <= 1.0: + return True + + def func(self, x, args=()): + # Using Rastrigin function for performing tests + if args: + shift = args + else: + shift = 0 + y = np.sum((x - shift) ** 2 - 10 * np.cos(2 * np.pi * ( + x - shift))) + 10 * np.size(x) + shift + self.nb_fun_call += 1 + return y + + def rosen_der_wrapper(self, x, args=()): + self.ngev += 1 + return rosen_der(x, *args) + + # FIXME: there are some discontinuities in behaviour as a function of `qv`, + # this needs investigating - see gh-12384 + @pytest.mark.parametrize('qv', [1.1, 1.41, 2, 2.62, 2.9]) + def test_visiting_stepping(self, qv): + lu = list(zip(*self.ld_bounds)) + lower = np.array(lu[0]) + upper = np.array(lu[1]) + dim = lower.size + vd = VisitingDistribution(lower, upper, qv, self.rs) + values = np.zeros(dim) + x_step_low = vd.visiting(values, 0, self.high_temperature) + # Make sure that only the first component is changed + assert_equal(np.not_equal(x_step_low, 0), True) + values = np.zeros(dim) + x_step_high = vd.visiting(values, dim, self.high_temperature) + # Make sure that component other than at dim has changed + assert_equal(np.not_equal(x_step_high[0], 0), True) + + @pytest.mark.parametrize('qv', [2.25, 2.62, 2.9]) + def test_visiting_dist_high_temperature(self, qv): + lu = list(zip(*self.ld_bounds)) + lower = np.array(lu[0]) + upper = np.array(lu[1]) + vd = VisitingDistribution(lower, upper, qv, self.rs) + # values = np.zeros(self.nbtestvalues) + # for i in np.arange(self.nbtestvalues): + # values[i] = vd.visit_fn(self.high_temperature) + values = vd.visit_fn(self.high_temperature, self.nbtestvalues) + + # Visiting distribution is a distorted version of Cauchy-Lorentz + # distribution, and as no 1st and higher moments (no mean defined, + # no variance defined). 
+        # Check that large tail values are generated
+        assert_array_less(np.min(values), 1e-10)
+        assert_array_less(1e+10, np.max(values))
+
+    def test_reset(self):
+        owf = ObjectiveFunWrapper(self.weirdfunc)
+        lu = list(zip(*self.ld_bounds))
+        lower = np.array(lu[0])
+        upper = np.array(lu[1])
+        es = EnergyState(lower, upper)
+        assert_raises(ValueError, es.reset, owf, check_random_state(None))
+
+    def test_low_dim(self):
+        ret = dual_annealing(
+            self.func, self.ld_bounds, seed=self.seed)
+        assert_allclose(ret.fun, 0., atol=1e-12)
+        assert ret.success
+
+    def test_high_dim(self):
+        ret = dual_annealing(self.func, self.hd_bounds, seed=self.seed)
+        assert_allclose(ret.fun, 0., atol=1e-12)
+        assert ret.success
+
+    def test_low_dim_no_ls(self):
+        ret = dual_annealing(self.func, self.ld_bounds,
+                             no_local_search=True, seed=self.seed)
+        assert_allclose(ret.fun, 0., atol=1e-4)
+
+    def test_high_dim_no_ls(self):
+        ret = dual_annealing(self.func, self.hd_bounds,
+                             no_local_search=True, seed=self.seed)
+        assert_allclose(ret.fun, 0., atol=1e-4)
+
+    def test_nb_fun_call(self):
+        ret = dual_annealing(self.func, self.ld_bounds, seed=self.seed)
+        assert_equal(self.nb_fun_call, ret.nfev)
+
+    def test_nb_fun_call_no_ls(self):
+        ret = dual_annealing(self.func, self.ld_bounds,
+                             no_local_search=True, seed=self.seed)
+        assert_equal(self.nb_fun_call, ret.nfev)
+
+    def test_max_reinit(self):
+        assert_raises(ValueError, dual_annealing, self.weirdfunc,
+                      self.ld_bounds)
+
+    def test_reproduce(self):
+        res1 = dual_annealing(self.func, self.ld_bounds, seed=self.seed)
+        res2 = dual_annealing(self.func, self.ld_bounds, seed=self.seed)
+        res3 = dual_annealing(self.func, self.ld_bounds, seed=self.seed)
+        # If the results are reproducible, the x components found have to
+        # be exactly the same, which would not be the case without seeding
+        assert_equal(res1.x, res2.x)
+        assert_equal(res1.x, res3.x)
+
+    def test_rand_gen(self):
+        # check that np.random.Generator can be used (numpy >= 1.17)
+        # obtain a np.random.Generator object
+        rng = np.random.default_rng(1)
+
+        res1 = dual_annealing(self.func, self.ld_bounds, seed=rng)
+        # seed again
+        rng = np.random.default_rng(1)
+        res2 = dual_annealing(self.func, self.ld_bounds, seed=rng)
+        # If the results are reproducible, the x components found have to
+        # be exactly the same, which would not be the case without seeding
+        assert_equal(res1.x, res2.x)
+
+    def test_bounds_integrity(self):
+        wrong_bounds = [(-5.12, 5.12), (1, 0), (5.12, 5.12)]
+        assert_raises(ValueError, dual_annealing, self.func,
+                      wrong_bounds)
+
+    def test_bound_validity(self):
+        invalid_bounds = [(-5, 5), (-np.inf, 0), (-5, 5)]
+        assert_raises(ValueError, dual_annealing, self.func,
+                      invalid_bounds)
+        invalid_bounds = [(-5, 5), (0, np.inf), (-5, 5)]
+        assert_raises(ValueError, dual_annealing, self.func,
+                      invalid_bounds)
+        invalid_bounds = [(-5, 5), (0, np.nan), (-5, 5)]
+        assert_raises(ValueError, dual_annealing, self.func,
+                      invalid_bounds)
+
+    def test_deprecated_local_search_options_bounds(self):
+        def func(x):
+            return np.sum((x - 5) * (x - 1))
+        bounds = list(zip([-6, -5], [6, 5]))
+        # Test that bounds can be passed (see gh-10831)
+
+        with pytest.warns(RuntimeWarning, match=r"Method CG cannot handle "):
+            dual_annealing(
+                func,
+                bounds=bounds,
+                minimizer_kwargs={"method": "CG", "bounds": bounds})
+
+    def test_minimizer_kwargs_bounds(self):
+        def func(x):
+            return np.sum((x - 5) * (x - 1))
+        bounds = list(zip([-6, -5], [6, 5]))
+        # Test that bounds can be passed (see gh-10831)
+        dual_annealing(
+            func,
+            bounds=bounds,
minimizer_kwargs={"method": "SLSQP", "bounds": bounds}) + + with pytest.warns(RuntimeWarning, match=r"Method CG cannot handle "): + dual_annealing( + func, + bounds=bounds, + minimizer_kwargs={"method": "CG", "bounds": bounds}) + + def test_max_fun_ls(self): + ret = dual_annealing(self.func, self.ld_bounds, maxfun=100, + seed=self.seed) + + ls_max_iter = min(max( + len(self.ld_bounds) * LocalSearchWrapper.LS_MAXITER_RATIO, + LocalSearchWrapper.LS_MAXITER_MIN), + LocalSearchWrapper.LS_MAXITER_MAX) + assert ret.nfev <= 100 + ls_max_iter + assert not ret.success + + def test_max_fun_no_ls(self): + ret = dual_annealing(self.func, self.ld_bounds, + no_local_search=True, maxfun=500, seed=self.seed) + assert ret.nfev <= 500 + assert not ret.success + + def test_maxiter(self): + ret = dual_annealing(self.func, self.ld_bounds, maxiter=700, + seed=self.seed) + assert ret.nit <= 700 + + # Testing that args are passed correctly for dual_annealing + def test_fun_args_ls(self): + ret = dual_annealing(self.func, self.ld_bounds, + args=((3.14159,)), seed=self.seed) + assert_allclose(ret.fun, 3.14159, atol=1e-6) + + # Testing that args are passed correctly for pure simulated annealing + def test_fun_args_no_ls(self): + ret = dual_annealing(self.func, self.ld_bounds, + args=((3.14159, )), no_local_search=True, + seed=self.seed) + assert_allclose(ret.fun, 3.14159, atol=1e-4) + + def test_callback_stop(self): + # Testing that callback make the algorithm stop for + # fun value <= 1.0 (see callback method) + ret = dual_annealing(self.func, self.ld_bounds, + callback=self.callback, seed=self.seed) + assert ret.fun <= 1.0 + assert 'stop early' in ret.message[0] + assert not ret.success + + @pytest.mark.parametrize('method, atol', [ + ('Nelder-Mead', 2e-5), + ('COBYLA', 1e-5), + ('Powell', 1e-8), + ('CG', 1e-8), + ('BFGS', 1e-8), + ('TNC', 1e-8), + ('SLSQP', 2e-7), + ]) + def test_multi_ls_minimizer(self, method, atol): + ret = dual_annealing(self.func, self.ld_bounds, + minimizer_kwargs=dict(method=method), + seed=self.seed) + assert_allclose(ret.fun, 0., atol=atol) + + def test_wrong_restart_temp(self): + assert_raises(ValueError, dual_annealing, self.func, + self.ld_bounds, restart_temp_ratio=1) + assert_raises(ValueError, dual_annealing, self.func, + self.ld_bounds, restart_temp_ratio=0) + + def test_gradient_gnev(self): + minimizer_opts = { + 'jac': self.rosen_der_wrapper, + } + ret = dual_annealing(rosen, self.ld_bounds, + minimizer_kwargs=minimizer_opts, + seed=self.seed) + assert ret.njev == self.ngev + + def test_from_docstring(self): + def func(x): + return np.sum(x * x - 10 * np.cos(2 * np.pi * x)) + 10 * np.size(x) + lw = [-5.12] * 10 + up = [5.12] * 10 + ret = dual_annealing(func, bounds=list(zip(lw, up)), seed=1234) + assert_allclose(ret.x, + [-4.26437714e-09, -3.91699361e-09, -1.86149218e-09, + -3.97165720e-09, -6.29151648e-09, -6.53145322e-09, + -3.93616815e-09, -6.55623025e-09, -6.05775280e-09, + -5.00668935e-09], atol=4e-8) + assert_allclose(ret.fun, 0.000000, atol=5e-13) + + @pytest.mark.parametrize('new_e, temp_step, accepted, accept_rate', [ + (0, 100, 1000, 1.0097587941791923), + (0, 2, 1000, 1.2599210498948732), + (10, 100, 878, 0.8786035869128718), + (10, 60, 695, 0.6812920690579612), + (2, 100, 990, 0.9897404249173424), + ]) + def test_accept_reject_probabilistic( + self, new_e, temp_step, accepted, accept_rate): + # Test accepts unconditionally with e < current_energy and + # probabilistically with e > current_energy + + rs = check_random_state(123) + + count_accepted = 0 + iterations = 
1000 + + accept_param = -5 + current_energy = 1 + for _ in range(iterations): + energy_state = EnergyState(lower=None, upper=None) + # Set energy state with current_energy, any location. + energy_state.update_current(current_energy, [0]) + + chain = StrategyChain( + accept_param, None, None, None, rs, energy_state) + # Normally this is set in run() + chain.temperature_step = temp_step + + # Check if update is accepted. + chain.accept_reject(j=1, e=new_e, x_visit=[2]) + if energy_state.current_energy == new_e: + count_accepted += 1 + + assert count_accepted == accepted + + # Check accept rate + pqv = 1 - (1 - accept_param) * (new_e - current_energy) / temp_step + rate = 0 if pqv <= 0 else np.exp(np.log(pqv) / (1 - accept_param)) + + assert_allclose(rate, accept_rate) + + def test_bounds_class(self): + # test that result does not depend on the bounds type + def func(x): + f = np.sum(x * x - 10 * np.cos(2 * np.pi * x)) + 10 * np.size(x) + return f + lw = [-5.12] * 5 + up = [5.12] * 5 + + # Unbounded global minimum is all zeros. Most bounds below will force + # a DV away from unbounded minimum and be active at solution. + up[0] = -2.0 + up[1] = -1.0 + lw[3] = 1.0 + lw[4] = 2.0 + + # run optimizations + bounds = Bounds(lw, up) + ret_bounds_class = dual_annealing(func, bounds=bounds, seed=1234) + + bounds_old = list(zip(lw, up)) + ret_bounds_list = dual_annealing(func, bounds=bounds_old, seed=1234) + + # test that found minima, function evaluations and iterations match + assert_allclose(ret_bounds_class.x, ret_bounds_list.x, atol=1e-8) + assert_allclose(ret_bounds_class.x, np.arange(-2, 3), atol=1e-7) + assert_allclose(ret_bounds_list.fun, ret_bounds_class.fun, atol=1e-9) + assert ret_bounds_list.nfev == ret_bounds_class.nfev + + def test_callable_jac_with_args_gh11052(self): + # dual_annealing used to fail when `jac` was callable and `args` were + # used; check that this is resolved. Example is from gh-11052. + rng = np.random.default_rng(94253637693657847462) + def f(x, power): + return np.sum(np.exp(x ** power)) + + def jac(x, power): + return np.exp(x ** power) * power * x ** (power - 1) + + res1 = dual_annealing(f, args=(2, ), bounds=[[0, 1], [0, 1]], seed=rng, + minimizer_kwargs=dict(method='L-BFGS-B')) + res2 = dual_annealing(f, args=(2, ), bounds=[[0, 1], [0, 1]], seed=rng, + minimizer_kwargs=dict(method='L-BFGS-B', + jac=jac)) + assert_allclose(res1.fun, res2.fun, rtol=1e-6) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test__linprog_clean_inputs.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test__linprog_clean_inputs.py new file mode 100644 index 0000000000000000000000000000000000000000..3b0e4097bc9aadbfd3335aa3a86d063216f2c69a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test__linprog_clean_inputs.py @@ -0,0 +1,310 @@ +""" +Unit test for Linear Programming via Simplex Algorithm. +""" +import numpy as np +from numpy.testing import assert_, assert_allclose, assert_equal +from pytest import raises as assert_raises +from scipy.optimize._linprog_util import _clean_inputs, _LPProblem +from scipy._lib._util import VisibleDeprecationWarning +from copy import deepcopy +from datetime import date + + +def test_aliasing(): + """ + Test for ensuring that no objects referred to by `lp` attributes, + `c`, `A_ub`, `b_ub`, `A_eq`, `b_eq`, `bounds`, have been modified + by `_clean_inputs` as a side effect. 
+ """ + lp = _LPProblem( + c=1, + A_ub=[[1]], + b_ub=[1], + A_eq=[[1]], + b_eq=[1], + bounds=(-np.inf, np.inf) + ) + lp_copy = deepcopy(lp) + + _clean_inputs(lp) + + assert_(lp.c == lp_copy.c, "c modified by _clean_inputs") + assert_(lp.A_ub == lp_copy.A_ub, "A_ub modified by _clean_inputs") + assert_(lp.b_ub == lp_copy.b_ub, "b_ub modified by _clean_inputs") + assert_(lp.A_eq == lp_copy.A_eq, "A_eq modified by _clean_inputs") + assert_(lp.b_eq == lp_copy.b_eq, "b_eq modified by _clean_inputs") + assert_(lp.bounds == lp_copy.bounds, "bounds modified by _clean_inputs") + + +def test_aliasing2(): + """ + Similar purpose as `test_aliasing` above. + """ + lp = _LPProblem( + c=np.array([1, 1]), + A_ub=np.array([[1, 1], [2, 2]]), + b_ub=np.array([[1], [1]]), + A_eq=np.array([[1, 1]]), + b_eq=np.array([1]), + bounds=[(-np.inf, np.inf), (None, 1)] + ) + lp_copy = deepcopy(lp) + + _clean_inputs(lp) + + assert_allclose(lp.c, lp_copy.c, err_msg="c modified by _clean_inputs") + assert_allclose(lp.A_ub, lp_copy.A_ub, err_msg="A_ub modified by _clean_inputs") + assert_allclose(lp.b_ub, lp_copy.b_ub, err_msg="b_ub modified by _clean_inputs") + assert_allclose(lp.A_eq, lp_copy.A_eq, err_msg="A_eq modified by _clean_inputs") + assert_allclose(lp.b_eq, lp_copy.b_eq, err_msg="b_eq modified by _clean_inputs") + assert_(lp.bounds == lp_copy.bounds, "bounds modified by _clean_inputs") + + +def test_missing_inputs(): + c = [1, 2] + A_ub = np.array([[1, 1], [2, 2]]) + b_ub = np.array([1, 1]) + A_eq = np.array([[1, 1], [2, 2]]) + b_eq = np.array([1, 1]) + + assert_raises(TypeError, _clean_inputs) + assert_raises(TypeError, _clean_inputs, _LPProblem(c=None)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_ub=A_ub)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_ub=A_ub, b_ub=None)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, b_ub=b_ub)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_ub=None, b_ub=b_ub)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_eq=A_eq)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_eq=A_eq, b_eq=None)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, b_eq=b_eq)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_eq=None, b_eq=b_eq)) + + +def test_too_many_dimensions(): + cb = [1, 2, 3, 4] + A = np.random.rand(4, 4) + bad2D = [[1, 2], [3, 4]] + bad3D = np.random.rand(4, 4, 4) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=bad2D, A_ub=A, b_ub=cb)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=cb, A_ub=bad3D, b_ub=cb)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=cb, A_ub=A, b_ub=bad2D)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=cb, A_eq=bad3D, b_eq=cb)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=cb, A_eq=A, b_eq=bad2D)) + + +def test_too_few_dimensions(): + bad = np.random.rand(4, 4).ravel() + cb = np.random.rand(4) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=cb, A_ub=bad, b_ub=cb)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=cb, A_eq=bad, b_eq=cb)) + + +def test_inconsistent_dimensions(): + m = 2 + n = 4 + c = [1, 2, 3, 4] + + Agood = np.random.rand(m, n) + Abad = np.random.rand(m, n + 1) + bgood = np.random.rand(m) + bbad = np.random.rand(m + 1) + boundsbad = [(0, 1)] * (n + 1) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_ub=Abad, b_ub=bgood)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_ub=Agood, b_ub=bbad)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_eq=Abad, 
b_eq=bgood)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_eq=Agood, b_eq=bbad)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, bounds=boundsbad)) + with np.testing.suppress_warnings() as sup: + sup.filter(VisibleDeprecationWarning, "Creating an ndarray from ragged") + assert_raises(ValueError, _clean_inputs, + _LPProblem(c=c, bounds=[[1, 2], [2, 3], [3, 4], [4, 5, 6]])) + + +def test_type_errors(): + lp = _LPProblem( + c=[1, 2], + A_ub=np.array([[1, 1], [2, 2]]), + b_ub=np.array([1, 1]), + A_eq=np.array([[1, 1], [2, 2]]), + b_eq=np.array([1, 1]), + bounds=[(0, 1)] + ) + bad = "hello" + + assert_raises(TypeError, _clean_inputs, lp._replace(c=bad)) + assert_raises(TypeError, _clean_inputs, lp._replace(A_ub=bad)) + assert_raises(TypeError, _clean_inputs, lp._replace(b_ub=bad)) + assert_raises(TypeError, _clean_inputs, lp._replace(A_eq=bad)) + assert_raises(TypeError, _clean_inputs, lp._replace(b_eq=bad)) + + assert_raises(ValueError, _clean_inputs, lp._replace(bounds=bad)) + assert_raises(ValueError, _clean_inputs, lp._replace(bounds="hi")) + assert_raises(ValueError, _clean_inputs, lp._replace(bounds=["hi"])) + assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[("hi")])) + assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[(1, "")])) + assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[(1, 2), (1, "")])) + assert_raises(TypeError, _clean_inputs, + lp._replace(bounds=[(1, date(2020, 2, 29))])) + assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[[[1, 2]]])) + + +def test_non_finite_errors(): + lp = _LPProblem( + c=[1, 2], + A_ub=np.array([[1, 1], [2, 2]]), + b_ub=np.array([1, 1]), + A_eq=np.array([[1, 1], [2, 2]]), + b_eq=np.array([1, 1]), + bounds=[(0, 1)] + ) + assert_raises(ValueError, _clean_inputs, lp._replace(c=[0, None])) + assert_raises(ValueError, _clean_inputs, lp._replace(c=[np.inf, 0])) + assert_raises(ValueError, _clean_inputs, lp._replace(c=[0, -np.inf])) + assert_raises(ValueError, _clean_inputs, lp._replace(c=[np.nan, 0])) + + assert_raises(ValueError, _clean_inputs, lp._replace(A_ub=[[1, 2], [None, 1]])) + assert_raises(ValueError, _clean_inputs, lp._replace(b_ub=[np.inf, 1])) + assert_raises(ValueError, _clean_inputs, lp._replace(A_eq=[[1, 2], [1, -np.inf]])) + assert_raises(ValueError, _clean_inputs, lp._replace(b_eq=[1, np.nan])) + + +def test__clean_inputs1(): + lp = _LPProblem( + c=[1, 2], + A_ub=[[1, 1], [2, 2]], + b_ub=[1, 1], + A_eq=[[1, 1], [2, 2]], + b_eq=[1, 1], + bounds=None + ) + + lp_cleaned = _clean_inputs(lp) + + assert_allclose(lp_cleaned.c, np.array(lp.c)) + assert_allclose(lp_cleaned.A_ub, np.array(lp.A_ub)) + assert_allclose(lp_cleaned.b_ub, np.array(lp.b_ub)) + assert_allclose(lp_cleaned.A_eq, np.array(lp.A_eq)) + assert_allclose(lp_cleaned.b_eq, np.array(lp.b_eq)) + assert_equal(lp_cleaned.bounds, [(0, np.inf)] * 2) + + assert_(lp_cleaned.c.shape == (2,), "") + assert_(lp_cleaned.A_ub.shape == (2, 2), "") + assert_(lp_cleaned.b_ub.shape == (2,), "") + assert_(lp_cleaned.A_eq.shape == (2, 2), "") + assert_(lp_cleaned.b_eq.shape == (2,), "") + + +def test__clean_inputs2(): + lp = _LPProblem( + c=1, + A_ub=[[1]], + b_ub=1, + A_eq=[[1]], + b_eq=1, + bounds=(0, 1) + ) + + lp_cleaned = _clean_inputs(lp) + + assert_allclose(lp_cleaned.c, np.array(lp.c)) + assert_allclose(lp_cleaned.A_ub, np.array(lp.A_ub)) + assert_allclose(lp_cleaned.b_ub, np.array(lp.b_ub)) + assert_allclose(lp_cleaned.A_eq, np.array(lp.A_eq)) + assert_allclose(lp_cleaned.b_eq, np.array(lp.b_eq)) + 
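+    # note: a single (lo, hi) tuple supplied as `bounds` is broadcast by
+    # _clean_inputs to one pair per variable; n = 1 here, so the cleaned
+    # bounds are expected to be [(0, 1)]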
assert_equal(lp_cleaned.bounds, [(0, 1)]) + + assert_(lp_cleaned.c.shape == (1,), "") + assert_(lp_cleaned.A_ub.shape == (1, 1), "") + assert_(lp_cleaned.b_ub.shape == (1,), "") + assert_(lp_cleaned.A_eq.shape == (1, 1), "") + assert_(lp_cleaned.b_eq.shape == (1,), "") + + +def test__clean_inputs3(): + lp = _LPProblem( + c=[[1, 2]], + A_ub=np.random.rand(2, 2), + b_ub=[[1], [2]], + A_eq=np.random.rand(2, 2), + b_eq=[[1], [2]], + bounds=[(0, 1)] + ) + + lp_cleaned = _clean_inputs(lp) + + assert_allclose(lp_cleaned.c, np.array([1, 2])) + assert_allclose(lp_cleaned.b_ub, np.array([1, 2])) + assert_allclose(lp_cleaned.b_eq, np.array([1, 2])) + assert_equal(lp_cleaned.bounds, [(0, 1)] * 2) + + assert_(lp_cleaned.c.shape == (2,), "") + assert_(lp_cleaned.b_ub.shape == (2,), "") + assert_(lp_cleaned.b_eq.shape == (2,), "") + + +def test_bad_bounds(): + lp = _LPProblem(c=[1, 2]) + + assert_raises(ValueError, _clean_inputs, lp._replace(bounds=(1, 2, 2))) + assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[(1, 2, 2)])) + with np.testing.suppress_warnings() as sup: + sup.filter(VisibleDeprecationWarning, "Creating an ndarray from ragged") + assert_raises(ValueError, _clean_inputs, + lp._replace(bounds=[(1, 2), (1, 2, 2)])) + assert_raises(ValueError, _clean_inputs, + lp._replace(bounds=[(1, 2), (1, 2), (1, 2)])) + + lp = _LPProblem(c=[1, 2, 3, 4]) + + assert_raises(ValueError, _clean_inputs, + lp._replace(bounds=[(1, 2, 3, 4), (1, 2, 3, 4)])) + + +def test_good_bounds(): + lp = _LPProblem(c=[1, 2]) + + lp_cleaned = _clean_inputs(lp) # lp.bounds is None by default + assert_equal(lp_cleaned.bounds, [(0, np.inf)] * 2) + + lp_cleaned = _clean_inputs(lp._replace(bounds=[])) + assert_equal(lp_cleaned.bounds, [(0, np.inf)] * 2) + + lp_cleaned = _clean_inputs(lp._replace(bounds=[[]])) + assert_equal(lp_cleaned.bounds, [(0, np.inf)] * 2) + + lp_cleaned = _clean_inputs(lp._replace(bounds=(1, 2))) + assert_equal(lp_cleaned.bounds, [(1, 2)] * 2) + + lp_cleaned = _clean_inputs(lp._replace(bounds=[(1, 2)])) + assert_equal(lp_cleaned.bounds, [(1, 2)] * 2) + + lp_cleaned = _clean_inputs(lp._replace(bounds=[(1, None)])) + assert_equal(lp_cleaned.bounds, [(1, np.inf)] * 2) + + lp_cleaned = _clean_inputs(lp._replace(bounds=[(None, 1)])) + assert_equal(lp_cleaned.bounds, [(-np.inf, 1)] * 2) + + lp_cleaned = _clean_inputs(lp._replace(bounds=[(None, None), (-np.inf, None)])) + assert_equal(lp_cleaned.bounds, [(-np.inf, np.inf)] * 2) + + lp = _LPProblem(c=[1, 2, 3, 4]) + + lp_cleaned = _clean_inputs(lp) # lp.bounds is None by default + assert_equal(lp_cleaned.bounds, [(0, np.inf)] * 4) + + lp_cleaned = _clean_inputs(lp._replace(bounds=(1, 2))) + assert_equal(lp_cleaned.bounds, [(1, 2)] * 4) + + lp_cleaned = _clean_inputs(lp._replace(bounds=[(1, 2)])) + assert_equal(lp_cleaned.bounds, [(1, 2)] * 4) + + lp_cleaned = _clean_inputs(lp._replace(bounds=[(1, None)])) + assert_equal(lp_cleaned.bounds, [(1, np.inf)] * 4) + + lp_cleaned = _clean_inputs(lp._replace(bounds=[(None, 1)])) + assert_equal(lp_cleaned.bounds, [(-np.inf, 1)] * 4) + + lp_cleaned = _clean_inputs(lp._replace(bounds=[(None, None), + (-np.inf, None), + (None, np.inf), + (-np.inf, np.inf)])) + assert_equal(lp_cleaned.bounds, [(-np.inf, np.inf)] * 4) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test__numdiff.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test__numdiff.py new file mode 100644 index 0000000000000000000000000000000000000000..7f695d94569438233afd1c3b3d2db2e390654f01 --- /dev/null +++ 
b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test__numdiff.py @@ -0,0 +1,815 @@ +import math +from itertools import product + +import numpy as np +from numpy.testing import assert_allclose, assert_equal, assert_ +from pytest import raises as assert_raises + +from scipy.sparse import csr_matrix, csc_matrix, lil_matrix + +from scipy.optimize._numdiff import ( + _adjust_scheme_to_bounds, approx_derivative, check_derivative, + group_columns, _eps_for_method, _compute_absolute_step) + + +def test_group_columns(): + structure = [ + [1, 1, 0, 0, 0, 0], + [1, 1, 1, 0, 0, 0], + [0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0], + [0, 0, 0, 1, 1, 1], + [0, 0, 0, 0, 1, 1], + [0, 0, 0, 0, 0, 0] + ] + for transform in [np.asarray, csr_matrix, csc_matrix, lil_matrix]: + A = transform(structure) + order = np.arange(6) + groups_true = np.array([0, 1, 2, 0, 1, 2]) + groups = group_columns(A, order) + assert_equal(groups, groups_true) + + order = [1, 2, 4, 3, 5, 0] + groups_true = np.array([2, 0, 1, 2, 0, 1]) + groups = group_columns(A, order) + assert_equal(groups, groups_true) + + # Test repeatability. + groups_1 = group_columns(A) + groups_2 = group_columns(A) + assert_equal(groups_1, groups_2) + + +def test_correct_fp_eps(): + # check that relative step size is correct for FP size + EPS = np.finfo(np.float64).eps + relative_step = {"2-point": EPS**0.5, + "3-point": EPS**(1/3), + "cs": EPS**0.5} + for method in ['2-point', '3-point', 'cs']: + assert_allclose( + _eps_for_method(np.float64, np.float64, method), + relative_step[method]) + assert_allclose( + _eps_for_method(np.complex128, np.complex128, method), + relative_step[method] + ) + + # check another FP size + EPS = np.finfo(np.float32).eps + relative_step = {"2-point": EPS**0.5, + "3-point": EPS**(1/3), + "cs": EPS**0.5} + + for method in ['2-point', '3-point', 'cs']: + assert_allclose( + _eps_for_method(np.float64, np.float32, method), + relative_step[method] + ) + assert_allclose( + _eps_for_method(np.float32, np.float64, method), + relative_step[method] + ) + assert_allclose( + _eps_for_method(np.float32, np.float32, method), + relative_step[method] + ) + + +class TestAdjustSchemeToBounds: + def test_no_bounds(self): + x0 = np.zeros(3) + h = np.full(3, 1e-2) + inf_lower = np.empty_like(x0) + inf_upper = np.empty_like(x0) + inf_lower.fill(-np.inf) + inf_upper.fill(np.inf) + + h_adjusted, one_sided = _adjust_scheme_to_bounds( + x0, h, 1, '1-sided', inf_lower, inf_upper) + assert_allclose(h_adjusted, h) + assert_(np.all(one_sided)) + + h_adjusted, one_sided = _adjust_scheme_to_bounds( + x0, h, 2, '1-sided', inf_lower, inf_upper) + assert_allclose(h_adjusted, h) + assert_(np.all(one_sided)) + + h_adjusted, one_sided = _adjust_scheme_to_bounds( + x0, h, 1, '2-sided', inf_lower, inf_upper) + assert_allclose(h_adjusted, h) + assert_(np.all(~one_sided)) + + h_adjusted, one_sided = _adjust_scheme_to_bounds( + x0, h, 2, '2-sided', inf_lower, inf_upper) + assert_allclose(h_adjusted, h) + assert_(np.all(~one_sided)) + + def test_with_bound(self): + x0 = np.array([0.0, 0.85, -0.85]) + lb = -np.ones(3) + ub = np.ones(3) + h = np.array([1, 1, -1]) * 1e-1 + + h_adjusted, _ = _adjust_scheme_to_bounds(x0, h, 1, '1-sided', lb, ub) + assert_allclose(h_adjusted, h) + + h_adjusted, _ = _adjust_scheme_to_bounds(x0, h, 2, '1-sided', lb, ub) + assert_allclose(h_adjusted, np.array([1, -1, 1]) * 1e-1) + + h_adjusted, one_sided = _adjust_scheme_to_bounds( + x0, h, 1, '2-sided', lb, ub) + assert_allclose(h_adjusted, np.abs(h)) + assert_(np.all(~one_sided)) + + 
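+        # note: x0[1] = 0.85 and x0[2] = -0.85 sit within 2*|h| of a bound,
+        # so for the order-2 two-sided scheme below the helper is expected to
+        # fall back to one-sided steps for exactly those components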
h_adjusted, one_sided = _adjust_scheme_to_bounds( + x0, h, 2, '2-sided', lb, ub) + assert_allclose(h_adjusted, np.array([1, -1, 1]) * 1e-1) + assert_equal(one_sided, np.array([False, True, True])) + + def test_tight_bounds(self): + lb = np.array([-0.03, -0.03]) + ub = np.array([0.05, 0.05]) + x0 = np.array([0.0, 0.03]) + h = np.array([-0.1, -0.1]) + + h_adjusted, _ = _adjust_scheme_to_bounds(x0, h, 1, '1-sided', lb, ub) + assert_allclose(h_adjusted, np.array([0.05, -0.06])) + + h_adjusted, _ = _adjust_scheme_to_bounds(x0, h, 2, '1-sided', lb, ub) + assert_allclose(h_adjusted, np.array([0.025, -0.03])) + + h_adjusted, one_sided = _adjust_scheme_to_bounds( + x0, h, 1, '2-sided', lb, ub) + assert_allclose(h_adjusted, np.array([0.03, -0.03])) + assert_equal(one_sided, np.array([False, True])) + + h_adjusted, one_sided = _adjust_scheme_to_bounds( + x0, h, 2, '2-sided', lb, ub) + assert_allclose(h_adjusted, np.array([0.015, -0.015])) + assert_equal(one_sided, np.array([False, True])) + + +class TestApproxDerivativesDense: + def fun_scalar_scalar(self, x): + return np.sinh(x) + + def jac_scalar_scalar(self, x): + return np.cosh(x) + + def fun_scalar_vector(self, x): + return np.array([x[0]**2, np.tan(x[0]), np.exp(x[0])]) + + def jac_scalar_vector(self, x): + return np.array( + [2 * x[0], np.cos(x[0]) ** -2, np.exp(x[0])]).reshape(-1, 1) + + def fun_vector_scalar(self, x): + return np.sin(x[0] * x[1]) * np.log(x[0]) + + def wrong_dimensions_fun(self, x): + return np.array([x**2, np.tan(x), np.exp(x)]) + + def jac_vector_scalar(self, x): + return np.array([ + x[1] * np.cos(x[0] * x[1]) * np.log(x[0]) + + np.sin(x[0] * x[1]) / x[0], + x[0] * np.cos(x[0] * x[1]) * np.log(x[0]) + ]) + + def fun_vector_vector(self, x): + return np.array([ + x[0] * np.sin(x[1]), + x[1] * np.cos(x[0]), + x[0] ** 3 * x[1] ** -0.5 + ]) + + def jac_vector_vector(self, x): + return np.array([ + [np.sin(x[1]), x[0] * np.cos(x[1])], + [-x[1] * np.sin(x[0]), np.cos(x[0])], + [3 * x[0] ** 2 * x[1] ** -0.5, -0.5 * x[0] ** 3 * x[1] ** -1.5] + ]) + + def fun_parametrized(self, x, c0, c1=1.0): + return np.array([np.exp(c0 * x[0]), np.exp(c1 * x[1])]) + + def jac_parametrized(self, x, c0, c1=0.1): + return np.array([ + [c0 * np.exp(c0 * x[0]), 0], + [0, c1 * np.exp(c1 * x[1])] + ]) + + def fun_with_nan(self, x): + return x if np.abs(x) <= 1e-8 else np.nan + + def jac_with_nan(self, x): + return 1.0 if np.abs(x) <= 1e-8 else np.nan + + def fun_zero_jacobian(self, x): + return np.array([x[0] * x[1], np.cos(x[0] * x[1])]) + + def jac_zero_jacobian(self, x): + return np.array([ + [x[1], x[0]], + [-x[1] * np.sin(x[0] * x[1]), -x[0] * np.sin(x[0] * x[1])] + ]) + + def jac_non_numpy(self, x): + # x can be a scalar or an array [val]. + # Cast to true scalar before handing over to math.exp + xp = np.asarray(x).item() + return math.exp(xp) + + def test_scalar_scalar(self): + x0 = 1.0 + jac_diff_2 = approx_derivative(self.fun_scalar_scalar, x0, + method='2-point') + jac_diff_3 = approx_derivative(self.fun_scalar_scalar, x0) + jac_diff_4 = approx_derivative(self.fun_scalar_scalar, x0, + method='cs') + jac_true = self.jac_scalar_scalar(x0) + assert_allclose(jac_diff_2, jac_true, rtol=1e-6) + assert_allclose(jac_diff_3, jac_true, rtol=1e-9) + assert_allclose(jac_diff_4, jac_true, rtol=1e-12) + + def test_scalar_scalar_abs_step(self): + # can approx_derivative use abs_step? 
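+        # note: 1.49e-8 is roughly sqrt(float64 machine eps), the textbook
+        # near-optimal absolute step for 2-point differences at x0 ~ 1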
+ x0 = 1.0 + jac_diff_2 = approx_derivative(self.fun_scalar_scalar, x0, + method='2-point', abs_step=1.49e-8) + jac_diff_3 = approx_derivative(self.fun_scalar_scalar, x0, + abs_step=1.49e-8) + jac_diff_4 = approx_derivative(self.fun_scalar_scalar, x0, + method='cs', abs_step=1.49e-8) + jac_true = self.jac_scalar_scalar(x0) + assert_allclose(jac_diff_2, jac_true, rtol=1e-6) + assert_allclose(jac_diff_3, jac_true, rtol=1e-9) + assert_allclose(jac_diff_4, jac_true, rtol=1e-12) + + def test_scalar_vector(self): + x0 = 0.5 + jac_diff_2 = approx_derivative(self.fun_scalar_vector, x0, + method='2-point') + jac_diff_3 = approx_derivative(self.fun_scalar_vector, x0) + jac_diff_4 = approx_derivative(self.fun_scalar_vector, x0, + method='cs') + jac_true = self.jac_scalar_vector(np.atleast_1d(x0)) + assert_allclose(jac_diff_2, jac_true, rtol=1e-6) + assert_allclose(jac_diff_3, jac_true, rtol=1e-9) + assert_allclose(jac_diff_4, jac_true, rtol=1e-12) + + def test_vector_scalar(self): + x0 = np.array([100.0, -0.5]) + jac_diff_2 = approx_derivative(self.fun_vector_scalar, x0, + method='2-point') + jac_diff_3 = approx_derivative(self.fun_vector_scalar, x0) + jac_diff_4 = approx_derivative(self.fun_vector_scalar, x0, + method='cs') + jac_true = self.jac_vector_scalar(x0) + assert_allclose(jac_diff_2, jac_true, rtol=1e-6) + assert_allclose(jac_diff_3, jac_true, rtol=1e-7) + assert_allclose(jac_diff_4, jac_true, rtol=1e-12) + + def test_vector_scalar_abs_step(self): + # can approx_derivative use abs_step? + x0 = np.array([100.0, -0.5]) + jac_diff_2 = approx_derivative(self.fun_vector_scalar, x0, + method='2-point', abs_step=1.49e-8) + jac_diff_3 = approx_derivative(self.fun_vector_scalar, x0, + abs_step=1.49e-8, rel_step=np.inf) + jac_diff_4 = approx_derivative(self.fun_vector_scalar, x0, + method='cs', abs_step=1.49e-8) + jac_true = self.jac_vector_scalar(x0) + assert_allclose(jac_diff_2, jac_true, rtol=1e-6) + assert_allclose(jac_diff_3, jac_true, rtol=3e-9) + assert_allclose(jac_diff_4, jac_true, rtol=1e-12) + + def test_vector_vector(self): + x0 = np.array([-100.0, 0.2]) + jac_diff_2 = approx_derivative(self.fun_vector_vector, x0, + method='2-point') + jac_diff_3 = approx_derivative(self.fun_vector_vector, x0) + jac_diff_4 = approx_derivative(self.fun_vector_vector, x0, + method='cs') + jac_true = self.jac_vector_vector(x0) + assert_allclose(jac_diff_2, jac_true, rtol=1e-5) + assert_allclose(jac_diff_3, jac_true, rtol=1e-6) + assert_allclose(jac_diff_4, jac_true, rtol=1e-12) + + def test_wrong_dimensions(self): + x0 = 1.0 + assert_raises(RuntimeError, approx_derivative, + self.wrong_dimensions_fun, x0) + f0 = self.wrong_dimensions_fun(np.atleast_1d(x0)) + assert_raises(ValueError, approx_derivative, + self.wrong_dimensions_fun, x0, f0=f0) + + def test_custom_rel_step(self): + x0 = np.array([-0.1, 0.1]) + jac_diff_2 = approx_derivative(self.fun_vector_vector, x0, + method='2-point', rel_step=1e-4) + jac_diff_3 = approx_derivative(self.fun_vector_vector, x0, + rel_step=1e-4) + jac_true = self.jac_vector_vector(x0) + assert_allclose(jac_diff_2, jac_true, rtol=1e-2) + assert_allclose(jac_diff_3, jac_true, rtol=1e-4) + + def test_options(self): + x0 = np.array([1.0, 1.0]) + c0 = -1.0 + c1 = 1.0 + lb = 0.0 + ub = 2.0 + f0 = self.fun_parametrized(x0, c0, c1=c1) + rel_step = np.array([-1e-6, 1e-7]) + jac_true = self.jac_parametrized(x0, c0, c1) + jac_diff_2 = approx_derivative( + self.fun_parametrized, x0, method='2-point', rel_step=rel_step, + f0=f0, args=(c0,), kwargs=dict(c1=c1), bounds=(lb, ub)) + jac_diff_3 
= approx_derivative( + self.fun_parametrized, x0, rel_step=rel_step, + f0=f0, args=(c0,), kwargs=dict(c1=c1), bounds=(lb, ub)) + assert_allclose(jac_diff_2, jac_true, rtol=1e-6) + assert_allclose(jac_diff_3, jac_true, rtol=1e-9) + + def test_with_bounds_2_point(self): + lb = -np.ones(2) + ub = np.ones(2) + + x0 = np.array([-2.0, 0.2]) + assert_raises(ValueError, approx_derivative, + self.fun_vector_vector, x0, bounds=(lb, ub)) + + x0 = np.array([-1.0, 1.0]) + jac_diff = approx_derivative(self.fun_vector_vector, x0, + method='2-point', bounds=(lb, ub)) + jac_true = self.jac_vector_vector(x0) + assert_allclose(jac_diff, jac_true, rtol=1e-6) + + def test_with_bounds_3_point(self): + lb = np.array([1.0, 1.0]) + ub = np.array([2.0, 2.0]) + + x0 = np.array([1.0, 2.0]) + jac_true = self.jac_vector_vector(x0) + + jac_diff = approx_derivative(self.fun_vector_vector, x0) + assert_allclose(jac_diff, jac_true, rtol=1e-9) + + jac_diff = approx_derivative(self.fun_vector_vector, x0, + bounds=(lb, np.inf)) + assert_allclose(jac_diff, jac_true, rtol=1e-9) + + jac_diff = approx_derivative(self.fun_vector_vector, x0, + bounds=(-np.inf, ub)) + assert_allclose(jac_diff, jac_true, rtol=1e-9) + + jac_diff = approx_derivative(self.fun_vector_vector, x0, + bounds=(lb, ub)) + assert_allclose(jac_diff, jac_true, rtol=1e-9) + + def test_tight_bounds(self): + x0 = np.array([10.0, 10.0]) + lb = x0 - 3e-9 + ub = x0 + 2e-9 + jac_true = self.jac_vector_vector(x0) + jac_diff = approx_derivative( + self.fun_vector_vector, x0, method='2-point', bounds=(lb, ub)) + assert_allclose(jac_diff, jac_true, rtol=1e-6) + jac_diff = approx_derivative( + self.fun_vector_vector, x0, method='2-point', + rel_step=1e-6, bounds=(lb, ub)) + assert_allclose(jac_diff, jac_true, rtol=1e-6) + + jac_diff = approx_derivative( + self.fun_vector_vector, x0, bounds=(lb, ub)) + assert_allclose(jac_diff, jac_true, rtol=1e-6) + jac_diff = approx_derivative( + self.fun_vector_vector, x0, rel_step=1e-6, bounds=(lb, ub)) + assert_allclose(jac_true, jac_diff, rtol=1e-6) + + def test_bound_switches(self): + lb = -1e-8 + ub = 1e-8 + x0 = 0.0 + jac_true = self.jac_with_nan(x0) + jac_diff_2 = approx_derivative( + self.fun_with_nan, x0, method='2-point', rel_step=1e-6, + bounds=(lb, ub)) + jac_diff_3 = approx_derivative( + self.fun_with_nan, x0, rel_step=1e-6, bounds=(lb, ub)) + assert_allclose(jac_diff_2, jac_true, rtol=1e-6) + assert_allclose(jac_diff_3, jac_true, rtol=1e-9) + + x0 = 1e-8 + jac_true = self.jac_with_nan(x0) + jac_diff_2 = approx_derivative( + self.fun_with_nan, x0, method='2-point', rel_step=1e-6, + bounds=(lb, ub)) + jac_diff_3 = approx_derivative( + self.fun_with_nan, x0, rel_step=1e-6, bounds=(lb, ub)) + assert_allclose(jac_diff_2, jac_true, rtol=1e-6) + assert_allclose(jac_diff_3, jac_true, rtol=1e-9) + + def test_non_numpy(self): + x0 = 1.0 + jac_true = self.jac_non_numpy(x0) + jac_diff_2 = approx_derivative(self.jac_non_numpy, x0, + method='2-point') + jac_diff_3 = approx_derivative(self.jac_non_numpy, x0) + assert_allclose(jac_diff_2, jac_true, rtol=1e-6) + assert_allclose(jac_diff_3, jac_true, rtol=1e-8) + + # math.exp cannot handle complex arguments, hence this raises + assert_raises(TypeError, approx_derivative, self.jac_non_numpy, x0, + **dict(method='cs')) + + def test_fp(self): + # checks that approx_derivative works for FP size other than 64. + # Example is derived from the minimal working example in gh12991. 
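+        # note: per test_correct_fp_eps above, the default step is derived
+        # from the eps of the lowest-precision dtype involved, which is why
+        # the float32 comparisons below use a much looser atol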
+ np.random.seed(1) + + def func(p, x): + return p[0] + p[1] * x + + def err(p, x, y): + return func(p, x) - y + + x = np.linspace(0, 1, 100, dtype=np.float64) + y = np.random.random(100).astype(np.float64) + p0 = np.array([-1.0, -1.0]) + + jac_fp64 = approx_derivative(err, p0, method='2-point', args=(x, y)) + + # parameter vector is float32, func output is float64 + jac_fp = approx_derivative(err, p0.astype(np.float32), + method='2-point', args=(x, y)) + assert err(p0, x, y).dtype == np.float64 + assert_allclose(jac_fp, jac_fp64, atol=1e-3) + + # parameter vector is float64, func output is float32 + def err_fp32(p): + assert p.dtype == np.float32 + return err(p, x, y).astype(np.float32) + + jac_fp = approx_derivative(err_fp32, p0.astype(np.float32), + method='2-point') + assert_allclose(jac_fp, jac_fp64, atol=1e-3) + + # check upper bound of error on the derivative for 2-point + def f(x): + return np.sin(x) + def g(x): + return np.cos(x) + def hess(x): + return -np.sin(x) + + def calc_atol(h, x0, f, hess, EPS): + # truncation error + t0 = h / 2 * max(np.abs(hess(x0)), np.abs(hess(x0 + h))) + # roundoff error. There may be a divisor (>1) missing from + # the following line, so this contribution is possibly + # overestimated + t1 = EPS / h * max(np.abs(f(x0)), np.abs(f(x0 + h))) + return t0 + t1 + + for dtype in [np.float16, np.float32, np.float64]: + EPS = np.finfo(dtype).eps + x0 = np.array(1.0).astype(dtype) + h = _compute_absolute_step(None, x0, f(x0), '2-point') + atol = calc_atol(h, x0, f, hess, EPS) + err = approx_derivative(f, x0, method='2-point', + abs_step=h) - g(x0) + assert abs(err) < atol + + def test_check_derivative(self): + x0 = np.array([-10.0, 10]) + accuracy = check_derivative(self.fun_vector_vector, + self.jac_vector_vector, x0) + assert_(accuracy < 1e-9) + accuracy = check_derivative(self.fun_vector_vector, + self.jac_vector_vector, x0) + assert_(accuracy < 1e-6) + + x0 = np.array([0.0, 0.0]) + accuracy = check_derivative(self.fun_zero_jacobian, + self.jac_zero_jacobian, x0) + assert_(accuracy == 0) + accuracy = check_derivative(self.fun_zero_jacobian, + self.jac_zero_jacobian, x0) + assert_(accuracy == 0) + + +class TestApproxDerivativeSparse: + # Example from Numerical Optimization 2nd edition, p. 198. 
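+    # note: the Jacobian of `fun` below is tridiagonal, so its columns can be
+    # partitioned into three structurally-orthogonal groups (as exercised in
+    # test_group_columns above) and the whole Jacobian recovered from as many
+    # perturbed evaluations as there are groups, rather than one per column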
+ def setup_method(self): + np.random.seed(0) + self.n = 50 + self.lb = -0.1 * (1 + np.arange(self.n)) + self.ub = 0.1 * (1 + np.arange(self.n)) + self.x0 = np.empty(self.n) + self.x0[::2] = (1 - 1e-7) * self.lb[::2] + self.x0[1::2] = (1 - 1e-7) * self.ub[1::2] + + self.J_true = self.jac(self.x0) + + def fun(self, x): + e = x[1:]**3 - x[:-1]**2 + return np.hstack((0, 3 * e)) + np.hstack((2 * e, 0)) + + def jac(self, x): + n = x.size + J = np.zeros((n, n)) + J[0, 0] = -4 * x[0] + J[0, 1] = 6 * x[1]**2 + for i in range(1, n - 1): + J[i, i - 1] = -6 * x[i-1] + J[i, i] = 9 * x[i]**2 - 4 * x[i] + J[i, i + 1] = 6 * x[i+1]**2 + J[-1, -1] = 9 * x[-1]**2 + J[-1, -2] = -6 * x[-2] + + return J + + def structure(self, n): + A = np.zeros((n, n), dtype=int) + A[0, 0] = 1 + A[0, 1] = 1 + for i in range(1, n - 1): + A[i, i - 1: i + 2] = 1 + A[-1, -1] = 1 + A[-1, -2] = 1 + + return A + + def test_all(self): + A = self.structure(self.n) + order = np.arange(self.n) + groups_1 = group_columns(A, order) + np.random.shuffle(order) + groups_2 = group_columns(A, order) + + for method, groups, l, u in product( + ['2-point', '3-point', 'cs'], [groups_1, groups_2], + [-np.inf, self.lb], [np.inf, self.ub]): + J = approx_derivative(self.fun, self.x0, method=method, + bounds=(l, u), sparsity=(A, groups)) + assert_(isinstance(J, csr_matrix)) + assert_allclose(J.toarray(), self.J_true, rtol=1e-6) + + rel_step = np.full_like(self.x0, 1e-8) + rel_step[::2] *= -1 + J = approx_derivative(self.fun, self.x0, method=method, + rel_step=rel_step, sparsity=(A, groups)) + assert_allclose(J.toarray(), self.J_true, rtol=1e-5) + + def test_no_precomputed_groups(self): + A = self.structure(self.n) + J = approx_derivative(self.fun, self.x0, sparsity=A) + assert_allclose(J.toarray(), self.J_true, rtol=1e-6) + + def test_equivalence(self): + structure = np.ones((self.n, self.n), dtype=int) + groups = np.arange(self.n) + for method in ['2-point', '3-point', 'cs']: + J_dense = approx_derivative(self.fun, self.x0, method=method) + J_sparse = approx_derivative( + self.fun, self.x0, sparsity=(structure, groups), method=method) + assert_allclose(J_dense, J_sparse.toarray(), + rtol=5e-16, atol=7e-15) + + def test_check_derivative(self): + def jac(x): + return csr_matrix(self.jac(x)) + + accuracy = check_derivative(self.fun, jac, self.x0, + bounds=(self.lb, self.ub)) + assert_(accuracy < 1e-9) + + accuracy = check_derivative(self.fun, jac, self.x0, + bounds=(self.lb, self.ub)) + assert_(accuracy < 1e-9) + + +class TestApproxDerivativeLinearOperator: + + def fun_scalar_scalar(self, x): + return np.sinh(x) + + def jac_scalar_scalar(self, x): + return np.cosh(x) + + def fun_scalar_vector(self, x): + return np.array([x[0]**2, np.tan(x[0]), np.exp(x[0])]) + + def jac_scalar_vector(self, x): + return np.array( + [2 * x[0], np.cos(x[0]) ** -2, np.exp(x[0])]).reshape(-1, 1) + + def fun_vector_scalar(self, x): + return np.sin(x[0] * x[1]) * np.log(x[0]) + + def jac_vector_scalar(self, x): + return np.array([ + x[1] * np.cos(x[0] * x[1]) * np.log(x[0]) + + np.sin(x[0] * x[1]) / x[0], + x[0] * np.cos(x[0] * x[1]) * np.log(x[0]) + ]) + + def fun_vector_vector(self, x): + return np.array([ + x[0] * np.sin(x[1]), + x[1] * np.cos(x[0]), + x[0] ** 3 * x[1] ** -0.5 + ]) + + def jac_vector_vector(self, x): + return np.array([ + [np.sin(x[1]), x[0] * np.cos(x[1])], + [-x[1] * np.sin(x[0]), np.cos(x[0])], + [3 * x[0] ** 2 * x[1] ** -0.5, -0.5 * x[0] ** 3 * x[1] ** -1.5] + ]) + + def test_scalar_scalar(self): + x0 = 1.0 + jac_diff_2 = 
approx_derivative(self.fun_scalar_scalar, x0, + method='2-point', + as_linear_operator=True) + jac_diff_3 = approx_derivative(self.fun_scalar_scalar, x0, + as_linear_operator=True) + jac_diff_4 = approx_derivative(self.fun_scalar_scalar, x0, + method='cs', + as_linear_operator=True) + jac_true = self.jac_scalar_scalar(x0) + np.random.seed(1) + for i in range(10): + p = np.random.uniform(-10, 10, size=(1,)) + assert_allclose(jac_diff_2.dot(p), jac_true*p, + rtol=1e-5) + assert_allclose(jac_diff_3.dot(p), jac_true*p, + rtol=5e-6) + assert_allclose(jac_diff_4.dot(p), jac_true*p, + rtol=5e-6) + + def test_scalar_vector(self): + x0 = 0.5 + jac_diff_2 = approx_derivative(self.fun_scalar_vector, x0, + method='2-point', + as_linear_operator=True) + jac_diff_3 = approx_derivative(self.fun_scalar_vector, x0, + as_linear_operator=True) + jac_diff_4 = approx_derivative(self.fun_scalar_vector, x0, + method='cs', + as_linear_operator=True) + jac_true = self.jac_scalar_vector(np.atleast_1d(x0)) + np.random.seed(1) + for i in range(10): + p = np.random.uniform(-10, 10, size=(1,)) + assert_allclose(jac_diff_2.dot(p), jac_true.dot(p), + rtol=1e-5) + assert_allclose(jac_diff_3.dot(p), jac_true.dot(p), + rtol=5e-6) + assert_allclose(jac_diff_4.dot(p), jac_true.dot(p), + rtol=5e-6) + + def test_vector_scalar(self): + x0 = np.array([100.0, -0.5]) + jac_diff_2 = approx_derivative(self.fun_vector_scalar, x0, + method='2-point', + as_linear_operator=True) + jac_diff_3 = approx_derivative(self.fun_vector_scalar, x0, + as_linear_operator=True) + jac_diff_4 = approx_derivative(self.fun_vector_scalar, x0, + method='cs', + as_linear_operator=True) + jac_true = self.jac_vector_scalar(x0) + np.random.seed(1) + for i in range(10): + p = np.random.uniform(-10, 10, size=x0.shape) + assert_allclose(jac_diff_2.dot(p), np.atleast_1d(jac_true.dot(p)), + rtol=1e-5) + assert_allclose(jac_diff_3.dot(p), np.atleast_1d(jac_true.dot(p)), + rtol=5e-6) + assert_allclose(jac_diff_4.dot(p), np.atleast_1d(jac_true.dot(p)), + rtol=1e-7) + + def test_vector_vector(self): + x0 = np.array([-100.0, 0.2]) + jac_diff_2 = approx_derivative(self.fun_vector_vector, x0, + method='2-point', + as_linear_operator=True) + jac_diff_3 = approx_derivative(self.fun_vector_vector, x0, + as_linear_operator=True) + jac_diff_4 = approx_derivative(self.fun_vector_vector, x0, + method='cs', + as_linear_operator=True) + jac_true = self.jac_vector_vector(x0) + np.random.seed(1) + for i in range(10): + p = np.random.uniform(-10, 10, size=x0.shape) + assert_allclose(jac_diff_2.dot(p), jac_true.dot(p), rtol=1e-5) + assert_allclose(jac_diff_3.dot(p), jac_true.dot(p), rtol=1e-6) + assert_allclose(jac_diff_4.dot(p), jac_true.dot(p), rtol=1e-7) + + def test_exception(self): + x0 = np.array([-100.0, 0.2]) + assert_raises(ValueError, approx_derivative, + self.fun_vector_vector, x0, + method='2-point', bounds=(1, np.inf)) + + +def test_absolute_step_sign(): + # test for gh12487 + # if an absolute step is specified for 2-point differences make sure that + # the side corresponds to the step. i.e. if step is positive then forward + # differences should be used, if step is negative then backwards + # differences should be used. 
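+    # worked example: for g(t) = |t| at t = 0, a forward step h > 0 gives
+    # (g(h) - g(0)) / h = +1 while a backward step gives -1, so the sign of
+    # the estimate reveals which side of the kink was sampled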
+
+    # the gradient of f is discontinuous (a kink in each component) at
+    # x = [-1, -1]; first component is \/, second component is /\
+    def f(x):
+        return -np.abs(x[0] + 1) + np.abs(x[1] + 1)
+
+    # check that the forward difference is used
+    grad = approx_derivative(f, [-1, -1], method='2-point', abs_step=1e-8)
+    assert_allclose(grad, [-1.0, 1.0])
+
+    # check that the backwards difference is used
+    grad = approx_derivative(f, [-1, -1], method='2-point', abs_step=-1e-8)
+    assert_allclose(grad, [1.0, -1.0])
+
+    # check that the forwards difference is used with a step for both
+    # parameters
+    grad = approx_derivative(
+        f, [-1, -1], method='2-point', abs_step=[1e-8, 1e-8]
+    )
+    assert_allclose(grad, [-1.0, 1.0])
+
+    # check that we can mix forward/backwards steps.
+    grad = approx_derivative(
+        f, [-1, -1], method='2-point', abs_step=[1e-8, -1e-8]
+    )
+    assert_allclose(grad, [-1.0, -1.0])
+    grad = approx_derivative(
+        f, [-1, -1], method='2-point', abs_step=[-1e-8, 1e-8]
+    )
+    assert_allclose(grad, [1.0, 1.0])
+
+    # the forward step should reverse to a backwards step if it runs into a
+    # bound
+    # This is kind of tested in TestAdjustSchemeToBounds, but only for a
+    # lower-level function.
+    grad = approx_derivative(
+        f, [-1, -1], method='2-point', abs_step=1e-8,
+        bounds=(-np.inf, -1)
+    )
+    assert_allclose(grad, [1.0, -1.0])
+
+    grad = approx_derivative(
+        f, [-1, -1], method='2-point', abs_step=-1e-8, bounds=(-1, np.inf)
+    )
+    assert_allclose(grad, [-1.0, 1.0])
+
+
+def test__compute_absolute_step():
+    # tests calculation of the absolute step from rel_step
+    methods = ['2-point', '3-point', 'cs']
+
+    x0 = np.array([1e-5, 0, 1, 1e5])
+
+    EPS = np.finfo(np.float64).eps
+    relative_step = {
+        "2-point": EPS**0.5,
+        "3-point": EPS**(1/3),
+        "cs": EPS**0.5
+    }
+    f0 = np.array(1.0)
+
+    for method in methods:
+        rel_step = relative_step[method]
+        correct_step = np.array([rel_step,
+                                 rel_step * 1.,
+                                 rel_step * 1.,
+                                 rel_step * np.abs(x0[3])])
+
+        abs_step = _compute_absolute_step(None, x0, f0, method)
+        assert_allclose(abs_step, correct_step)
+
+        sign_x0 = (-x0 >= 0).astype(float) * 2 - 1
+        abs_step = _compute_absolute_step(None, -x0, f0, method)
+        assert_allclose(abs_step, sign_x0 * correct_step)
+
+    # if a relative step is provided it should be used
+    rel_step = np.array([0.1, 1, 10, 100])
+    correct_step = np.array([rel_step[0] * x0[0],
+                             relative_step['2-point'],
+                             rel_step[2] * 1.,
+                             rel_step[3] * np.abs(x0[3])])
+
+    abs_step = _compute_absolute_step(rel_step, x0, f0, '2-point')
+    assert_allclose(abs_step, correct_step)
+
+    sign_x0 = (-x0 >= 0).astype(float) * 2 - 1
+    abs_step = _compute_absolute_step(rel_step, -x0, f0, '2-point')
+    assert_allclose(abs_step, sign_x0 * correct_step)
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test__remove_redundancy.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test__remove_redundancy.py
new file mode 100644
index 0000000000000000000000000000000000000000..817282011699dea333042a4173f65c999a2925fc
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test__remove_redundancy.py
@@ -0,0 +1,228 @@
+"""
+Unit tests for the redundancy removal routines used in linear programming.
+""" + +# TODO: add tests for: +# https://github.com/scipy/scipy/issues/5400 +# https://github.com/scipy/scipy/issues/6690 + +import numpy as np +from numpy.testing import ( + assert_, + assert_allclose, + assert_equal) + +from .test_linprog import magic_square +from scipy.optimize._remove_redundancy import _remove_redundancy_svd +from scipy.optimize._remove_redundancy import _remove_redundancy_pivot_dense +from scipy.optimize._remove_redundancy import _remove_redundancy_pivot_sparse +from scipy.optimize._remove_redundancy import _remove_redundancy_id + +from scipy.sparse import csc_matrix + + +def setup_module(): + np.random.seed(2017) + + +def redundancy_removed(A, B): + """Checks whether a matrix contains only independent rows of another""" + for rowA in A: + # `rowA in B` is not a reliable check + for rowB in B: + if np.all(rowA == rowB): + break + else: + return False + return A.shape[0] == np.linalg.matrix_rank(A) == np.linalg.matrix_rank(B) + + +class RRCommonTests: + def test_no_redundancy(self): + m, n = 10, 10 + A0 = np.random.rand(m, n) + b0 = np.random.rand(m) + A1, b1, status, message = self.rr(A0, b0) + assert_allclose(A0, A1) + assert_allclose(b0, b1) + assert_equal(status, 0) + + def test_infeasible_zero_row(self): + A = np.eye(3) + A[1, :] = 0 + b = np.random.rand(3) + A1, b1, status, message = self.rr(A, b) + assert_equal(status, 2) + + def test_remove_zero_row(self): + A = np.eye(3) + A[1, :] = 0 + b = np.random.rand(3) + b[1] = 0 + A1, b1, status, message = self.rr(A, b) + assert_equal(status, 0) + assert_allclose(A1, A[[0, 2], :]) + assert_allclose(b1, b[[0, 2]]) + + def test_infeasible_m_gt_n(self): + m, n = 20, 10 + A0 = np.random.rand(m, n) + b0 = np.random.rand(m) + A1, b1, status, message = self.rr(A0, b0) + assert_equal(status, 2) + + def test_infeasible_m_eq_n(self): + m, n = 10, 10 + A0 = np.random.rand(m, n) + b0 = np.random.rand(m) + A0[-1, :] = 2 * A0[-2, :] + A1, b1, status, message = self.rr(A0, b0) + assert_equal(status, 2) + + def test_infeasible_m_lt_n(self): + m, n = 9, 10 + A0 = np.random.rand(m, n) + b0 = np.random.rand(m) + A0[-1, :] = np.arange(m - 1).dot(A0[:-1]) + A1, b1, status, message = self.rr(A0, b0) + assert_equal(status, 2) + + def test_m_gt_n(self): + np.random.seed(2032) + m, n = 20, 10 + A0 = np.random.rand(m, n) + b0 = np.random.rand(m) + x = np.linalg.solve(A0[:n, :], b0[:n]) + b0[n:] = A0[n:, :].dot(x) + A1, b1, status, message = self.rr(A0, b0) + assert_equal(status, 0) + assert_equal(A1.shape[0], n) + assert_equal(np.linalg.matrix_rank(A1), n) + + def test_m_gt_n_rank_deficient(self): + m, n = 20, 10 + A0 = np.zeros((m, n)) + A0[:, 0] = 1 + b0 = np.ones(m) + A1, b1, status, message = self.rr(A0, b0) + assert_equal(status, 0) + assert_allclose(A1, A0[0:1, :]) + assert_allclose(b1, b0[0]) + + def test_m_lt_n_rank_deficient(self): + m, n = 9, 10 + A0 = np.random.rand(m, n) + b0 = np.random.rand(m) + A0[-1, :] = np.arange(m - 1).dot(A0[:-1]) + b0[-1] = np.arange(m - 1).dot(b0[:-1]) + A1, b1, status, message = self.rr(A0, b0) + assert_equal(status, 0) + assert_equal(A1.shape[0], 8) + assert_equal(np.linalg.matrix_rank(A1), 8) + + def test_dense1(self): + A = np.ones((6, 6)) + A[0, :3] = 0 + A[1, 3:] = 0 + A[3:, ::2] = -1 + A[3, :2] = 0 + A[4, 2:] = 0 + b = np.zeros(A.shape[0]) + + A1, b1, status, message = self.rr(A, b) + assert_(redundancy_removed(A1, A)) + assert_equal(status, 0) + + def test_dense2(self): + A = np.eye(6) + A[-2, -1] = 1 + A[-1, :] = 1 + b = np.zeros(A.shape[0]) + A1, b1, status, message = self.rr(A, b) + 
assert_(redundancy_removed(A1, A)) + assert_equal(status, 0) + + def test_dense3(self): + A = np.eye(6) + A[-2, -1] = 1 + A[-1, :] = 1 + b = np.random.rand(A.shape[0]) + b[-1] = np.sum(b[:-1]) + A1, b1, status, message = self.rr(A, b) + assert_(redundancy_removed(A1, A)) + assert_equal(status, 0) + + def test_m_gt_n_sparse(self): + np.random.seed(2013) + m, n = 20, 5 + p = 0.1 + A = np.random.rand(m, n) + A[np.random.rand(m, n) > p] = 0 + rank = np.linalg.matrix_rank(A) + b = np.zeros(A.shape[0]) + A1, b1, status, message = self.rr(A, b) + assert_equal(status, 0) + assert_equal(A1.shape[0], rank) + assert_equal(np.linalg.matrix_rank(A1), rank) + + def test_m_lt_n_sparse(self): + np.random.seed(2017) + m, n = 20, 50 + p = 0.05 + A = np.random.rand(m, n) + A[np.random.rand(m, n) > p] = 0 + rank = np.linalg.matrix_rank(A) + b = np.zeros(A.shape[0]) + A1, b1, status, message = self.rr(A, b) + assert_equal(status, 0) + assert_equal(A1.shape[0], rank) + assert_equal(np.linalg.matrix_rank(A1), rank) + + def test_m_eq_n_sparse(self): + np.random.seed(2017) + m, n = 100, 100 + p = 0.01 + A = np.random.rand(m, n) + A[np.random.rand(m, n) > p] = 0 + rank = np.linalg.matrix_rank(A) + b = np.zeros(A.shape[0]) + A1, b1, status, message = self.rr(A, b) + assert_equal(status, 0) + assert_equal(A1.shape[0], rank) + assert_equal(np.linalg.matrix_rank(A1), rank) + + def test_magic_square(self): + A, b, c, numbers, _ = magic_square(3) + A1, b1, status, message = self.rr(A, b) + assert_equal(status, 0) + assert_equal(A1.shape[0], 23) + assert_equal(np.linalg.matrix_rank(A1), 23) + + def test_magic_square2(self): + A, b, c, numbers, _ = magic_square(4) + A1, b1, status, message = self.rr(A, b) + assert_equal(status, 0) + assert_equal(A1.shape[0], 39) + assert_equal(np.linalg.matrix_rank(A1), 39) + + +class TestRRSVD(RRCommonTests): + def rr(self, A, b): + return _remove_redundancy_svd(A, b) + + +class TestRRPivotDense(RRCommonTests): + def rr(self, A, b): + return _remove_redundancy_pivot_dense(A, b) + + +class TestRRID(RRCommonTests): + def rr(self, A, b): + return _remove_redundancy_id(A, b) + + +class TestRRPivotSparse(RRCommonTests): + def rr(self, A, b): + rr_res = _remove_redundancy_pivot_sparse(csc_matrix(A), b) + A1, b1, status, message = rr_res + return A1.toarray(), b1, status, message diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test__root.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test__root.py new file mode 100644 index 0000000000000000000000000000000000000000..3827651a8e513f9543a2b0af3fbbc49cd82915a1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test__root.py @@ -0,0 +1,123 @@ +""" +Unit tests for optimization routines from _root.py. 
+""" +from numpy.testing import assert_, assert_equal +import pytest +from pytest import raises as assert_raises, warns as assert_warns +import numpy as np + +from scipy.optimize import root + + +class TestRoot: + def test_tol_parameter(self): + # Check that the minimize() tol= argument does something + def func(z): + x, y = z + return np.array([x**3 - 1, y**3 - 1]) + + def dfunc(z): + x, y = z + return np.array([[3*x**2, 0], [0, 3*y**2]]) + + for method in ['hybr', 'lm', 'broyden1', 'broyden2', 'anderson', + 'diagbroyden', 'krylov']: + if method in ('linearmixing', 'excitingmixing'): + # doesn't converge + continue + + if method in ('hybr', 'lm'): + jac = dfunc + else: + jac = None + + sol1 = root(func, [1.1,1.1], jac=jac, tol=1e-4, method=method) + sol2 = root(func, [1.1,1.1], jac=jac, tol=0.5, method=method) + msg = f"{method}: {func(sol1.x)} vs. {func(sol2.x)}" + assert_(sol1.success, msg) + assert_(sol2.success, msg) + assert_(abs(func(sol1.x)).max() < abs(func(sol2.x)).max(), + msg) + + def test_tol_norm(self): + + def norm(x): + return abs(x[0]) + + for method in ['excitingmixing', + 'diagbroyden', + 'linearmixing', + 'anderson', + 'broyden1', + 'broyden2', + 'krylov']: + + root(np.zeros_like, np.zeros(2), method=method, + options={"tol_norm": norm}) + + def test_minimize_scalar_coerce_args_param(self): + # github issue #3503 + def func(z, f=1): + x, y = z + return np.array([x**3 - 1, y**3 - f]) + root(func, [1.1, 1.1], args=1.5) + + def test_f_size(self): + # gh8320 + # check that decreasing the size of the returned array raises an error + # and doesn't segfault + class fun: + def __init__(self): + self.count = 0 + + def __call__(self, x): + self.count += 1 + + if not (self.count % 5): + ret = x[0] + 0.5 * (x[0] - x[1]) ** 3 - 1.0 + else: + ret = ([x[0] + 0.5 * (x[0] - x[1]) ** 3 - 1.0, + 0.5 * (x[1] - x[0]) ** 3 + x[1]]) + + return ret + + F = fun() + with assert_raises(ValueError): + root(F, [0.1, 0.0], method='lm') + + def test_gh_10370(self): + # gh-10370 reported that passing both `args` and `jac` to `root` with + # `method='krylov'` caused a failure. Ensure that this is fixed whether + # the gradient is passed via `jac` or as a second output of `fun`. 
+ def fun(x, ignored): + return [3*x[0] - 0.25*x[1]**2 + 10, 0.1*x[0]**2 + 5*x[1] - 2] + + def grad(x, ignored): + return [[3, 0.5 * x[1]], [0.2 * x[0], 5]] + + def fun_grad(x, ignored): + return fun(x, ignored), grad(x, ignored) + + x0 = np.zeros(2) + + ref = root(fun, x0, args=(1,), method='krylov') + message = 'Method krylov does not use the jacobian' + with assert_warns(RuntimeWarning, match=message): + res1 = root(fun, x0, args=(1,), method='krylov', jac=grad) + with assert_warns(RuntimeWarning, match=message): + res2 = root(fun_grad, x0, args=(1,), method='krylov', jac=True) + + assert_equal(res1.x, ref.x) + assert_equal(res2.x, ref.x) + assert res1.success is res2.success is ref.success is True + + @pytest.mark.parametrize("method", ["hybr", "lm", "broyden1", "broyden2", + "anderson", "linearmixing", + "diagbroyden", "excitingmixing", + "krylov", "df-sane"]) + def test_method_in_result(self, method): + def func(x): + return x - 1 + + res = root(func, x0=[1], method=method) + assert res.method == method diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test__shgo.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test__shgo.py new file mode 100644 index 0000000000000000000000000000000000000000..b739bc3d69195c8f13e07407db960e0407a2f26c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test__shgo.py @@ -0,0 +1,1159 @@ +import logging +import sys + +import numpy +import numpy as np +import time +from multiprocessing import Pool +from numpy.testing import assert_allclose, IS_PYPY +import pytest +from pytest import raises as assert_raises, warns +from scipy.optimize import (shgo, Bounds, minimize_scalar, minimize, rosen, + rosen_der, rosen_hess, NonlinearConstraint) +from scipy.optimize._constraints import new_constraint_to_old +from scipy.optimize._shgo import SHGO + + +class StructTestFunction: + def __init__(self, bounds, expected_x, expected_fun=None, + expected_xl=None, expected_funl=None): + self.bounds = bounds + self.expected_x = expected_x + self.expected_fun = expected_fun + self.expected_xl = expected_xl + self.expected_funl = expected_funl + + +def wrap_constraints(g): + cons = [] + if g is not None: + if not isinstance(g, (tuple, list)): + g = (g,) + else: + pass + for g in g: + cons.append({'type': 'ineq', + 'fun': g}) + cons = tuple(cons) + else: + cons = None + return cons + + +class StructTest1(StructTestFunction): + def f(self, x): + return x[0] ** 2 + x[1] ** 2 + + def g(x): + return -(numpy.sum(x, axis=0) - 6.0) + + cons = wrap_constraints(g) + + +test1_1 = StructTest1(bounds=[(-1, 6), (-1, 6)], + expected_x=[0, 0]) +test1_2 = StructTest1(bounds=[(0, 1), (0, 1)], + expected_x=[0, 0]) +test1_3 = StructTest1(bounds=[(None, None), (None, None)], + expected_x=[0, 0]) + + +class StructTest2(StructTestFunction): + """ + Scalar function with several minima to test all minimiser retrievals + """ + + def f(self, x): + return (x - 30) * numpy.sin(x) + + def g(x): + return 58 - numpy.sum(x, axis=0) + + cons = wrap_constraints(g) + + +test2_1 = StructTest2(bounds=[(0, 60)], + expected_x=[1.53567906], + expected_fun=-28.44677132, + # Important: test that funl return is in the correct + # order + expected_xl=numpy.array([[1.53567906], + [55.01782167], + [7.80894889], + [48.74797493], + [14.07445705], + [42.4913859], + [20.31743841], + [36.28607535], + [26.43039605], + [30.76371366]]), + + expected_funl=numpy.array([-28.44677132, -24.99785984, + -22.16855376, -18.72136195, + -15.89423937, -12.45154942, + 
-9.63133158, -6.20801301, + -3.43727232, -0.46353338]) + ) + +test2_2 = StructTest2(bounds=[(0, 4.5)], + expected_x=[1.53567906], + expected_fun=[-28.44677132], + expected_xl=numpy.array([[1.53567906]]), + expected_funl=numpy.array([-28.44677132]) + ) + + +class StructTest3(StructTestFunction): + """ + Hock and Schittkowski 18 problem (HS18). Hock and Schittkowski (1981) + http://www.ai7.uni-bayreuth.de/test_problem_coll.pdf + Minimize: f = 0.01 * (x_1)**2 + (x_2)**2 + + Subject to: x_1 * x_2 - 25.0 >= 0, + (x_1)**2 + (x_2)**2 - 25.0 >= 0, + 2 <= x_1 <= 50, + 0 <= x_2 <= 50. + + Approx. Answer: + f([(250)**0.5, (2.5)**0.5]) = 5.0 + + + """ + + # amended to test vectorisation of constraints + def f(self, x): + return 0.01 * (x[0]) ** 2 + (x[1]) ** 2 + + def g1(x): + return x[0] * x[1] - 25.0 + + def g2(x): + return x[0] ** 2 + x[1] ** 2 - 25.0 + + # g = (g1, g2) + # cons = wrap_constraints(g) + + def g(x): + return x[0] * x[1] - 25.0, x[0] ** 2 + x[1] ** 2 - 25.0 + + # this checks that shgo can be sent new-style constraints + __nlc = NonlinearConstraint(g, 0, np.inf) + cons = (__nlc,) + +test3_1 = StructTest3(bounds=[(2, 50), (0, 50)], + expected_x=[250 ** 0.5, 2.5 ** 0.5], + expected_fun=5.0 + ) + + +class StructTest4(StructTestFunction): + """ + Hock and Schittkowski 11 problem (HS11). Hock and Schittkowski (1981) + + NOTE: Did not find in original reference to HS collection, refer to + Henderson (2015) problem 7 instead. 02.03.2016 + """ + + def f(self, x): + return ((x[0] - 10) ** 2 + 5 * (x[1] - 12) ** 2 + x[2] ** 4 + + 3 * (x[3] - 11) ** 2 + 10 * x[4] ** 6 + 7 * x[5] ** 2 + x[ + 6] ** 4 + - 4 * x[5] * x[6] - 10 * x[5] - 8 * x[6] + ) + + def g1(x): + return -(2 * x[0] ** 2 + 3 * x[1] ** 4 + x[2] + 4 * x[3] ** 2 + + 5 * x[4] - 127) + + def g2(x): + return -(7 * x[0] + 3 * x[1] + 10 * x[2] ** 2 + x[3] - x[4] - 282.0) + + def g3(x): + return -(23 * x[0] + x[1] ** 2 + 6 * x[5] ** 2 - 8 * x[6] - 196) + + def g4(x): + return -(4 * x[0] ** 2 + x[1] ** 2 - 3 * x[0] * x[1] + 2 * x[2] ** 2 + + 5 * x[5] - 11 * x[6]) + + g = (g1, g2, g3, g4) + + cons = wrap_constraints(g) + + +test4_1 = StructTest4(bounds=[(-10, 10), ] * 7, + expected_x=[2.330499, 1.951372, -0.4775414, + 4.365726, -0.6244870, 1.038131, 1.594227], + expected_fun=680.6300573 + ) + + +class StructTest5(StructTestFunction): + def f(self, x): + return (-(x[1] + 47.0) + * numpy.sin(numpy.sqrt(abs(x[0] / 2.0 + (x[1] + 47.0)))) + - x[0] * numpy.sin(numpy.sqrt(abs(x[0] - (x[1] + 47.0)))) + ) + + g = None + cons = wrap_constraints(g) + + +test5_1 = StructTest5(bounds=[(-512, 512), (-512, 512)], + expected_fun=[-959.64066272085051], + expected_x=[512., 404.23180542]) + + +class StructTestLJ(StructTestFunction): + """ + Lennard-Jones objective function. Used to test symmetry constraints + settings.
+ """ + + def f(self, x, *args): + print(f'x = {x}') + self.N = args[0] + k = int(self.N / 3) + s = 0.0 + + for i in range(k - 1): + for j in range(i + 1, k): + a = 3 * i + b = 3 * j + xd = x[a] - x[b] + yd = x[a + 1] - x[b + 1] + zd = x[a + 2] - x[b + 2] + ed = xd * xd + yd * yd + zd * zd + ud = ed * ed * ed + if ed > 0.0: + s += (1.0 / ud - 2.0) / ud + + return s + + g = None + cons = wrap_constraints(g) + + +N = 6 +boundsLJ = list(zip([-4.0] * 6, [4.0] * 6)) + +testLJ = StructTestLJ(bounds=boundsLJ, + expected_fun=[-1.0], + expected_x=None, + # expected_x=[-2.71247337e-08, + # -2.71247337e-08, + # -2.50000222e+00, + # -2.71247337e-08, + # -2.71247337e-08, + # -1.50000222e+00] + ) + + +class StructTestS(StructTestFunction): + def f(self, x): + return ((x[0] - 0.5) ** 2 + (x[1] - 0.5) ** 2 + + (x[2] - 0.5) ** 2 + (x[3] - 0.5) ** 2) + + g = None + cons = wrap_constraints(g) + + +test_s = StructTestS(bounds=[(0, 2.0), ] * 4, + expected_fun=0.0, + expected_x=numpy.ones(4) - 0.5 + ) + + +class StructTestTable(StructTestFunction): + def f(self, x): + if x[0] == 3.0 and x[1] == 3.0: + return 50 + else: + return 100 + + g = None + cons = wrap_constraints(g) + + +test_table = StructTestTable(bounds=[(-10, 10), (-10, 10)], + expected_fun=[50], + expected_x=[3.0, 3.0]) + + +class StructTestInfeasible(StructTestFunction): + """ + Test function with no feasible domain. + """ + + def f(self, x, *args): + return x[0] ** 2 + x[1] ** 2 + + def g1(x): + return x[0] + x[1] - 1 + + def g2(x): + return -(x[0] + x[1] - 1) + + def g3(x): + return -x[0] + x[1] - 1 + + def g4(x): + return -(-x[0] + x[1] - 1) + + g = (g1, g2, g3, g4) + cons = wrap_constraints(g) + + +test_infeasible = StructTestInfeasible(bounds=[(2, 50), (-1, 1)], + expected_fun=None, + expected_x=None + ) + + +@pytest.mark.skip("Not a test") +def run_test(test, args=(), test_atol=1e-5, n=100, iters=None, + callback=None, minimizer_kwargs=None, options=None, + sampling_method='sobol', workers=1): + res = shgo(test.f, test.bounds, args=args, constraints=test.cons, + n=n, iters=iters, callback=callback, + minimizer_kwargs=minimizer_kwargs, options=options, + sampling_method=sampling_method, workers=workers) + + print(f'res = {res}') + logging.info(f'res = {res}') + if test.expected_x is not None: + numpy.testing.assert_allclose(res.x, test.expected_x, + rtol=test_atol, + atol=test_atol) + + # (Optional tests) + if test.expected_fun is not None: + numpy.testing.assert_allclose(res.fun, + test.expected_fun, + atol=test_atol) + + if test.expected_xl is not None: + numpy.testing.assert_allclose(res.xl, + test.expected_xl, + atol=test_atol) + + if test.expected_funl is not None: + numpy.testing.assert_allclose(res.funl, + test.expected_funl, + atol=test_atol) + return + + +# Base test functions: +class TestShgoSobolTestFunctions: + """ + Global optimisation tests with Sobol sampling: + """ + + # Sobol algorithm + def test_f1_1_sobol(self): + """Multivariate test function 1: + x[0]**2 + x[1]**2 with bounds=[(-1, 6), (-1, 6)]""" + run_test(test1_1) + + def test_f1_2_sobol(self): + """Multivariate test function 1: + x[0]**2 + x[1]**2 with bounds=[(0, 1), (0, 1)]""" + run_test(test1_2) + + def test_f1_3_sobol(self): + """Multivariate test function 1: + x[0]**2 + x[1]**2 with bounds=[(None, None),(None, None)]""" + options = {'disp': True} + run_test(test1_3, options=options) + + def test_f2_1_sobol(self): + """Univariate test function on + f(x) = (x - 30) * sin(x) with bounds=[(0, 60)]""" + run_test(test2_1) + + def test_f2_2_sobol(self): + """Univariate 
test function on + f(x) = (x - 30) * sin(x) bounds=[(0, 4.5)]""" + run_test(test2_2) + + def test_f3_sobol(self): + """NLP: Hock and Schittkowski problem 18""" + run_test(test3_1) + + @pytest.mark.slow + def test_f4_sobol(self): + """NLP: (High dimensional) Hock and Schittkowski 11 problem (HS11)""" + options = {'infty_constraints': False} + # run_test(test4_1, n=990, options=options) + run_test(test4_1, n=990 * 2, options=options) + + def test_f5_1_sobol(self): + """NLP: Eggholder, multimodal""" + # run_test(test5_1, n=30) + run_test(test5_1, n=60) + + def test_f5_2_sobol(self): + """NLP: Eggholder, multimodal""" + # run_test(test5_1, n=60, iters=5) + run_test(test5_1, n=60, iters=5) + + # def test_t911(self): + # """1D tabletop function""" + # run_test(test11_1) + + +class TestShgoSimplicialTestFunctions: + """ + Global optimisation tests with Simplicial sampling: + """ + + def test_f1_1_simplicial(self): + """Multivariate test function 1: + x[0]**2 + x[1]**2 with bounds=[(-1, 6), (-1, 6)]""" + run_test(test1_1, n=1, sampling_method='simplicial') + + def test_f1_2_simplicial(self): + """Multivariate test function 1: + x[0]**2 + x[1]**2 with bounds=[(0, 1), (0, 1)]""" + run_test(test1_2, n=1, sampling_method='simplicial') + + def test_f1_3_simplicial(self): + """Multivariate test function 1: x[0]**2 + x[1]**2 + with bounds=[(None, None),(None, None)]""" + run_test(test1_3, n=5, sampling_method='simplicial') + + def test_f2_1_simplicial(self): + """Univariate test function on + f(x) = (x - 30) * sin(x) with bounds=[(0, 60)]""" + options = {'minimize_every_iter': False} + run_test(test2_1, n=200, iters=7, options=options, + sampling_method='simplicial') + + def test_f2_2_simplicial(self): + """Univariate test function on + f(x) = (x - 30) * sin(x) bounds=[(0, 4.5)]""" + run_test(test2_2, n=1, sampling_method='simplicial') + + def test_f3_simplicial(self): + """NLP: Hock and Schittkowski problem 18""" + run_test(test3_1, n=1, sampling_method='simplicial') + + @pytest.mark.slow + def test_f4_simplicial(self): + """NLP: (High dimensional) Hock and Schittkowski 11 problem (HS11)""" + run_test(test4_1, n=1, sampling_method='simplicial') + + def test_lj_symmetry_old(self): + """LJ: Symmetry-constrained test function""" + options = {'symmetry': True, + 'disp': True} + args = (6,) # Number of atoms + run_test(testLJ, args=args, n=300, + options=options, iters=1, + sampling_method='simplicial') + + def test_f5_1_lj_symmetry(self): + """LJ: Symmetry constrained test function""" + options = {'symmetry': [0, ] * 6, + 'disp': True} + args = (6,) # No. 
of atoms + + run_test(testLJ, args=args, n=300, + options=options, iters=1, + sampling_method='simplicial') + + def test_f5_2_cons_symmetry(self): + """Symmetry constrained test function""" + options = {'symmetry': [0, 0], + 'disp': True} + + run_test(test1_1, n=200, + options=options, iters=1, + sampling_method='simplicial') + + def test_f5_3_cons_symmetry(self): + """Asymmetrically constrained test function""" + options = {'symmetry': [0, 0, 0, 3], + 'disp': True} + + run_test(test_s, n=10000, + options=options, + iters=1, + sampling_method='simplicial') + + @pytest.mark.skip("Not a test") + def test_f0_min_variance(self): + """Return a minimum on a perfectly symmetric problem, based on + gh10429""" + avg = 0.5 # Given average value of x + cons = {'type': 'eq', 'fun': lambda x: numpy.mean(x) - avg} + + # Minimize the variance of x under the given constraint + res = shgo(numpy.var, bounds=6 * [(0, 1)], constraints=cons) + assert res.success + assert_allclose(res.fun, 0, atol=1e-15) + assert_allclose(res.x, 0.5) + + @pytest.mark.skip("Not a test") + def test_f0_min_variance_1D(self): + """Return a minimum on a perfectly symmetric 1D problem, based on + gh10538""" + + def fun(x): + return x * (x - 1.0) * (x - 0.5) + + bounds = [(0, 1)] + res = shgo(fun, bounds=bounds) + ref = minimize_scalar(fun, bounds=bounds[0]) + assert res.success + assert_allclose(res.fun, ref.fun) + assert_allclose(res.x, ref.x, rtol=1e-6) + +# Argument test functions +class TestShgoArguments: + def test_1_1_simpl_iter(self): + """Iterative simplicial sampling on TestFunction 1 (multivariate)""" + run_test(test1_2, n=None, iters=2, sampling_method='simplicial') + + def test_1_2_simpl_iter(self): + """Iterative simplicial on TestFunction 2 (univariate)""" + options = {'minimize_every_iter': False} + run_test(test2_1, n=None, iters=9, options=options, + sampling_method='simplicial') + + def test_2_1_sobol_iter(self): + """Iterative Sobol sampling on TestFunction 1 (multivariate)""" + run_test(test1_2, n=None, iters=1, sampling_method='sobol') + + def test_2_2_sobol_iter(self): + """Iterative Sobol sampling on TestFunction 2 (univariate)""" + res = shgo(test2_1.f, test2_1.bounds, constraints=test2_1.cons, + n=None, iters=1, sampling_method='sobol') + + numpy.testing.assert_allclose(res.x, test2_1.expected_x, rtol=1e-5, + atol=1e-5) + numpy.testing.assert_allclose(res.fun, test2_1.expected_fun, atol=1e-5) + + def test_3_1_disp_simplicial(self): + """Iterative sampling on TestFunction 1 and 2 (multi and univariate) + """ + + def callback_func(x): + print("Local minimization callback test") + + for test in [test1_1, test2_1]: + shgo(test.f, test.bounds, iters=1, + sampling_method='simplicial', + callback=callback_func, options={'disp': True}) + shgo(test.f, test.bounds, n=1, sampling_method='simplicial', + callback=callback_func, options={'disp': True}) + + def test_3_2_disp_sobol(self): + """Iterative sampling on TestFunction 1 and 2 (multi and univariate)""" + + def callback_func(x): + print("Local minimization callback test") + + for test in [test1_1, test2_1]: + shgo(test.f, test.bounds, iters=1, sampling_method='sobol', + callback=callback_func, options={'disp': True}) + + shgo(test.f, test.bounds, n=1, sampling_method='simplicial', + callback=callback_func, options={'disp': True}) + + def test_args_gh14589(self): + """Using `args` used to cause `shgo` to fail; see #14589, #15986, + #16506""" + res = shgo(func=lambda x, y, z: x * z + y, bounds=[(0, 3)], args=(1, 2) + ) + ref = shgo(func=lambda x: 2 * x + 1,
bounds=[(0, 3)]) + assert_allclose(res.fun, ref.fun) + assert_allclose(res.x, ref.x) + + @pytest.mark.slow + def test_4_1_known_f_min(self): + """Test known function minima stopping criteria""" + # Specify known function value + options = {'f_min': test4_1.expected_fun, + 'f_tol': 1e-6, + 'minimize_every_iter': True} + # TODO: Make default n higher for faster tests + run_test(test4_1, n=None, test_atol=1e-5, options=options, + sampling_method='simplicial') + + @pytest.mark.slow + def test_4_2_known_f_min(self): + """Test Global mode limiting local evaluations""" + options = { # Specify known function value + 'f_min': test4_1.expected_fun, + 'f_tol': 1e-6, + # Specify number of local iterations to perform + 'minimize_every_iter': True, + 'local_iter': 1} + + run_test(test4_1, n=None, test_atol=1e-5, options=options, + sampling_method='simplicial') + + def test_4_4_known_f_min(self): + """Test Global mode limiting local evaluations for 1D funcs""" + options = { # Specify known function value + 'f_min': test2_1.expected_fun, + 'f_tol': 1e-6, + # Specify number of local iterations to perform + 'minimize_every_iter': True, + 'local_iter': 1, + 'infty_constraints': False} + + res = shgo(test2_1.f, test2_1.bounds, constraints=test2_1.cons, + n=None, iters=None, options=options, + sampling_method='sobol') + numpy.testing.assert_allclose(res.x, test2_1.expected_x, rtol=1e-5, + atol=1e-5) + + def test_5_1_simplicial_argless(self): + """Test default simplicial sampling settings on TestFunction 1""" + res = shgo(test1_1.f, test1_1.bounds, constraints=test1_1.cons) + numpy.testing.assert_allclose(res.x, test1_1.expected_x, rtol=1e-5, + atol=1e-5) + + def test_5_2_sobol_argless(self): + """Test default Sobol sampling settings on TestFunction 1""" + res = shgo(test1_1.f, test1_1.bounds, constraints=test1_1.cons, + sampling_method='sobol') + numpy.testing.assert_allclose(res.x, test1_1.expected_x, rtol=1e-5, + atol=1e-5) + + def test_6_1_simplicial_max_iter(self): + """Test that the maximum iteration option works on TestFunction 3""" + options = {'max_iter': 2} + res = shgo(test3_1.f, test3_1.bounds, constraints=test3_1.cons, + options=options, sampling_method='simplicial') + numpy.testing.assert_allclose(res.x, test3_1.expected_x, rtol=1e-5, + atol=1e-5) + numpy.testing.assert_allclose(res.fun, test3_1.expected_fun, atol=1e-5) + + def test_6_2_simplicial_min_iter(self): + """Test that the minimum iteration option works on TestFunction 3""" + options = {'min_iter': 2} + res = shgo(test3_1.f, test3_1.bounds, constraints=test3_1.cons, + options=options, sampling_method='simplicial') + numpy.testing.assert_allclose(res.x, test3_1.expected_x, rtol=1e-5, + atol=1e-5) + numpy.testing.assert_allclose(res.fun, test3_1.expected_fun, atol=1e-5) + + def test_7_1_minkwargs(self): + """Test the minimizer_kwargs arguments for solvers with constraints""" + # Test solvers + for solver in ['COBYLA', 'SLSQP']: + # Note that passing global constraints to SLSQP is tested in other + # unittests which run test4_1 normally + minimizer_kwargs = {'method': solver, + 'constraints': test3_1.cons} + run_test(test3_1, n=100, test_atol=1e-3, + minimizer_kwargs=minimizer_kwargs, + sampling_method='sobol') + + def test_7_2_minkwargs(self): + """Test the minimizer_kwargs default inits""" + minimizer_kwargs = {'ftol': 1e-5} + options = {'disp': True} # For coverage purposes + SHGO(test3_1.f, test3_1.bounds, constraints=test3_1.cons[0], + minimizer_kwargs=minimizer_kwargs, options=options) + + def test_7_3_minkwargs(self): + """Test
minimizer_kwargs arguments for solvers without constraints""" + for solver in ['Nelder-Mead', 'Powell', 'CG', 'BFGS', 'Newton-CG', + 'L-BFGS-B', 'TNC', 'dogleg', 'trust-ncg', 'trust-exact', + 'trust-krylov']: + def jac(x): + return numpy.array([2 * x[0], 2 * x[1]]).T + + def hess(x): + return numpy.array([[2, 0], [0, 2]]) + + minimizer_kwargs = {'method': solver, + 'jac': jac, + 'hess': hess} + logging.info(f"Solver = {solver}") + logging.info("=" * 100) + run_test(test1_1, n=100, test_atol=1e-3, + minimizer_kwargs=minimizer_kwargs, + sampling_method='sobol') + + def test_8_homology_group_diff(self): + options = {'minhgrd': 1, + 'minimize_every_iter': True} + + run_test(test1_1, n=None, iters=None, options=options, + sampling_method='simplicial') + + def test_9_cons_g(self): + """Test single function constraint passing""" + SHGO(test3_1.f, test3_1.bounds, constraints=test3_1.cons[0]) + + @pytest.mark.xfail(IS_PYPY and sys.platform == 'win32', + reason="Failing and a fix in PyPy is not planned (see gh-18632)") + def test_10_finite_time(self): + """Test that the maxtime option terminates the run early""" + options = {'maxtime': 1e-15} + + def f(x): + time.sleep(1e-14) + return 0.0 + + res = shgo(f, test1_1.bounds, iters=5, options=options) + # Assert that only 1 rather than 5 requested iterations ran: + assert res.nit == 1 + + def test_11_f_min_0(self): + """Test to cover the case where f_lowest == 0""" + options = {'f_min': 0.0, + 'disp': True} + res = shgo(test1_2.f, test1_2.bounds, n=10, iters=None, + options=options, sampling_method='sobol') + numpy.testing.assert_equal(0, res.x[0]) + numpy.testing.assert_equal(0, res.x[1]) + + # @nottest + @pytest.mark.skip(reason="no way of currently testing this") + def test_12_sobol_inf_cons(self): + """Test to cover the case where f_lowest == 0""" + # TODO: This test doesn't cover anything new, it is unknown what the + # original test was intended for as it was never complete. Delete or + # replace in the future. + options = {'maxtime': 1e-15, + 'f_min': 0.0} + res = shgo(test1_2.f, test1_2.bounds, n=1, iters=None, + options=options, sampling_method='sobol') + numpy.testing.assert_equal(0.0, res.fun) + + def test_13_high_sobol(self): + """Test init of high-dimensional Sobol sequences""" + + def f(x): + return 0 + + bounds = [(None, None), ] * 41 + SHGOc = SHGO(f, bounds, sampling_method='sobol') + # SHGOc.sobol_points(2, 50) + SHGOc.sampling_function(2, 50) + + def test_14_local_iter(self): + """Test limited local iterations for a pseudo-global mode""" + options = {'local_iter': 4} + run_test(test5_1, n=60, options=options) + + def test_15_min_every_iter(self): + """Test minimize every iter options and cover function cache""" + options = {'minimize_every_iter': True} + run_test(test1_1, n=1, iters=7, options=options, + sampling_method='sobol') + + def test_16_disp_bounds_minimizer(self, capsys): + """Test disp=True with minimizers that do not support bounds""" + options = {'disp': True} + minimizer_kwargs = {'method': 'nelder-mead'} + run_test(test1_2, sampling_method='simplicial', + options=options, minimizer_kwargs=minimizer_kwargs) + + def test_17_custom_sampling(self): + """Test the functionality to add custom sampling methods to shgo""" + + def sample(n, d): + return numpy.random.uniform(size=(n, d)) + + run_test(test1_1, n=30, sampling_method=sample) + + def test_18_bounds_class(self): + # test that new and old bounds yield same result + def f(x): + return numpy.square(x).sum() + + lb = [-6., 1., -5.] + ub = [-1., 3., 5.]
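+ # Old-style bounds are a sequence of (lb, ub) pairs, while new-style bounds + # use the scipy.optimize.Bounds(lb, ub) container; both forms are built + # below and must produce identical results.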
+ bounds_old = list(zip(lb, ub)) + bounds_new = Bounds(lb, ub) + + res_old_bounds = shgo(f, bounds_old) + res_new_bounds = shgo(f, bounds_new) + + assert res_new_bounds.nfev == res_old_bounds.nfev + assert res_new_bounds.message == res_old_bounds.message + assert res_new_bounds.success == res_old_bounds.success + x_opt = numpy.array([-1., 1., 0.]) + numpy.testing.assert_allclose(res_new_bounds.x, x_opt) + numpy.testing.assert_allclose(res_new_bounds.x, + res_old_bounds.x) + + def test_19_parallelization(self): + """Test parallel function evaluation via the workers argument""" + + with Pool(2) as p: + run_test(test1_1, n=30, workers=p.map) # Constrained + run_test(test1_1, n=30, workers=map) # Constrained + with Pool(2) as p: + run_test(test_s, n=30, workers=p.map) # Unconstrained + run_test(test_s, n=30, workers=map) # Unconstrained + + def test_20_constrained_args(self): + """Test that args can be passed to constraint functions""" + + def eggholder(x): + return (-(x[1] + 47.0) + * numpy.sin(numpy.sqrt(abs(x[0] / 2.0 + (x[1] + 47.0)))) + - x[0] * numpy.sin(numpy.sqrt(abs(x[0] - (x[1] + 47.0)))) + ) + + def f(x): # (cattle-feed) + return 24.55 * x[0] + 26.75 * x[1] + 39 * x[2] + 40.50 * x[3] + + bounds = [(0, 1.0), ] * 4 + + def g1_modified(x, i): + return i * 2.3 * x[0] + i * 5.6 * x[1] + 11.1 * x[2] + 1.3 * x[ + 3] - 5 # >=0 + + def g2(x): + return (12 * x[0] + 11.9 * x[1] + 41.8 * x[2] + 52.1 * x[3] - 21 + - 1.645 * numpy.sqrt(0.28 * x[0] ** 2 + 0.19 * x[1] ** 2 + + 20.5 * x[2] ** 2 + 0.62 * x[3] ** 2) + ) # >=0 + + def h1(x): + return x[0] + x[1] + x[2] + x[3] - 1 # == 0 + + cons = ({'type': 'ineq', 'fun': g1_modified, "args": (0,)}, + {'type': 'ineq', 'fun': g2}, + {'type': 'eq', 'fun': h1}) + + shgo(f, bounds, n=300, iters=1, constraints=cons) + # using constraints with arguments AND sampling method sobol + shgo(f, bounds, n=300, iters=1, constraints=cons, + sampling_method='sobol') + + def test_21_1_jac_true(self): + """Test that shgo can handle objective functions that return the + gradient alongside the objective value. Fixes gh-13547""" + # previous + def func(x): + return numpy.sum(numpy.power(x, 2)), 2 * x + + shgo( + func, + bounds=[[-1, 1], [1, 2]], + n=100, iters=5, + sampling_method="sobol", + minimizer_kwargs={'method': 'SLSQP', 'jac': True} + ) + + # new + def func(x): + return numpy.sum(x ** 2), 2 * x + + bounds = [[-1, 1], [1, 2], [-1, 1], [1, 2], [0, 3]] + + res = shgo(func, bounds=bounds, sampling_method="sobol", + minimizer_kwargs={'method': 'SLSQP', 'jac': True}) + ref = minimize(func, x0=[1, 1, 1, 1, 1], bounds=bounds, + jac=True) + assert res.success + assert_allclose(res.fun, ref.fun) + assert_allclose(res.x, ref.x, atol=1e-15) + + @pytest.mark.parametrize('derivative', ['jac', 'hess', 'hessp']) + def test_21_2_derivative_options(self, derivative): + """shgo used to raise an error when passing `options` with 'jac' + # see gh-12963.
check that this is resolved + """ + + def objective(x): + return 3 * x[0] * x[0] + 2 * x[0] + 5 + + def gradient(x): + return 6 * x[0] + 2 + + def hess(x): + return 6 + + def hessp(x, p): + return 6 * p + + derivative_funcs = {'jac': gradient, 'hess': hess, 'hessp': hessp} + options = {derivative: derivative_funcs[derivative]} + minimizer_kwargs = {'method': 'trust-constr'} + + bounds = [(-100, 100)] + res = shgo(objective, bounds, minimizer_kwargs=minimizer_kwargs, + options=options) + ref = minimize(objective, x0=[0], bounds=bounds, **minimizer_kwargs, + **options) + + assert res.success + numpy.testing.assert_allclose(res.fun, ref.fun) + numpy.testing.assert_allclose(res.x, ref.x) + + def test_21_3_hess_options_rosen(self): + """Ensure the Hessian gets passed correctly to the local minimizer + routine. Previous report gh-14533. + """ + bounds = [(0, 1.6), (0, 1.6), (0, 1.4), (0, 1.4), (0, 1.4)] + options = {'jac': rosen_der, 'hess': rosen_hess} + minimizer_kwargs = {'method': 'Newton-CG'} + res = shgo(rosen, bounds, minimizer_kwargs=minimizer_kwargs, + options=options) + ref = minimize(rosen, numpy.zeros(5), method='Newton-CG', + **options) + assert res.success + assert_allclose(res.fun, ref.fun) + assert_allclose(res.x, ref.x, atol=1e-15) + + def test_21_arg_tuple_sobol(self): + """shgo used to raise an error when passing `args` with Sobol sampling + # see gh-12114. check that this is resolved""" + + def fun(x, k): + return x[0] ** k + + constraints = ({'type': 'ineq', 'fun': lambda x: x[0] - 1}) + + bounds = [(0, 10)] + res = shgo(fun, bounds, args=(1,), constraints=constraints, + sampling_method='sobol') + ref = minimize(fun, numpy.zeros(1), bounds=bounds, args=(1,), + constraints=constraints) + assert res.success + assert_allclose(res.fun, ref.fun) + assert_allclose(res.x, ref.x) + + +# Failure test functions +class TestShgoFailures: + def test_1_maxiter(self): + """Test failure on insufficient iterations""" + options = {'maxiter': 2} + res = shgo(test4_1.f, test4_1.bounds, n=2, iters=None, + options=options, sampling_method='sobol') + + numpy.testing.assert_equal(False, res.success) + # numpy.testing.assert_equal(4, res.nfev) + numpy.testing.assert_equal(4, res.tnev) + + def test_2_sampling(self): + """Rejection of unknown sampling method""" + assert_raises(ValueError, shgo, test1_1.f, test1_1.bounds, + sampling_method='not_Sobol') + + def test_3_1_no_min_pool_sobol(self): + """Check that the routine stops when no minimiser is found + after maximum specified function evaluations""" + options = {'maxfev': 10, + # 'maxev': 10, + 'disp': True} + res = shgo(test_table.f, test_table.bounds, n=3, options=options, + sampling_method='sobol') + numpy.testing.assert_equal(False, res.success) + # numpy.testing.assert_equal(9, res.nfev) + numpy.testing.assert_equal(12, res.nfev) + + def test_3_2_no_min_pool_simplicial(self): + """Check that the routine stops when no minimiser is found + after maximum specified sampling evaluations""" + options = {'maxev': 10, + 'disp': True} + res = shgo(test_table.f, test_table.bounds, n=3, options=options, + sampling_method='simplicial') + numpy.testing.assert_equal(False, res.success) + + def test_4_1_bound_err(self): + """Specified bounds ub > lb""" + bounds = [(6, 3), (3, 5)] + assert_raises(ValueError, shgo, test1_1.f, bounds) + + def test_4_2_bound_err(self): + """Specified bounds are of the form (lb, ub)""" + bounds = [(3, 5, 5), (3, 5)] + assert_raises(ValueError, shgo, test1_1.f, bounds) + + def test_5_1_1_infeasible_sobol(self): + """Ensures the 
algorithm terminates on infeasible problems + after maxev is exceeded. Use infty constraints option""" + options = {'maxev': 100, + 'disp': True} + + res = shgo(test_infeasible.f, test_infeasible.bounds, + constraints=test_infeasible.cons, n=100, options=options, + sampling_method='sobol') + + numpy.testing.assert_equal(False, res.success) + + def test_5_1_2_infeasible_sobol(self): + """Ensures the algorithm terminates on infeasible problems + after maxev is exceeded. Do not use infty constraints option""" + options = {'maxev': 100, + 'disp': True, + 'infty_constraints': False} + + res = shgo(test_infeasible.f, test_infeasible.bounds, + constraints=test_infeasible.cons, n=100, options=options, + sampling_method='sobol') + + numpy.testing.assert_equal(False, res.success) + + def test_5_2_infeasible_simplicial(self): + """Ensures the algorithm terminates on infeasible problems + after maxev is exceeded.""" + options = {'maxev': 1000, + 'disp': False} + + res = shgo(test_infeasible.f, test_infeasible.bounds, + constraints=test_infeasible.cons, n=100, options=options, + sampling_method='simplicial') + + numpy.testing.assert_equal(False, res.success) + + def test_6_1_lower_known_f_min(self): + """Test Global mode limiting local evaluations with f* too high""" + options = { # Specify known function value + 'f_min': test2_1.expected_fun + 2.0, + 'f_tol': 1e-6, + # Specify number of local iterations to perform + 'minimize_every_iter': True, + 'local_iter': 1, + 'infty_constraints': False} + args = (test2_1.f, test2_1.bounds) + kwargs = {'constraints': test2_1.cons, + 'n': None, + 'iters': None, + 'options': options, + 'sampling_method': 'sobol' + } + warns(UserWarning, shgo, *args, **kwargs) + + def test(self): + from scipy.optimize import rosen, shgo + bounds = [(0, 2), (0, 2), (0, 2), (0, 2), (0, 2)] + + def fun(x): + fun.nfev += 1 + return rosen(x) + + fun.nfev = 0 + + result = shgo(fun, bounds) + print(result.x, result.fun, fun.nfev) # 50 + + +# Returns +class TestShgoReturns: + def test_1_nfev_simplicial(self): + bounds = [(0, 2), (0, 2), (0, 2), (0, 2), (0, 2)] + + def fun(x): + fun.nfev += 1 + return rosen(x) + + fun.nfev = 0 + + result = shgo(fun, bounds) + numpy.testing.assert_equal(fun.nfev, result.nfev) + + def test_1_nfev_sobol(self): + bounds = [(0, 2), (0, 2), (0, 2), (0, 2), (0, 2)] + + def fun(x): + fun.nfev += 1 + return rosen(x) + + fun.nfev = 0 + + result = shgo(fun, bounds, sampling_method='sobol') + numpy.testing.assert_equal(fun.nfev, result.nfev) + + +def test_vector_constraint(): + # gh15514 + def quad(x): + x = np.asarray(x) + return [np.sum(x ** 2)] + + nlc = NonlinearConstraint(quad, [2.2], [3]) + oldc = new_constraint_to_old(nlc, np.array([1.0, 1.0])) + + res = shgo(rosen, [(0, 10), (0, 10)], constraints=oldc, sampling_method='sobol') + assert np.all(np.sum((res.x)**2) >= 2.2) + assert np.all(np.sum((res.x) ** 2) <= 3.0) + assert res.success + + +@pytest.mark.filterwarnings("ignore:delta_grad") +def test_trust_constr(): + def quad(x): + x = np.asarray(x) + return [np.sum(x ** 2)] + + nlc = NonlinearConstraint(quad, [2.6], [3]) + minimizer_kwargs = {'method': 'trust-constr'} + # note that we don't supply the constraints in minimizer_kwargs, + # so if the final result obeys the constraints we know that shgo + # passed them on to 'trust-constr' + res = shgo( + rosen, + [(0, 10), (0, 10)], + constraints=nlc, + sampling_method='sobol', + minimizer_kwargs=minimizer_kwargs + ) + assert np.all(np.sum((res.x)**2) >= 2.6) + assert np.all(np.sum((res.x) ** 2) <= 3.0) + assert
res.success + + +def test_equality_constraints(): + # gh16260 + bounds = [(0.9, 4.0)] * 2 # Bounds chosen so the equality constraint is attainable. + + def faulty(x): + return x[0] + x[1] + + nlc = NonlinearConstraint(faulty, 3.9, 3.9) + res = shgo(rosen, bounds=bounds, constraints=nlc) + assert_allclose(np.sum(res.x), 3.9) + + def faulty(x): + return x[0] + x[1] - 3.9 + + constraints = {'type': 'eq', 'fun': faulty} + res = shgo(rosen, bounds=bounds, constraints=constraints) + assert_allclose(np.sum(res.x), 3.9) + + bounds = [(0, 1.0)] * 4 + # The sum of the variables should equal 1. + def faulty(x): + return x[0] + x[1] + x[2] + x[3] - 1 + + # options = {'minimize_every_iter': True, 'local_iter':10} + constraints = {'type': 'eq', 'fun': faulty} + res = shgo( + lambda x: - np.prod(x), + bounds=bounds, + constraints=constraints, + sampling_method='sobol' + ) + assert_allclose(np.sum(res.x), 1.0) + +def test_gh16971(): + def cons(x): + return np.sum(x**2) - 0 + + c = {'fun': cons, 'type': 'ineq'} + minimizer_kwargs = { + 'method': 'COBYLA', + 'options': {'rhobeg': 5, 'tol': 5e-1, 'catol': 0.05} + } + + s = SHGO( + rosen, [(0, 10)]*2, constraints=c, minimizer_kwargs=minimizer_kwargs + ) + + assert s.minimizer_kwargs['method'].lower() == 'cobyla' + assert s.minimizer_kwargs['options']['catol'] == 0.05 diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test__spectral.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test__spectral.py new file mode 100644 index 0000000000000000000000000000000000000000..7b4dc52cc20caf0206fe53933d4dfc6d0fbb2c34 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test__spectral.py @@ -0,0 +1,226 @@ +import itertools + +import numpy as np +from numpy import exp +from numpy.testing import assert_, assert_equal + +from scipy.optimize import root + + +def test_performance(): + # Compare performance results to those listed in + # [Cheng & Li, IMA J. Num. An. 29, 814 (2008)] + # and + # [W. La Cruz, J.M. Martinez, M. Raydan, Math. Comp. 75, 1429 (2006)], + # and those produced by dfsane.f from M. Raydan's website. + # + # Where the results disagree, the largest limits are taken. + + e_a = 1e-5 + e_r = 1e-4 + + table_1 = [ + dict(F=F_1, x0=x0_1, n=1000, nit=5, nfev=5), + dict(F=F_1, x0=x0_1, n=10000, nit=2, nfev=2), + dict(F=F_2, x0=x0_2, n=500, nit=11, nfev=11), + dict(F=F_2, x0=x0_2, n=2000, nit=11, nfev=11), + # dict(F=F_4, x0=x0_4, n=999, nit=243, nfev=1188) removed: + # too sensitive to rounding errors + # Results from dfsane.f; papers list nit=3, nfev=3 + dict(F=F_6, x0=x0_6, n=100, nit=6, nfev=6), + # Must have n%3==0, typo in papers? + dict(F=F_7, x0=x0_7, n=99, nit=23, nfev=29), + # Must have n%3==0, typo in papers? + dict(F=F_7, x0=x0_7, n=999, nit=23, nfev=29), + # Results from dfsane.f; papers list nit=nfev=6?
+ dict(F=F_9, x0=x0_9, n=100, nit=12, nfev=18), + dict(F=F_9, x0=x0_9, n=1000, nit=12, nfev=18), + # Results from dfsane.f; papers list nit=2, nfev=12 + dict(F=F_10, x0=x0_10, n=1000, nit=5, nfev=5), + ] + + # Check also scaling invariance + for xscale, yscale, line_search in itertools.product( + [1.0, 1e-10, 1e10], [1.0, 1e-10, 1e10], ['cruz', 'cheng'] + ): + for problem in table_1: + n = problem['n'] + def func(x, n): + return yscale * problem['F'](x / xscale, n) + args = (n,) + x0 = problem['x0'](n) * xscale + + fatol = np.sqrt(n) * e_a * yscale + e_r * np.linalg.norm(func(x0, n)) + + sigma_eps = 1e-10 * min(yscale/xscale, xscale/yscale) + sigma_0 = xscale/yscale + + with np.errstate(over='ignore'): + sol = root(func, x0, args=args, + options=dict(ftol=0, fatol=fatol, maxfev=problem['nfev'] + 1, + sigma_0=sigma_0, sigma_eps=sigma_eps, + line_search=line_search), + method='DF-SANE') + + err_msg = repr( + [xscale, yscale, line_search, problem, np.linalg.norm(func(sol.x, n)), + fatol, sol.success, sol.nit, sol.nfev] + ) + assert sol.success, err_msg + # nfev+1: dfsane.f doesn't count first eval + assert sol.nfev <= problem['nfev'] + 1, err_msg + assert sol.nit <= problem['nit'], err_msg + assert np.linalg.norm(func(sol.x, n)) <= fatol, err_msg + + +def test_complex(): + def func(z): + return z**2 - 1 + 2j + x0 = 2.0j + + ftol = 1e-4 + sol = root(func, x0, tol=ftol, method='DF-SANE') + + assert_(sol.success) + + f0 = np.linalg.norm(func(x0)) + fx = np.linalg.norm(func(sol.x)) + assert_(fx <= ftol*f0) + + +def test_linear_definite(): + # The DF-SANE paper proves convergence for "strongly isolated" + # solutions. + # + # For linear systems F(x) = A x - b = 0, with A positive or + # negative definite, the solution is strongly isolated. + + def check_solvability(A, b, line_search='cruz'): + def func(x): + return A.dot(x) - b + xp = np.linalg.solve(A, b) + eps = np.linalg.norm(func(xp)) * 1e3 + sol = root( + func, b, + options=dict(fatol=eps, ftol=0, maxfev=17523, line_search=line_search), + method='DF-SANE', + ) + assert_(sol.success) + assert_(np.linalg.norm(func(sol.x)) <= eps) + + n = 90 + + # Test linear pos.def. system + np.random.seed(1234) + A = np.arange(n*n).reshape(n, n) + A = A + n*n * np.diag(1 + np.arange(n)) + assert_(np.linalg.eigvals(A).min() > 0) + b = np.arange(n) * 1.0 + check_solvability(A, b, 'cruz') + check_solvability(A, b, 'cheng') + + # Test linear neg.def. system + check_solvability(-A, b, 'cruz') + check_solvability(-A, b, 'cheng') + + +def test_shape(): + def f(x, arg): + return x - arg + + for dt in [float, complex]: + x = np.zeros([2,2]) + arg = np.ones([2,2], dtype=dt) + + sol = root(f, x, args=(arg,), method='DF-SANE') + assert_(sol.success) + assert_equal(sol.x.shape, x.shape) + + +# Some of the test functions and initial guesses listed in +# [W. La Cruz, M. Raydan. Optimization Methods and Software, 18, 583 (2003)] + +def F_1(x, n): + g = np.zeros([n]) + i = np.arange(2, n+1) + g[0] = exp(x[0] - 1) - 1 + g[1:] = i*(exp(x[1:] - 1) - x[1:]) + return g + +def x0_1(n): + x0 = np.empty([n]) + x0.fill(n/(n-1)) + return x0 + +def F_2(x, n): + g = np.zeros([n]) + i = np.arange(2, n+1) + g[0] = exp(x[0]) - 1 + g[1:] = 0.1*i*(exp(x[1:]) + x[:-1] - 1) + return g + +def x0_2(n): + x0 = np.empty([n]) + x0.fill(1/n**2) + return x0 + + +def F_4(x, n): # skip name check + assert_equal(n % 3, 0) + g = np.zeros([n]) + # Note: the first line is typoed in some of the references; + # correct in original [Gasparo, Optimization Meth. 
13, 79 (2000)] + g[::3] = 0.6 * x[::3] + 1.6 * x[1::3]**3 - 7.2 * x[1::3]**2 + 9.6 * x[1::3] - 4.8 + g[1::3] = (0.48 * x[::3] - 0.72 * x[1::3]**3 + 3.24 * x[1::3]**2 - 4.32 * x[1::3] + - x[2::3] + 0.2 * x[2::3]**3 + 2.16) + g[2::3] = 1.25 * x[2::3] - 0.25*x[2::3]**3 + return g + + +def x0_4(n): # skip name check + assert_equal(n % 3, 0) + x0 = np.array([-1, 1/2, -1] * (n//3)) + return x0 + +def F_6(x, n): + c = 0.9 + mu = (np.arange(1, n+1) - 0.5)/n + return x - 1/(1 - c/(2*n) * (mu[:,None]*x / (mu[:,None] + mu)).sum(axis=1)) + +def x0_6(n): + return np.ones([n]) + +def F_7(x, n): + assert_equal(n % 3, 0) + + def phi(t): + v = 0.5*t - 2 + v[t > -1] = ((-592*t**3 + 888*t**2 + 4551*t - 1924)/1998)[t > -1] + v[t >= 2] = (0.5*t + 2)[t >= 2] + return v + g = np.zeros([n]) + g[::3] = 1e4 * x[1::3]**2 - 1 + g[1::3] = exp(-x[::3]) + exp(-x[1::3]) - 1.0001 + g[2::3] = phi(x[2::3]) + return g + +def x0_7(n): + assert_equal(n % 3, 0) + return np.array([1e-3, 18, 1] * (n//3)) + +def F_9(x, n): + g = np.zeros([n]) + i = np.arange(2, n) + g[0] = x[0]**3/3 + x[1]**2/2 + g[1:-1] = -x[1:-1]**2/2 + i*x[1:-1]**3/3 + x[2:]**2/2 + g[-1] = -x[-1]**2/2 + n*x[-1]**3/3 + return g + +def x0_9(n): + return np.ones([n]) + +def F_10(x, n): + return np.log(1 + x) - x/n + +def x0_10(n): + return np.ones([n]) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_bracket.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_bracket.py new file mode 100644 index 0000000000000000000000000000000000000000..0a55a8b13d6b82108bd65b8ff2d08076a290af8c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_bracket.py @@ -0,0 +1,780 @@ +import pytest + +import numpy as np +from numpy.testing import assert_array_less, assert_allclose, assert_equal + +from scipy.optimize._bracket import _bracket_root, _bracket_minimum, _ELIMITS +import scipy._lib._elementwise_iterative_method as eim +from scipy import stats + +class TestBracketRoot: + @pytest.mark.parametrize("seed", (615655101, 3141866013, 238075752)) + @pytest.mark.parametrize("use_xmin", (False, True)) + @pytest.mark.parametrize("other_side", (False, True)) + @pytest.mark.parametrize("fix_one_side", (False, True)) + def test_nfev_expected(self, seed, use_xmin, other_side, fix_one_side): + # Property-based test to confirm that _bracket_root is behaving as + # expected. The basic case is when root < a < b. + # The number of times bracket expands (per side) can be found by + # setting the expression for the left endpoint of the bracket to the + # root of f (x=0), solving for i, and rounding up. The corresponding + # lower and upper ends of the bracket are found by plugging this back + # into the expression for the ends of the bracket. 
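+ # Concretely, in the case without xmin: after i expansions the left + # endpoint sits at xr0 - d*factor**i, so the smallest i with + # xr0 - d*factor**i <= 0 is i = ceil(log(xr0/d) / log(factor)); this is + # the `n` computed in the code below.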
+ # `other_side=True` is the case that a < b < root + # Special cases like a < root < b are tested separately + + rng = np.random.default_rng(seed) + xl0, d, factor = rng.random(size=3) * [1e5, 10, 5] + factor = 1 + factor # factor must be greater than 1 + xr0 = xl0 + d # xr0 must be greater than a in basic case + + def f(x): + f.count += 1 + return x # root is 0 + + if use_xmin: + xmin = -rng.random() + n = np.ceil(np.log(-(xl0 - xmin) / xmin) / np.log(factor)) + l, u = xmin + (xl0 - xmin)*factor**-n, xmin + (xl0 - xmin)*factor**-(n - 1) + kwargs = dict(xl0=xl0, xr0=xr0, factor=factor, xmin=xmin) + else: + n = np.ceil(np.log(xr0/d) / np.log(factor)) + l, u = xr0 - d*factor**n, xr0 - d*factor**(n-1) + kwargs = dict(xl0=xl0, xr0=xr0, factor=factor) + + if other_side: + kwargs['xl0'], kwargs['xr0'] = -kwargs['xr0'], -kwargs['xl0'] + l, u = -u, -l + if 'xmin' in kwargs: + kwargs['xmax'] = -kwargs.pop('xmin') + + if fix_one_side: + if other_side: + kwargs['xmin'] = -xr0 + else: + kwargs['xmax'] = xr0 + + f.count = 0 + res = _bracket_root(f, **kwargs) + + # Compare reported number of function evaluations `nfev` against + # reported `nit`, actual function call count `f.count`, and theoretical + # number of expansions `n`. + # When both sides are free, these get multiplied by 2 because function + # is evaluated on the left and the right each iteration. + # When one side is fixed, however, we add one: on the right side, the + # function gets evaluated once at b. + # Add 1 to `n` and `res.nit` because function evaluations occur at + # iterations *0*, 1, ..., `n`. Subtract 1 from `f.count` because + # function is called separately for left and right in iteration 0. + if not fix_one_side: + assert res.nfev == 2*(res.nit+1) == 2*(f.count-1) == 2*(n + 1) + else: + assert res.nfev == (res.nit+1)+1 == (f.count-1)+1 == (n+1)+1 + + # Compare reported bracket to theoretical bracket and reported function + # values to function evaluated at bracket. + bracket = np.asarray([res.xl, res.xr]) + assert_allclose(bracket, (l, u)) + f_bracket = np.asarray([res.fl, res.fr]) + assert_allclose(f_bracket, f(bracket)) + + # Check that bracket is valid and that status and success are correct + assert res.xr > res.xl + signs = np.sign(f_bracket) + assert signs[0] == -signs[1] + assert res.status == 0 + assert res.success + + def f(self, q, p): + return stats.norm.cdf(q) - p + + @pytest.mark.parametrize('p', [0.6, np.linspace(0.05, 0.95, 10)]) + @pytest.mark.parametrize('xmin', [-5, None]) + @pytest.mark.parametrize('xmax', [5, None]) + @pytest.mark.parametrize('factor', [1.2, 2]) + def test_basic(self, p, xmin, xmax, factor): + # Test basic functionality to bracket root (distribution PPF) + res = _bracket_root(self.f, -0.01, 0.01, xmin=xmin, xmax=xmax, + factor=factor, args=(p,)) + assert_equal(-np.sign(res.fl), np.sign(res.fr)) + + @pytest.mark.parametrize('shape', [tuple(), (12,), (3, 4), (3, 2, 2)]) + def test_vectorization(self, shape): + # Test for correct functionality, output shapes, and dtypes for various + # input shapes. 
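+ # Strategy: call _bracket_root once on the full arrays, then rebuild a + # reference result elementwise with the np.vectorize-wrapped helper + # defined below and compare the two attribute by attribute (values, + # shapes, dtypes).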
+ p = np.linspace(-0.05, 1.05, 12).reshape(shape) if shape else 0.6 + args = (p,) + maxiter = 10 + + @np.vectorize + def bracket_root_single(xl0, xr0, xmin, xmax, factor, p): + return _bracket_root(self.f, xl0, xr0, xmin=xmin, xmax=xmax, + factor=factor, args=(p,), + maxiter=maxiter) + + def f(*args, **kwargs): + f.f_evals += 1 + return self.f(*args, **kwargs) + f.f_evals = 0 + + rng = np.random.default_rng(2348234) + xl0 = -rng.random(size=shape) + xr0 = rng.random(size=shape) + xmin, xmax = 1e3*xl0, 1e3*xr0 + if shape: # make some elements unbounded + i = rng.random(size=shape) > 0.5 + xmin[i], xmax[i] = -np.inf, np.inf + factor = rng.random(size=shape) + 1.5 + res = _bracket_root(f, xl0, xr0, xmin=xmin, xmax=xmax, factor=factor, + args=args, maxiter=maxiter) + refs = bracket_root_single(xl0, xr0, xmin, xmax, factor, p).ravel() + + attrs = ['xl', 'xr', 'fl', 'fr', 'success', 'nfev', 'nit'] + for attr in attrs: + ref_attr = [getattr(ref, attr) for ref in refs] + res_attr = getattr(res, attr) + assert_allclose(res_attr.ravel(), ref_attr) + assert_equal(res_attr.shape, shape) + + assert np.issubdtype(res.success.dtype, np.bool_) + if shape: + assert np.all(res.success[1:-1]) + assert np.issubdtype(res.status.dtype, np.integer) + assert np.issubdtype(res.nfev.dtype, np.integer) + assert np.issubdtype(res.nit.dtype, np.integer) + assert_equal(np.max(res.nit), f.f_evals - 2) + assert_array_less(res.xl, res.xr) + assert_allclose(res.fl, self.f(res.xl, *args)) + assert_allclose(res.fr, self.f(res.xr, *args)) + + def test_flags(self): + # Test cases that should produce different status flags; show that all + # can be produced simultaneously. + def f(xs, js): + funcs = [lambda x: x - 1.5, + lambda x: x - 1000, + lambda x: x - 1000, + lambda x: np.nan] + + return [funcs[j](x) for x, j in zip(xs, js)] + + args = (np.arange(4, dtype=np.int64),) + res = _bracket_root(f, xl0=[-1, -1, -1, -1], xr0=[1, 1, 1, 1], + xmin=[-np.inf, -1, -np.inf, -np.inf], + xmax=[np.inf, 1, np.inf, np.inf], + args=args, maxiter=3) + + ref_flags = np.array([eim._ECONVERGED, + _ELIMITS, + eim._ECONVERR, + eim._EVALUEERR]) + assert_equal(res.status, ref_flags) + + @pytest.mark.parametrize("root", (0.622, [0.622, 0.623])) + @pytest.mark.parametrize('xmin', [-5, None]) + @pytest.mark.parametrize('xmax', [5, None]) + @pytest.mark.parametrize("dtype", (np.float16, np.float32, np.float64)) + def test_dtype(self, root, xmin, xmax, dtype): + # Test that dtypes are preserved + + xmin = xmin if xmin is None else dtype(xmin) + xmax = xmax if xmax is None else dtype(xmax) + root = dtype(root) + def f(x, root): + return ((x - root) ** 3).astype(dtype) + + bracket = np.asarray([-0.01, 0.01], dtype=dtype) + res = _bracket_root(f, *bracket, xmin=xmin, xmax=xmax, args=(root,)) + assert np.all(res.success) + assert res.xl.dtype == res.xr.dtype == dtype + assert res.fl.dtype == res.fr.dtype == dtype + + def test_input_validation(self): + # Test input validation for appropriate error messages + + message = '`func` must be callable.' + with pytest.raises(ValueError, match=message): + _bracket_root(None, -4, 4) + + message = '...must be numeric and real.'
+ with pytest.raises(ValueError, match=message): + _bracket_root(lambda x: x, -4+1j, 4) + with pytest.raises(ValueError, match=message): + _bracket_root(lambda x: x, -4, 'hello') + with pytest.raises(ValueError, match=message): + _bracket_root(lambda x: x, -4, 4, xmin=np) + with pytest.raises(ValueError, match=message): + _bracket_root(lambda x: x, -4, 4, xmax=object()) + with pytest.raises(ValueError, match=message): + _bracket_root(lambda x: x, -4, 4, factor=sum) + + message = "All elements of `factor` must be greater than 1." + with pytest.raises(ValueError, match=message): + _bracket_root(lambda x: x, -4, 4, factor=0.5) + + message = '`xmin <= xl0 < xr0 <= xmax` must be True' + with pytest.raises(ValueError, match=message): + _bracket_root(lambda x: x, 4, -4) + with pytest.raises(ValueError, match=message): + _bracket_root(lambda x: x, -4, 4, xmax=np.nan) + with pytest.raises(ValueError, match=message): + _bracket_root(lambda x: x, -4, 4, xmin=10) + + message = "shape mismatch: objects cannot be broadcast" + # raised by `np.broadcast`, but the traceback is readable + with pytest.raises(ValueError, match=message): + _bracket_root(lambda x: x, [-2, -3], [3, 4, 5]) + # Consider making this give a more readable error message + # with pytest.raises(ValueError, match=message): + # _bracket_root(lambda x: [x[0], x[1], x[1]], [-3, -3], [5, 5]) + + message = '`maxiter` must be a non-negative integer.' + with pytest.raises(ValueError, match=message): + _bracket_root(lambda x: x, -4, 4, maxiter=1.5) + with pytest.raises(ValueError, match=message): + _bracket_root(lambda x: x, -4, 4, maxiter=-1) + + def test_special_cases(self): + # Test edge cases and other special cases + + # Test that integers are not passed to `f` + # (otherwise this would overflow) + def f(x): + assert np.issubdtype(x.dtype, np.floating) + return x ** 99 - 1 + + res = _bracket_root(f, -7, 5) + assert res.success + + # Test maxiter = 0. Should do nothing to bracket. + def f(x): + return x - 10 + + bracket = (-3, 5) + res = _bracket_root(f, *bracket, maxiter=0) + assert (res.xl, res.xr) == bracket + assert res.nit == 0 + assert res.nfev == 2 + assert res.status == -2 + + # Test scalar `args` (not in tuple) + def f(x, c): + return c*x - 1 + + res = _bracket_root(f, -1, 1, args=3) + assert res.success + assert_allclose(res.fl, f(res.xl, 3)) + + # Test other edge cases + + def f(x): + f.count += 1 + return x + + # 1. root lies within guess of bracket + f.count = 0 + _bracket_root(f, -10, 20) + assert_equal(f.count, 2) + + # 2. bracket endpoint hits root exactly + f.count = 0 + res = _bracket_root(f, 5, 10, factor=2) + bracket = (res.xl, res.xr) + assert_equal(res.nfev, 4) + assert_allclose(bracket, (0, 5), atol=1e-15) + + # 3. bracket limit hits root exactly + with np.errstate(over='ignore'): + res = _bracket_root(f, 5, 10, xmin=0) + bracket = (res.xl, res.xr) + assert_allclose(bracket[0], 0, atol=1e-15) + with np.errstate(over='ignore'): + res = _bracket_root(f, -10, -5, xmax=0) + bracket = (res.xl, res.xr) + assert_allclose(bracket[1], 0, atol=1e-15) + + # 4.
bracket not within min, max + with np.errstate(over='ignore'): + res = _bracket_root(f, 5, 10, xmin=1) + assert not res.success + + +class TestBracketMinimum: + def init_f(self): + def f(x, a, b): + f.count += 1 + return (x - a)**2 + b + f.count = 0 + return f + + def assert_valid_bracket(self, result): + assert np.all( + (result.xl < result.xm) & (result.xm < result.xr) + ) + assert np.all( + (result.fl >= result.fm) & (result.fr > result.fm) + | (result.fl > result.fm) & (result.fr > result.fm) + ) + + def get_kwargs( + self, *, xl0=None, xr0=None, factor=None, xmin=None, xmax=None, args=() + ): + names = ("xl0", "xr0", "xmin", "xmax", "factor", "args") + return { + name: val for name, val in zip(names, (xl0, xr0, xmin, xmax, factor, args)) + if isinstance(val, np.ndarray) or np.isscalar(val) + or val not in [None, ()] + } + + @pytest.mark.parametrize( + "seed", + ( + 307448016549685229886351382450158984917, + 11650702770735516532954347931959000479, + 113767103358505514764278732330028568336, + ) + ) + @pytest.mark.parametrize("use_xmin", (False, True)) + @pytest.mark.parametrize("other_side", (False, True)) + def test_nfev_expected(self, seed, use_xmin, other_side): + rng = np.random.default_rng(seed) + args = (0, 0) # f(x) = x^2 with minimum at 0 + # xl0, xm0, xr0 are chosen such that the initial bracket is to + # the right of the minimum, and the bracket will expand + # downhill towards zero. + xl0, d1, d2, factor = rng.random(size=4) * [1e5, 10, 10, 5] + xm0 = xl0 + d1 + xr0 = xm0 + d2 + # Factor should be greater than one. + factor += 1 + + if use_xmin: + xmin = -rng.random() * 5 + n = int(np.ceil(np.log(-(xl0 - xmin) / xmin) / np.log(factor))) + lower = xmin + (xl0 - xmin)*factor**-n + middle = xmin + (xl0 - xmin)*factor**-(n-1) + upper = xmin + (xl0 - xmin)*factor**-(n-2) if n > 1 else xm0 + # It may be the case that `lower` is below the minimum, but we still + # don't have a valid bracket. + if middle**2 > lower**2: + n += 1 + lower, middle, upper = ( + xmin + (xl0 - xmin)*factor**-n, lower, middle + ) + else: + xmin = None + n = int(np.ceil(np.log(xl0 / d1) / np.log(factor))) + lower = xl0 - d1*factor**n + middle = xl0 - d1*factor**(n-1) if n > 1 else xl0 + upper = xl0 - d1*factor**(n-2) if n > 1 else xm0 + # It may be the case that `lower` is below the minimum, but we still + # don't have a valid bracket. + if middle**2 > lower**2: + n += 1 + lower, middle, upper = ( + xl0 - d1*factor**n, lower, middle + ) + f = self.init_f() + + xmax = None + if other_side: + xl0, xm0, xr0 = -xr0, -xm0, -xl0 + xmin, xmax = None, -xmin if xmin is not None else None + lower, middle, upper = -upper, -middle, -lower + + kwargs = self.get_kwargs( + xl0=xl0, xr0=xr0, xmin=xmin, xmax=xmax, factor=factor, args=args + ) + result = _bracket_minimum(f, xm0, **kwargs) + + # Check that `nfev` and `nit` have the correct relationship + assert result.nfev == result.nit + 3 + # Check that `nfev` reports the correct number of function evaluations. + assert result.nfev == f.count + # Check that the number of iterations matches the theoretical value. + assert result.nit == n + + # Compare reported bracket to theoretical bracket and reported function + # values to function evaluated at bracket.
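+ # (lower, middle, upper) computed above are the closed-form bracket + # endpoints after n expansions, mirroring the derivation noted in + # TestBracketRoot.test_nfev_expected.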
+ bracket = np.asarray([result.xl, result.xm, result.xr]) + assert_allclose(bracket, (lower, middle, upper)) + f_bracket = np.asarray([result.fl, result.fm, result.fr]) + assert_allclose(f_bracket, f(bracket, *args)) + + self.assert_valid_bracket(result) + assert result.status == 0 + assert result.success + + def test_flags(self): + # Test cases that should produce different status flags; show that all + # can be produced simultaneously + def f(xs, js): + funcs = [lambda x: (x - 1.5)**2, + lambda x: x, + lambda x: x, + lambda x: np.nan] + return [funcs[j](x) for x, j in zip(xs, js)] + + args = (np.arange(4, dtype=np.int64),) + xl0, xm0, xr0 = np.full(4, -1.0), np.full(4, 0.0), np.full(4, 1.0) + result = _bracket_minimum(f, xm0, xl0=xl0, xr0=xr0, + xmin=[-np.inf, -1.0, -np.inf, -np.inf], + args=args, maxiter=3) + + reference_flags = np.array([eim._ECONVERGED, _ELIMITS, + eim._ECONVERR, eim._EVALUEERR]) + assert_equal(result.status, reference_flags) + + @pytest.mark.parametrize("minimum", (0.622, [0.622, 0.623])) + @pytest.mark.parametrize("dtype", (np.float16, np.float32, np.float64)) + @pytest.mark.parametrize("xmin", [-5, None]) + @pytest.mark.parametrize("xmax", [5, None]) + def test_dtypes(self, minimum, xmin, xmax, dtype): + xmin = xmin if xmin is None else dtype(xmin) + xmax = xmax if xmax is None else dtype(xmax) + minimum = dtype(minimum) + + def f(x, minimum): + return ((x - minimum)**2).astype(dtype) + + xl0, xm0, xr0 = np.array([-0.01, 0.0, 0.01], dtype=dtype) + result = _bracket_minimum( + f, xm0, xl0=xl0, xr0=xr0, xmin=xmin, xmax=xmax, args=(minimum, ) + ) + assert np.all(result.success) + assert result.xl.dtype == result.xm.dtype == result.xr.dtype == dtype + assert result.fl.dtype == result.fm.dtype == result.fr.dtype == dtype + + def test_input_validation(self): + # Test input validation for appropriate error messages + + message = '`func` must be callable.' + with pytest.raises(ValueError, match=message): + _bracket_minimum(None, -4, xl0=4) + + message = '...must be numeric and real.' + with pytest.raises(ValueError, match=message): + _bracket_minimum(lambda x: x**2, 4+1j) + with pytest.raises(ValueError, match=message): + _bracket_minimum(lambda x: x**2, -4, xl0='hello') + with pytest.raises(ValueError, match=message): + _bracket_minimum(lambda x: x**2, -4, xmin=np) + with pytest.raises(ValueError, match=message): + _bracket_minimum(lambda x: x**2, -4, xmax=object()) + with pytest.raises(ValueError, match=message): + _bracket_minimum(lambda x: x**2, -4, factor=sum) + + message = "All elements of `factor` must be greater than 1." 
+ with pytest.raises(ValueError, match=message): + _bracket_minimum(lambda x: x, -4, factor=0.5) + + message = '`xmin <= xl0 < xm0 < xr0 <= xmax` must be True' + with pytest.raises(ValueError, match=message): + _bracket_minimum(lambda x: x**2, 4, xl0=6) + with pytest.raises(ValueError, match=message): + _bracket_minimum(lambda x: x**2, -4, xr0=-6) + with pytest.raises(ValueError, match=message): + _bracket_minimum(lambda x: x**2, -4, xl0=-3, xr0=-2) + with pytest.raises(ValueError, match=message): + _bracket_minimum(lambda x: x**2, -4, xl0=-6, xr0=-5) + with pytest.raises(ValueError, match=message): + _bracket_minimum(lambda x: x**2, -4, xl0=-np.nan) + with pytest.raises(ValueError, match=message): + _bracket_minimum(lambda x: x**2, -4, xr0=np.nan) + + message = "shape mismatch: objects cannot be broadcast" + # raised by `np.broadcast`, but the traceback is readable IMO + with pytest.raises(ValueError, match=message): + _bracket_minimum(lambda x: x**2, [-2, -3], xl0=[-3, -4, -5]) + + message = '`maxiter` must be a non-negative integer.' + with pytest.raises(ValueError, match=message): + _bracket_minimum(lambda x: x**2, -4, xr0=4, maxiter=1.5) + with pytest.raises(ValueError, match=message): + _bracket_minimum(lambda x: x**2, -4, xr0=4, maxiter=-1) + + @pytest.mark.parametrize("xl0", [0.0, None]) + @pytest.mark.parametrize("xm0", (0.05, 0.1, 0.15)) + @pytest.mark.parametrize("xr0", (0.2, 0.4, 0.6, None)) + # Minimum is ``a`` for each tuple ``(a, b)`` below. Tests cases where the + # minimum is within, or at varying distances to the left or right of, the + # initial bracket. + @pytest.mark.parametrize( + "args", + ( + (1.2, 0), (-0.5, 0), (0.1, 0), (0.2, 0), (3.6, 0), (21.4, 0), + (121.6, 0), (5764.1, 0), (-6.4, 0), (-12.9, 0), (-146.2, 0) + ) + ) + def test_scalar_no_limits(self, xl0, xm0, xr0, args): + f = self.init_f() + kwargs = self.get_kwargs(xl0=xl0, xr0=xr0, args=args) + result = _bracket_minimum(f, xm0, **kwargs) + self.assert_valid_bracket(result) + assert result.status == 0 + assert result.success + assert result.nfev == f.count + + @pytest.mark.parametrize( + # xmin is set to 0.0 in all cases. + "xl0,xm0,xr0,xmin", + ( + # Initial bracket at varying distances from xmin. + (0.5, 0.75, 1.0, 0.0), + (1.0, 2.5, 4.0, 0.0), + (2.0, 4.0, 6.0, 0.0), + (12.0, 16.0, 20.0, 0.0), + # Test default initial left endpoint selection. It should not + # be below xmin. + (None, 0.75, 1.0, 0.0), + (None, 2.5, 4.0, 0.0), + (None, 4.0, 6.0, 0.0), + (None, 16.0, 20.0, 0.0), + ) + ) + @pytest.mark.parametrize( + "args", ( + (0.0, 0.0), # Minimum is directly at xmin. + (1e-300, 0.0), # Minimum is extremely close to xmin. + (1e-20, 0.0), # Minimum is very close to xmin. + # Minimum at varying distances from xmin. + (0.1, 0.0), + (0.2, 0.0), + (0.4, 0.0) + ) + ) + def test_scalar_with_limit_left(self, xl0, xm0, xr0, xmin, args): + f = self.init_f() + kwargs = self.get_kwargs(xl0=xl0, xr0=xr0, xmin=xmin, args=args) + result = _bracket_minimum(f, xm0, **kwargs) + self.assert_valid_bracket(result) + assert result.status == 0 + assert result.success + assert result.nfev == f.count + + @pytest.mark.parametrize( + # xmax is set to 1.0 in all cases. + "xl0,xm0,xr0,xmax", + ( + # Bracket at varying distances from xmax. + (0.2, 0.3, 0.4, 1.0), + (0.05, 0.075, 0.1, 1.0), + (-0.2, -0.1, 0.0, 1.0), + (-21.2, -17.7, -14.2, 1.0), + # Test default right endpoint selection. It should not exceed xmax.
+ (0.2, 0.3, None, 1.0), + (0.05, 0.075, None, 1.0), + (-0.2, -0.1, None, 1.0), + (-21.2, -17.7, None, 1.0), + ) + ) + @pytest.mark.parametrize( + "args", ( + (0.9999999999999999, 0.0), # Minimum very close to xmax. + # Minimum at varying distances from xmax. + (0.9, 0.0), + (0.7, 0.0), + (0.5, 0.0) + ) + ) + def test_scalar_with_limit_right(self, xl0, xm0, xr0, xmax, args): + f = self.init_f() + kwargs = self.get_kwargs(xl0=xl0, xr0=xr0, xmax=xmax, args=args) + result = _bracket_minimum(f, xm0, **kwargs) + self.assert_valid_bracket(result) + assert result.status == 0 + assert result.success + assert result.nfev == f.count + + @pytest.mark.parametrize( + "xl0,xm0,xr0,xmin,xmax,args", + ( + ( # Case 1: + # Initial bracket. + 0.2, + 0.3, + 0.4, + # Function slopes down to the right from the bracket to a minimum + # at 1.0. xmax is also at 1.0 + None, + 1.0, + (1.0, 0.0) + ), + ( # Case 2: + # Initial bracket. + 1.4, + 1.95, + 2.5, + # Function slopes down to the left from the bracket to a minimum at + # 0.3 with xmin set to 0.3. + 0.3, + None, + (0.3, 0.0) + ), + ( + # Case 3: + # Initial bracket. + 2.6, + 3.25, + 3.9, + # Function slopes down and to the right to a minimum at 99.4 with xmax + # at 99.4. Tests case where minimum is at xmax relatively further from + # the bracket. + None, + 99.4, + (99.4, 0) + ), + ( + # Case 4: + # Initial bracket. + 4, + 4.5, + 5, + # Function slopes down and to the left away from the bracket with a + # minimum at -26.3 with xmin set to -26.3. Tests case where minimum is + # at xmin relatively far from the bracket. + -26.3, + None, + (-26.3, 0) + ), + ( + # Case 5: + # Similar to Case 1 above, but tests default values of xl0 and xr0. + None, + 0.3, + None, + None, + 1.0, + (1.0, 0.0) + ), + ( # Case 6: + # Similar to Case 2 above, but tests default values of xl0 and xr0. + None, + 1.95, + None, + 0.3, + None, + (0.3, 0.0) + ), + ( + # Case 7: + # Similar to Case 3 above, but tests default values of xl0 and xr0. + None, + 3.25, + None, + None, + 99.4, + (99.4, 0) + ), + ( + # Case 8: + # Similar to Case 4 above, but tests default values of xl0 and xr0. + None, + 4.5, + None, + -26.3, + None, + (-26.3, 0) + ), + ) + ) + def test_minimum_at_boundary_point(self, xl0, xm0, xr0, xmin, xmax, args): + f = self.init_f() + kwargs = self.get_kwargs(xr0=xr0, xmin=xmin, xmax=xmax, args=args) + result = _bracket_minimum(f, xm0, **kwargs) + assert result.status == -1 + assert args[0] in (result.xl, result.xr) + assert result.nfev == f.count + + @pytest.mark.parametrize('shape', [tuple(), (12, ), (3, 4), (3, 2, 2)]) + def test_vectorization(self, shape): + # Test for correct functionality, output shapes, and dtypes for + # various input shapes. 
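+ # The elementwise reference below wraps the scalar call with np.vectorize + # so that each element of the broadcasted inputs is solved independently; + # the single vectorized _bracket_minimum call is then required to + # reproduce those per-element results attribute by attribute.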
+ a = np.linspace(-0.05, 1.05, 12).reshape(shape) if shape else 0.6 + args = (a, 0.0) + maxiter = 10 + + @np.vectorize + def bracket_minimum_single(xm0, xl0, xr0, xmin, xmax, factor, a): + return _bracket_minimum(self.init_f(), xm0, xl0=xl0, xr0=xr0, xmin=xmin, + xmax=xmax, factor=factor, maxiter=maxiter, + args=(a, 0.0)) + + f = self.init_f() + + rng = np.random.default_rng(2348234) + xl0 = -rng.random(size=shape) + xr0 = rng.random(size=shape) + xm0 = xl0 + rng.random(size=shape) * (xr0 - xl0) + xmin, xmax = 1e3*xl0, 1e3*xr0 + if shape: # make some elements unbounded + i = rng.random(size=shape) > 0.5 + xmin[i], xmax[i] = -np.inf, np.inf + factor = rng.random(size=shape) + 1.5 + res = _bracket_minimum(f, xm0, xl0=xl0, xr0=xr0, xmin=xmin, xmax=xmax, + factor=factor, args=args, maxiter=maxiter) + refs = bracket_minimum_single(xm0, xl0, xr0, xmin, xmax, factor, a).ravel() + + attrs = ['xl', 'xm', 'xr', 'fl', 'fm', 'fr', 'success', 'nfev', 'nit'] + for attr in attrs: + ref_attr = [getattr(ref, attr) for ref in refs] + res_attr = getattr(res, attr) + assert_allclose(res_attr.ravel(), ref_attr) + assert_equal(res_attr.shape, shape) + + assert np.issubdtype(res.success.dtype, np.bool_) + if shape: + assert np.all(res.success[1:-1]) + assert np.issubdtype(res.status.dtype, np.integer) + assert np.issubdtype(res.nfev.dtype, np.integer) + assert np.issubdtype(res.nit.dtype, np.integer) + assert_equal(np.max(res.nit), f.count - 3) + self.assert_valid_bracket(res) + assert_allclose(res.fl, f(res.xl, *args)) + assert_allclose(res.fm, f(res.xm, *args)) + assert_allclose(res.fr, f(res.xr, *args)) + + def test_special_cases(self): + # Test edge cases and other special cases. + + # Test that integers are not passed to `f` + # (otherwise this would overflow) + def f(x): + assert np.issubdtype(x.dtype, np.floating) + return x ** 98 - 1 + + result = _bracket_minimum(f, -7, xr0=5) + assert result.success + + # Test maxiter = 0. Should do nothing to bracket. + def f(x): + return x**2 - 10 + + xl0, xm0, xr0 = -3, -1, 2 + result = _bracket_minimum(f, xm0, xl0=xl0, xr0=xr0, maxiter=0) + assert_equal([result.xl, result.xm, result.xr], [xl0, xm0, xr0]) + + # Test scalar `args` (not in tuple) + def f(x, c): + return c*x**2 - 1 + + result = _bracket_minimum(f, -1, args=3) + assert result.success + assert_allclose(result.fl, f(result.xl, 3)) + + # Initial bracket is valid.
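+ # With args = (0, 0) the objective below reduces to f(x) = x**2, so + # f(-1.0) = 1.0 >= f(-0.2) = 0.04 and f(1.0) = 1.0 > 0.04: the initial + # bracket already satisfies the bracketing condition, and only the three + # initial evaluations are expected (hence the f.count == 3 check).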
+ f = self.init_f() + xl0, xm0, xr0 = [-1.0, -0.2, 1.0] + args = (0, 0) + result = _bracket_minimum(f, xm0, xl0=xl0, xr0=xr0, args=args) + assert f.count == 3 + + assert_equal( + [result.xl, result.xm, result.xr], + [xl0, xm0, xr0], + ) + assert_equal( + [result.fl, result.fm, result.fr], + [f(xl0, *args), f(xm0, *args), f(xr0, *args)], + ) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_chandrupatla.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_chandrupatla.py new file mode 100644 index 0000000000000000000000000000000000000000..5c88e23b9f756edb861157d58986bb13e5fa0810 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_chandrupatla.py @@ -0,0 +1,827 @@ +import pytest +import numpy as np +from numpy.testing import assert_allclose, assert_equal, assert_array_less + +from scipy import stats +import scipy._lib._elementwise_iterative_method as eim + +from scipy.optimize._chandrupatla import (_chandrupatla_minimize, + _chandrupatla as _chandrupatla_root) +from scipy.optimize._tstutils import _CHANDRUPATLA_TESTS + +from itertools import permutations +from .test_zeros import TestScalarRootFinders + +def f1(x): + return 100*(1 - x**3.)**2 + (1-x**2.) + 2*(1-x)**2. + + +def f2(x): + return 5 + (x - 2.)**6 + + +def f3(x): + return np.exp(x) - 5*x + + +def f4(x): + return x**5. - 5*x**3. - 20.*x + 5. + + +def f5(x): + return 8*x**3 - 2*x**2 - 7*x + 3 + + +def _bracket_minimum(func, x1, x2): + phi = 1.61803398875 + maxiter = 100 + f1 = func(x1) + f2 = func(x2) + step = x2 - x1 + x1, x2, f1, f2, step = ((x2, x1, f2, f1, -step) if f2 > f1 + else (x1, x2, f1, f2, step)) + + for i in range(maxiter): + step *= phi + x3 = x2 + step + f3 = func(x3) + if f3 < f2: + x1, x2, f1, f2 = x2, x3, f2, f3 + else: + break + return x1, x2, x3, f1, f2, f3 + + +cases = [ + (f1, -1, 11), + (f1, -2, 13), + (f1, -4, 13), + (f1, -8, 15), + (f1, -16, 16), + (f1, -32, 19), + (f1, -64, 20), + (f1, -128, 21), + (f1, -256, 21), + (f1, -512, 19), + (f1, -1024, 24), + (f2, -1, 8), + (f2, -2, 6), + (f2, -4, 6), + (f2, -8, 7), + (f2, -16, 8), + (f2, -32, 8), + (f2, -64, 9), + (f2, -128, 11), + (f2, -256, 13), + (f2, -512, 12), + (f2, -1024, 13), + (f3, -1, 11), + (f3, -2, 11), + (f3, -4, 11), + (f3, -8, 10), + (f3, -16, 14), + (f3, -32, 12), + (f3, -64, 15), + (f3, -128, 18), + (f3, -256, 18), + (f3, -512, 19), + (f3, -1024, 19), + (f4, -0.05, 9), + (f4, -0.10, 11), + (f4, -0.15, 11), + (f4, -0.20, 11), + (f4, -0.25, 11), + (f4, -0.30, 9), + (f4, -0.35, 9), + (f4, -0.40, 9), + (f4, -0.45, 10), + (f4, -0.50, 10), + (f4, -0.55, 10), + (f5, -0.05, 6), + (f5, -0.10, 7), + (f5, -0.15, 8), + (f5, -0.20, 10), + (f5, -0.25, 9), + (f5, -0.30, 8), + (f5, -0.35, 7), + (f5, -0.40, 7), + (f5, -0.45, 9), + (f5, -0.50, 9), + (f5, -0.55, 8) +] + + +class TestChandrupatlaMinimize: + + def f(self, x, loc): + dist = stats.norm() + return -dist.pdf(x - loc) + + @pytest.mark.parametrize('loc', [0.6, np.linspace(-1.05, 1.05, 10)]) + def test_basic(self, loc): + # Find mode of normal distribution. Compare mode against location + # parameter and value of pdf at mode against expected pdf. 
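+ # For reference: the normal pdf with unit scale peaks at the location + # parameter, where its value is 1/sqrt(2*pi), about 0.3989; the + # assertions below check that res.x matches `loc` and that res.fun + # matches -stats.norm.pdf(0).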
+ res = _chandrupatla_minimize(self.f, -5, 0, 5, args=(loc,)) + ref = loc + np.testing.assert_allclose(res.x, ref, rtol=1e-6) + np.testing.assert_allclose(res.fun, -stats.norm.pdf(0), atol=0, rtol=0) + assert res.x.shape == np.shape(ref) + + @pytest.mark.parametrize('shape', [tuple(), (12,), (3, 4), (3, 2, 2)]) + def test_vectorization(self, shape): + # Test for correct functionality, output shapes, and dtypes for various + # input shapes. + loc = np.linspace(-0.05, 1.05, 12).reshape(shape) if shape else 0.6 + args = (loc,) + + @np.vectorize + def chandrupatla_single(loc_single): + return _chandrupatla_minimize(self.f, -5, 0, 5, args=(loc_single,)) + + def f(*args, **kwargs): + f.f_evals += 1 + return self.f(*args, **kwargs) + f.f_evals = 0 + + res = _chandrupatla_minimize(f, -5, 0, 5, args=args) + refs = chandrupatla_single(loc).ravel() + + ref_x = [ref.x for ref in refs] + assert_allclose(res.x.ravel(), ref_x) + assert_equal(res.x.shape, shape) + + ref_fun = [ref.fun for ref in refs] + assert_allclose(res.fun.ravel(), ref_fun) + assert_equal(res.fun.shape, shape) + assert_equal(res.fun, self.f(res.x, *args)) + + ref_success = [ref.success for ref in refs] + assert_equal(res.success.ravel(), ref_success) + assert_equal(res.success.shape, shape) + assert np.issubdtype(res.success.dtype, np.bool_) + + ref_flag = [ref.status for ref in refs] + assert_equal(res.status.ravel(), ref_flag) + assert_equal(res.status.shape, shape) + assert np.issubdtype(res.status.dtype, np.integer) + + ref_nfev = [ref.nfev for ref in refs] + assert_equal(res.nfev.ravel(), ref_nfev) + assert_equal(np.max(res.nfev), f.f_evals) + assert_equal(res.nfev.shape, res.fun.shape) + assert np.issubdtype(res.nfev.dtype, np.integer) + + ref_nit = [ref.nit for ref in refs] + assert_equal(res.nit.ravel(), ref_nit) + assert_equal(np.max(res.nit), f.f_evals-3) + assert_equal(res.nit.shape, res.fun.shape) + assert np.issubdtype(res.nit.dtype, np.integer) + + ref_xl = [ref.xl for ref in refs] + assert_allclose(res.xl.ravel(), ref_xl) + assert_equal(res.xl.shape, shape) + + ref_xm = [ref.xm for ref in refs] + assert_allclose(res.xm.ravel(), ref_xm) + assert_equal(res.xm.shape, shape) + + ref_xr = [ref.xr for ref in refs] + assert_allclose(res.xr.ravel(), ref_xr) + assert_equal(res.xr.shape, shape) + + ref_fl = [ref.fl for ref in refs] + assert_allclose(res.fl.ravel(), ref_fl) + assert_equal(res.fl.shape, shape) + assert_allclose(res.fl, self.f(res.xl, *args)) + + ref_fm = [ref.fm for ref in refs] + assert_allclose(res.fm.ravel(), ref_fm) + assert_equal(res.fm.shape, shape) + assert_allclose(res.fm, self.f(res.xm, *args)) + + ref_fr = [ref.fr for ref in refs] + assert_allclose(res.fr.ravel(), ref_fr) + assert_equal(res.fr.shape, shape) + assert_allclose(res.fr, self.f(res.xr, *args)) + + def test_flags(self): + # Test cases that should produce different status flags; show that all + # can be produced simultaneously. 
+ def f(xs, js): + funcs = [lambda x: (x - 2.5) ** 2, + lambda x: x - 10, + lambda x: (x - 2.5) ** 4, + lambda x: np.nan] + + return [funcs[j](x) for x, j in zip(xs, js)] + + args = (np.arange(4, dtype=np.int64),) + + res = _chandrupatla_minimize(f, [0]*4, [2]*4, [np.pi]*4, args=args, + maxiter=10) + + ref_flags = np.array([eim._ECONVERGED, + eim._ESIGNERR, + eim._ECONVERR, + eim._EVALUEERR]) + assert_equal(res.status, ref_flags) + + def test_convergence(self): + # Test that the convergence tolerances behave as expected + rng = np.random.default_rng(2585255913088665241) + p = rng.random(size=3) + bracket = (-5, 0, 5) + args = (p,) + kwargs0 = dict(args=args, xatol=0, xrtol=0, fatol=0, frtol=0) + + kwargs = kwargs0.copy() + kwargs['xatol'] = 1e-3 + res1 = _chandrupatla_minimize(self.f, *bracket, **kwargs) + j1 = abs(res1.xr - res1.xl) + assert_array_less(j1, 4*kwargs['xatol']) + kwargs['xatol'] = 1e-6 + res2 = _chandrupatla_minimize(self.f, *bracket, **kwargs) + j2 = abs(res2.xr - res2.xl) + assert_array_less(j2, 4*kwargs['xatol']) + assert_array_less(j2, j1) + + kwargs = kwargs0.copy() + kwargs['xrtol'] = 1e-3 + res1 = _chandrupatla_minimize(self.f, *bracket, **kwargs) + j1 = abs(res1.xr - res1.xl) + assert_array_less(j1, 4*kwargs['xrtol']*abs(res1.x)) + kwargs['xrtol'] = 1e-6 + res2 = _chandrupatla_minimize(self.f, *bracket, **kwargs) + j2 = abs(res2.xr - res2.xl) + assert_array_less(j2, 4*kwargs['xrtol']*abs(res2.x)) + assert_array_less(j2, j1) + + kwargs = kwargs0.copy() + kwargs['fatol'] = 1e-3 + res1 = _chandrupatla_minimize(self.f, *bracket, **kwargs) + h1 = abs(res1.fl - 2 * res1.fm + res1.fr) + assert_array_less(h1, 2*kwargs['fatol']) + kwargs['fatol'] = 1e-6 + res2 = _chandrupatla_minimize(self.f, *bracket, **kwargs) + h2 = abs(res2.fl - 2 * res2.fm + res2.fr) + assert_array_less(h2, 2*kwargs['fatol']) + assert_array_less(h2, h1) + + kwargs = kwargs0.copy() + kwargs['frtol'] = 1e-3 + res1 = _chandrupatla_minimize(self.f, *bracket, **kwargs) + h1 = abs(res1.fl - 2 * res1.fm + res1.fr) + assert_array_less(h1, 2*kwargs['frtol']*abs(res1.fun)) + kwargs['frtol'] = 1e-6 + res2 = _chandrupatla_minimize(self.f, *bracket, **kwargs) + h2 = abs(res2.fl - 2 * res2.fm + res2.fr) + assert_array_less(h2, 2*kwargs['frtol']*abs(res2.fun)) + assert_array_less(h2, h1) + + def test_maxiter_callback(self): + # Test behavior of `maxiter` parameter and `callback` interface + loc = 0.612814 + bracket = (-5, 0, 5) + maxiter = 5 + + res = _chandrupatla_minimize(self.f, *bracket, args=(loc,), + maxiter=maxiter) + assert not np.any(res.success) + assert np.all(res.nfev == maxiter+3) + assert np.all(res.nit == maxiter) + + def callback(res): + callback.iter += 1 + callback.res = res + assert hasattr(res, 'x') + if callback.iter == 0: + # callback is called once with initial bracket + assert (res.xl, res.xm, res.xr) == bracket + else: + changed_xr = (res.xl == callback.xl) & (res.xr != callback.xr) + changed_xl = (res.xl != callback.xl) & (res.xr == callback.xr) + assert np.all(changed_xr | changed_xl) + + callback.xl = res.xl + callback.xr = res.xr + assert res.status == eim._EINPROGRESS + assert_equal(self.f(res.xl, loc), res.fl) + assert_equal(self.f(res.xm, loc), res.fm) + assert_equal(self.f(res.xr, loc), res.fr) + assert_equal(self.f(res.x, loc), res.fun) + if callback.iter == maxiter: + raise StopIteration + + callback.xl = np.nan + callback.xr = np.nan + callback.iter = -1 # callback called once before first iteration + callback.res = None + + res2 = _chandrupatla_minimize(self.f, *bracket, 
args=(loc,), + callback=callback) + + # terminating with callback is identical to terminating due to maxiter + # (except for `status`) + for key in res.keys(): + if key == 'status': + assert res[key] == eim._ECONVERR + assert callback.res[key] == eim._EINPROGRESS + assert res2[key] == eim._ECALLBACK + else: + assert res2[key] == callback.res[key] == res[key] + + @pytest.mark.parametrize('case', cases) + def test_nit_expected(self, case): + # Test that `_chandrupatla` implements Chandrupatla's algorithm: + # in all 55 test cases, the number of iterations performed + # matches the number reported in the original paper. + func, x1, nit = case + + # Find bracket using the algorithm in the paper + step = 0.2 + x2 = x1 + step + x1, x2, x3, f1, f2, f3 = _bracket_minimum(func, x1, x2) + + # Use tolerances from original paper + xatol = 0.0001 + fatol = 0.000001 + xrtol = 1e-16 + frtol = 1e-16 + + res = _chandrupatla_minimize(func, x1, x2, x3, xatol=xatol, + fatol=fatol, xrtol=xrtol, frtol=frtol) + assert_equal(res.nit, nit) + + @pytest.mark.parametrize("loc", (0.65, [0.65, 0.7])) + @pytest.mark.parametrize("dtype", (np.float16, np.float32, np.float64)) + def test_dtype(self, loc, dtype): + # Test that dtypes are preserved + + loc = dtype(loc) + + def f(x, loc): + assert x.dtype == dtype + return ((x - loc) ** 2).astype(dtype) + + res = _chandrupatla_minimize(f, dtype(-3), dtype(1), dtype(5), + args=(loc,)) + assert res.x.dtype == dtype + assert_allclose(res.x, loc, rtol=np.sqrt(np.finfo(dtype).eps)) + + def test_input_validation(self): + # Test input validation for appropriate error messages + + message = '`func` must be callable.' + with pytest.raises(ValueError, match=message): + _chandrupatla_minimize(None, -4, 0, 4) + + message = 'Abscissae and function output must be real numbers.' + with pytest.raises(ValueError, match=message): + _chandrupatla_minimize(lambda x: x, -4+1j, 0, 4) + + message = "shape mismatch: objects cannot be broadcast" + # raised by `np.broadcast, but the traceback is readable IMO + with pytest.raises(ValueError, match=message): + _chandrupatla_minimize(lambda x: x, [-2, -3], [0, 0], [3, 4, 5]) + + message = "The shape of the array returned by `func` must be the same" + with pytest.raises(ValueError, match=message): + _chandrupatla_minimize(lambda x: [x[0], x[1], x[1]], [-3, -3], + [0, 0], [5, 5]) + + message = 'Tolerances must be non-negative scalars.' + with pytest.raises(ValueError, match=message): + _chandrupatla_minimize(lambda x: x, -4, 0, 4, xatol=-1) + with pytest.raises(ValueError, match=message): + _chandrupatla_minimize(lambda x: x, -4, 0, 4, xrtol=np.nan) + with pytest.raises(ValueError, match=message): + _chandrupatla_minimize(lambda x: x, -4, 0, 4, fatol='ekki') + with pytest.raises(ValueError, match=message): + _chandrupatla_minimize(lambda x: x, -4, 0, 4, frtol=np.nan) + + message = '`maxiter` must be a non-negative integer.' + with pytest.raises(ValueError, match=message): + _chandrupatla_minimize(lambda x: x, -4, 0, 4, maxiter=1.5) + with pytest.raises(ValueError, match=message): + _chandrupatla_minimize(lambda x: x, -4, 0, 4, maxiter=-1) + + message = '`callback` must be callable.' 
+ with pytest.raises(ValueError, match=message): + _chandrupatla_minimize(lambda x: x, -4, 0, 4, callback='shrubbery') + + def test_bracket_order(self): + # Confirm that order of points in bracket doesn't matter + loc = np.linspace(-1, 1, 6)[:, np.newaxis] + brackets = np.array(list(permutations([-5, 0, 5]))).T + res = _chandrupatla_minimize(self.f, *brackets, args=(loc,)) + assert np.all(np.isclose(res.x, loc) | (res.fun == self.f(loc, loc))) + ref = res.x[:, 0] # all columns should be the same + assert_allclose(*np.broadcast_arrays(res.x.T, ref), rtol=1e-15) + + def test_special_cases(self): + # Test edge cases and other special cases + + # Test that integers are not passed to `f` + # (otherwise this would overflow) + def f(x): + assert np.issubdtype(x.dtype, np.floating) + return (x-1) ** 100 + + with np.errstate(invalid='ignore'): + res = _chandrupatla_minimize(f, -7, 0, 8, fatol=0, frtol=0) + assert res.success + assert_allclose(res.x, 1, rtol=1e-3) + assert_equal(res.fun, 0) + + # Test that if all elements of bracket equal minimizer, algorithm + # reports convergence + def f(x): + return (x-1)**2 + + res = _chandrupatla_minimize(f, 1, 1, 1) + assert res.success + assert_equal(res.x, 1) + + # Test maxiter = 0. Should do nothing to bracket. + def f(x): + return (x-1)**2 + + bracket = (-3, 1.1, 5) + res = _chandrupatla_minimize(f, *bracket, maxiter=0) + assert (res.xl, res.xm, res.xr) == bracket + assert res.nit == 0 + assert res.nfev == 3 + assert res.status == -2 + assert res.x == 1.1 # best so far + + # Test scalar `args` (not in tuple) + def f(x, c): + return (x-c)**2 - 1 + + res = _chandrupatla_minimize(f, -1, 0, 1, args=1/3) + assert_allclose(res.x, 1/3) + + # Test zero tolerances + # TODO: fatol/frtol = 0? + def f(x): + return -np.sin(x) + + res = _chandrupatla_minimize(f, 0, 1, np.pi, xatol=0, xrtol=0, + fatol=0, frtol=0) + assert res.success + # found a minimum exactly (according to floating point arithmetic) + assert res.xl < res.xm < res.xr + assert f(res.xl) == f(res.xm) == f(res.xr) + + +class TestChandrupatla(TestScalarRootFinders): + + def f(self, q, p): + return stats.norm.cdf(q) - p + + @pytest.mark.parametrize('p', [0.6, np.linspace(-0.05, 1.05, 10)]) + def test_basic(self, p): + # Invert distribution CDF and compare against distribution `ppf` + res = _chandrupatla_root(self.f, -5, 5, args=(p,)) + ref = stats.norm().ppf(p) + np.testing.assert_allclose(res.x, ref) + assert res.x.shape == ref.shape + + @pytest.mark.parametrize('shape', [tuple(), (12,), (3, 4), (3, 2, 2)]) + def test_vectorization(self, shape): + # Test for correct functionality, output shapes, and dtypes for various + # input shapes.
+ p = np.linspace(-0.05, 1.05, 12).reshape(shape) if shape else 0.6 + args = (p,) + + @np.vectorize + def chandrupatla_single(p): + return _chandrupatla_root(self.f, -5, 5, args=(p,)) + + def f(*args, **kwargs): + f.f_evals += 1 + return self.f(*args, **kwargs) + f.f_evals = 0 + + res = _chandrupatla_root(f, -5, 5, args=args) + refs = chandrupatla_single(p).ravel() + + ref_x = [ref.x for ref in refs] + assert_allclose(res.x.ravel(), ref_x) + assert_equal(res.x.shape, shape) + + ref_fun = [ref.fun for ref in refs] + assert_allclose(res.fun.ravel(), ref_fun) + assert_equal(res.fun.shape, shape) + assert_equal(res.fun, self.f(res.x, *args)) + + ref_success = [ref.success for ref in refs] + assert_equal(res.success.ravel(), ref_success) + assert_equal(res.success.shape, shape) + assert np.issubdtype(res.success.dtype, np.bool_) + + ref_flag = [ref.status for ref in refs] + assert_equal(res.status.ravel(), ref_flag) + assert_equal(res.status.shape, shape) + assert np.issubdtype(res.status.dtype, np.integer) + + ref_nfev = [ref.nfev for ref in refs] + assert_equal(res.nfev.ravel(), ref_nfev) + assert_equal(np.max(res.nfev), f.f_evals) + assert_equal(res.nfev.shape, res.fun.shape) + assert np.issubdtype(res.nfev.dtype, np.integer) + + ref_nit = [ref.nit for ref in refs] + assert_equal(res.nit.ravel(), ref_nit) + assert_equal(np.max(res.nit), f.f_evals-2) + assert_equal(res.nit.shape, res.fun.shape) + assert np.issubdtype(res.nit.dtype, np.integer) + + ref_xl = [ref.xl for ref in refs] + assert_allclose(res.xl.ravel(), ref_xl) + assert_equal(res.xl.shape, shape) + + ref_xr = [ref.xr for ref in refs] + assert_allclose(res.xr.ravel(), ref_xr) + assert_equal(res.xr.shape, shape) + + assert_array_less(res.xl, res.xr) + finite = np.isfinite(res.x) + assert np.all((res.x[finite] == res.xl[finite]) + | (res.x[finite] == res.xr[finite])) + + ref_fl = [ref.fl for ref in refs] + assert_allclose(res.fl.ravel(), ref_fl) + assert_equal(res.fl.shape, shape) + assert_allclose(res.fl, self.f(res.xl, *args)) + + ref_fr = [ref.fr for ref in refs] + assert_allclose(res.fr.ravel(), ref_fr) + assert_equal(res.fr.shape, shape) + assert_allclose(res.fr, self.f(res.xr, *args)) + + assert np.all(np.abs(res.fun[finite]) == + np.minimum(np.abs(res.fl[finite]), + np.abs(res.fr[finite]))) + + def test_flags(self): + # Test cases that should produce different status flags; show that all + # can be produced simultaneously. 
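+ # In the four cases below (reading off the reference flags asserted at + # the end): x - 2.5 changes sign on [0, pi] and converges; x - 10 has no + # sign change on the interval; (x - 0.1)**3 brackets a root but does not + # converge within maxiter=2; and the NaN output marks a value error.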
+ def f(xs, js): + funcs = [lambda x: x - 2.5, + lambda x: x - 10, + lambda x: (x - 0.1)**3, + lambda x: np.nan] + return [funcs[j](x) for x, j in zip(xs, js)] + + args = (np.arange(4, dtype=np.int64),) + res = _chandrupatla_root(f, [0]*4, [np.pi]*4, args=args, maxiter=2) + + ref_flags = np.array([eim._ECONVERGED, + eim._ESIGNERR, + eim._ECONVERR, + eim._EVALUEERR]) + assert_equal(res.status, ref_flags) + + def test_convergence(self): + # Test that the convergence tolerances behave as expected + rng = np.random.default_rng(2585255913088665241) + p = rng.random(size=3) + bracket = (-5, 5) + args = (p,) + kwargs0 = dict(args=args, xatol=0, xrtol=0, fatol=0, frtol=0) + + kwargs = kwargs0.copy() + kwargs['xatol'] = 1e-3 + res1 = _chandrupatla_root(self.f, *bracket, **kwargs) + assert_array_less(res1.xr - res1.xl, 1e-3) + kwargs['xatol'] = 1e-6 + res2 = _chandrupatla_root(self.f, *bracket, **kwargs) + assert_array_less(res2.xr - res2.xl, 1e-6) + assert_array_less(res2.xr - res2.xl, res1.xr - res1.xl) + + kwargs = kwargs0.copy() + kwargs['xrtol'] = 1e-3 + res1 = _chandrupatla_root(self.f, *bracket, **kwargs) + assert_array_less(res1.xr - res1.xl, 1e-3 * np.abs(res1.x)) + kwargs['xrtol'] = 1e-6 + res2 = _chandrupatla_root(self.f, *bracket, **kwargs) + assert_array_less(res2.xr - res2.xl, 1e-6 * np.abs(res2.x)) + assert_array_less(res2.xr - res2.xl, res1.xr - res1.xl) + + kwargs = kwargs0.copy() + kwargs['fatol'] = 1e-3 + res1 = _chandrupatla_root(self.f, *bracket, **kwargs) + assert_array_less(np.abs(res1.fun), 1e-3) + kwargs['fatol'] = 1e-6 + res2 = _chandrupatla_root(self.f, *bracket, **kwargs) + assert_array_less(np.abs(res2.fun), 1e-6) + assert_array_less(np.abs(res2.fun), np.abs(res1.fun)) + + kwargs = kwargs0.copy() + kwargs['frtol'] = 1e-3 + x1, x2 = bracket + f0 = np.minimum(abs(self.f(x1, *args)), abs(self.f(x2, *args))) + res1 = _chandrupatla_root(self.f, *bracket, **kwargs) + assert_array_less(np.abs(res1.fun), 1e-3*f0) + kwargs['frtol'] = 1e-6 + res2 = _chandrupatla_root(self.f, *bracket, **kwargs) + assert_array_less(np.abs(res2.fun), 1e-6*f0) + assert_array_less(np.abs(res2.fun), np.abs(res1.fun)) + + def test_maxiter_callback(self): + # Test behavior of `maxiter` parameter and `callback` interface + p = 0.612814 + bracket = (-5, 5) + maxiter = 5 + + def f(q, p): + res = stats.norm().cdf(q) - p + f.x = q + f.fun = res + return res + f.x = None + f.fun = None + + res = _chandrupatla_root(f, *bracket, args=(p,), + maxiter=maxiter) + assert not np.any(res.success) + assert np.all(res.nfev == maxiter+2) + assert np.all(res.nit == maxiter) + + def callback(res): + callback.iter += 1 + callback.res = res + assert hasattr(res, 'x') + if callback.iter == 0: + # callback is called once with initial bracket + assert (res.xl, res.xr) == bracket + else: + changed = (((res.xl == callback.xl) & (res.xr != callback.xr)) + | ((res.xl != callback.xl) & (res.xr == callback.xr))) + assert np.all(changed) + + callback.xl = res.xl + callback.xr = res.xr + assert res.status == eim._EINPROGRESS + assert_equal(self.f(res.xl, p), res.fl) + assert_equal(self.f(res.xr, p), res.fr) + assert_equal(self.f(res.x, p), res.fun) + if callback.iter == maxiter: + raise StopIteration + callback.iter = -1 # callback called once before first iteration + callback.res = None + callback.xl = None + callback.xr = None + + res2 = _chandrupatla_root(f, *bracket, args=(p,), + callback=callback) + + # terminating with callback is identical to terminating due to maxiter + # (except for `status`) + for key in res.keys(): + if key 
== 'status': + assert res[key] == eim._ECONVERR + assert callback.res[key] == eim._EINPROGRESS + assert res2[key] == eim._ECALLBACK + else: + assert res2[key] == callback.res[key] == res[key] + + @pytest.mark.parametrize('case', _CHANDRUPATLA_TESTS) + def test_nit_expected(self, case): + # Test that `_chandrupatla` implements Chandrupatla's algorithm: + # in all 40 test cases, the number of iterations performed + # matches the number reported in the original paper. + f, bracket, root, nfeval, id = case + # Chandrupatla's criterion is equivalent to + # abs(x2-x1) < 4*abs(xmin)*xrtol + xatol, but we use the more standard + # abs(x2-x1) < abs(xmin)*xrtol + xatol. Therefore, set xrtol to 4x + # that used by Chandrupatla in tests. + res = _chandrupatla_root(f, *bracket, xrtol=4e-10, xatol=1e-5) + assert_allclose(res.fun, f(root), rtol=1e-8, atol=2e-3) + assert_equal(res.nfev, nfeval) + + @pytest.mark.parametrize("root", (0.622, [0.622, 0.623])) + @pytest.mark.parametrize("dtype", (np.float16, np.float32, np.float64)) + def test_dtype(self, root, dtype): + # Test that dtypes are preserved + + root = dtype(root) + def f(x, root): + return ((x - root) ** 3).astype(dtype) + + res = _chandrupatla_root(f, dtype(-3), dtype(5), + args=(root,), xatol=1e-3) + assert res.x.dtype == dtype + assert np.allclose(res.x, root, atol=1e-3) or np.all(res.fun == 0) + + def test_input_validation(self): + # Test input validation for appropriate error messages + + message = '`func` must be callable.' + with pytest.raises(ValueError, match=message): + _chandrupatla_root(None, -4, 4) + + message = 'Abscissae and function output must be real numbers.' + with pytest.raises(ValueError, match=message): + _chandrupatla_root(lambda x: x, -4+1j, 4) + + message = "shape mismatch: objects cannot be broadcast" + # raised by `np.broadcast, but the traceback is readable IMO + with pytest.raises(ValueError, match=message): + _chandrupatla_root(lambda x: x, [-2, -3], [3, 4, 5]) + + message = "The shape of the array returned by `func`..." + with pytest.raises(ValueError, match=message): + _chandrupatla_root(lambda x: [x[0], x[1], x[1]], [-3, -3], [5, 5]) + + message = 'Tolerances must be non-negative scalars.' + with pytest.raises(ValueError, match=message): + _chandrupatla_root(lambda x: x, -4, 4, xatol=-1) + with pytest.raises(ValueError, match=message): + _chandrupatla_root(lambda x: x, -4, 4, xrtol=np.nan) + with pytest.raises(ValueError, match=message): + _chandrupatla_root(lambda x: x, -4, 4, fatol='ekki') + with pytest.raises(ValueError, match=message): + _chandrupatla_root(lambda x: x, -4, 4, frtol=np.nan) + + message = '`maxiter` must be a non-negative integer.' + with pytest.raises(ValueError, match=message): + _chandrupatla_root(lambda x: x, -4, 4, maxiter=1.5) + with pytest.raises(ValueError, match=message): + _chandrupatla_root(lambda x: x, -4, 4, maxiter=-1) + + message = '`callback` must be callable.' 
+ with pytest.raises(ValueError, match=message): + _chandrupatla_root(lambda x: x, -4, 4, callback='shrubbery') + + def test_special_cases(self): + # Test edge cases and other special cases + + # Test that integers are not passed to `f` + # (otherwise this would overflow) + def f(x): + assert np.issubdtype(x.dtype, np.floating) + return x ** 99 - 1 + + res = _chandrupatla_root(f, -7, 5) + assert res.success + assert_allclose(res.x, 1) + + # Test that if both ends of bracket equal root, algorithm reports + # convergence + def f(x): + return x**2 - 1 + + res = _chandrupatla_root(f, 1, 1) + assert res.success + assert_equal(res.x, 1) + + def f(x): + return 1/x + + with np.errstate(invalid='ignore'): + res = _chandrupatla_root(f, np.inf, np.inf) + assert res.success + assert_equal(res.x, np.inf) + + # Test maxiter = 0. Should do nothing to bracket. + def f(x): + return x**3 - 1 + + bracket = (-3, 5) + res = _chandrupatla_root(f, *bracket, maxiter=0) + assert (res.xl, res.xr) == bracket + assert res.nit == 0 + assert res.nfev == 2 + assert res.status == -2 + assert res.x == -3 # best so far + + # Test maxiter = 1 + res = _chandrupatla_root(f, *bracket, maxiter=1) + assert res.success + assert res.status == 0 + assert res.nit == 1 + assert res.nfev == 3 + assert_allclose(res.x, 1) + + # Test scalar `args` (not in tuple) + def f(x, c): + return c*x - 1 + + res = _chandrupatla_root(f, -1, 1, args=3) + assert_allclose(res.x, 1/3) + + # # TODO: Test zero tolerance + # # ~~What's going on here - why are iterations repeated?~~ + # # tl goes to zero when xatol=xrtol=0. When function is nearly linear, + # # this causes convergence issues. + # def f(x): + # return np.cos(x) + # + # res = _chandrupatla_root(f, 0, np.pi, xatol=0, xrtol=0) + # assert res.nit < 100 + # xp = np.nextafter(res.x, np.inf) + # xm = np.nextafter(res.x, -np.inf) + # assert np.abs(res.fun) < np.abs(f(xp)) + # assert np.abs(res.fun) < np.abs(f(xm)) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_cobyla.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_cobyla.py new file mode 100644 index 0000000000000000000000000000000000000000..11663ce778beb7e1046143b93fe2508f469727c1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_cobyla.py @@ -0,0 +1,166 @@ +import math + +import numpy as np +from numpy.testing import assert_allclose, assert_, assert_array_equal +import pytest + +from scipy.optimize import fmin_cobyla, minimize, Bounds + + +class TestCobyla: + def setup_method(self): + self.x0 = [4.95, 0.66] + self.solution = [math.sqrt(25 - (2.0/3)**2), 2.0/3] + self.opts = {'disp': False, 'rhobeg': 1, 'tol': 1e-5, + 'maxiter': 100} + + def fun(self, x): + return x[0]**2 + abs(x[1])**3 + + def con1(self, x): + return x[0]**2 + x[1]**2 - 25 + + def con2(self, x): + return -self.con1(x) + + @pytest.mark.xslow(True, reason='not slow, but noisy so only run rarely') + def test_simple(self, capfd): + # use disp=True as smoke test for gh-8118 + x = fmin_cobyla(self.fun, self.x0, [self.con1, self.con2], rhobeg=1, + rhoend=1e-5, maxfun=100, disp=True) + assert_allclose(x, self.solution, atol=1e-4) + + def test_minimize_simple(self): + class Callback: + def __init__(self): + self.n_calls = 0 + self.last_x = None + + def __call__(self, x): + self.n_calls += 1 + self.last_x = x + + callback = Callback() + + # Minimize with method='COBYLA' + cons = ({'type': 'ineq', 'fun': self.con1}, + {'type': 'ineq', 'fun': self.con2}) + sol = minimize(self.fun, self.x0,
method='cobyla', constraints=cons, + callback=callback, options=self.opts) + assert_allclose(sol.x, self.solution, atol=1e-4) + assert_(sol.success, sol.message) + assert_(sol.maxcv < 1e-5, sol) + assert_(sol.nfev < 70, sol) + assert_(sol.fun < self.fun(self.solution) + 1e-3, sol) + assert_(sol.nfev == callback.n_calls, + "Callback is not called exactly once for every function eval.") + assert_array_equal( + sol.x, + callback.last_x, + "Last design vector sent to the callback is not equal to returned value.", + ) + + def test_minimize_constraint_violation(self): + np.random.seed(1234) + pb = np.random.rand(10, 10) + spread = np.random.rand(10) + + def p(w): + return pb.dot(w) + + def f(w): + return -(w * spread).sum() + + def c1(w): + return 500 - abs(p(w)).sum() + + def c2(w): + return 5 - abs(p(w).sum()) + + def c3(w): + return 5 - abs(p(w)).max() + + cons = ({'type': 'ineq', 'fun': c1}, + {'type': 'ineq', 'fun': c2}, + {'type': 'ineq', 'fun': c3}) + w0 = np.zeros((10,)) + sol = minimize(f, w0, method='cobyla', constraints=cons, + options={'catol': 1e-6}) + assert_(sol.maxcv > 1e-6) + assert_(not sol.success) + + +def test_vector_constraints(): + # test that fmin_cobyla and minimize can take a combination + # of constraints, some returning a number and others an array + def fun(x): + return (x[0] - 1)**2 + (x[1] - 2.5)**2 + + def fmin(x): + return fun(x) - 1 + + def cons1(x): + a = np.array([[1, -2, 2], [-1, -2, 6], [-1, 2, 2]]) + return np.array([a[i, 0] * x[0] + a[i, 1] * x[1] + + a[i, 2] for i in range(len(a))]) + + def cons2(x): + return x # identity, acts as bounds x > 0 + + x0 = np.array([2, 0]) + cons_list = [fun, cons1, cons2] + + xsol = [1.4, 1.7] + fsol = 0.8 + + # testing fmin_cobyla + sol = fmin_cobyla(fun, x0, cons_list, rhoend=1e-5) + assert_allclose(sol, xsol, atol=1e-4) + + sol = fmin_cobyla(fun, x0, fmin, rhoend=1e-5) + assert_allclose(fun(sol), 1, atol=1e-4) + + # testing minimize + constraints = [{'type': 'ineq', 'fun': cons} for cons in cons_list] + sol = minimize(fun, x0, constraints=constraints, tol=1e-5) + assert_allclose(sol.x, xsol, atol=1e-4) + assert_(sol.success, sol.message) + assert_allclose(sol.fun, fsol, atol=1e-4) + + constraints = {'type': 'ineq', 'fun': fmin} + sol = minimize(fun, x0, constraints=constraints, tol=1e-5) + assert_allclose(sol.fun, 1, atol=1e-4) + + +class TestBounds: + # Test cobyla support for bounds (only when used via `minimize`) + # Invalid bounds is tested in + # test_optimize.TestOptimizeSimple.test_minimize_invalid_bounds + + def test_basic(self): + def f(x): + return np.sum(x**2) + + lb = [-1, None, 1, None, -0.5] + ub = [-0.5, -0.5, None, None, -0.5] + bounds = [(a, b) for a, b in zip(lb, ub)] + # these are converted to Bounds internally + + res = minimize(f, x0=[1, 2, 3, 4, 5], method='cobyla', bounds=bounds) + ref = [-0.5, -0.5, 1, 0, -0.5] + assert res.success + assert_allclose(res.x, ref, atol=1e-3) + + def test_unbounded(self): + def f(x): + return np.sum(x**2) + + bounds = Bounds([-np.inf, -np.inf], [np.inf, np.inf]) + res = minimize(f, x0=[1, 2], method='cobyla', bounds=bounds) + assert res.success + assert_allclose(res.x, 0, atol=1e-3) + + bounds = Bounds([1, -np.inf], [np.inf, np.inf]) + res = minimize(f, x0=[1, 2], method='cobyla', bounds=bounds) + assert res.success + assert_allclose(res.x, [1, 0], atol=1e-3) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_constraint_conversion.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_constraint_conversion.py new file mode 
100644 index 0000000000000000000000000000000000000000..f52c65cdd1d5075b978ac965d96dedbbeeb82e57 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_constraint_conversion.py @@ -0,0 +1,274 @@ +""" +Unit test for constraint conversion +""" + +import numpy as np +from numpy.testing import (assert_array_almost_equal, + assert_allclose, assert_warns, suppress_warnings) +import pytest +from scipy.optimize import (NonlinearConstraint, LinearConstraint, + OptimizeWarning, minimize, BFGS) +from .test_minimize_constrained import (Maratos, HyperbolicIneq, Rosenbrock, + IneqRosenbrock, EqIneqRosenbrock, + BoundedRosenbrock, Elec) + + +class TestOldToNew: + x0 = (2, 0) + bnds = ((0, None), (0, None)) + method = "trust-constr" + + def test_constraint_dictionary_1(self): + def fun(x): + return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2 + cons = ({'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2}, + {'type': 'ineq', 'fun': lambda x: -x[0] - 2 * x[1] + 6}, + {'type': 'ineq', 'fun': lambda x: -x[0] + 2 * x[1] + 2}) + + with suppress_warnings() as sup: + sup.filter(UserWarning, "delta_grad == 0.0") + res = minimize(fun, self.x0, method=self.method, + bounds=self.bnds, constraints=cons) + assert_allclose(res.x, [1.4, 1.7], rtol=1e-4) + assert_allclose(res.fun, 0.8, rtol=1e-4) + + def test_constraint_dictionary_2(self): + def fun(x): + return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2 + cons = {'type': 'eq', + 'fun': lambda x, p1, p2: p1*x[0] - p2*x[1], + 'args': (1, 1.1), + 'jac': lambda x, p1, p2: np.array([[p1, -p2]])} + with suppress_warnings() as sup: + sup.filter(UserWarning, "delta_grad == 0.0") + res = minimize(fun, self.x0, method=self.method, + bounds=self.bnds, constraints=cons) + assert_allclose(res.x, [1.7918552, 1.62895927]) + assert_allclose(res.fun, 1.3857466063348418) + + def test_constraint_dictionary_3(self): + def fun(x): + return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2 + cons = [{'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2}, + NonlinearConstraint(lambda x: x[0] - x[1], 0, 0)] + + with suppress_warnings() as sup: + sup.filter(UserWarning, "delta_grad == 0.0") + res = minimize(fun, self.x0, method=self.method, + bounds=self.bnds, constraints=cons) + assert_allclose(res.x, [1.75, 1.75], rtol=1e-4) + assert_allclose(res.fun, 1.125, rtol=1e-4) + + +class TestNewToOld: + + def test_multiple_constraint_objects(self): + def fun(x): + return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2 + (x[2] - 0.75) ** 2 + x0 = [2, 0, 1] + coni = [] # only inequality constraints (can use cobyla) + methods = ["slsqp", "cobyla", "trust-constr"] + + # mixed old and new + coni.append([{'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2}, + NonlinearConstraint(lambda x: x[0] - x[1], -1, 1)]) + + coni.append([LinearConstraint([1, -2, 0], -2, np.inf), + NonlinearConstraint(lambda x: x[0] - x[1], -1, 1)]) + + coni.append([NonlinearConstraint(lambda x: x[0] - 2 * x[1] + 2, 0, np.inf), + NonlinearConstraint(lambda x: x[0] - x[1], -1, 1)]) + + for con in coni: + funs = {} + for method in methods: + with suppress_warnings() as sup: + sup.filter(UserWarning) + result = minimize(fun, x0, method=method, constraints=con) + funs[method] = result.fun + assert_allclose(funs['slsqp'], funs['trust-constr'], rtol=1e-4) + assert_allclose(funs['cobyla'], funs['trust-constr'], rtol=1e-4) + + def test_individual_constraint_objects(self): + def fun(x): + return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2 + (x[2] - 0.75) ** 2 + x0 = [2, 0, 1] + + cone = [] # with equality constraints (can't use cobyla) + coni = [] # only 
inequality constraints (can use cobyla) + methods = ["slsqp", "cobyla", "trust-constr"] + + # nonstandard data types for constraint equality bounds + cone.append(NonlinearConstraint(lambda x: x[0] - x[1], 1, 1)) + cone.append(NonlinearConstraint(lambda x: x[0] - x[1], [1.21], [1.21])) + cone.append(NonlinearConstraint(lambda x: x[0] - x[1], + 1.21, np.array([1.21]))) + + # multiple equalities + cone.append(NonlinearConstraint( + lambda x: [x[0] - x[1], x[1] - x[2]], + 1.21, 1.21)) # two same equalities + cone.append(NonlinearConstraint( + lambda x: [x[0] - x[1], x[1] - x[2]], + [1.21, 1.4], [1.21, 1.4])) # two different equalities + cone.append(NonlinearConstraint( + lambda x: [x[0] - x[1], x[1] - x[2]], + [1.21, 1.21], 1.21)) # equality specified two ways + cone.append(NonlinearConstraint( + lambda x: [x[0] - x[1], x[1] - x[2]], + [1.21, -np.inf], [1.21, np.inf])) # equality + unbounded + + # nonstandard data types for constraint inequality bounds + coni.append(NonlinearConstraint(lambda x: x[0] - x[1], 1.21, np.inf)) + coni.append(NonlinearConstraint(lambda x: x[0] - x[1], [1.21], np.inf)) + coni.append(NonlinearConstraint(lambda x: x[0] - x[1], + 1.21, np.array([np.inf]))) + coni.append(NonlinearConstraint(lambda x: x[0] - x[1], -np.inf, -3)) + coni.append(NonlinearConstraint(lambda x: x[0] - x[1], + np.array(-np.inf), -3)) + + # multiple inequalities/equalities + coni.append(NonlinearConstraint( + lambda x: [x[0] - x[1], x[1] - x[2]], + 1.21, np.inf)) # two same inequalities + cone.append(NonlinearConstraint( + lambda x: [x[0] - x[1], x[1] - x[2]], + [1.21, -np.inf], [1.21, 1.4])) # mixed equality/inequality + coni.append(NonlinearConstraint( + lambda x: [x[0] - x[1], x[1] - x[2]], + [1.1, .8], [1.2, 1.4])) # bounded above and below + coni.append(NonlinearConstraint( + lambda x: [x[0] - x[1], x[1] - x[2]], + [-1.2, -1.4], [-1.1, -.8])) # - bounded above and below + + # quick check of LinearConstraint class (very little new code to test) + cone.append(LinearConstraint([1, -1, 0], 1.21, 1.21)) + cone.append(LinearConstraint([[1, -1, 0], [0, 1, -1]], 1.21, 1.21)) + cone.append(LinearConstraint([[1, -1, 0], [0, 1, -1]], + [1.21, -np.inf], [1.21, 1.4])) + + for con in coni: + funs = {} + for method in methods: + with suppress_warnings() as sup: + sup.filter(UserWarning) + result = minimize(fun, x0, method=method, constraints=con) + funs[method] = result.fun + assert_allclose(funs['slsqp'], funs['trust-constr'], rtol=1e-3) + assert_allclose(funs['cobyla'], funs['trust-constr'], rtol=1e-3) + + for con in cone: + funs = {} + for method in methods[::2]: # skip cobyla + with suppress_warnings() as sup: + sup.filter(UserWarning) + result = minimize(fun, x0, method=method, constraints=con) + funs[method] = result.fun + assert_allclose(funs['slsqp'], funs['trust-constr'], rtol=1e-3) + + +class TestNewToOldSLSQP: + method = 'slsqp' + elec = Elec(n_electrons=2) + elec.x_opt = np.array([-0.58438468, 0.58438466, 0.73597047, + -0.73597044, 0.34180668, -0.34180667]) + brock = BoundedRosenbrock() + brock.x_opt = [0, 0] + list_of_problems = [Maratos(), + HyperbolicIneq(), + Rosenbrock(), + IneqRosenbrock(), + EqIneqRosenbrock(), + elec, + brock + ] + + def test_list_of_problems(self): + + for prob in self.list_of_problems: + + with suppress_warnings() as sup: + sup.filter(UserWarning) + result = minimize(prob.fun, prob.x0, + method=self.method, + bounds=prob.bounds, + constraints=prob.constr) + + assert_array_almost_equal(result.x, prob.x_opt, decimal=3) + + def test_warn_mixed_constraints(self): + # warns 
about inefficiency of mixed equality/inequality constraints + def fun(x): + return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2 + (x[2] - 0.75) ** 2 + cons = NonlinearConstraint(lambda x: [x[0]**2 - x[1], x[1] - x[2]], + [1.1, .8], [1.1, 1.4]) + bnds = ((0, None), (0, None), (0, None)) + with suppress_warnings() as sup: + sup.filter(UserWarning, "delta_grad == 0.0") + assert_warns(OptimizeWarning, minimize, fun, (2, 0, 1), + method=self.method, bounds=bnds, constraints=cons) + + def test_warn_ignored_options(self): + # warns about constraint options being ignored + def fun(x): + return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2 + (x[2] - 0.75) ** 2 + x0 = (2, 0, 1) + + if self.method == "slsqp": + bnds = ((0, None), (0, None), (0, None)) + else: + bnds = None + + cons = NonlinearConstraint(lambda x: x[0], 2, np.inf) + res = minimize(fun, x0, method=self.method, + bounds=bnds, constraints=cons) + # no warnings without constraint options + assert_allclose(res.fun, 1) + + cons = LinearConstraint([1, 0, 0], 2, np.inf) + res = minimize(fun, x0, method=self.method, + bounds=bnds, constraints=cons) + # no warnings without constraint options + assert_allclose(res.fun, 1) + + cons = [] + cons.append(NonlinearConstraint(lambda x: x[0]**2, 2, np.inf, + keep_feasible=True)) + cons.append(NonlinearConstraint(lambda x: x[0]**2, 2, np.inf, + hess=BFGS())) + cons.append(NonlinearConstraint(lambda x: x[0]**2, 2, np.inf, + finite_diff_jac_sparsity=42)) + cons.append(NonlinearConstraint(lambda x: x[0]**2, 2, np.inf, + finite_diff_rel_step=42)) + cons.append(LinearConstraint([1, 0, 0], 2, np.inf, + keep_feasible=True)) + for con in cons: + assert_warns(OptimizeWarning, minimize, fun, x0, + method=self.method, bounds=bnds, constraints=con) + + +class TestNewToOldCobyla: + method = 'cobyla' + + list_of_problems = [ + Elec(n_electrons=2), + Elec(n_electrons=4), + ] + + @pytest.mark.slow + def test_list_of_problems(self): + + for prob in self.list_of_problems: + + with suppress_warnings() as sup: + sup.filter(UserWarning) + truth = minimize(prob.fun, prob.x0, + method='trust-constr', + bounds=prob.bounds, + constraints=prob.constr) + result = minimize(prob.fun, prob.x0, + method=self.method, + bounds=prob.bounds, + constraints=prob.constr) + + assert_allclose(result.fun, truth.fun, rtol=1e-3) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_constraints.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_constraints.py new file mode 100644 index 0000000000000000000000000000000000000000..4c4186ba7b6dd6f56b89e2f39add9eb16e6beccb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_constraints.py @@ -0,0 +1,255 @@ +import pytest +import numpy as np +from numpy.testing import TestCase, assert_array_equal +import scipy.sparse as sps +from scipy.optimize._constraints import ( + Bounds, LinearConstraint, NonlinearConstraint, PreparedConstraint, + new_bounds_to_old, old_bound_to_new, strict_bounds) + + +class TestStrictBounds(TestCase): + def test_scalarvalue_unique_enforce_feasibility(self): + m = 3 + lb = 2 + ub = 4 + enforce_feasibility = False + strict_lb, strict_ub = strict_bounds(lb, ub, + enforce_feasibility, + m) + assert_array_equal(strict_lb, [-np.inf, -np.inf, -np.inf]) + assert_array_equal(strict_ub, [np.inf, np.inf, np.inf]) + + enforce_feasibility = True + strict_lb, strict_ub = strict_bounds(lb, ub, + enforce_feasibility, + m) + assert_array_equal(strict_lb, [2, 2, 2]) + assert_array_equal(strict_ub, [4, 4, 4]) + + def
test_vectorvalue_unique_enforce_feasibility(self): + m = 3 + lb = [1, 2, 3] + ub = [4, 5, 6] + enforce_feasibility = False + strict_lb, strict_ub = strict_bounds(lb, ub, + enforce_feasibility, + m) + assert_array_equal(strict_lb, [-np.inf, -np.inf, -np.inf]) + assert_array_equal(strict_ub, [np.inf, np.inf, np.inf]) + + enforce_feasibility = True + strict_lb, strict_ub = strict_bounds(lb, ub, + enforce_feasibility, + m) + assert_array_equal(strict_lb, [1, 2, 3]) + assert_array_equal(strict_ub, [4, 5, 6]) + + def test_scalarvalue_vector_enforce_feasibility(self): + m = 3 + lb = 2 + ub = 4 + enforce_feasibility = [False, True, False] + strict_lb, strict_ub = strict_bounds(lb, ub, + enforce_feasibility, + m) + assert_array_equal(strict_lb, [-np.inf, 2, -np.inf]) + assert_array_equal(strict_ub, [np.inf, 4, np.inf]) + + def test_vectorvalue_vector_enforce_feasibility(self): + m = 3 + lb = [1, 2, 3] + ub = [4, 6, np.inf] + enforce_feasibility = [True, False, True] + strict_lb, strict_ub = strict_bounds(lb, ub, + enforce_feasibility, + m) + assert_array_equal(strict_lb, [1, -np.inf, 3]) + assert_array_equal(strict_ub, [4, np.inf, np.inf]) + + +def test_prepare_constraint_infeasible_x0(): + lb = np.array([0, 20, 30]) + ub = np.array([0.5, np.inf, 70]) + x0 = np.array([1, 2, 3]) + enforce_feasibility = np.array([False, True, True], dtype=bool) + bounds = Bounds(lb, ub, enforce_feasibility) + pytest.raises(ValueError, PreparedConstraint, bounds, x0) + + pc = PreparedConstraint(Bounds(lb, ub), [1, 2, 3]) + assert (pc.violation([1, 2, 3]) > 0).any() + assert (pc.violation([0.25, 21, 31]) == 0).all() + + x0 = np.array([1, 2, 3, 4]) + A = np.array([[1, 2, 3, 4], [5, 0, 0, 6], [7, 0, 8, 0]]) + enforce_feasibility = np.array([True, True, True], dtype=bool) + linear = LinearConstraint(A, -np.inf, 0, enforce_feasibility) + pytest.raises(ValueError, PreparedConstraint, linear, x0) + + pc = PreparedConstraint(LinearConstraint(A, -np.inf, 0), + [1, 2, 3, 4]) + assert (pc.violation([1, 2, 3, 4]) > 0).any() + assert (pc.violation([-10, 2, -10, 4]) == 0).all() + + def fun(x): + return A.dot(x) + + def jac(x): + return A + + def hess(x, v): + return sps.csr_matrix((4, 4)) + + nonlinear = NonlinearConstraint(fun, -np.inf, 0, jac, hess, + enforce_feasibility) + pytest.raises(ValueError, PreparedConstraint, nonlinear, x0) + + pc = PreparedConstraint(nonlinear, [-10, 2, -10, 4]) + assert (pc.violation([1, 2, 3, 4]) > 0).any() + assert (pc.violation([-10, 2, -10, 4]) == 0).all() + + +def test_violation(): + def cons_f(x): + return np.array([x[0] ** 2 + x[1], x[0] ** 2 - x[1]]) + + nlc = NonlinearConstraint(cons_f, [-1, -0.8500], [2, 2]) + pc = PreparedConstraint(nlc, [0.5, 1]) + + assert_array_equal(pc.violation([0.5, 1]), [0., 0.]) + + np.testing.assert_almost_equal(pc.violation([0.5, 1.2]), [0., 0.1]) + + np.testing.assert_almost_equal(pc.violation([1.2, 1.2]), [0.64, 0]) + + np.testing.assert_almost_equal(pc.violation([0.1, -1.2]), [0.19, 0]) + + np.testing.assert_almost_equal(pc.violation([0.1, 2]), [0.01, 1.14]) + + +def test_new_bounds_to_old(): + lb = np.array([-np.inf, 2, 3]) + ub = np.array([3, np.inf, 10]) + + bounds = [(None, 3), (2, None), (3, 10)] + assert_array_equal(new_bounds_to_old(lb, ub, 3), bounds) + + bounds_single_lb = [(-1, 3), (-1, None), (-1, 10)] + assert_array_equal(new_bounds_to_old(-1, ub, 3), bounds_single_lb) + + bounds_no_lb = [(None, 3), (None, None), (None, 10)] + assert_array_equal(new_bounds_to_old(-np.inf, ub, 3), bounds_no_lb) + + bounds_single_ub = [(None, 20), (2, 20), (3, 20)] + 
assert_array_equal(new_bounds_to_old(lb, 20, 3), bounds_single_ub) + + bounds_no_ub = [(None, None), (2, None), (3, None)] + assert_array_equal(new_bounds_to_old(lb, np.inf, 3), bounds_no_ub) + + bounds_single_both = [(1, 2), (1, 2), (1, 2)] + assert_array_equal(new_bounds_to_old(1, 2, 3), bounds_single_both) + + bounds_no_both = [(None, None), (None, None), (None, None)] + assert_array_equal(new_bounds_to_old(-np.inf, np.inf, 3), bounds_no_both) + + +def test_old_bounds_to_new(): + bounds = ([1, 2], (None, 3), (-1, None)) + lb_true = np.array([1, -np.inf, -1]) + ub_true = np.array([2, 3, np.inf]) + + lb, ub = old_bound_to_new(bounds) + assert_array_equal(lb, lb_true) + assert_array_equal(ub, ub_true) + + bounds = [(-np.inf, np.inf), (np.array([1]), np.array([1]))] + lb, ub = old_bound_to_new(bounds) + + assert_array_equal(lb, [-np.inf, 1]) + assert_array_equal(ub, [np.inf, 1]) + + +class TestBounds: + def test_repr(self): + # so that eval works + from numpy import array, inf # noqa: F401 + for args in ( + (-1.0, 5.0), + (-1.0, np.inf, True), + (np.array([1.0, -np.inf]), np.array([2.0, np.inf])), + (np.array([1.0, -np.inf]), np.array([2.0, np.inf]), + np.array([True, False])), + ): + bounds = Bounds(*args) + bounds2 = eval(repr(Bounds(*args))) + assert_array_equal(bounds.lb, bounds2.lb) + assert_array_equal(bounds.ub, bounds2.ub) + assert_array_equal(bounds.keep_feasible, bounds2.keep_feasible) + + def test_array(self): + # gh13501 + b = Bounds(lb=[0.0, 0.0], ub=[1.0, 1.0]) + assert isinstance(b.lb, np.ndarray) + assert isinstance(b.ub, np.ndarray) + + def test_defaults(self): + b1 = Bounds() + b2 = Bounds(np.asarray(-np.inf), np.asarray(np.inf)) + assert b1.lb == b2.lb + assert b1.ub == b2.ub + + def test_input_validation(self): + message = "Lower and upper bounds must be dense arrays." + with pytest.raises(ValueError, match=message): + Bounds(sps.coo_array([1, 2]), [1, 2]) + with pytest.raises(ValueError, match=message): + Bounds([1, 2], sps.coo_array([1, 2])) + + message = "`keep_feasible` must be a dense array." + with pytest.raises(ValueError, match=message): + Bounds([1, 2], [1, 2], keep_feasible=sps.coo_array([True, True])) + + message = "`lb`, `ub`, and `keep_feasible` must be broadcastable." + with pytest.raises(ValueError, match=message): + Bounds([1, 2], [1, 2, 3]) + + def test_residual(self): + bounds = Bounds(-2, 4) + x0 = [-1, 2] + np.testing.assert_allclose(bounds.residual(x0), ([1, 4], [5, 2])) + + +class TestLinearConstraint: + def test_defaults(self): + A = np.eye(4) + lc = LinearConstraint(A) + lc2 = LinearConstraint(A, -np.inf, np.inf) + assert_array_equal(lc.lb, lc2.lb) + assert_array_equal(lc.ub, lc2.ub) + + def test_input_validation(self): + A = np.eye(4) + message = "`lb`, `ub`, and `keep_feasible` must be broadcastable" + with pytest.raises(ValueError, match=message): + LinearConstraint(A, [1, 2], [1, 2, 3]) + + message = "Constraint limits must be dense arrays" + with pytest.raises(ValueError, match=message): + LinearConstraint(A, sps.coo_array([1, 2]), [2, 3]) + with pytest.raises(ValueError, match=message): + LinearConstraint(A, [1, 2], sps.coo_array([2, 3])) + + message = "`keep_feasible` must be a dense array" + with pytest.raises(ValueError, match=message): + keep_feasible = sps.coo_array([True, True]) + LinearConstraint(A, [1, 2], [2, 3], keep_feasible=keep_feasible) + + A = np.empty((4, 3, 5)) + message = "`A` must have exactly two dimensions." 
+        with pytest.raises(ValueError, match=message):
+            LinearConstraint(A)
+
+    def test_residual(self):
+        A = np.eye(2)
+        lc = LinearConstraint(A, -2, 4)
+        x0 = [-1, 2]
+        np.testing.assert_allclose(lc.residual(x0), ([1, 4], [5, 2]))
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_cython_optimize.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_cython_optimize.py
new file mode 100644
index 0000000000000000000000000000000000000000..2f859c1143eb6b63c439fe278bfdd4fdaa15410f
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_cython_optimize.py
@@ -0,0 +1,92 @@
+"""
+Test Cython optimize zeros API functions: ``bisect``, ``ridder``, ``brenth``,
+and ``brentq`` in `scipy.optimize.cython_optimize`, by finding the roots of a
+3rd order polynomial given a sequence of constant terms, ``a0``, and fixed 1st,
+2nd, and 3rd order terms in ``args``.
+
+.. math::
+
+    f(x, a0, args) = ((args[2]*x + args[1])*x + args[0])*x + a0
+
+The 3rd order polynomial function is written in Cython and called in a Python
+wrapper named after the zero function. See the private ``_zeros`` Cython module
+in `scipy.optimize.cython_optimize` for more information.
+"""
+
+import numpy.testing as npt
+from scipy.optimize.cython_optimize import _zeros
+
+# CONSTANTS
+# Solve x**3 - A0 = 0 for A0 = [2.0, 2.1, ..., 2.9].
+# The ARGS have 3 elements just to show how this could be done for any cubic
+# polynomial.
+A0 = tuple(-2.0 - x/10.0 for x in range(10))  # constant term
+ARGS = (0.0, 0.0, 1.0)  # 1st, 2nd, and 3rd order terms
+XLO, XHI = 0.0, 2.0  # first and second bounds of zeros functions
+# absolute and relative tolerances and max iterations for zeros functions
+XTOL, RTOL, MITR = 0.001, 0.001, 10
+EXPECTED = [(-a0) ** (1.0/3.0) for a0 in A0]
+# = [1.2599210498948732,
+#    1.2805791649874942,
+#    1.300591446851387,
+#    1.3200061217959123,
+#    1.338865900164339,
+#    1.3572088082974532,
+#    1.375068867074141,
+#    1.3924766500838337,
+#    1.4094597464129783,
+#    1.4260431471424087]
+
+
+# test bisect
+def test_bisect():
+    npt.assert_allclose(
+        EXPECTED,
+        list(
+            _zeros.loop_example('bisect', A0, ARGS, XLO, XHI, XTOL, RTOL, MITR)
+        ),
+        rtol=RTOL, atol=XTOL
+    )
+
+
+# test ridder
+def test_ridder():
+    npt.assert_allclose(
+        EXPECTED,
+        list(
+            _zeros.loop_example('ridder', A0, ARGS, XLO, XHI, XTOL, RTOL, MITR)
+        ),
+        rtol=RTOL, atol=XTOL
+    )
+
+
+# test brenth
+def test_brenth():
+    npt.assert_allclose(
+        EXPECTED,
+        list(
+            _zeros.loop_example('brenth', A0, ARGS, XLO, XHI, XTOL, RTOL, MITR)
+        ),
+        rtol=RTOL, atol=XTOL
+    )
+
+
+# test brentq
def test_brentq():
+    npt.assert_allclose(
+        EXPECTED,
+        list(
+            _zeros.loop_example('brentq', A0, ARGS, XLO, XHI, XTOL, RTOL, MITR)
+        ),
+        rtol=RTOL, atol=XTOL
+    )
+
+
+# test brentq with full output
+def test_brentq_full_output():
+    output = _zeros.full_output_example(
+        (A0[0],) + ARGS, XLO, XHI, XTOL, RTOL, MITR)
+    npt.assert_allclose(EXPECTED[0], output['root'], rtol=RTOL, atol=XTOL)
+    npt.assert_equal(6, output['iterations'])
+    npt.assert_equal(7, output['funcalls'])
+    npt.assert_equal(0, output['error_num'])
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_differentiable_functions.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_differentiable_functions.py
new file mode 100644
index 0000000000000000000000000000000000000000..80e1606a8710e67bcd500303e5ae5d54aa3a5676
--- /dev/null
+++ 
b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_differentiable_functions.py @@ -0,0 +1,758 @@ +import pytest +import numpy as np +from numpy.testing import (TestCase, assert_array_almost_equal, + assert_array_equal, assert_, assert_allclose, + assert_equal) +from scipy.sparse import csr_matrix +from scipy.sparse.linalg import LinearOperator +from scipy.optimize._differentiable_functions import (ScalarFunction, + VectorFunction, + LinearVectorFunction, + IdentityVectorFunction) +from scipy.optimize import rosen, rosen_der, rosen_hess +from scipy.optimize._hessian_update_strategy import BFGS + + +class ExScalarFunction: + + def __init__(self): + self.nfev = 0 + self.ngev = 0 + self.nhev = 0 + + def fun(self, x): + self.nfev += 1 + return 2*(x[0]**2 + x[1]**2 - 1) - x[0] + + def grad(self, x): + self.ngev += 1 + return np.array([4*x[0]-1, 4*x[1]]) + + def hess(self, x): + self.nhev += 1 + return 4*np.eye(2) + + +class TestScalarFunction(TestCase): + + def test_finite_difference_grad(self): + ex = ExScalarFunction() + nfev = 0 + ngev = 0 + + x0 = [1.0, 0.0] + analit = ScalarFunction(ex.fun, x0, (), ex.grad, + ex.hess, None, (-np.inf, np.inf)) + nfev += 1 + ngev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev, nfev) + approx = ScalarFunction(ex.fun, x0, (), '2-point', + ex.hess, None, (-np.inf, np.inf)) + nfev += 3 + ngev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_equal(analit.f, approx.f) + assert_array_almost_equal(analit.g, approx.g) + + x = [10, 0.3] + f_analit = analit.fun(x) + g_analit = analit.grad(x) + nfev += 1 + ngev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + f_approx = approx.fun(x) + g_approx = approx.grad(x) + nfev += 3 + ngev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_almost_equal(f_analit, f_approx) + assert_array_almost_equal(g_analit, g_approx) + + x = [2.0, 1.0] + g_analit = analit.grad(x) + ngev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + + g_approx = approx.grad(x) + nfev += 3 + ngev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_almost_equal(g_analit, g_approx) + + x = [2.5, 0.3] + f_analit = analit.fun(x) + g_analit = analit.grad(x) + nfev += 1 + ngev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + f_approx = approx.fun(x) + g_approx = approx.grad(x) + nfev += 3 + ngev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_almost_equal(f_analit, f_approx) + assert_array_almost_equal(g_analit, g_approx) + + x = [2, 0.3] + f_analit = analit.fun(x) + g_analit = analit.grad(x) + nfev += 1 + ngev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + f_approx = approx.fun(x) + g_approx = approx.grad(x) + nfev += 3 + ngev += 
1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_almost_equal(f_analit, f_approx) + assert_array_almost_equal(g_analit, g_approx) + + def test_fun_and_grad(self): + ex = ExScalarFunction() + + def fg_allclose(x, y): + assert_allclose(x[0], y[0]) + assert_allclose(x[1], y[1]) + + # with analytic gradient + x0 = [2.0, 0.3] + analit = ScalarFunction(ex.fun, x0, (), ex.grad, + ex.hess, None, (-np.inf, np.inf)) + + fg = ex.fun(x0), ex.grad(x0) + fg_allclose(analit.fun_and_grad(x0), fg) + assert analit.ngev == 1 + + x0[1] = 1. + fg = ex.fun(x0), ex.grad(x0) + fg_allclose(analit.fun_and_grad(x0), fg) + + # with finite difference gradient + x0 = [2.0, 0.3] + sf = ScalarFunction(ex.fun, x0, (), '3-point', + ex.hess, None, (-np.inf, np.inf)) + assert sf.ngev == 1 + fg = ex.fun(x0), ex.grad(x0) + fg_allclose(sf.fun_and_grad(x0), fg) + assert sf.ngev == 1 + + x0[1] = 1. + fg = ex.fun(x0), ex.grad(x0) + fg_allclose(sf.fun_and_grad(x0), fg) + + def test_finite_difference_hess_linear_operator(self): + ex = ExScalarFunction() + nfev = 0 + ngev = 0 + nhev = 0 + + x0 = [1.0, 0.0] + analit = ScalarFunction(ex.fun, x0, (), ex.grad, + ex.hess, None, (-np.inf, np.inf)) + nfev += 1 + ngev += 1 + nhev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev, ngev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev, nhev) + approx = ScalarFunction(ex.fun, x0, (), ex.grad, + '2-point', None, (-np.inf, np.inf)) + assert_(isinstance(approx.H, LinearOperator)) + for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]): + assert_array_equal(analit.f, approx.f) + assert_array_almost_equal(analit.g, approx.g) + assert_array_almost_equal(analit.H.dot(v), approx.H.dot(v)) + nfev += 1 + ngev += 4 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + + x = [2.0, 1.0] + H_analit = analit.hess(x) + nhev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + H_approx = approx.hess(x) + assert_(isinstance(H_approx, LinearOperator)) + for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]): + assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v)) + ngev += 4 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + + x = [2.1, 1.2] + H_analit = analit.hess(x) + nhev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + H_approx = approx.hess(x) + assert_(isinstance(H_approx, LinearOperator)) + for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]): + assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v)) + ngev += 4 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, 
nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + + x = [2.5, 0.3] + _ = analit.grad(x) + H_analit = analit.hess(x) + ngev += 1 + nhev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + _ = approx.grad(x) + H_approx = approx.hess(x) + assert_(isinstance(H_approx, LinearOperator)) + for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]): + assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v)) + ngev += 4 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + + x = [5.2, 2.3] + _ = analit.grad(x) + H_analit = analit.hess(x) + ngev += 1 + nhev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + _ = approx.grad(x) + H_approx = approx.hess(x) + assert_(isinstance(H_approx, LinearOperator)) + for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]): + assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v)) + ngev += 4 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + + def test_x_storage_overlap(self): + # Scalar_Function should not store references to arrays, it should + # store copies - this checks that updating an array in-place causes + # Scalar_Function.x to be updated. + + def f(x): + return np.sum(np.asarray(x) ** 2) + + x = np.array([1., 2., 3.]) + sf = ScalarFunction(f, x, (), '3-point', lambda x: x, None, (-np.inf, np.inf)) + + assert x is not sf.x + assert_equal(sf.fun(x), 14.0) + assert x is not sf.x + + x[0] = 0. + f1 = sf.fun(x) + assert_equal(f1, 13.0) + + x[0] = 1 + f2 = sf.fun(x) + assert_equal(f2, 14.0) + assert x is not sf.x + + # now test with a HessianUpdate strategy specified + hess = BFGS() + x = np.array([1., 2., 3.]) + sf = ScalarFunction(f, x, (), '3-point', hess, None, (-np.inf, np.inf)) + + assert x is not sf.x + assert_equal(sf.fun(x), 14.0) + assert x is not sf.x + + x[0] = 0. + f1 = sf.fun(x) + assert_equal(f1, 13.0) + + x[0] = 1 + f2 = sf.fun(x) + assert_equal(f2, 14.0) + assert x is not sf.x + + # gh13740 x is changed in user function + def ff(x): + x *= x # overwrite x + return np.sum(x) + + x = np.array([1., 2., 3.]) + sf = ScalarFunction( + ff, x, (), '3-point', lambda x: x, None, (-np.inf, np.inf) + ) + assert x is not sf.x + assert_equal(sf.fun(x), 14.0) + assert_equal(sf.x, np.array([1., 2., 3.])) + assert x is not sf.x + + def test_lowest_x(self): + # ScalarFunction should remember the lowest func(x) visited. 
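+        # (Editorial aside, not part of the upstream test: the behaviour
+        # checked here is plain "best point seen so far" bookkeeping, e.g.
+        # a minimal sketch of the same idea:
+        #     best_f, best_x = np.inf, None
+        #     for xk in ([1, 1, 1], [2, 3, 4], [1.01, 1, 1.0]):
+        #         fk = rosen(np.asarray(xk, dtype=float))
+        #         if fk < best_f:
+        #             best_f, best_x = fk, np.asarray(xk, dtype=float)
+        # `_lowest_f`/`_lowest_x` are the private ScalarFunction attributes,
+        # named in the assertions below, that expose this record.)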
+ x0 = np.array([2, 3, 4]) + sf = ScalarFunction(rosen, x0, (), rosen_der, rosen_hess, + None, None) + sf.fun([1, 1, 1]) + sf.fun(x0) + sf.fun([1.01, 1, 1.0]) + sf.grad([1.01, 1, 1.0]) + assert_equal(sf._lowest_f, 0.0) + assert_equal(sf._lowest_x, [1.0, 1.0, 1.0]) + + sf = ScalarFunction(rosen, x0, (), '2-point', rosen_hess, + None, (-np.inf, np.inf)) + sf.fun([1, 1, 1]) + sf.fun(x0) + sf.fun([1.01, 1, 1.0]) + sf.grad([1.01, 1, 1.0]) + assert_equal(sf._lowest_f, 0.0) + assert_equal(sf._lowest_x, [1.0, 1.0, 1.0]) + + def test_float_size(self): + x0 = np.array([2, 3, 4]).astype(np.float32) + + # check that ScalarFunction/approx_derivative always send the correct + # float width + def rosen_(x): + assert x.dtype == np.float32 + return rosen(x) + + sf = ScalarFunction(rosen_, x0, (), '2-point', rosen_hess, + None, (-np.inf, np.inf)) + res = sf.fun(x0) + assert res.dtype == np.float32 + + +class ExVectorialFunction: + + def __init__(self): + self.nfev = 0 + self.njev = 0 + self.nhev = 0 + + def fun(self, x): + self.nfev += 1 + return np.array([2*(x[0]**2 + x[1]**2 - 1) - x[0], + 4*(x[0]**3 + x[1]**2 - 4) - 3*x[0]], dtype=x.dtype) + + def jac(self, x): + self.njev += 1 + return np.array([[4*x[0]-1, 4*x[1]], + [12*x[0]**2-3, 8*x[1]]], dtype=x.dtype) + + def hess(self, x, v): + self.nhev += 1 + return v[0]*4*np.eye(2) + v[1]*np.array([[24*x[0], 0], + [0, 8]]) + + +class TestVectorialFunction(TestCase): + + def test_finite_difference_jac(self): + ex = ExVectorialFunction() + nfev = 0 + njev = 0 + + x0 = [1.0, 0.0] + analit = VectorFunction(ex.fun, x0, ex.jac, ex.hess, None, None, + (-np.inf, np.inf), None) + nfev += 1 + njev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev, njev) + approx = VectorFunction(ex.fun, x0, '2-point', ex.hess, None, None, + (-np.inf, np.inf), None) + nfev += 3 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_equal(analit.f, approx.f) + assert_array_almost_equal(analit.J, approx.J) + + x = [10, 0.3] + f_analit = analit.fun(x) + J_analit = analit.jac(x) + nfev += 1 + njev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + f_approx = approx.fun(x) + J_approx = approx.jac(x) + nfev += 3 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_almost_equal(f_analit, f_approx) + assert_array_almost_equal(J_analit, J_approx, decimal=4) + + x = [2.0, 1.0] + J_analit = analit.jac(x) + njev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + J_approx = approx.jac(x) + nfev += 3 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_almost_equal(J_analit, J_approx) + + x = [2.5, 0.3] + f_analit = analit.fun(x) + J_analit = analit.jac(x) + nfev += 1 + njev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + 
assert_array_equal(analit.njev+approx.njev, njev) + f_approx = approx.fun(x) + J_approx = approx.jac(x) + nfev += 3 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_almost_equal(f_analit, f_approx) + assert_array_almost_equal(J_analit, J_approx) + + x = [2, 0.3] + f_analit = analit.fun(x) + J_analit = analit.jac(x) + nfev += 1 + njev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + f_approx = approx.fun(x) + J_approx = approx.jac(x) + nfev += 3 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_almost_equal(f_analit, f_approx) + assert_array_almost_equal(J_analit, J_approx) + + def test_finite_difference_hess_linear_operator(self): + ex = ExVectorialFunction() + nfev = 0 + njev = 0 + nhev = 0 + + x0 = [1.0, 0.0] + v0 = [1.0, 2.0] + analit = VectorFunction(ex.fun, x0, ex.jac, ex.hess, None, None, + (-np.inf, np.inf), None) + nfev += 1 + njev += 1 + nhev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev, njev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev, nhev) + approx = VectorFunction(ex.fun, x0, ex.jac, '2-point', None, None, + (-np.inf, np.inf), None) + assert_(isinstance(approx.H, LinearOperator)) + for p in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]): + assert_array_equal(analit.f, approx.f) + assert_array_almost_equal(analit.J, approx.J) + assert_array_almost_equal(analit.H.dot(p), approx.H.dot(p)) + nfev += 1 + njev += 4 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + + x = [2.0, 1.0] + H_analit = analit.hess(x, v0) + nhev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + H_approx = approx.hess(x, v0) + assert_(isinstance(H_approx, LinearOperator)) + for p in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]): + assert_array_almost_equal(H_analit.dot(p), H_approx.dot(p), + decimal=5) + njev += 4 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + + x = [2.1, 1.2] + v = [1.0, 1.0] + H_analit = analit.hess(x, v) + nhev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + H_approx = approx.hess(x, v) + assert_(isinstance(H_approx, LinearOperator)) + for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]): + assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v)) + njev += 4 + assert_array_equal(ex.nfev, nfev) + 
assert_array_equal(analit.nfev+approx.nfev, nfev)
+        assert_array_equal(ex.njev, njev)
+        assert_array_equal(analit.njev+approx.njev, njev)
+        assert_array_equal(ex.nhev, nhev)
+        assert_array_equal(analit.nhev+approx.nhev, nhev)
+
+        x = [2.5, 0.3]
+        _ = analit.jac(x)
+        H_analit = analit.hess(x, v0)
+        njev += 1
+        nhev += 1
+        assert_array_equal(ex.nfev, nfev)
+        assert_array_equal(analit.nfev+approx.nfev, nfev)
+        assert_array_equal(ex.njev, njev)
+        assert_array_equal(analit.njev+approx.njev, njev)
+        assert_array_equal(ex.nhev, nhev)
+        assert_array_equal(analit.nhev+approx.nhev, nhev)
+        _ = approx.jac(x)
+        H_approx = approx.hess(x, v0)
+        assert_(isinstance(H_approx, LinearOperator))
+        for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
+            assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v), decimal=4)
+        njev += 4
+        assert_array_equal(ex.nfev, nfev)
+        assert_array_equal(analit.nfev+approx.nfev, nfev)
+        assert_array_equal(ex.njev, njev)
+        assert_array_equal(analit.njev+approx.njev, njev)
+        assert_array_equal(ex.nhev, nhev)
+        assert_array_equal(analit.nhev+approx.nhev, nhev)
+
+        x = [5.2, 2.3]
+        v = [2.3, 5.2]
+        _ = analit.jac(x)
+        H_analit = analit.hess(x, v)
+        njev += 1
+        nhev += 1
+        assert_array_equal(ex.nfev, nfev)
+        assert_array_equal(analit.nfev+approx.nfev, nfev)
+        assert_array_equal(ex.njev, njev)
+        assert_array_equal(analit.njev+approx.njev, njev)
+        assert_array_equal(ex.nhev, nhev)
+        assert_array_equal(analit.nhev+approx.nhev, nhev)
+        _ = approx.jac(x)
+        H_approx = approx.hess(x, v)
+        assert_(isinstance(H_approx, LinearOperator))
+        for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
+            assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v), decimal=4)
+        njev += 4
+        assert_array_equal(ex.nfev, nfev)
+        assert_array_equal(analit.nfev+approx.nfev, nfev)
+        assert_array_equal(ex.njev, njev)
+        assert_array_equal(analit.njev+approx.njev, njev)
+        assert_array_equal(ex.nhev, nhev)
+        assert_array_equal(analit.nhev+approx.nhev, nhev)
+
+    def test_x_storage_overlap(self):
+        # VectorFunction should not store references to arrays, it should
+        # store copies - this checks that updating an array in-place and
+        # re-evaluating picks up the new values while VectorFunction.x
+        # remains an independent copy.
+        ex = ExVectorialFunction()
+        x0 = np.array([1.0, 0.0])
+
+        vf = VectorFunction(ex.fun, x0, '3-point', ex.hess, None, None,
+                            (-np.inf, np.inf), None)
+
+        assert x0 is not vf.x
+        assert_equal(vf.fun(x0), ex.fun(x0))
+        assert x0 is not vf.x
+
+        x0[0] = 2.
+        assert_equal(vf.fun(x0), ex.fun(x0))
+        assert x0 is not vf.x
+
+        x0[0] = 1.
+        assert_equal(vf.fun(x0), ex.fun(x0))
+        assert x0 is not vf.x
+
+        # now test with a HessianUpdate strategy specified
+        hess = BFGS()
+        x0 = np.array([1.0, 0.0])
+        vf = VectorFunction(ex.fun, x0, '3-point', hess, None, None,
+                            (-np.inf, np.inf), None)
+
+        with pytest.warns(UserWarning):
+            # filter UserWarning because ExVectorialFunction is linear and
+            # a quasi-Newton approximation is used for the Hessian.
+            assert x0 is not vf.x
+            assert_equal(vf.fun(x0), ex.fun(x0))
+            assert x0 is not vf.x
+
+            x0[0] = 2.
+            assert_equal(vf.fun(x0), ex.fun(x0))
+            assert x0 is not vf.x
+
+            x0[0] = 1.
+ assert_equal(vf.fun(x0), ex.fun(x0)) + assert x0 is not vf.x + + def test_float_size(self): + ex = ExVectorialFunction() + x0 = np.array([1.0, 0.0]).astype(np.float32) + + vf = VectorFunction(ex.fun, x0, ex.jac, ex.hess, None, None, + (-np.inf, np.inf), None) + + res = vf.fun(x0) + assert res.dtype == np.float32 + + res = vf.jac(x0) + assert res.dtype == np.float32 + + +def test_LinearVectorFunction(): + A_dense = np.array([ + [-1, 2, 0], + [0, 4, 2] + ]) + x0 = np.zeros(3) + A_sparse = csr_matrix(A_dense) + x = np.array([1, -1, 0]) + v = np.array([-1, 1]) + Ax = np.array([-3, -4]) + + f1 = LinearVectorFunction(A_dense, x0, None) + assert_(not f1.sparse_jacobian) + + f2 = LinearVectorFunction(A_dense, x0, True) + assert_(f2.sparse_jacobian) + + f3 = LinearVectorFunction(A_dense, x0, False) + assert_(not f3.sparse_jacobian) + + f4 = LinearVectorFunction(A_sparse, x0, None) + assert_(f4.sparse_jacobian) + + f5 = LinearVectorFunction(A_sparse, x0, True) + assert_(f5.sparse_jacobian) + + f6 = LinearVectorFunction(A_sparse, x0, False) + assert_(not f6.sparse_jacobian) + + assert_array_equal(f1.fun(x), Ax) + assert_array_equal(f2.fun(x), Ax) + assert_array_equal(f1.jac(x), A_dense) + assert_array_equal(f2.jac(x).toarray(), A_sparse.toarray()) + assert_array_equal(f1.hess(x, v).toarray(), np.zeros((3, 3))) + + +def test_LinearVectorFunction_memoization(): + A = np.array([[-1, 2, 0], [0, 4, 2]]) + x0 = np.array([1, 2, -1]) + fun = LinearVectorFunction(A, x0, False) + + assert_array_equal(x0, fun.x) + assert_array_equal(A.dot(x0), fun.f) + + x1 = np.array([-1, 3, 10]) + assert_array_equal(A, fun.jac(x1)) + assert_array_equal(x1, fun.x) + assert_array_equal(A.dot(x0), fun.f) + assert_array_equal(A.dot(x1), fun.fun(x1)) + assert_array_equal(A.dot(x1), fun.f) + + +def test_IdentityVectorFunction(): + x0 = np.zeros(3) + + f1 = IdentityVectorFunction(x0, None) + f2 = IdentityVectorFunction(x0, False) + f3 = IdentityVectorFunction(x0, True) + + assert_(f1.sparse_jacobian) + assert_(not f2.sparse_jacobian) + assert_(f3.sparse_jacobian) + + x = np.array([-1, 2, 1]) + v = np.array([-2, 3, 0]) + + assert_array_equal(f1.fun(x), x) + assert_array_equal(f2.fun(x), x) + + assert_array_equal(f1.jac(x).toarray(), np.eye(3)) + assert_array_equal(f2.jac(x), np.eye(3)) + + assert_array_equal(f1.hess(x, v).toarray(), np.zeros((3, 3))) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_differentiate.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_differentiate.py new file mode 100644 index 0000000000000000000000000000000000000000..5e010dabf4d060d9a4638be3a5f6491b00d79a2f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_differentiate.py @@ -0,0 +1,396 @@ +import pytest + +import numpy as np +from numpy.testing import assert_array_less, assert_allclose, assert_equal + +import scipy._lib._elementwise_iterative_method as eim +from scipy import stats +from scipy.optimize._differentiate import (_differentiate as differentiate, + _EERRORINCREASE) + +class TestDifferentiate: + + def f(self, x): + return stats.norm().cdf(x) + + @pytest.mark.parametrize('x', [0.6, np.linspace(-0.05, 1.05, 10)]) + def test_basic(self, x): + # Invert distribution CDF and compare against distribution `ppf` + res = differentiate(self.f, x) + ref = stats.norm().pdf(x) + np.testing.assert_allclose(res.df, ref) + # This would be nice, but doesn't always work out. `error` is an + # estimate, not a bound. 
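+        # (Editorial aside, not part of the upstream test: consistent with
+        # the comment above, the `error` attribute is an estimate built from
+        # the change between successive step-refined derivative estimates,
+        # conceptually something like
+        #     error_k ~ abs(df_k - df_{k-1})
+        # so it tracks, but does not bound, the true error; the check below
+        # is therefore best-effort.)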
+ assert_array_less(abs(res.df - ref), res.error) + assert res.x.shape == ref.shape + + @pytest.mark.parametrize('case', stats._distr_params.distcont) + def test_accuracy(self, case): + distname, params = case + dist = getattr(stats, distname)(*params) + x = dist.median() + 0.1 + res = differentiate(dist.cdf, x) + ref = dist.pdf(x) + assert_allclose(res.df, ref, atol=1e-10) + + @pytest.mark.parametrize('order', [1, 6]) + @pytest.mark.parametrize('shape', [tuple(), (12,), (3, 4), (3, 2, 2)]) + def test_vectorization(self, order, shape): + # Test for correct functionality, output shapes, and dtypes for various + # input shapes. + x = np.linspace(-0.05, 1.05, 12).reshape(shape) if shape else 0.6 + n = np.size(x) + + @np.vectorize + def _differentiate_single(x): + return differentiate(self.f, x, order=order) + + def f(x, *args, **kwargs): + f.nit += 1 + f.feval += 1 if (x.size == n or x.ndim <=1) else x.shape[-1] + return self.f(x, *args, **kwargs) + f.nit = -1 + f.feval = 0 + + res = differentiate(f, x, order=order) + refs = _differentiate_single(x).ravel() + + ref_x = [ref.x for ref in refs] + assert_allclose(res.x.ravel(), ref_x) + assert_equal(res.x.shape, shape) + + ref_df = [ref.df for ref in refs] + assert_allclose(res.df.ravel(), ref_df) + assert_equal(res.df.shape, shape) + + ref_error = [ref.error for ref in refs] + assert_allclose(res.error.ravel(), ref_error, atol=5e-15) + assert_equal(res.error.shape, shape) + + ref_success = [ref.success for ref in refs] + assert_equal(res.success.ravel(), ref_success) + assert_equal(res.success.shape, shape) + assert np.issubdtype(res.success.dtype, np.bool_) + + ref_flag = [ref.status for ref in refs] + assert_equal(res.status.ravel(), ref_flag) + assert_equal(res.status.shape, shape) + assert np.issubdtype(res.status.dtype, np.integer) + + ref_nfev = [ref.nfev for ref in refs] + assert_equal(res.nfev.ravel(), ref_nfev) + assert_equal(np.max(res.nfev), f.feval) + assert_equal(res.nfev.shape, res.x.shape) + assert np.issubdtype(res.nfev.dtype, np.integer) + + ref_nit = [ref.nit for ref in refs] + assert_equal(res.nit.ravel(), ref_nit) + assert_equal(np.max(res.nit), f.nit) + assert_equal(res.nit.shape, res.x.shape) + assert np.issubdtype(res.nit.dtype, np.integer) + + def test_flags(self): + # Test cases that should produce different status flags; show that all + # can be produced simultaneously. + rng = np.random.default_rng(5651219684984213) + def f(xs, js): + f.nit += 1 + funcs = [lambda x: x - 2.5, # converges + lambda x: np.exp(x)*rng.random(), # error increases + lambda x: np.exp(x), # reaches maxiter due to order=2 + lambda x: np.full_like(x, np.nan)[()]] # stops due to NaN + res = [funcs[j](x) for x, j in zip(xs, js.ravel())] + return res + f.nit = 0 + + args = (np.arange(4, dtype=np.int64),) + res = differentiate(f, [1]*4, rtol=1e-14, order=2, args=args) + + ref_flags = np.array([eim._ECONVERGED, + _EERRORINCREASE, + eim._ECONVERR, + eim._EVALUEERR]) + assert_equal(res.status, ref_flags) + + def test_flags_preserve_shape(self): + # Same test as above but using `preserve_shape` option to simplify. 
+ rng = np.random.default_rng(5651219684984213) + def f(x): + return [x - 2.5, # converges + np.exp(x)*rng.random(), # error increases + np.exp(x), # reaches maxiter due to order=2 + np.full_like(x, np.nan)[()]] # stops due to NaN + + res = differentiate(f, 1, rtol=1e-14, order=2, preserve_shape=True) + + ref_flags = np.array([eim._ECONVERGED, + _EERRORINCREASE, + eim._ECONVERR, + eim._EVALUEERR]) + assert_equal(res.status, ref_flags) + + def test_preserve_shape(self): + # Test `preserve_shape` option + def f(x): + return [x, np.sin(3*x), x+np.sin(10*x), np.sin(20*x)*(x-1)**2] + + x = 0 + ref = [1, 3*np.cos(3*x), 1+10*np.cos(10*x), + 20*np.cos(20*x)*(x-1)**2 + 2*np.sin(20*x)*(x-1)] + res = differentiate(f, x, preserve_shape=True) + assert_allclose(res.df, ref) + + def test_convergence(self): + # Test that the convergence tolerances behave as expected + dist = stats.norm() + x = 1 + f = dist.cdf + ref = dist.pdf(x) + kwargs0 = dict(atol=0, rtol=0, order=4) + + kwargs = kwargs0.copy() + kwargs['atol'] = 1e-3 + res1 = differentiate(f, x, **kwargs) + assert_array_less(abs(res1.df - ref), 1e-3) + kwargs['atol'] = 1e-6 + res2 = differentiate(f, x, **kwargs) + assert_array_less(abs(res2.df - ref), 1e-6) + assert_array_less(abs(res2.df - ref), abs(res1.df - ref)) + + kwargs = kwargs0.copy() + kwargs['rtol'] = 1e-3 + res1 = differentiate(f, x, **kwargs) + assert_array_less(abs(res1.df - ref), 1e-3 * np.abs(ref)) + kwargs['rtol'] = 1e-6 + res2 = differentiate(f, x, **kwargs) + assert_array_less(abs(res2.df - ref), 1e-6 * np.abs(ref)) + assert_array_less(abs(res2.df - ref), abs(res1.df - ref)) + + def test_step_parameters(self): + # Test that step factors have the expected effect on accuracy + dist = stats.norm() + x = 1 + f = dist.cdf + ref = dist.pdf(x) + + res1 = differentiate(f, x, initial_step=0.5, maxiter=1) + res2 = differentiate(f, x, initial_step=0.05, maxiter=1) + assert abs(res2.df - ref) < abs(res1.df - ref) + + res1 = differentiate(f, x, step_factor=2, maxiter=1) + res2 = differentiate(f, x, step_factor=20, maxiter=1) + assert abs(res2.df - ref) < abs(res1.df - ref) + + # `step_factor` can be less than 1: `initial_step` is the minimum step + kwargs = dict(order=4, maxiter=1, step_direction=0) + res = differentiate(f, x, initial_step=0.5, step_factor=0.5, **kwargs) + ref = differentiate(f, x, initial_step=1, step_factor=2, **kwargs) + assert_allclose(res.df, ref.df, rtol=5e-15) + + # This is a similar test for one-sided difference + kwargs = dict(order=2, maxiter=1, step_direction=1) + res = differentiate(f, x, initial_step=1, step_factor=2, **kwargs) + ref = differentiate(f, x, initial_step=1/np.sqrt(2), step_factor=0.5, + **kwargs) + assert_allclose(res.df, ref.df, rtol=5e-15) + + kwargs['step_direction'] = -1 + res = differentiate(f, x, initial_step=1, step_factor=2, **kwargs) + ref = differentiate(f, x, initial_step=1/np.sqrt(2), step_factor=0.5, + **kwargs) + assert_allclose(res.df, ref.df, rtol=5e-15) + + def test_step_direction(self): + # test that `step_direction` works as expected + def f(x): + y = np.exp(x) + y[(x < 0) + (x > 2)] = np.nan + return y + + x = np.linspace(0, 2, 10) + step_direction = np.zeros_like(x) + step_direction[x < 0.6], step_direction[x > 1.4] = 1, -1 + res = differentiate(f, x, step_direction=step_direction) + assert_allclose(res.df, np.exp(x)) + assert np.all(res.success) + + def test_vectorized_step_direction_args(self): + # test that `step_direction` and `args` are vectorized properly + def f(x, p): + return x ** p + + def df(x, p): + return p * x ** (p 
- 1) + + x = np.array([1, 2, 3, 4]).reshape(-1, 1, 1) + hdir = np.array([-1, 0, 1]).reshape(1, -1, 1) + p = np.array([2, 3]).reshape(1, 1, -1) + res = differentiate(f, x, step_direction=hdir, args=(p,)) + ref = np.broadcast_to(df(x, p), res.df.shape) + assert_allclose(res.df, ref) + + def test_maxiter_callback(self): + # Test behavior of `maxiter` parameter and `callback` interface + x = 0.612814 + dist = stats.norm() + maxiter = 3 + + def f(x): + res = dist.cdf(x) + return res + + default_order = 8 + res = differentiate(f, x, maxiter=maxiter, rtol=1e-15) + assert not np.any(res.success) + assert np.all(res.nfev == default_order + 1 + (maxiter - 1)*2) + assert np.all(res.nit == maxiter) + + def callback(res): + callback.iter += 1 + callback.res = res + assert hasattr(res, 'x') + assert res.df not in callback.dfs + callback.dfs.add(res.df) + assert res.status == eim._EINPROGRESS + if callback.iter == maxiter: + raise StopIteration + callback.iter = -1 # callback called once before first iteration + callback.res = None + callback.dfs = set() + + res2 = differentiate(f, x, callback=callback, rtol=1e-15) + # terminating with callback is identical to terminating due to maxiter + # (except for `status`) + for key in res.keys(): + if key == 'status': + assert res[key] == eim._ECONVERR + assert callback.res[key] == eim._EINPROGRESS + assert res2[key] == eim._ECALLBACK + else: + assert res2[key] == callback.res[key] == res[key] + + @pytest.mark.parametrize("hdir", (-1, 0, 1)) + @pytest.mark.parametrize("x", (0.65, [0.65, 0.7])) + @pytest.mark.parametrize("dtype", (np.float16, np.float32, np.float64)) + def test_dtype(self, hdir, x, dtype): + # Test that dtypes are preserved + x = np.asarray(x, dtype=dtype)[()] + + def f(x): + assert x.dtype == dtype + return np.exp(x) + + def callback(res): + assert res.x.dtype == dtype + assert res.df.dtype == dtype + assert res.error.dtype == dtype + + res = differentiate(f, x, order=4, step_direction=hdir, + callback=callback) + assert res.x.dtype == dtype + assert res.df.dtype == dtype + assert res.error.dtype == dtype + eps = np.finfo(dtype).eps + assert_allclose(res.df, np.exp(res.x), rtol=np.sqrt(eps)) + + def test_input_validation(self): + # Test input validation for appropriate error messages + + message = '`func` must be callable.' + with pytest.raises(ValueError, match=message): + differentiate(None, 1) + + message = 'Abscissae and function output must be real numbers.' + with pytest.raises(ValueError, match=message): + differentiate(lambda x: x, -4+1j) + + message = "When `preserve_shape=False`, the shape of the array..." + with pytest.raises(ValueError, match=message): + differentiate(lambda x: [1, 2, 3], [-2, -3]) + + message = 'Tolerances and step parameters must be non-negative...' + with pytest.raises(ValueError, match=message): + differentiate(lambda x: x, 1, atol=-1) + with pytest.raises(ValueError, match=message): + differentiate(lambda x: x, 1, rtol='ekki') + with pytest.raises(ValueError, match=message): + differentiate(lambda x: x, 1, initial_step=None) + with pytest.raises(ValueError, match=message): + differentiate(lambda x: x, 1, step_factor=object()) + + message = '`maxiter` must be a positive integer.' 
+        with pytest.raises(ValueError, match=message):
+            differentiate(lambda x: x, 1, maxiter=1.5)
+        with pytest.raises(ValueError, match=message):
+            differentiate(lambda x: x, 1, maxiter=0)
+
+        message = '`order` must be a positive integer'
+        with pytest.raises(ValueError, match=message):
+            differentiate(lambda x: x, 1, order=1.5)
+        with pytest.raises(ValueError, match=message):
+            differentiate(lambda x: x, 1, order=0)
+
+        message = '`preserve_shape` must be True or False.'
+        with pytest.raises(ValueError, match=message):
+            differentiate(lambda x: x, 1, preserve_shape='herring')
+
+        message = '`callback` must be callable.'
+        with pytest.raises(ValueError, match=message):
+            differentiate(lambda x: x, 1, callback='shrubbery')
+
+    def test_special_cases(self):
+        # Test edge cases and other special cases
+
+        # Test that integers are not passed to `f`
+        # (otherwise this would overflow)
+        def f(x):
+            assert np.issubdtype(x.dtype, np.floating)
+            return x ** 99 - 1
+
+        res = differentiate(f, 7, rtol=1e-10)
+        assert res.success
+        assert_allclose(res.df, 99*7.**98)
+
+        # Test that success is achieved in the correct number of iterations
+        # when the function is a polynomial. Ideally, all polynomials of
+        # order 0-2 would get the exact result with 0 refinement iterations,
+        # all polynomials of order 3-4 would be differentiated exactly after
+        # 1 iteration, etc. However, it seems that _differentiate needs an
+        # extra iteration to detect convergence based on the error estimate.
+
+        for n in range(6):
+            x = 1.5
+            def f(x):
+                return 2*x**n
+
+            ref = 2*n*x**(n-1)
+
+            res = differentiate(f, x, maxiter=1, order=max(1, n))
+            assert_allclose(res.df, ref, rtol=1e-15)
+            assert_equal(res.error, np.nan)
+
+            res = differentiate(f, x, order=max(1, n))
+            assert res.success
+            assert res.nit == 2
+            assert_allclose(res.df, ref, rtol=1e-15)
+
+        # Test scalar `args` (not in tuple)
+        def f(x, c):
+            return c*x - 1
+
+        res = differentiate(f, 2, args=3)
+        assert_allclose(res.df, 3)
+
+    @pytest.mark.xfail
+    @pytest.mark.parametrize("case", (  # function, evaluation point
+        (lambda x: (x - 1) ** 3, 1),
+        (lambda x: np.where(x > 1, (x - 1) ** 5, (x - 1) ** 3), 1)
+    ))
+    def test_saddle_gh18811(self, case):
+        # With default settings, _differentiate will not always converge when
+        # the true derivative is exactly zero. This tests that specifying a
+        # (tight) `atol` alleviates the problem. See discussion in gh-18811.
+        atol = 1e-16
+        res = differentiate(*case, step_direction=[-1, 0, 1], atol=atol)
+        assert np.all(res.success)
+        assert_allclose(res.df, 0, atol=atol)
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_direct.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_direct.py
new file mode 100644
index 0000000000000000000000000000000000000000..f131527deac44edc095be9d4d96d57fa49dadd1b
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_direct.py
@@ -0,0 +1,318 @@
+"""
+Unit test for DIRECT optimization algorithm.
+""" +from numpy.testing import (assert_allclose, + assert_array_less) +import pytest +import numpy as np +from scipy.optimize import direct, Bounds + + +class TestDIRECT: + + def setup_method(self): + self.fun_calls = 0 + self.bounds_sphere = 4*[(-2, 3)] + self.optimum_sphere_pos = np.zeros((4, )) + self.optimum_sphere = 0.0 + self.bounds_stylinski_tang = Bounds([-4., -4.], [4., 4.]) + self.maxiter = 1000 + + # test functions + def sphere(self, x): + self.fun_calls += 1 + return np.square(x).sum() + + def inv(self, x): + if np.sum(x) == 0: + raise ZeroDivisionError() + return 1/np.sum(x) + + def nan_fun(self, x): + return np.nan + + def inf_fun(self, x): + return np.inf + + def styblinski_tang(self, pos): + x, y = pos + return 0.5 * (x**4 - 16 * x**2 + 5 * x + y**4 - 16 * y**2 + 5 * y) + + @pytest.mark.parametrize("locally_biased", [True, False]) + def test_direct(self, locally_biased): + res = direct(self.sphere, self.bounds_sphere, + locally_biased=locally_biased) + + # test accuracy + assert_allclose(res.x, self.optimum_sphere_pos, + rtol=1e-3, atol=1e-3) + assert_allclose(res.fun, self.optimum_sphere, atol=1e-5, rtol=1e-5) + + # test that result lies within bounds + _bounds = np.asarray(self.bounds_sphere) + assert_array_less(_bounds[:, 0], res.x) + assert_array_less(res.x, _bounds[:, 1]) + + # test number of function evaluations. Original DIRECT overshoots by + # up to 500 evaluations in last iteration + assert res.nfev <= 1000 * (len(self.bounds_sphere) + 1) + # test that number of function evaluations is correct + assert res.nfev == self.fun_calls + + # test that number of iterations is below supplied maximum + assert res.nit <= self.maxiter + + @pytest.mark.parametrize("locally_biased", [True, False]) + def test_direct_callback(self, locally_biased): + # test that callback does not change the result + res = direct(self.sphere, self.bounds_sphere, + locally_biased=locally_biased) + + def callback(x): + x = 2*x + dummy = np.square(x) + print("DIRECT minimization algorithm callback test") + return dummy + + res_callback = direct(self.sphere, self.bounds_sphere, + locally_biased=locally_biased, + callback=callback) + + assert_allclose(res.x, res_callback.x) + + assert res.nit == res_callback.nit + assert res.nfev == res_callback.nfev + assert res.status == res_callback.status + assert res.success == res_callback.success + assert res.fun == res_callback.fun + assert_allclose(res.x, res_callback.x) + assert res.message == res_callback.message + + # test accuracy + assert_allclose(res_callback.x, self.optimum_sphere_pos, + rtol=1e-3, atol=1e-3) + assert_allclose(res_callback.fun, self.optimum_sphere, + atol=1e-5, rtol=1e-5) + + @pytest.mark.parametrize("locally_biased", [True, False]) + def test_exception(self, locally_biased): + bounds = 4*[(-10, 10)] + with pytest.raises(ZeroDivisionError): + direct(self.inv, bounds=bounds, + locally_biased=locally_biased) + + @pytest.mark.parametrize("locally_biased", [True, False]) + def test_nan(self, locally_biased): + bounds = 4*[(-10, 10)] + direct(self.nan_fun, bounds=bounds, + locally_biased=locally_biased) + + @pytest.mark.parametrize("len_tol", [1e-3, 1e-4]) + @pytest.mark.parametrize("locally_biased", [True, False]) + def test_len_tol(self, len_tol, locally_biased): + bounds = 4*[(-10., 10.)] + res = direct(self.sphere, bounds=bounds, len_tol=len_tol, + vol_tol=1e-30, locally_biased=locally_biased) + assert res.status == 5 + assert res.success + assert_allclose(res.x, np.zeros((4, ))) + message = ("The side length measure of the 
hyperrectangle containing " + "the lowest function value found is below " + f"len_tol={len_tol}") + assert res.message == message + + @pytest.mark.parametrize("vol_tol", [1e-6, 1e-8]) + @pytest.mark.parametrize("locally_biased", [True, False]) + def test_vol_tol(self, vol_tol, locally_biased): + bounds = 4*[(-10., 10.)] + res = direct(self.sphere, bounds=bounds, vol_tol=vol_tol, + len_tol=0., locally_biased=locally_biased) + assert res.status == 4 + assert res.success + assert_allclose(res.x, np.zeros((4, ))) + message = ("The volume of the hyperrectangle containing the lowest " + f"function value found is below vol_tol={vol_tol}") + assert res.message == message + + @pytest.mark.parametrize("f_min_rtol", [1e-3, 1e-5, 1e-7]) + @pytest.mark.parametrize("locally_biased", [True, False]) + def test_f_min(self, f_min_rtol, locally_biased): + # test that desired function value is reached within + # relative tolerance of f_min_rtol + f_min = 1. + bounds = 4*[(-2., 10.)] + res = direct(self.sphere, bounds=bounds, f_min=f_min, + f_min_rtol=f_min_rtol, + locally_biased=locally_biased) + assert res.status == 3 + assert res.success + assert res.fun < f_min * (1. + f_min_rtol) + message = ("The best function value found is within a relative " + f"error={f_min_rtol} of the (known) global optimum f_min") + assert res.message == message + + def circle_with_args(self, x, a, b): + return np.square(x[0] - a) + np.square(x[1] - b).sum() + + @pytest.mark.parametrize("locally_biased", [True, False]) + def test_f_circle_with_args(self, locally_biased): + bounds = 2*[(-2.0, 2.0)] + + res = direct(self.circle_with_args, bounds, args=(1, 1), maxfun=1250, + locally_biased=locally_biased) + assert_allclose(res.x, np.array([1., 1.]), rtol=1e-5) + + @pytest.mark.parametrize("locally_biased", [True, False]) + def test_failure_maxfun(self, locally_biased): + # test that if optimization runs for the maximal number of + # evaluations, success = False is returned + + maxfun = 100 + result = direct(self.styblinski_tang, self.bounds_stylinski_tang, + maxfun=maxfun, locally_biased=locally_biased) + assert result.success is False + assert result.status == 1 + assert result.nfev >= maxfun + message = ("Number of function evaluations done is " + f"larger than maxfun={maxfun}") + assert result.message == message + + @pytest.mark.parametrize("locally_biased", [True, False]) + def test_failure_maxiter(self, locally_biased): + # test that if optimization runs for the maximal number of + # iterations, success = False is returned + + maxiter = 10 + result = direct(self.styblinski_tang, self.bounds_stylinski_tang, + maxiter=maxiter, locally_biased=locally_biased) + assert result.success is False + assert result.status == 2 + assert result.nit >= maxiter + message = f"Number of iterations is larger than maxiter={maxiter}" + assert result.message == message + + @pytest.mark.parametrize("locally_biased", [True, False]) + def test_bounds_variants(self, locally_biased): + # test that new and old bounds yield same result + + lb = [-6., 1., -5.] + ub = [-1., 3., 5.] 
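+        # (Editorial aside, not part of the upstream test: both spellings
+        # describe the same box constraints, e.g. with the values above
+        #     list(zip(lb, ub))  ->  [(-6.0, -1.0), (1.0, 3.0), (-5.0, 5.0)]
+        #     Bounds(lb, ub)     ->  elementwise lb <= x <= ub
+        # and `direct` accepts either form, as verified below.)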
+ x_opt = np.array([-1., 1., 0.]) + bounds_old = list(zip(lb, ub)) + bounds_new = Bounds(lb, ub) + + res_old_bounds = direct(self.sphere, bounds_old, + locally_biased=locally_biased) + res_new_bounds = direct(self.sphere, bounds_new, + locally_biased=locally_biased) + + assert res_new_bounds.nfev == res_old_bounds.nfev + assert res_new_bounds.message == res_old_bounds.message + assert res_new_bounds.success == res_old_bounds.success + assert res_new_bounds.nit == res_old_bounds.nit + assert_allclose(res_new_bounds.x, res_old_bounds.x) + assert_allclose(res_new_bounds.x, x_opt, rtol=1e-2) + + @pytest.mark.parametrize("locally_biased", [True, False]) + @pytest.mark.parametrize("eps", [1e-5, 1e-4, 1e-3]) + def test_epsilon(self, eps, locally_biased): + result = direct(self.styblinski_tang, self.bounds_stylinski_tang, + eps=eps, vol_tol=1e-6, + locally_biased=locally_biased) + assert result.status == 4 + assert result.success + + @pytest.mark.xslow + @pytest.mark.parametrize("locally_biased", [True, False]) + def test_no_segmentation_fault(self, locally_biased): + # test that an excessive number of function evaluations + # does not result in segmentation fault + bounds = [(-5., 20.)] * 100 + result = direct(self.sphere, bounds, maxfun=10000000, + maxiter=1000000, locally_biased=locally_biased) + assert result is not None + + @pytest.mark.parametrize("locally_biased", [True, False]) + def test_inf_fun(self, locally_biased): + # test that an objective value of infinity does not crash DIRECT + bounds = [(-5., 5.)] * 2 + result = direct(self.inf_fun, bounds, + locally_biased=locally_biased) + assert result is not None + + @pytest.mark.parametrize("len_tol", [-1, 2]) + def test_len_tol_validation(self, len_tol): + error_msg = "len_tol must be between 0 and 1." + with pytest.raises(ValueError, match=error_msg): + direct(self.styblinski_tang, self.bounds_stylinski_tang, + len_tol=len_tol) + + @pytest.mark.parametrize("vol_tol", [-1, 2]) + def test_vol_tol_validation(self, vol_tol): + error_msg = "vol_tol must be between 0 and 1." + with pytest.raises(ValueError, match=error_msg): + direct(self.styblinski_tang, self.bounds_stylinski_tang, + vol_tol=vol_tol) + + @pytest.mark.parametrize("f_min_rtol", [-1, 2]) + def test_fmin_rtol_validation(self, f_min_rtol): + error_msg = "f_min_rtol must be between 0 and 1." + with pytest.raises(ValueError, match=error_msg): + direct(self.styblinski_tang, self.bounds_stylinski_tang, + f_min_rtol=f_min_rtol, f_min=0.) + + @pytest.mark.parametrize("maxfun", [1.5, "string", (1, 2)]) + def test_maxfun_wrong_type(self, maxfun): + error_msg = "maxfun must be of type int." + with pytest.raises(ValueError, match=error_msg): + direct(self.styblinski_tang, self.bounds_stylinski_tang, + maxfun=maxfun) + + @pytest.mark.parametrize("maxiter", [1.5, "string", (1, 2)]) + def test_maxiter_wrong_type(self, maxiter): + error_msg = "maxiter must be of type int." + with pytest.raises(ValueError, match=error_msg): + direct(self.styblinski_tang, self.bounds_stylinski_tang, + maxiter=maxiter) + + def test_negative_maxiter(self): + error_msg = "maxiter must be > 0." + with pytest.raises(ValueError, match=error_msg): + direct(self.styblinski_tang, self.bounds_stylinski_tang, + maxiter=-1) + + def test_negative_maxfun(self): + error_msg = "maxfun must be > 0." 
+        with pytest.raises(ValueError, match=error_msg):
+            direct(self.styblinski_tang, self.bounds_stylinski_tang,
+                   maxfun=-1)
+
+    @pytest.mark.parametrize("bounds", ["bounds", 2., 0])
+    def test_invalid_bounds_type(self, bounds):
+        error_msg = ("bounds must be a sequence or "
+                     "instance of Bounds class")
+        with pytest.raises(ValueError, match=error_msg):
+            direct(self.styblinski_tang, bounds)
+
+    @pytest.mark.parametrize("bounds",
+                             [Bounds([-1., -1], [-2, 1]),
+                              Bounds([-np.nan, -1], [-2, np.nan]),
+                              ]
+                             )
+    def test_incorrect_bounds(self, bounds):
+        error_msg = 'Bounds are not consistent min < max'
+        with pytest.raises(ValueError, match=error_msg):
+            direct(self.styblinski_tang, bounds)
+
+    def test_inf_bounds(self):
+        error_msg = 'Bounds must not be inf.'
+        bounds = Bounds([-np.inf, -1], [-2, np.inf])
+        with pytest.raises(ValueError, match=error_msg):
+            direct(self.styblinski_tang, bounds)
+
+    @pytest.mark.parametrize("locally_biased", ["bias", [0, 0], 2.])
+    def test_locally_biased_validation(self, locally_biased):
+        error_msg = 'locally_biased must be True or False.'
+        with pytest.raises(ValueError, match=error_msg):
+            direct(self.styblinski_tang, self.bounds_stylinski_tang,
+                   locally_biased=locally_biased)
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_hessian_update_strategy.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_hessian_update_strategy.py
new file mode 100644
index 0000000000000000000000000000000000000000..e6872997881c78b43a04d8eb4beed6cf7e758cb1
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_hessian_update_strategy.py
@@ -0,0 +1,208 @@
+import numpy as np
+from copy import deepcopy
+from numpy.linalg import norm
+from numpy.testing import (TestCase, assert_array_almost_equal,
+                           assert_array_equal, assert_array_less)
+from scipy.optimize import (BFGS, SR1)
+
+
+class Rosenbrock:
+    """Rosenbrock function.
+
+    The following optimization problem:
+        minimize sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0)
+    """
+
+    def __init__(self, n=2, random_state=0):
+        rng = np.random.RandomState(random_state)
+        self.x0 = rng.uniform(-1, 1, n)
+        self.x_opt = np.ones(n)
+
+    def fun(self, x):
+        x = np.asarray(x)
+        r = np.sum(100.0 * (x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0,
+                   axis=0)
+        return r
+
+    def grad(self, x):
+        x = np.asarray(x)
+        xm = x[1:-1]
+        xm_m1 = x[:-2]
+        xm_p1 = x[2:]
+        der = np.zeros_like(x)
+        der[1:-1] = (200 * (xm - xm_m1**2) -
+                     400 * (xm_p1 - xm**2) * xm - 2 * (1 - xm))
+        der[0] = -400 * x[0] * (x[1] - x[0]**2) - 2 * (1 - x[0])
+        der[-1] = 200 * (x[-1] - x[-2]**2)
+        return der
+
+    def hess(self, x):
+        x = np.atleast_1d(x)
+        H = np.diag(-400 * x[:-1], 1) - np.diag(400 * x[:-1], -1)
+        diagonal = np.zeros(len(x), dtype=x.dtype)
+        diagonal[0] = 1200 * x[0]**2 - 400 * x[1] + 2
+        diagonal[-1] = 200
+        diagonal[1:-1] = 202 + 1200 * x[1:-1]**2 - 400 * x[2:]
+        H = H + np.diag(diagonal)
+        return H
+
+
+class TestHessianUpdateStrategy(TestCase):
+
+    def test_hessian_initialization(self):
+        quasi_newton = (BFGS(), SR1())
+
+        for qn in quasi_newton:
+            qn.initialize(5, 'hess')
+            B = qn.get_matrix()
+
+            assert_array_equal(B, np.eye(5))
+
+    # For this list of points, it is known
+    # that no exceptions occur during the
+    # Hessian update. Hence no update is
+    # skipped or damped.
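+    # (Editorial aside, not part of the upstream file: "skipped or damped"
+    # refers to the quasi-Newton safeguards.  A BFGS update preserves
+    # positive definiteness only under the curvature condition s.T y > 0,
+    # with s = x_{k+1} - x_k and y = grad_{k+1} - grad_k; the test below
+    # re-checks this for every consecutive iterate pair, conceptually
+    #     assert all(np.dot(s, y) > 0
+    #                for s, y in zip(delta_x, delta_grad))
+    # before exercising the updates.)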
+ def test_rosenbrock_with_no_exception(self): + # Define auxiliary problem + prob = Rosenbrock(n=5) + # Define iteration points + x_list = [[0.0976270, 0.4303787, 0.2055267, 0.0897663, -0.15269040], + [0.1847239, 0.0505757, 0.2123832, 0.0255081, 0.00083286], + [0.2142498, -0.0188480, 0.0503822, 0.0347033, 0.03323606], + [0.2071680, -0.0185071, 0.0341337, -0.0139298, 0.02881750], + [0.1533055, -0.0322935, 0.0280418, -0.0083592, 0.01503699], + [0.1382378, -0.0276671, 0.0266161, -0.0074060, 0.02801610], + [0.1651957, -0.0049124, 0.0269665, -0.0040025, 0.02138184], + [0.2354930, 0.0443711, 0.0173959, 0.0041872, 0.00794563], + [0.4168118, 0.1433867, 0.0111714, 0.0126265, -0.00658537], + [0.4681972, 0.2153273, 0.0225249, 0.0152704, -0.00463809], + [0.6023068, 0.3346815, 0.0731108, 0.0186618, -0.00371541], + [0.6415743, 0.3985468, 0.1324422, 0.0214160, -0.00062401], + [0.7503690, 0.5447616, 0.2804541, 0.0539851, 0.00242230], + [0.7452626, 0.5644594, 0.3324679, 0.0865153, 0.00454960], + [0.8059782, 0.6586838, 0.4229577, 0.1452990, 0.00976702], + [0.8549542, 0.7226562, 0.4991309, 0.2420093, 0.02772661], + [0.8571332, 0.7285741, 0.5279076, 0.2824549, 0.06030276], + [0.8835633, 0.7727077, 0.5957984, 0.3411303, 0.09652185], + [0.9071558, 0.8299587, 0.6771400, 0.4402896, 0.17469338], + [0.9190793, 0.8486480, 0.7163332, 0.5083780, 0.26107691], + [0.9371223, 0.8762177, 0.7653702, 0.5773109, 0.32181041], + [0.9554613, 0.9119893, 0.8282687, 0.6776178, 0.43162744], + [0.9545744, 0.9099264, 0.8270244, 0.6822220, 0.45237623], + [0.9688112, 0.9351710, 0.8730961, 0.7546601, 0.56622448], + [0.9743227, 0.9491953, 0.9005150, 0.8086497, 0.64505437], + [0.9807345, 0.9638853, 0.9283012, 0.8631675, 0.73812581], + [0.9886746, 0.9777760, 0.9558950, 0.9123417, 0.82726553], + [0.9899096, 0.9803828, 0.9615592, 0.9255600, 0.85822149], + [0.9969510, 0.9935441, 0.9864657, 0.9726775, 0.94358663], + [0.9979533, 0.9960274, 0.9921724, 0.9837415, 0.96626288], + [0.9995981, 0.9989171, 0.9974178, 0.9949954, 0.99023356], + [1.0002640, 1.0005088, 1.0010594, 1.0021161, 1.00386912], + [0.9998903, 0.9998459, 0.9997795, 0.9995484, 0.99916305], + [1.0000008, 0.9999905, 0.9999481, 0.9998903, 0.99978047], + [1.0000004, 0.9999983, 1.0000001, 1.0000031, 1.00000297], + [0.9999995, 1.0000003, 1.0000005, 1.0000001, 1.00000032], + [0.9999999, 0.9999997, 0.9999994, 0.9999989, 0.99999786], + [0.9999999, 0.9999999, 0.9999999, 0.9999999, 0.99999991]] + # Get iteration points + grad_list = [prob.grad(x) for x in x_list] + delta_x = [np.array(x_list[i+1])-np.array(x_list[i]) + for i in range(len(x_list)-1)] + delta_grad = [grad_list[i+1]-grad_list[i] + for i in range(len(grad_list)-1)] + # Check curvature condition + for s, y in zip(delta_x, delta_grad): + if np.dot(s, y) <= 0: + raise ArithmeticError() + # Define QuasiNewton update + for quasi_newton in (BFGS(init_scale=1, min_curvature=1e-4), + SR1(init_scale=1)): + hess = deepcopy(quasi_newton) + inv_hess = deepcopy(quasi_newton) + hess.initialize(len(x_list[0]), 'hess') + inv_hess.initialize(len(x_list[0]), 'inv_hess') + # Compare the hessian and its inverse + for s, y in zip(delta_x, delta_grad): + hess.update(s, y) + inv_hess.update(s, y) + B = hess.get_matrix() + H = inv_hess.get_matrix() + assert_array_almost_equal(np.linalg.inv(B), H, decimal=10) + B_true = prob.hess(x_list[len(delta_x)]) + assert_array_less(norm(B - B_true)/norm(B_true), 0.1) + + def test_SR1_skip_update(self): + # Define auxiliary problem + prob = Rosenbrock(n=5) + # Define iteration points + x_list = [[0.0976270, 0.4303787, 
0.2055267, 0.0897663, -0.15269040], + [0.1847239, 0.0505757, 0.2123832, 0.0255081, 0.00083286], + [0.2142498, -0.0188480, 0.0503822, 0.0347033, 0.03323606], + [0.2071680, -0.0185071, 0.0341337, -0.0139298, 0.02881750], + [0.1533055, -0.0322935, 0.0280418, -0.0083592, 0.01503699], + [0.1382378, -0.0276671, 0.0266161, -0.0074060, 0.02801610], + [0.1651957, -0.0049124, 0.0269665, -0.0040025, 0.02138184], + [0.2354930, 0.0443711, 0.0173959, 0.0041872, 0.00794563], + [0.4168118, 0.1433867, 0.0111714, 0.0126265, -0.00658537], + [0.4681972, 0.2153273, 0.0225249, 0.0152704, -0.00463809], + [0.6023068, 0.3346815, 0.0731108, 0.0186618, -0.00371541], + [0.6415743, 0.3985468, 0.1324422, 0.0214160, -0.00062401], + [0.7503690, 0.5447616, 0.2804541, 0.0539851, 0.00242230], + [0.7452626, 0.5644594, 0.3324679, 0.0865153, 0.00454960], + [0.8059782, 0.6586838, 0.4229577, 0.1452990, 0.00976702], + [0.8549542, 0.7226562, 0.4991309, 0.2420093, 0.02772661], + [0.8571332, 0.7285741, 0.5279076, 0.2824549, 0.06030276], + [0.8835633, 0.7727077, 0.5957984, 0.3411303, 0.09652185], + [0.9071558, 0.8299587, 0.6771400, 0.4402896, 0.17469338]] + # Get iteration points + grad_list = [prob.grad(x) for x in x_list] + delta_x = [np.array(x_list[i+1])-np.array(x_list[i]) + for i in range(len(x_list)-1)] + delta_grad = [grad_list[i+1]-grad_list[i] + for i in range(len(grad_list)-1)] + hess = SR1(init_scale=1, min_denominator=1e-2) + hess.initialize(len(x_list[0]), 'hess') + # Compare the Hessian and its inverse + for i in range(len(delta_x)-1): + s = delta_x[i] + y = delta_grad[i] + hess.update(s, y) + # Test skip update + B = np.copy(hess.get_matrix()) + s = delta_x[17] + y = delta_grad[17] + hess.update(s, y) + B_updated = np.copy(hess.get_matrix()) + assert_array_equal(B, B_updated) + + def test_BFGS_skip_update(self): + # Define auxiliary problem + prob = Rosenbrock(n=5) + # Define iteration points + x_list = [[0.0976270, 0.4303787, 0.2055267, 0.0897663, -0.15269040], + [0.1847239, 0.0505757, 0.2123832, 0.0255081, 0.00083286], + [0.2142498, -0.0188480, 0.0503822, 0.0347033, 0.03323606], + [0.2071680, -0.0185071, 0.0341337, -0.0139298, 0.02881750], + [0.1533055, -0.0322935, 0.0280418, -0.0083592, 0.01503699], + [0.1382378, -0.0276671, 0.0266161, -0.0074060, 0.02801610], + [0.1651957, -0.0049124, 0.0269665, -0.0040025, 0.02138184]] + # Get iteration points + grad_list = [prob.grad(x) for x in x_list] + delta_x = [np.array(x_list[i+1])-np.array(x_list[i]) + for i in range(len(x_list)-1)] + delta_grad = [grad_list[i+1]-grad_list[i] + for i in range(len(grad_list)-1)] + hess = BFGS(init_scale=1, min_curvature=10) + hess.initialize(len(x_list[0]), 'hess') + # Compare the Hessian and its inverse + for i in range(len(delta_x)-1): + s = delta_x[i] + y = delta_grad[i] + hess.update(s, y) + # Test skip update + B = np.copy(hess.get_matrix()) + s = delta_x[5] + y = delta_grad[5] + hess.update(s, y) + B_updated = np.copy(hess.get_matrix()) + assert_array_equal(B, B_updated) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_isotonic_regression.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_isotonic_regression.py new file mode 100644 index 0000000000000000000000000000000000000000..ac574b34b1a10d29d7d74a3b172de116f6446f86 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_isotonic_regression.py @@ -0,0 +1,165 @@ +import numpy as np +from numpy.testing import assert_allclose, assert_equal +import pytest + +from scipy.optimize._pava_pybind import pava 
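+
+# Editorial sketch, not part of the upstream test module: the tests below rely
+# on pava's in-place contract, which this hypothetical helper makes explicit.
+# pava overwrites y with the fitted nondecreasing values, stores merged block
+# weights at the block starts of w, and records block boundaries in r.
+def _pava_usage_sketch():
+    y = np.array([8.0, 4.0, 8.0, 2.0, 2.0, 0.0, 8.0])
+    w = np.ones_like(y)
+    r = np.full(y.shape[0] + 1, -1, dtype=np.intp)
+    pava(y, w, r)
+    # y is now [4, 4, 4, 4, 4, 4, 8]; r marks the blocks [0, 6) and [6, 7).
+    return y, w, r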
+from scipy.optimize import isotonic_regression
+
+
+class TestIsotonicRegression:
+    @pytest.mark.parametrize(
+        ("y", "w", "msg"),
+        [
+            ([[0, 1]], None,
+             "array has incorrect number of dimensions: 2; expected 1"),
+            ([0, 1], [[1, 2]],
+             "Input arrays y and w must have one dimension of equal length"),
+            ([0, 1], [1],
+             "Input arrays y and w must have one dimension of equal length"),
+            (1, 2,
+             "Input arrays y and w must have one dimension of equal length"),
+            ([0, 1], [0, 1],
+             "Weights w must be strictly positive"),
+        ]
+    )
+    def test_raise_error(self, y, w, msg):
+        with pytest.raises(ValueError, match=msg):
+            isotonic_regression(y=y, weights=w)
+
+    def test_simple_pava(self):
+        # Test case of Busing 2020
+        # https://doi.org/10.18637/jss.v102.c01
+        y = np.array([8, 4, 8, 2, 2, 0, 8], dtype=np.float64)
+        w = np.ones_like(y)
+        r = np.full(shape=y.shape[0] + 1, fill_value=-1, dtype=np.intp)
+        pava(y, w, r)
+        assert_allclose(y, [4, 4, 4, 4, 4, 4, 8])
+        # Only first 2 elements of w are changed.
+        assert_allclose(w, [6, 1, 1, 1, 1, 1, 1])
+        # Only first 3 elements of r are changed.
+        assert_allclose(r, [0, 6, 7, -1, -1, -1, -1, -1])
+
+    @pytest.mark.parametrize("y_dtype", [np.float64, np.float32, np.int64, np.int32])
+    @pytest.mark.parametrize("w_dtype", [np.float64, np.float32, np.int64, np.int32])
+    @pytest.mark.parametrize("w", [None, "ones"])
+    def test_simple_isotonic_regression(self, w, w_dtype, y_dtype):
+        # Test case of Busing 2020
+        # https://doi.org/10.18637/jss.v102.c01
+        y = np.array([8, 4, 8, 2, 2, 0, 8], dtype=y_dtype)
+        if w is not None:
+            w = np.ones_like(y, dtype=w_dtype)
+        res = isotonic_regression(y, weights=w)
+        assert res.x.dtype == np.float64
+        assert res.weights.dtype == np.float64
+        assert_allclose(res.x, [4, 4, 4, 4, 4, 4, 8])
+        assert_allclose(res.weights, [6, 1])
+        assert_allclose(res.blocks, [0, 6, 7])
+        # Assert that y was not overwritten
+        assert_equal(y, np.array([8, 4, 8, 2, 2, 0, 8], dtype=np.float64))
+
+    @pytest.mark.parametrize("increasing", [True, False])
+    def test_linspace(self, increasing):
+        n = 10
+        y = np.linspace(0, 1, n) if increasing else np.linspace(1, 0, n)
+        res = isotonic_regression(y, increasing=increasing)
+        assert_allclose(res.x, y)
+        assert_allclose(res.blocks, np.arange(n + 1))
+
+    def test_weights(self):
+        w = np.array([1, 2, 5, 0.5, 0.5, 0.5, 1, 3])
+        y = np.array([3, 2, 1, 10, 9, 8, 20, 10])
+        res = isotonic_regression(y, weights=w)
+        assert_allclose(res.x, [12/8, 12/8, 12/8, 9, 9, 9, 50/4, 50/4])
+        assert_allclose(res.weights, [8, 1.5, 4])
+        assert_allclose(res.blocks, [0, 3, 6, 8])
+
+        # Weights act like repeated observations; here we repeat the 3rd
+        # element 5 times instead of giving it weight 5.
+ w2 = np.array([1, 2, 1, 1, 1, 1, 1, 0.5, 0.5, 0.5, 1, 3]) + y2 = np.array([3, 2, 1, 1, 1, 1, 1, 10, 9, 8, 20, 10]) + res2 = isotonic_regression(y2, weights=w2) + assert_allclose(np.diff(res2.x[0:7]), 0) + assert_allclose(res2.x[4:], res.x) + assert_allclose(res2.weights, res.weights) + assert_allclose(res2.blocks[1:] - 4, res.blocks[1:]) + + def test_against_R_monotone(self): + y = [0, 6, 8, 3, 5, 2, 1, 7, 9, 4] + res = isotonic_regression(y) + # R code + # library(monotone) + # options(digits=8) + # monotone(c(0, 6, 8, 3, 5, 2, 1, 7, 9, 4)) + x_R = [ + 0, 4.1666667, 4.1666667, 4.1666667, 4.1666667, 4.1666667, + 4.1666667, 6.6666667, 6.6666667, 6.6666667, + ] + assert_allclose(res.x, x_R) + assert_equal(res.blocks, [0, 1, 7, 10]) + + n = 100 + y = np.linspace(0, 1, num=n, endpoint=False) + y = 5 * y + np.sin(10 * y) + res = isotonic_regression(y) + # R code + # library(monotone) + # n <- 100 + # y <- 5 * ((1:n)-1)/n + sin(10 * ((1:n)-1)/n) + # options(digits=8) + # monotone(y) + x_R = [ + 0.00000000, 0.14983342, 0.29866933, 0.44552021, 0.58941834, 0.72942554, + 0.86464247, 0.99421769, 1.11735609, 1.23332691, 1.34147098, 1.44120736, + 1.53203909, 1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100, + 1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100, + 1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100, + 1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100, + 1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100, + 1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100, + 1.57081100, 1.57081100, 1.57081100, 1.62418532, 1.71654534, 1.81773256, + 1.92723551, 2.04445967, 2.16873336, 2.29931446, 2.43539782, 2.57612334, + 2.72058450, 2.86783750, 3.01691060, 3.16681390, 3.31654920, 3.46511999, + 3.61154136, 3.75484992, 3.89411335, 4.02843976, 4.15698660, 4.27896904, + 4.39366786, 4.50043662, 4.59870810, 4.68799998, 4.76791967, 4.83816823, + 4.86564130, 4.86564130, 4.86564130, 4.86564130, 4.86564130, 4.86564130, + 4.86564130, 4.86564130, 4.86564130, 4.86564130, 4.86564130, 4.86564130, + 4.86564130, 4.86564130, 4.86564130, 4.86564130, 4.86564130, 4.86564130, + 4.86564130, 4.86564130, 4.86564130, 4.86564130, + ] + assert_allclose(res.x, x_R) + + # Test increasing + assert np.all(np.diff(res.x) >= 0) + + # Test balance property: sum(y) == sum(x) + assert_allclose(np.sum(res.x), np.sum(y)) + + # Reverse order + res_inv = isotonic_regression(-y, increasing=False) + assert_allclose(-res_inv.x, res.x) + assert_equal(res_inv.blocks, res.blocks) + + def test_readonly(self): + x = np.arange(3, dtype=float) + w = np.ones(3, dtype=float) + + x.flags.writeable = False + w.flags.writeable = False + + res = isotonic_regression(x, weights=w) + assert np.all(np.isfinite(res.x)) + assert np.all(np.isfinite(res.weights)) + assert np.all(np.isfinite(res.blocks)) + + def test_non_contiguous_arrays(self): + x = np.arange(10, dtype=float)[::3] + w = np.ones(10, dtype=float)[::3] + assert not x.flags.c_contiguous + assert not x.flags.f_contiguous + assert not w.flags.c_contiguous + assert not w.flags.f_contiguous + + res = isotonic_regression(x, weights=w) + assert np.all(np.isfinite(res.x)) + assert np.all(np.isfinite(res.weights)) + assert np.all(np.isfinite(res.blocks)) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_lbfgsb_hessinv.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_lbfgsb_hessinv.py new file mode 100644 index 
0000000000000000000000000000000000000000..8e4452cd61c5400c13f4f239055352bae754ad7e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_lbfgsb_hessinv.py @@ -0,0 +1,43 @@ +import numpy as np +from numpy.testing import assert_allclose +import scipy.linalg +from scipy.optimize import minimize + + +def test_1(): + def f(x): + return x**4, 4*x**3 + + for gtol in [1e-8, 1e-12, 1e-20]: + for maxcor in range(20, 35): + result = minimize(fun=f, jac=True, method='L-BFGS-B', x0=20, + options={'gtol': gtol, 'maxcor': maxcor}) + + H1 = result.hess_inv(np.array([1])).reshape(1,1) + H2 = result.hess_inv.todense() + + assert_allclose(H1, H2) + + +def test_2(): + H0 = [[3, 0], [1, 2]] + + def f(x): + return np.dot(x, np.dot(scipy.linalg.inv(H0), x)) + + result1 = minimize(fun=f, method='L-BFGS-B', x0=[10, 20]) + result2 = minimize(fun=f, method='BFGS', x0=[10, 20]) + + H1 = result1.hess_inv.todense() + + H2 = np.vstack(( + result1.hess_inv(np.array([1, 0])), + result1.hess_inv(np.array([0, 1])))) + + assert_allclose( + result1.hess_inv(np.array([1, 0]).reshape(2,1)).reshape(-1), + result1.hess_inv(np.array([1, 0]))) + assert_allclose(H1, H2) + assert_allclose(H1, result2.hess_inv, rtol=1e-2, atol=0.03) + + diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_lbfgsb_setulb.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_lbfgsb_setulb.py new file mode 100644 index 0000000000000000000000000000000000000000..5b2a75684a27f31c07d5b8b20bd757385554edef --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_lbfgsb_setulb.py @@ -0,0 +1,128 @@ +import numpy as np +from scipy.optimize import _lbfgsb, minimize + + +def objfun(x): + """simplified objective func to test lbfgsb bound violation""" + x0 = [0.8750000000000278, + 0.7500000000000153, + 0.9499999999999722, + 0.8214285714285992, + 0.6363636363636085] + x1 = [1.0, 0.0, 1.0, 0.0, 0.0] + x2 = [1.0, + 0.0, + 0.9889733043149325, + 0.0, + 0.026353554421041155] + x3 = [1.0, + 0.0, + 0.9889917442915558, + 0.0, + 0.020341986743231205] + + f0 = 5163.647901211178 + f1 = 5149.8181642072905 + f2 = 5149.379332309634 + f3 = 5149.374490771297 + + g0 = np.array([-0.5934820547965749, + 1.6251549718258351, + -71.99168459202559, + 5.346636965797545, + 37.10732723092604]) + g1 = np.array([-0.43295349282641515, + 1.008607936794592, + 18.223666726602975, + 31.927010036981997, + -19.667512518739386]) + g2 = np.array([-0.4699874455100256, + 0.9466285353668347, + -0.016874360242016825, + 48.44999161133457, + 5.819631620590712]) + g3 = np.array([-0.46970678696829116, + 0.9612719312174818, + 0.006129809488833699, + 48.43557729419473, + 6.005481418498221]) + + if np.allclose(x, x0): + f = f0 + g = g0 + elif np.allclose(x, x1): + f = f1 + g = g1 + elif np.allclose(x, x2): + f = f2 + g = g2 + elif np.allclose(x, x3): + f = f3 + g = g3 + else: + raise ValueError( + 'Simplified objective function not defined ' + 'at requested point') + return (np.copy(f), np.copy(g)) + + +def test_setulb_floatround(): + """test if setulb() violates bounds + + checks for violation due to floating point rounding error + """ + + n = 5 + m = 10 + factr = 1e7 + pgtol = 1e-5 + maxls = 20 + iprint = -1 + nbd = np.full((n,), 2) + low_bnd = np.zeros(n, np.float64) + upper_bnd = np.ones(n, np.float64) + + x0 = np.array( + [0.8750000000000278, + 0.7500000000000153, + 0.9499999999999722, + 0.8214285714285992, + 0.6363636363636085]) + x = np.copy(x0) + + f = np.array(0.0, np.float64) + g = np.zeros(n, 
np.float64) + + fortran_int = _lbfgsb.types.intvar.dtype + + wa = np.zeros(2*m*n + 5*n + 11*m*m + 8*m, np.float64) + iwa = np.zeros(3*n, fortran_int) + task = np.zeros(1, 'S60') + csave = np.zeros(1, 'S60') + lsave = np.zeros(4, fortran_int) + isave = np.zeros(44, fortran_int) + dsave = np.zeros(29, np.float64) + + task[:] = b'START' + + for n_iter in range(7): # 7 steps required to reproduce error + f, g = objfun(x) + + _lbfgsb.setulb(m, x, low_bnd, upper_bnd, nbd, f, g, factr, + pgtol, wa, iwa, task, iprint, csave, lsave, + isave, dsave, maxls) + + assert (x <= upper_bnd).all() and (x >= low_bnd).all(), ( + "_lbfgsb.setulb() stepped to a point outside of the bounds") + + +def test_gh_issue18730(): + # issue 18730 reported that l-bfgs-b did not work with objectives + # returning single precision gradient arrays + def fun_single_precision(x): + x = x.astype(np.float32) + return np.sum(x**2), (2*x) + + res = minimize(fun_single_precision, x0=np.array([1., 1.]), jac=True, + method="l-bfgs-b") + np.testing.assert_allclose(res.fun, 0., atol=1e-15) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_least_squares.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_least_squares.py new file mode 100644 index 0000000000000000000000000000000000000000..114e95d9a29f88768968a599be62f435325f81a7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_least_squares.py @@ -0,0 +1,871 @@ +from itertools import product + +import numpy as np +from numpy.linalg import norm +from numpy.testing import (assert_, assert_allclose, + assert_equal, suppress_warnings) +from pytest import raises as assert_raises +from scipy.sparse import issparse, lil_matrix +from scipy.sparse.linalg import aslinearoperator + +from scipy.optimize import least_squares, Bounds +from scipy.optimize._lsq.least_squares import IMPLEMENTED_LOSSES +from scipy.optimize._lsq.common import EPS, make_strictly_feasible, CL_scaling_vector + + +def fun_trivial(x, a=0): + return (x - a)**2 + 5.0 + + +def jac_trivial(x, a=0.0): + return 2 * (x - a) + + +def fun_2d_trivial(x): + return np.array([x[0], x[1]]) + + +def jac_2d_trivial(x): + return np.identity(2) + + +def fun_rosenbrock(x): + return np.array([10 * (x[1] - x[0]**2), (1 - x[0])]) + + +def jac_rosenbrock(x): + return np.array([ + [-20 * x[0], 10], + [-1, 0] + ]) + + +def jac_rosenbrock_bad_dim(x): + return np.array([ + [-20 * x[0], 10], + [-1, 0], + [0.0, 0.0] + ]) + + +def fun_rosenbrock_cropped(x): + return fun_rosenbrock(x)[0] + + +def jac_rosenbrock_cropped(x): + return jac_rosenbrock(x)[0] + + +# When x is 1-D array, return is 2-D array. 
+def fun_wrong_dimensions(x): + return np.array([x, x**2, x**3]) + + +def jac_wrong_dimensions(x, a=0.0): + return np.atleast_3d(jac_trivial(x, a=a)) + + +def fun_bvp(x): + n = int(np.sqrt(x.shape[0])) + u = np.zeros((n + 2, n + 2)) + x = x.reshape((n, n)) + u[1:-1, 1:-1] = x + y = u[:-2, 1:-1] + u[2:, 1:-1] + u[1:-1, :-2] + u[1:-1, 2:] - 4 * x + x**3 + return y.ravel() + + +class BroydenTridiagonal: + def __init__(self, n=100, mode='sparse'): + np.random.seed(0) + + self.n = n + + self.x0 = -np.ones(n) + self.lb = np.linspace(-2, -1.5, n) + self.ub = np.linspace(-0.8, 0.0, n) + + self.lb += 0.1 * np.random.randn(n) + self.ub += 0.1 * np.random.randn(n) + + self.x0 += 0.1 * np.random.randn(n) + self.x0 = make_strictly_feasible(self.x0, self.lb, self.ub) + + if mode == 'sparse': + self.sparsity = lil_matrix((n, n), dtype=int) + i = np.arange(n) + self.sparsity[i, i] = 1 + i = np.arange(1, n) + self.sparsity[i, i - 1] = 1 + i = np.arange(n - 1) + self.sparsity[i, i + 1] = 1 + + self.jac = self._jac + elif mode == 'operator': + self.jac = lambda x: aslinearoperator(self._jac(x)) + elif mode == 'dense': + self.sparsity = None + self.jac = lambda x: self._jac(x).toarray() + else: + assert_(False) + + def fun(self, x): + f = (3 - x) * x + 1 + f[1:] -= x[:-1] + f[:-1] -= 2 * x[1:] + return f + + def _jac(self, x): + J = lil_matrix((self.n, self.n)) + i = np.arange(self.n) + J[i, i] = 3 - 2 * x + i = np.arange(1, self.n) + J[i, i - 1] = -1 + i = np.arange(self.n - 1) + J[i, i + 1] = -2 + return J + + +class ExponentialFittingProblem: + """Provide data and function for exponential fitting in the form + y = a + exp(b * x) + noise.""" + + def __init__(self, a, b, noise, n_outliers=1, x_range=(-1, 1), + n_points=11, random_seed=None): + np.random.seed(random_seed) + self.m = n_points + self.n = 2 + + self.p0 = np.zeros(2) + self.x = np.linspace(x_range[0], x_range[1], n_points) + + self.y = a + np.exp(b * self.x) + self.y += noise * np.random.randn(self.m) + + outliers = np.random.randint(0, self.m, n_outliers) + self.y[outliers] += 50 * noise * np.random.rand(n_outliers) + + self.p_opt = np.array([a, b]) + + def fun(self, p): + return p[0] + np.exp(p[1] * self.x) - self.y + + def jac(self, p): + J = np.empty((self.m, self.n)) + J[:, 0] = 1 + J[:, 1] = self.x * np.exp(p[1] * self.x) + return J + + +def cubic_soft_l1(z): + rho = np.empty((3, z.size)) + + t = 1 + z + rho[0] = 3 * (t**(1/3) - 1) + rho[1] = t ** (-2/3) + rho[2] = -2/3 * t**(-5/3) + + return rho + + +LOSSES = list(IMPLEMENTED_LOSSES.keys()) + [cubic_soft_l1] + + +class BaseMixin: + def test_basic(self): + # Test that the basic calling sequence works. + res = least_squares(fun_trivial, 2., method=self.method) + assert_allclose(res.x, 0, atol=1e-4) + assert_allclose(res.fun, fun_trivial(res.x)) + + def test_args_kwargs(self): + # Test that args and kwargs are passed correctly to the functions. 
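+        # (`args` supplies extra positional and `kwargs` extra keyword
+        # arguments; both are forwarded to fun and jac on every evaluation.)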
+        a = 3.0
+        for jac in ['2-point', '3-point', 'cs', jac_trivial]:
+            with suppress_warnings() as sup:
+                sup.filter(
+                    UserWarning,
+                    "jac='(3-point|cs)' works equivalently to '2-point' for method='lm'"
+                )
+                res = least_squares(fun_trivial, 2.0, jac, args=(a,),
+                                    method=self.method)
+                res1 = least_squares(fun_trivial, 2.0, jac, kwargs={'a': a},
+                                     method=self.method)
+
+            assert_allclose(res.x, a, rtol=1e-4)
+            assert_allclose(res1.x, a, rtol=1e-4)
+
+            assert_raises(TypeError, least_squares, fun_trivial, 2.0,
+                          args=(3, 4,), method=self.method)
+            assert_raises(TypeError, least_squares, fun_trivial, 2.0,
+                          kwargs={'kaboom': 3}, method=self.method)
+
+    def test_jac_options(self):
+        for jac in ['2-point', '3-point', 'cs', jac_trivial]:
+            with suppress_warnings() as sup:
+                sup.filter(
+                    UserWarning,
+                    "jac='(3-point|cs)' works equivalently to '2-point' for method='lm'"
+                )
+                res = least_squares(fun_trivial, 2.0, jac, method=self.method)
+            assert_allclose(res.x, 0, atol=1e-4)
+
+        assert_raises(ValueError, least_squares, fun_trivial, 2.0, jac='oops',
+                      method=self.method)
+
+    def test_nfev_options(self):
+        for max_nfev in [None, 20]:
+            res = least_squares(fun_trivial, 2.0, max_nfev=max_nfev,
+                                method=self.method)
+            assert_allclose(res.x, 0, atol=1e-4)
+
+    def test_x_scale_options(self):
+        for x_scale in [1.0, np.array([0.5]), 'jac']:
+            res = least_squares(fun_trivial, 2.0, x_scale=x_scale)
+            assert_allclose(res.x, 0)
+        assert_raises(ValueError, least_squares, fun_trivial,
+                      2.0, x_scale='auto', method=self.method)
+        assert_raises(ValueError, least_squares, fun_trivial,
+                      2.0, x_scale=-1.0, method=self.method)
+        assert_raises(ValueError, least_squares, fun_trivial,
+                      2.0, x_scale=None, method=self.method)
+        assert_raises(ValueError, least_squares, fun_trivial,
+                      2.0, x_scale=1.0+2.0j, method=self.method)
+
+    def test_diff_step(self):
+        # res1 and res2 should be equivalent.
+        # res2 and res3 should be different.
+        res1 = least_squares(fun_trivial, 2.0, diff_step=1e-1,
+                             method=self.method)
+        res2 = least_squares(fun_trivial, 2.0, diff_step=-1e-1,
+                             method=self.method)
+        res3 = least_squares(fun_trivial, 2.0,
+                             diff_step=None, method=self.method)
+        assert_allclose(res1.x, 0, atol=1e-4)
+        assert_allclose(res2.x, 0, atol=1e-4)
+        assert_allclose(res3.x, 0, atol=1e-4)
+        assert_equal(res1.x, res2.x)
+        assert_equal(res1.nfev, res2.nfev)
+
+    def test_incorrect_options_usage(self):
+        assert_raises(TypeError, least_squares, fun_trivial, 2.0,
+                      method=self.method, options={'no_such_option': 100})
+        assert_raises(TypeError, least_squares, fun_trivial, 2.0,
+                      method=self.method, options={'max_nfev': 100})
+
+    def test_full_result(self):
+        # MINPACK doesn't work very well with factor=100 on this problem,
+        # hence the loose 'atol'.
+        res = least_squares(fun_trivial, 2.0, method=self.method)
+        assert_allclose(res.x, 0, atol=1e-4)
+        assert_allclose(res.cost, 12.5)
+        assert_allclose(res.fun, 5)
+        assert_allclose(res.jac, 0, atol=1e-4)
+        assert_allclose(res.grad, 0, atol=1e-2)
+        assert_allclose(res.optimality, 0, atol=1e-2)
+        assert_equal(res.active_mask, 0)
+        if self.method == 'lm':
+            assert_(res.nfev < 30)
+            assert_(res.njev is None)
+        else:
+            assert_(res.nfev < 10)
+            assert_(res.njev < 10)
+        assert_(res.status > 0)
+        assert_(res.success)
+
+    def test_full_result_single_fev(self):
+        # MINPACK checks the number of nfev after the iteration,
+        # so it's hard to tell what it is going to compute.
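+        # (The state after a single allowed evaluation is therefore
+        # solver-defined for 'lm', so this test skips that method.)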
+ if self.method == 'lm': + return + + res = least_squares(fun_trivial, 2.0, method=self.method, + max_nfev=1) + assert_equal(res.x, np.array([2])) + assert_equal(res.cost, 40.5) + assert_equal(res.fun, np.array([9])) + assert_equal(res.jac, np.array([[4]])) + assert_equal(res.grad, np.array([36])) + assert_equal(res.optimality, 36) + assert_equal(res.active_mask, np.array([0])) + assert_equal(res.nfev, 1) + assert_equal(res.njev, 1) + assert_equal(res.status, 0) + assert_equal(res.success, 0) + + def test_rosenbrock(self): + x0 = [-2, 1] + x_opt = [1, 1] + for jac, x_scale, tr_solver in product( + ['2-point', '3-point', 'cs', jac_rosenbrock], + [1.0, np.array([1.0, 0.2]), 'jac'], + ['exact', 'lsmr']): + with suppress_warnings() as sup: + sup.filter( + UserWarning, + "jac='(3-point|cs)' works equivalently to '2-point' for method='lm'" + ) + res = least_squares(fun_rosenbrock, x0, jac, x_scale=x_scale, + tr_solver=tr_solver, method=self.method) + assert_allclose(res.x, x_opt) + + def test_rosenbrock_cropped(self): + x0 = [-2, 1] + if self.method == 'lm': + assert_raises(ValueError, least_squares, fun_rosenbrock_cropped, + x0, method='lm') + else: + for jac, x_scale, tr_solver in product( + ['2-point', '3-point', 'cs', jac_rosenbrock_cropped], + [1.0, np.array([1.0, 0.2]), 'jac'], + ['exact', 'lsmr']): + res = least_squares( + fun_rosenbrock_cropped, x0, jac, x_scale=x_scale, + tr_solver=tr_solver, method=self.method) + assert_allclose(res.cost, 0, atol=1e-14) + + def test_fun_wrong_dimensions(self): + assert_raises(ValueError, least_squares, fun_wrong_dimensions, + 2.0, method=self.method) + + def test_jac_wrong_dimensions(self): + assert_raises(ValueError, least_squares, fun_trivial, + 2.0, jac_wrong_dimensions, method=self.method) + + def test_fun_and_jac_inconsistent_dimensions(self): + x0 = [1, 2] + assert_raises(ValueError, least_squares, fun_rosenbrock, x0, + jac_rosenbrock_bad_dim, method=self.method) + + def test_x0_multidimensional(self): + x0 = np.ones(4).reshape(2, 2) + assert_raises(ValueError, least_squares, fun_trivial, x0, + method=self.method) + + def test_x0_complex_scalar(self): + x0 = 2.0 + 0.0*1j + assert_raises(ValueError, least_squares, fun_trivial, x0, + method=self.method) + + def test_x0_complex_array(self): + x0 = [1.0, 2.0 + 0.0*1j] + assert_raises(ValueError, least_squares, fun_trivial, x0, + method=self.method) + + def test_bvp(self): + # This test was introduced with fix #5556. It turned out that + # dogbox solver had a bug with trust-region radius update, which + # could block its progress and create an infinite loop. And this + # discrete boundary value problem is the one which triggers it. + n = 10 + x0 = np.ones(n**2) + if self.method == 'lm': + max_nfev = 5000 # To account for Jacobian estimation. + else: + max_nfev = 100 + res = least_squares(fun_bvp, x0, ftol=1e-2, method=self.method, + max_nfev=max_nfev) + + assert_(res.nfev < max_nfev) + assert_(res.cost < 0.5) + + def test_error_raised_when_all_tolerances_below_eps(self): + # Test that all 0 tolerances are not allowed. 
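+        # Passing None disables an individual tolerance; with ftol, xtol and
+        # gtol all disabled, no stopping criterion is left, hence ValueError.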
+        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
+                      method=self.method, ftol=None, xtol=None, gtol=None)
+
+    def test_convergence_with_only_one_tolerance_enabled(self):
+        if self.method == 'lm':
+            return  # test not applicable to 'lm'
+        x0 = [-2, 1]
+        x_opt = [1, 1]
+        for ftol, xtol, gtol in [(1e-8, None, None),
+                                 (None, 1e-8, None),
+                                 (None, None, 1e-8)]:
+            res = least_squares(fun_rosenbrock, x0, jac=jac_rosenbrock,
+                                ftol=ftol, gtol=gtol, xtol=xtol,
+                                method=self.method)
+            assert_allclose(res.x, x_opt)
+
+
+class BoundsMixin:
+    def test_inconsistent(self):
+        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
+                      bounds=(10.0, 0.0), method=self.method)
+
+    def test_infeasible(self):
+        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
+                      bounds=(3., 4), method=self.method)
+
+    def test_wrong_number(self):
+        assert_raises(ValueError, least_squares, fun_trivial, 2.,
+                      bounds=(1., 2, 3), method=self.method)
+
+    def test_inconsistent_shape(self):
+        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
+                      bounds=(1.0, [2.0, 3.0]), method=self.method)
+        # A 1-D array won't be broadcast
+        assert_raises(ValueError, least_squares, fun_rosenbrock, [1.0, 2.0],
+                      bounds=([0.0], [3.0, 4.0]), method=self.method)
+
+    def test_in_bounds(self):
+        for jac in ['2-point', '3-point', 'cs', jac_trivial]:
+            res = least_squares(fun_trivial, 2.0, jac=jac,
+                                bounds=(-1.0, 3.0), method=self.method)
+            assert_allclose(res.x, 0.0, atol=1e-4)
+            assert_equal(res.active_mask, [0])
+            assert_(-1 <= res.x <= 3)
+            res = least_squares(fun_trivial, 2.0, jac=jac,
+                                bounds=(0.5, 3.0), method=self.method)
+            assert_allclose(res.x, 0.5, atol=1e-4)
+            assert_equal(res.active_mask, [-1])
+            assert_(0.5 <= res.x <= 3)
+
+    def test_bounds_shape(self):
+        def get_bounds_direct(lb, ub):
+            return lb, ub
+
+        def get_bounds_instances(lb, ub):
+            return Bounds(lb, ub)
+
+        for jac in ['2-point', '3-point', 'cs', jac_2d_trivial]:
+            for bounds_func in [get_bounds_direct, get_bounds_instances]:
+                x0 = [1.0, 1.0]
+                res = least_squares(fun_2d_trivial, x0, jac=jac)
+                assert_allclose(res.x, [0.0, 0.0])
+                res = least_squares(fun_2d_trivial, x0, jac=jac,
+                                    bounds=bounds_func(0.5, [2.0, 2.0]),
+                                    method=self.method)
+                assert_allclose(res.x, [0.5, 0.5])
+                res = least_squares(fun_2d_trivial, x0, jac=jac,
+                                    bounds=bounds_func([0.3, 0.2], 3.0),
+                                    method=self.method)
+                assert_allclose(res.x, [0.3, 0.2])
+                res = least_squares(
+                    fun_2d_trivial, x0, jac=jac,
+                    bounds=bounds_func([-1, 0.5], [1.0, 3.0]),
+                    method=self.method)
+                assert_allclose(res.x, [0.0, 0.5], atol=1e-5)
+
+    def test_bounds_instances(self):
+        res = least_squares(fun_trivial, 0.5, bounds=Bounds())
+        assert_allclose(res.x, 0.0, atol=1e-4)
+
+        res = least_squares(fun_trivial, 3.0, bounds=Bounds(lb=1.0))
+        assert_allclose(res.x, 1.0, atol=1e-4)
+
+        res = least_squares(fun_trivial, 0.5, bounds=Bounds(lb=-1.0, ub=1.0))
+        assert_allclose(res.x, 0.0, atol=1e-4)
+
+        res = least_squares(fun_trivial, -3.0, bounds=Bounds(ub=-1.0))
+        assert_allclose(res.x, -1.0, atol=1e-4)
+
+        res = least_squares(fun_2d_trivial, [0.5, 0.5],
+                            bounds=Bounds(lb=[-1.0, -1.0], ub=1.0))
+        assert_allclose(res.x, [0.0, 0.0], atol=1e-5)
+
+        res = least_squares(fun_2d_trivial, [0.5, 0.5],
+                            bounds=Bounds(lb=[0.1, 0.1]))
+        assert_allclose(res.x, [0.1, 0.1], atol=1e-5)
+
+    def test_rosenbrock_bounds(self):
+        x0_1 = np.array([-2.0, 1.0])
+        x0_2 = np.array([2.0, 2.0])
+        x0_3 = np.array([-2.0, 2.0])
+        x0_4 = np.array([0.0, 2.0])
+        x0_5 = np.array([-1.2, 1.0])
+        problems = [
+            (x0_1, ([-np.inf, -1.5], np.inf)),
+            (x0_2,
+             ([-np.inf, 1.5], np.inf)),
+            (x0_3, ([-np.inf, 1.5], np.inf)),
+            (x0_4, ([-np.inf, 1.5], [1.0, np.inf])),
+            (x0_2, ([1.0, 1.5], [3.0, 3.0])),
+            (x0_5, ([-50.0, 0.0], [0.5, 100]))
+        ]
+        for x0, bounds in problems:
+            for jac, x_scale, tr_solver in product(
+                    ['2-point', '3-point', 'cs', jac_rosenbrock],
+                    [1.0, [1.0, 0.5], 'jac'],
+                    ['exact', 'lsmr']):
+                res = least_squares(fun_rosenbrock, x0, jac, bounds,
+                                    x_scale=x_scale, tr_solver=tr_solver,
+                                    method=self.method)
+                assert_allclose(res.optimality, 0.0, atol=1e-5)
+
+
+class SparseMixin:
+    def test_exact_tr_solver(self):
+        p = BroydenTridiagonal()
+        assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
+                      tr_solver='exact', method=self.method)
+        assert_raises(ValueError, least_squares, p.fun, p.x0,
+                      tr_solver='exact', jac_sparsity=p.sparsity,
+                      method=self.method)
+
+    def test_equivalence(self):
+        sparse = BroydenTridiagonal(mode='sparse')
+        dense = BroydenTridiagonal(mode='dense')
+        res_sparse = least_squares(
+            sparse.fun, sparse.x0, jac=sparse.jac,
+            method=self.method)
+        res_dense = least_squares(
+            dense.fun, dense.x0, jac=sparse.jac,
+            method=self.method)
+        assert_equal(res_sparse.nfev, res_dense.nfev)
+        assert_allclose(res_sparse.x, res_dense.x, atol=1e-20)
+        assert_allclose(res_sparse.cost, 0, atol=1e-20)
+        assert_allclose(res_dense.cost, 0, atol=1e-20)
+
+    def test_tr_options(self):
+        p = BroydenTridiagonal()
+        res = least_squares(p.fun, p.x0, p.jac, method=self.method,
+                            tr_options={'btol': 1e-10})
+        assert_allclose(res.cost, 0, atol=1e-20)
+
+    def test_wrong_parameters(self):
+        p = BroydenTridiagonal()
+        assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
+                      tr_solver='best', method=self.method)
+        assert_raises(TypeError, least_squares, p.fun, p.x0, p.jac,
+                      tr_solver='lsmr', tr_options={'tol': 1e-10})
+
+    def test_solver_selection(self):
+        sparse = BroydenTridiagonal(mode='sparse')
+        dense = BroydenTridiagonal(mode='dense')
+        res_sparse = least_squares(sparse.fun, sparse.x0, jac=sparse.jac,
+                                   method=self.method)
+        res_dense = least_squares(dense.fun, dense.x0, jac=dense.jac,
+                                  method=self.method)
+        assert_allclose(res_sparse.cost, 0, atol=1e-20)
+        assert_allclose(res_dense.cost, 0, atol=1e-20)
+        assert_(issparse(res_sparse.jac))
+        assert_(isinstance(res_dense.jac, np.ndarray))
+
+    def test_numerical_jac(self):
+        p = BroydenTridiagonal()
+        for jac in ['2-point', '3-point', 'cs']:
+            res_dense = least_squares(p.fun, p.x0, jac, method=self.method)
+            res_sparse = least_squares(
+                p.fun, p.x0, jac, method=self.method,
+                jac_sparsity=p.sparsity)
+            assert_equal(res_dense.nfev, res_sparse.nfev)
+            assert_allclose(res_dense.x, res_sparse.x, atol=1e-20)
+            assert_allclose(res_dense.cost, 0, atol=1e-20)
+            assert_allclose(res_sparse.cost, 0, atol=1e-20)
+
+    def test_with_bounds(self):
+        p = BroydenTridiagonal()
+        for jac, jac_sparsity in product(
+                [p.jac, '2-point', '3-point', 'cs'], [None, p.sparsity]):
+            res_1 = least_squares(
+                p.fun, p.x0, jac, bounds=(p.lb, np.inf),
+                method=self.method, jac_sparsity=jac_sparsity)
+            res_2 = least_squares(
+                p.fun, p.x0, jac, bounds=(-np.inf, p.ub),
+                method=self.method, jac_sparsity=jac_sparsity)
+            res_3 = least_squares(
+                p.fun, p.x0, jac, bounds=(p.lb, p.ub),
+                method=self.method, jac_sparsity=jac_sparsity)
+            assert_allclose(res_1.optimality, 0, atol=1e-10)
+            assert_allclose(res_2.optimality, 0, atol=1e-10)
+            assert_allclose(res_3.optimality, 0, atol=1e-10)
+
+    def test_wrong_jac_sparsity(self):
+        p = BroydenTridiagonal()
+        sparsity = p.sparsity[:-1]
+        assert_raises(ValueError,
+                      least_squares, p.fun, p.x0,
+                      jac_sparsity=sparsity, method=self.method)
+
+    def test_linear_operator(self):
+        p = BroydenTridiagonal(mode='operator')
+        res = least_squares(p.fun, p.x0, p.jac, method=self.method)
+        assert_allclose(res.cost, 0.0, atol=1e-20)
+        assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
+                      method=self.method, tr_solver='exact')
+
+    def test_x_scale_jac_scale(self):
+        p = BroydenTridiagonal()
+        res = least_squares(p.fun, p.x0, p.jac, method=self.method,
+                            x_scale='jac')
+        assert_allclose(res.cost, 0.0, atol=1e-20)
+
+        p = BroydenTridiagonal(mode='operator')
+        assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
+                      method=self.method, x_scale='jac')
+
+
+class LossFunctionMixin:
+    def test_options(self):
+        for loss in LOSSES:
+            res = least_squares(fun_trivial, 2.0, loss=loss,
+                                method=self.method)
+            assert_allclose(res.x, 0, atol=1e-15)
+
+        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
+                      loss='hinge', method=self.method)
+
+    def test_fun(self):
+        # Test that res.fun returns the actual residuals and is not
+        # modified by the loss function.
+        for loss in LOSSES:
+            res = least_squares(fun_trivial, 2.0, loss=loss,
+                                method=self.method)
+            assert_equal(res.fun, fun_trivial(res.x))
+
+    def test_grad(self):
+        # Test that res.grad is the true gradient of the loss function at the
+        # solution. Use max_nfev = 1 to avoid reaching the minimum.
+        x = np.array([2.0])  # res.x will be this.
+
+        res = least_squares(fun_trivial, x, jac_trivial, loss='linear',
+                            max_nfev=1, method=self.method)
+        assert_equal(res.grad, 2 * x * (x**2 + 5))
+
+        res = least_squares(fun_trivial, x, jac_trivial, loss='huber',
+                            max_nfev=1, method=self.method)
+        assert_equal(res.grad, 2 * x)
+
+        res = least_squares(fun_trivial, x, jac_trivial, loss='soft_l1',
+                            max_nfev=1, method=self.method)
+        assert_allclose(res.grad,
+                        2 * x * (x**2 + 5) / (1 + (x**2 + 5)**2)**0.5)
+
+        res = least_squares(fun_trivial, x, jac_trivial, loss='cauchy',
+                            max_nfev=1, method=self.method)
+        assert_allclose(res.grad, 2 * x * (x**2 + 5) / (1 + (x**2 + 5)**2))
+
+        res = least_squares(fun_trivial, x, jac_trivial, loss='arctan',
+                            max_nfev=1, method=self.method)
+        assert_allclose(res.grad, 2 * x * (x**2 + 5) / (1 + (x**2 + 5)**4))
+
+        res = least_squares(fun_trivial, x, jac_trivial, loss=cubic_soft_l1,
+                            max_nfev=1, method=self.method)
+        assert_allclose(res.grad,
+                        2 * x * (x**2 + 5) / (1 + (x**2 + 5)**2)**(2/3))
+
+    def test_jac(self):
+        # Test that res.jac.T.dot(res.jac) gives the Gauss-Newton
+        # approximation of the Hessian. This approximation is computed by
+        # differentiating the cost function twice and dropping the part
+        # containing the second derivative of f. For a scalar function it is
+        # H = (rho' + 2 * rho'' * f**2) * f'**2; if the expression in
+        # parentheses is less than EPS, it is replaced by EPS. Here we check
+        # against the square root of H.
+
+        x = 2.0  # res.x will be this.
+        f = x**2 + 5  # res.fun will be this.
+
+        res = least_squares(fun_trivial, x, jac_trivial, loss='linear',
+                            max_nfev=1, method=self.method)
+        assert_equal(res.jac, 2 * x)
+
+        # For `huber` loss the Jacobian correction is identically zero in the
+        # outlier region; in such cases it is modified to equal EPS**0.5.
+        res = least_squares(fun_trivial, x, jac_trivial, loss='huber',
+                            max_nfev=1, method=self.method)
+        assert_equal(res.jac, 2 * x * EPS**0.5)
+
+        # Now, let's apply `f_scale` to turn the residual into an inlier.
+        # The loss function becomes linear.
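+        # With f_scale=10 the scaled residual falls in huber's quadratic
+        # region, so rho' = 1, rho'' = 0 and the Jacobian stays at 2 * x.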
+        res = least_squares(fun_trivial, x, jac_trivial, loss='huber',
+                            f_scale=10, max_nfev=1)
+        assert_equal(res.jac, 2 * x)
+
+        # 'soft_l1' always gives a positive scaling.
+        res = least_squares(fun_trivial, x, jac_trivial, loss='soft_l1',
+                            max_nfev=1, method=self.method)
+        assert_allclose(res.jac, 2 * x * (1 + f**2)**-0.75)
+
+        # For 'cauchy' the correction term turns out to be negative, and it
+        # is replaced by EPS**0.5.
+        res = least_squares(fun_trivial, x, jac_trivial, loss='cauchy',
+                            max_nfev=1, method=self.method)
+        assert_allclose(res.jac, 2 * x * EPS**0.5)
+
+        # Now use scaling to turn the residual into an inlier.
+        res = least_squares(fun_trivial, x, jac_trivial, loss='cauchy',
+                            f_scale=10, max_nfev=1, method=self.method)
+        fs = f / 10
+        assert_allclose(res.jac, 2 * x * (1 - fs**2)**0.5 / (1 + fs**2))
+
+        # 'arctan' gives an outlier.
+        res = least_squares(fun_trivial, x, jac_trivial, loss='arctan',
+                            max_nfev=1, method=self.method)
+        assert_allclose(res.jac, 2 * x * EPS**0.5)
+
+        # Turn it into an inlier.
+        res = least_squares(fun_trivial, x, jac_trivial, loss='arctan',
+                            f_scale=20.0, max_nfev=1, method=self.method)
+        fs = f / 20
+        assert_allclose(res.jac, 2 * x * (1 - 3 * fs**4)**0.5 / (1 + fs**4))
+
+        # cubic_soft_l1 will give an outlier.
+        res = least_squares(fun_trivial, x, jac_trivial, loss=cubic_soft_l1,
+                            max_nfev=1)
+        assert_allclose(res.jac, 2 * x * EPS**0.5)
+
+        # Turn it into an inlier.
+        res = least_squares(fun_trivial, x, jac_trivial,
+                            loss=cubic_soft_l1, f_scale=6, max_nfev=1)
+        fs = f / 6
+        assert_allclose(res.jac,
+                        2 * x * (1 - fs**2 / 3)**0.5 * (1 + fs**2)**(-5/6))
+
+    def test_robustness(self):
+        for noise in [0.1, 1.0]:
+            p = ExponentialFittingProblem(1, 0.1, noise, random_seed=0)
+
+            for jac in ['2-point', '3-point', 'cs', p.jac]:
+                res_lsq = least_squares(p.fun, p.p0, jac=jac,
+                                        method=self.method)
+                assert_allclose(res_lsq.optimality, 0, atol=1e-2)
+                for loss in LOSSES:
+                    if loss == 'linear':
+                        continue
+                    res_robust = least_squares(
+                        p.fun, p.p0, jac=jac, loss=loss, f_scale=noise,
+                        method=self.method)
+                    assert_allclose(res_robust.optimality, 0, atol=1e-2)
+                    assert_(norm(res_robust.x - p.p_opt) <
+                            norm(res_lsq.x - p.p_opt))
+
+
+class TestDogbox(BaseMixin, BoundsMixin, SparseMixin, LossFunctionMixin):
+    method = 'dogbox'
+
+
+class TestTRF(BaseMixin, BoundsMixin, SparseMixin, LossFunctionMixin):
+    method = 'trf'
+
+    def test_lsmr_regularization(self):
+        p = BroydenTridiagonal()
+        for regularize in [True, False]:
+            res = least_squares(p.fun, p.x0, p.jac, method='trf',
+                                tr_options={'regularize': regularize})
+            assert_allclose(res.cost, 0, atol=1e-20)
+
+
+class TestLM(BaseMixin):
+    method = 'lm'
+
+    def test_bounds_not_supported(self):
+        assert_raises(ValueError, least_squares, fun_trivial,
+                      2.0, bounds=(-3.0, 3.0), method='lm')
+
+    def test_m_less_n_not_supported(self):
+        x0 = [-2, 1]
+        assert_raises(ValueError, least_squares, fun_rosenbrock_cropped, x0,
+                      method='lm')
+
+    def test_sparse_not_supported(self):
+        p = BroydenTridiagonal()
+        assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
+                      method='lm')
+
+    def test_jac_sparsity_not_supported(self):
+        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
+                      jac_sparsity=[1], method='lm')
+
+    def test_LinearOperator_not_supported(self):
+        p = BroydenTridiagonal(mode="operator")
+        assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
+                      method='lm')
+
+    def test_loss(self):
+        res = least_squares(fun_trivial, 2.0, loss='linear', method='lm')
+        assert_allclose(res.x, 0.0, atol=1e-4)
+
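+        # 'lm' wraps MINPACK, which has no robust-loss support, so any loss
+        # other than 'linear' must raise: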
+        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
+                      method='lm', loss='huber')
+
+
+def test_basic():
+    # Test that the 'method' arg is really optional.
+    res = least_squares(fun_trivial, 2.0)
+    assert_allclose(res.x, 0, atol=1e-10)
+
+
+def test_small_tolerances_for_lm():
+    for ftol, xtol, gtol in [(None, 1e-13, 1e-13),
+                             (1e-13, None, 1e-13),
+                             (1e-13, 1e-13, None)]:
+        assert_raises(ValueError, least_squares, fun_trivial, 2.0, xtol=xtol,
+                      ftol=ftol, gtol=gtol, method='lm')
+
+
+def test_fp32_gh12991():
+    # Checks that smaller FP sizes can be used in least_squares.
+    # This is the minimum working example reported for gh12991.
+    np.random.seed(1)
+
+    x = np.linspace(0, 1, 100).astype("float32")
+    y = np.random.random(100).astype("float32")
+
+    def func(p, x):
+        return p[0] + p[1] * x
+
+    def err(p, x, y):
+        return func(p, x) - y
+
+    res = least_squares(err, [-1.0, -1.0], args=(x, y))
+    # Previously the initial Jacobian calculated for this would be all 0,
+    # so the minimization would terminate immediately with nfev=1 and report
+    # a successful minimization (which it shouldn't have) while being
+    # unchanged from the initial solution.
+    # It was terminating early because the underlying approx_derivative
+    # used a step size for FP64 when the working space was FP32.
+    assert res.nfev > 2
+    assert_allclose(res.x, np.array([0.4082241, 0.15530563]), atol=5e-5)
+
+
+def test_gh_18793_and_19351():
+    answer = 1e-12
+    initial_guess = 1.1e-12
+
+    def chi2(x):
+        return (x-answer)**2
+
+    gtol = 1e-15
+    res = least_squares(chi2, x0=initial_guess, gtol=gtol, bounds=(0, np.inf))
+    # Original motivation: gh-18793.
+    # If we choose an initial condition that is close to the solution,
+    # we shouldn't return an answer that is further away from the solution.
+
+    # Update: gh-19351.
+    # However, this requirement does not go well with the 'trf' algorithm
+    # logic, and some regressions were reported after the presumed fix.
+    # The returned solution is good as long as it satisfies the convergence
+    # conditions. Specifically, in this case the scaled gradient will be
+    # sufficiently low.
+
+    scaling, _ = CL_scaling_vector(res.x, res.grad,
+                                   np.atleast_1d(0), np.atleast_1d(np.inf))
+    assert res.status == 1  # Converged by gradient.
+    assert np.linalg.norm(res.grad * scaling, ord=np.inf) < gtol
+
+
+def test_gh_19103():
+    # Checks that the least_squares 'trf' method selects a strictly feasible
+    # point, and thus succeeds instead of failing, when the initial guess is
+    # reported exactly at a boundary point. This is a reduced example from
+    # gh-19103.
+
+    ydata = np.array([0.] * 66 + [
+        1., 0., 0., 0., 0., 0., 1., 1., 0., 0., 1.,
+        1., 1., 1., 0., 0., 0., 1., 0., 0., 2., 1.,
+        0., 3., 1., 6., 5., 0., 0., 2., 8., 4., 4.,
+        6., 9., 7., 2., 7., 8., 2., 13., 9., 8., 11.,
+        10., 13., 14., 19., 11., 15., 18., 26., 19., 32., 29.,
+        28., 36., 32., 35., 36., 43., 52., 32., 58., 56., 52.,
+        67., 53., 72., 88., 77., 95., 94., 84., 86., 101., 107.,
+        108., 118., 96., 115., 138., 137.,
+    ])
+    xdata = np.arange(0, ydata.size) * 0.1
+
+    def exponential_wrapped(params):
+        A, B, x0 = params
+        return A * np.exp(B * (xdata - x0)) - ydata
+
+    x0 = [0.01, 1., 5.]
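+    # Note that x0[0] sits exactly on the lower bound declared just below,
+    # which is the situation gh-19103 is about.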
+ bounds = ((0.01, 0, 0), (np.inf, 10, 20.9)) + res = least_squares(exponential_wrapped, x0, method='trf', bounds=bounds) + assert res.success diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_linear_assignment.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_linear_assignment.py new file mode 100644 index 0000000000000000000000000000000000000000..d59792da9eef38e313eaa0bca70f873627f8d3cf --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_linear_assignment.py @@ -0,0 +1,116 @@ +# Author: Brian M. Clapper, G. Varoquaux, Lars Buitinck +# License: BSD + +from numpy.testing import assert_array_equal +import pytest + +import numpy as np + +from scipy.optimize import linear_sum_assignment +from scipy.sparse import random +from scipy.sparse._sputils import matrix +from scipy.sparse.csgraph import min_weight_full_bipartite_matching +from scipy.sparse.csgraph.tests.test_matching import ( + linear_sum_assignment_assertions, linear_sum_assignment_test_cases +) + + +def test_linear_sum_assignment_input_shape(): + with pytest.raises(ValueError, match="expected a matrix"): + linear_sum_assignment([1, 2, 3]) + + +def test_linear_sum_assignment_input_object(): + C = [[1, 2, 3], [4, 5, 6]] + assert_array_equal(linear_sum_assignment(C), + linear_sum_assignment(np.asarray(C))) + assert_array_equal(linear_sum_assignment(C), + linear_sum_assignment(matrix(C))) + + +def test_linear_sum_assignment_input_bool(): + I = np.identity(3) + assert_array_equal(linear_sum_assignment(I.astype(np.bool_)), + linear_sum_assignment(I)) + + +def test_linear_sum_assignment_input_string(): + I = np.identity(3) + with pytest.raises(TypeError, match="Cannot cast array data"): + linear_sum_assignment(I.astype(str)) + + +def test_linear_sum_assignment_input_nan(): + I = np.diag([np.nan, 1, 1]) + with pytest.raises(ValueError, match="contains invalid numeric entries"): + linear_sum_assignment(I) + + +def test_linear_sum_assignment_input_neginf(): + I = np.diag([1, -np.inf, 1]) + with pytest.raises(ValueError, match="contains invalid numeric entries"): + linear_sum_assignment(I) + + +def test_linear_sum_assignment_input_inf(): + I = np.identity(3) + I[:, 0] = np.inf + with pytest.raises(ValueError, match="cost matrix is infeasible"): + linear_sum_assignment(I) + + +def test_constant_cost_matrix(): + # Fixes #11602 + n = 8 + C = np.ones((n, n)) + row_ind, col_ind = linear_sum_assignment(C) + assert_array_equal(row_ind, np.arange(n)) + assert_array_equal(col_ind, np.arange(n)) + + +@pytest.mark.parametrize('num_rows,num_cols', [(0, 0), (2, 0), (0, 3)]) +def test_linear_sum_assignment_trivial_cost(num_rows, num_cols): + C = np.empty(shape=(num_cols, num_rows)) + row_ind, col_ind = linear_sum_assignment(C) + assert len(row_ind) == 0 + assert len(col_ind) == 0 + + +@pytest.mark.parametrize('sign,test_case', linear_sum_assignment_test_cases) +def test_linear_sum_assignment_small_inputs(sign, test_case): + linear_sum_assignment_assertions( + linear_sum_assignment, np.array, sign, test_case) + + +# Tests that combine scipy.optimize.linear_sum_assignment and +# scipy.sparse.csgraph.min_weight_full_bipartite_matching +def test_two_methods_give_same_result_on_many_sparse_inputs(): + # As opposed to the test above, here we do not spell out the expected + # output; only assert that the two methods give the same result. + # Concretely, the below tests 100 cases of size 100x100, out of which + # 36 are infeasible. 
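+    # ("Infeasible" means no full matching with finite total cost exists; in
+    # that case both functions raise ValueError, which the try/except blocks
+    # below record.)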
+ np.random.seed(1234) + for _ in range(100): + lsa_raises = False + mwfbm_raises = False + sparse = random(100, 100, density=0.06, + data_rvs=lambda size: np.random.randint(1, 100, size)) + # In csgraph, zeros correspond to missing edges, so we explicitly + # replace those with infinities + dense = np.full(sparse.shape, np.inf) + dense[sparse.row, sparse.col] = sparse.data + sparse = sparse.tocsr() + try: + row_ind, col_ind = linear_sum_assignment(dense) + lsa_cost = dense[row_ind, col_ind].sum() + except ValueError: + lsa_raises = True + try: + row_ind, col_ind = min_weight_full_bipartite_matching(sparse) + mwfbm_cost = sparse[row_ind, col_ind].sum() + except ValueError: + mwfbm_raises = True + # Ensure that if one method raises, so does the other one. + assert lsa_raises == mwfbm_raises + if not lsa_raises: + assert lsa_cost == mwfbm_cost diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_linesearch.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_linesearch.py new file mode 100644 index 0000000000000000000000000000000000000000..f5ae5cc6f4d4065de7a33dab79edb44d550bfeeb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_linesearch.py @@ -0,0 +1,314 @@ +""" +Tests for line search routines +""" +from numpy.testing import (assert_equal, assert_array_almost_equal, + assert_array_almost_equal_nulp, assert_warns, + suppress_warnings) +import scipy.optimize._linesearch as ls +from scipy.optimize._linesearch import LineSearchWarning +import numpy as np + + +def assert_wolfe(s, phi, derphi, c1=1e-4, c2=0.9, err_msg=""): + """ + Check that strong Wolfe conditions apply + """ + phi1 = phi(s) + phi0 = phi(0) + derphi0 = derphi(0) + derphi1 = derphi(s) + msg = (f"s = {s}; phi(0) = {phi0}; phi(s) = {phi1}; phi'(0) = {derphi0};" + f" phi'(s) = {derphi1}; {err_msg}") + + assert phi1 <= phi0 + c1*s*derphi0, "Wolfe 1 failed: " + msg + assert abs(derphi1) <= abs(c2*derphi0), "Wolfe 2 failed: " + msg + + +def assert_armijo(s, phi, c1=1e-4, err_msg=""): + """ + Check that Armijo condition applies + """ + phi1 = phi(s) + phi0 = phi(0) + msg = f"s = {s}; phi(0) = {phi0}; phi(s) = {phi1}; {err_msg}" + assert phi1 <= (1 - c1*s)*phi0, msg + + +def assert_line_wolfe(x, p, s, f, fprime, **kw): + assert_wolfe(s, phi=lambda sp: f(x + p*sp), + derphi=lambda sp: np.dot(fprime(x + p*sp), p), **kw) + + +def assert_line_armijo(x, p, s, f, **kw): + assert_armijo(s, phi=lambda sp: f(x + p*sp), **kw) + + +def assert_fp_equal(x, y, err_msg="", nulp=50): + """Assert two arrays are equal, up to some floating-point rounding error""" + try: + assert_array_almost_equal_nulp(x, y, nulp) + except AssertionError as e: + raise AssertionError(f"{e}\n{err_msg}") from e + + +class TestLineSearch: + # -- scalar functions; must have dphi(0.) 
< 0 + def _scalar_func_1(self, s): # skip name check + self.fcount += 1 + p = -s - s**3 + s**4 + dp = -1 - 3*s**2 + 4*s**3 + return p, dp + + def _scalar_func_2(self, s): # skip name check + self.fcount += 1 + p = np.exp(-4*s) + s**2 + dp = -4*np.exp(-4*s) + 2*s + return p, dp + + def _scalar_func_3(self, s): # skip name check + self.fcount += 1 + p = -np.sin(10*s) + dp = -10*np.cos(10*s) + return p, dp + + # -- n-d functions + + def _line_func_1(self, x): # skip name check + self.fcount += 1 + f = np.dot(x, x) + df = 2*x + return f, df + + def _line_func_2(self, x): # skip name check + self.fcount += 1 + f = np.dot(x, np.dot(self.A, x)) + 1 + df = np.dot(self.A + self.A.T, x) + return f, df + + # -- + + def setup_method(self): + self.scalar_funcs = [] + self.line_funcs = [] + self.N = 20 + self.fcount = 0 + + def bind_index(func, idx): + # Remember Python's closure semantics! + return lambda *a, **kw: func(*a, **kw)[idx] + + for name in sorted(dir(self)): + if name.startswith('_scalar_func_'): + value = getattr(self, name) + self.scalar_funcs.append( + (name, bind_index(value, 0), bind_index(value, 1))) + elif name.startswith('_line_func_'): + value = getattr(self, name) + self.line_funcs.append( + (name, bind_index(value, 0), bind_index(value, 1))) + + np.random.seed(1234) + self.A = np.random.randn(self.N, self.N) + + def scalar_iter(self): + for name, phi, derphi in self.scalar_funcs: + for old_phi0 in np.random.randn(3): + yield name, phi, derphi, old_phi0 + + def line_iter(self): + for name, f, fprime in self.line_funcs: + k = 0 + while k < 9: + x = np.random.randn(self.N) + p = np.random.randn(self.N) + if np.dot(p, fprime(x)) >= 0: + # always pick a descent direction + continue + k += 1 + old_fv = float(np.random.randn()) + yield name, f, fprime, x, p, old_fv + + # -- Generic scalar searches + + def test_scalar_search_wolfe1(self): + c = 0 + for name, phi, derphi, old_phi0 in self.scalar_iter(): + c += 1 + s, phi1, phi0 = ls.scalar_search_wolfe1(phi, derphi, phi(0), + old_phi0, derphi(0)) + assert_fp_equal(phi0, phi(0), name) + assert_fp_equal(phi1, phi(s), name) + assert_wolfe(s, phi, derphi, err_msg=name) + + assert c > 3 # check that the iterator really works... + + def test_scalar_search_wolfe2(self): + for name, phi, derphi, old_phi0 in self.scalar_iter(): + s, phi1, phi0, derphi1 = ls.scalar_search_wolfe2( + phi, derphi, phi(0), old_phi0, derphi(0)) + assert_fp_equal(phi0, phi(0), name) + assert_fp_equal(phi1, phi(s), name) + if derphi1 is not None: + assert_fp_equal(derphi1, derphi(s), name) + assert_wolfe(s, phi, derphi, err_msg=f"{name} {old_phi0:g}") + + def test_scalar_search_wolfe2_with_low_amax(self): + def phi(alpha): + return (alpha - 5) ** 2 + + def derphi(alpha): + return 2 * (alpha - 5) + + alpha_star, _, _, derphi_star = ls.scalar_search_wolfe2(phi, derphi, amax=0.001) + assert alpha_star is None # Not converged + assert derphi_star is None # Not converged + + def test_scalar_search_wolfe2_regression(self): + # Regression test for gh-12157 + # This phi has its minimum at alpha=4/3 ~ 1.333. + def phi(alpha): + if alpha < 1: + return - 3*np.pi/2 * (alpha - 1) + else: + return np.cos(3*np.pi/2 * alpha - np.pi) + + def derphi(alpha): + if alpha < 1: + return - 3*np.pi/2 + else: + return - 3*np.pi/2 * np.sin(3*np.pi/2 * alpha - np.pi) + + s, _, _, _ = ls.scalar_search_wolfe2(phi, derphi) + # Without the fix in gh-13073, the scalar_search_wolfe2 + # returned s=2.0 instead. 
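+        # The true minimizer is at alpha = 4/3, so any step below 1.5 shows
+        # that the bracketing found the correct interval.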
+ assert s < 1.5 + + def test_scalar_search_armijo(self): + for name, phi, derphi, old_phi0 in self.scalar_iter(): + s, phi1 = ls.scalar_search_armijo(phi, phi(0), derphi(0)) + assert_fp_equal(phi1, phi(s), name) + assert_armijo(s, phi, err_msg=f"{name} {old_phi0:g}") + + # -- Generic line searches + + def test_line_search_wolfe1(self): + c = 0 + smax = 100 + for name, f, fprime, x, p, old_f in self.line_iter(): + f0 = f(x) + g0 = fprime(x) + self.fcount = 0 + s, fc, gc, fv, ofv, gv = ls.line_search_wolfe1(f, fprime, x, p, + g0, f0, old_f, + amax=smax) + assert_equal(self.fcount, fc+gc) + assert_fp_equal(ofv, f(x)) + if s is None: + continue + assert_fp_equal(fv, f(x + s*p)) + assert_array_almost_equal(gv, fprime(x + s*p), decimal=14) + if s < smax: + c += 1 + assert_line_wolfe(x, p, s, f, fprime, err_msg=name) + + assert c > 3 # check that the iterator really works... + + def test_line_search_wolfe2(self): + c = 0 + smax = 512 + for name, f, fprime, x, p, old_f in self.line_iter(): + f0 = f(x) + g0 = fprime(x) + self.fcount = 0 + with suppress_warnings() as sup: + sup.filter(LineSearchWarning, + "The line search algorithm could not find a solution") + sup.filter(LineSearchWarning, + "The line search algorithm did not converge") + s, fc, gc, fv, ofv, gv = ls.line_search_wolfe2(f, fprime, x, p, + g0, f0, old_f, + amax=smax) + assert_equal(self.fcount, fc+gc) + assert_fp_equal(ofv, f(x)) + assert_fp_equal(fv, f(x + s*p)) + if gv is not None: + assert_array_almost_equal(gv, fprime(x + s*p), decimal=14) + if s < smax: + c += 1 + assert_line_wolfe(x, p, s, f, fprime, err_msg=name) + assert c > 3 # check that the iterator really works... + + def test_line_search_wolfe2_bounds(self): + # See gh-7475 + + # For this f and p, starting at a point on axis 0, the strong Wolfe + # condition 2 is met if and only if the step length s satisfies + # |x + s| <= c2 * |x| + def f(x): + return np.dot(x, x) + def fp(x): + return 2 * x + p = np.array([1, 0]) + + # Smallest s satisfying strong Wolfe conditions for these arguments is 30 + x = -60 * p + c2 = 0.5 + + s, _, _, _, _, _ = ls.line_search_wolfe2(f, fp, x, p, amax=30, c2=c2) + assert_line_wolfe(x, p, s, f, fp) + + s, _, _, _, _, _ = assert_warns(LineSearchWarning, + ls.line_search_wolfe2, f, fp, x, p, + amax=29, c2=c2) + assert s is None + + # s=30 will only be tried on the 6th iteration, so this won't converge + assert_warns(LineSearchWarning, ls.line_search_wolfe2, f, fp, x, p, + c2=c2, maxiter=5) + + def test_line_search_armijo(self): + c = 0 + for name, f, fprime, x, p, old_f in self.line_iter(): + f0 = f(x) + g0 = fprime(x) + self.fcount = 0 + s, fc, fv = ls.line_search_armijo(f, x, p, g0, f0) + c += 1 + assert_equal(self.fcount, fc) + assert_fp_equal(fv, f(x + s*p)) + assert_line_armijo(x, p, s, f, err_msg=name) + assert c >= 9 + + # -- More specific tests + + def test_armijo_terminate_1(self): + # Armijo should evaluate the function only once if the trial step + # is already suitable + count = [0] + + def phi(s): + count[0] += 1 + return -s + 0.01*s**2 + s, phi1 = ls.scalar_search_armijo(phi, phi(0), -1, alpha0=1) + assert_equal(s, 1) + assert_equal(count[0], 2) + assert_armijo(s, phi) + + def test_wolfe_terminate(self): + # wolfe1 and wolfe2 should also evaluate the function only a few + # times if the trial step is already suitable + + def phi(s): + count[0] += 1 + return -s + 0.05*s**2 + + def derphi(s): + count[0] += 1 + return -1 + 0.05*2*s + + for func in [ls.scalar_search_wolfe1, ls.scalar_search_wolfe2]: + count = [0] + r = func(phi, derphi, 
+                     phi(0), None, derphi(0))
+            assert r[0] is not None, (r, func)
+            assert count[0] <= 2 + 2, (count, func)
+            assert_wolfe(r[0], phi, derphi, err_msg=str(func))
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_linprog.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_linprog.py
new file mode 100644
index 0000000000000000000000000000000000000000..49a0f8de5a20b82b3b036d2e835951d927c266ca
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_linprog.py
@@ -0,0 +1,2473 @@
+"""
+Unit tests for linear programming.
+"""
+import sys
+import platform
+
+import numpy as np
+from numpy.testing import (assert_, assert_allclose, assert_equal,
+                           assert_array_less, assert_warns, suppress_warnings)
+from pytest import raises as assert_raises
+from scipy.optimize import linprog, OptimizeWarning
+from scipy.optimize._numdiff import approx_derivative
+from scipy.sparse.linalg import MatrixRankWarning
+from scipy.linalg import LinAlgWarning
+from scipy._lib._util import VisibleDeprecationWarning
+import scipy.sparse
+import pytest
+
+has_umfpack = True
+try:
+    from scikits.umfpack import UmfpackWarning
+except ImportError:
+    has_umfpack = False
+
+has_cholmod = True
+try:
+    import sksparse  # noqa: F401
+    from sksparse.cholmod import cholesky as cholmod  # noqa: F401
+except ImportError:
+    has_cholmod = False
+
+
+def _assert_iteration_limit_reached(res, maxiter):
+    assert_(not res.success, "Incorrectly reported success")
+    assert_(res.nit <= maxiter, "Incorrectly reported number of iterations")
+    assert_equal(res.status, 1, "Failed to report iteration limit reached")
+
+
+def _assert_infeasible(res):
+    # res: linprog result object
+    assert_(not res.success, "incorrectly reported success")
+    assert_equal(res.status, 2, "failed to report infeasible status")
+
+
+def _assert_unbounded(res):
+    # res: linprog result object
+    assert_(not res.success, "incorrectly reported success")
+    assert_equal(res.status, 3, "failed to report unbounded status")
+
+
+def _assert_unable_to_find_basic_feasible_sol(res):
+    # res: linprog result object
+
+    # The status may be either 2 or 4 depending on why the feasible solution
+    # could not be found. If the underlying problem is expected to not have a
+    # feasible solution, _assert_infeasible should be used.
+    assert_(not res.success, "incorrectly reported success")
+    assert_(res.status in (2, 4), "failed to report optimization failure")
+
+
+def _assert_success(res, desired_fun=None, desired_x=None,
+                    rtol=1e-8, atol=1e-8):
+    # res: linprog result object
+    # desired_fun: desired objective function value or None
+    # desired_x: desired solution or None
+    if not res.success:
+        msg = f"linprog status {res.status}, message: {res.message}"
+        raise AssertionError(msg)
+
+    assert_equal(res.status, 0)
+    if desired_fun is not None:
+        assert_allclose(res.fun, desired_fun,
+                        err_msg="converged to an unexpected objective value",
+                        rtol=rtol, atol=atol)
+    if desired_x is not None:
+        assert_allclose(res.x, desired_x,
+                        err_msg="converged to an unexpected solution",
+                        rtol=rtol, atol=atol)
+
+
+def magic_square(n):
+    """
+    Generates a linear program for which integer solutions represent an
+    n x n magic square; binary decision variables represent the presence
+    (or absence) of an integer 1 to n^2 in each position of the square.
+    """
+
+    np.random.seed(0)
+    M = n * (n**2 + 1) / 2
+
+    numbers = np.arange(n**4) // n**2 + 1
+
+    numbers = numbers.reshape(n**2, n, n)
+
+    zeros = np.zeros((n**2, n, n))
+
+    A_list = []
+    b_list = []
+
+    # Rule 1: use every number exactly once
+    for i in range(n**2):
+        A_row = zeros.copy()
+        A_row[i, :, :] = 1
+        A_list.append(A_row.flatten())
+        b_list.append(1)
+
+    # Rule 2: only one number per square
+    for i in range(n):
+        for j in range(n):
+            A_row = zeros.copy()
+            A_row[:, i, j] = 1
+            A_list.append(A_row.flatten())
+            b_list.append(1)
+
+    # Rule 3: sum of rows is M
+    for i in range(n):
+        A_row = zeros.copy()
+        A_row[:, i, :] = numbers[:, i, :]
+        A_list.append(A_row.flatten())
+        b_list.append(M)
+
+    # Rule 4: sum of columns is M
+    for i in range(n):
+        A_row = zeros.copy()
+        A_row[:, :, i] = numbers[:, :, i]
+        A_list.append(A_row.flatten())
+        b_list.append(M)
+
+    # Rule 5: sum of diagonals is M
+    A_row = zeros.copy()
+    A_row[:, range(n), range(n)] = numbers[:, range(n), range(n)]
+    A_list.append(A_row.flatten())
+    b_list.append(M)
+    A_row = zeros.copy()
+    A_row[:, range(n), range(-1, -n - 1, -1)] = \
+        numbers[:, range(n), range(-1, -n - 1, -1)]
+    A_list.append(A_row.flatten())
+    b_list.append(M)
+
+    A = np.array(np.vstack(A_list), dtype=float)
+    b = np.array(b_list, dtype=float)
+    c = np.random.rand(A.shape[1])
+
+    return A, b, c, numbers, M
+
+
+def lpgen_2d(m, n):
+    """ -> A b c LP test: m*n vars, m+n constraints
+        row sums == n/m, col sums == 1
+        https://gist.github.com/denis-bz/8647461
+    """
+    np.random.seed(0)
+    c = - np.random.exponential(size=(m, n))
+    Arow = np.zeros((m, m * n))
+    brow = np.zeros(m)
+    for j in range(m):
+        j1 = j + 1
+        Arow[j, j * n:j1 * n] = 1
+        brow[j] = n / m
+
+    Acol = np.zeros((n, m * n))
+    bcol = np.zeros(n)
+    for j in range(n):
+        j1 = j + 1
+        Acol[j, j::n] = 1
+        bcol[j] = 1
+
+    A = np.vstack((Arow, Acol))
+    b = np.hstack((brow, bcol))
+
+    return A, b, c.ravel()
+
+
+def very_random_gen(seed=0):
+    np.random.seed(seed)
+    m_eq, m_ub, n = 10, 20, 50
+    c = np.random.rand(n)-0.5
+    A_ub = np.random.rand(m_ub, n)-0.5
+    b_ub = np.random.rand(m_ub)-0.5
+    A_eq = np.random.rand(m_eq, n)-0.5
+    b_eq = np.random.rand(m_eq)-0.5
+    lb = -np.random.rand(n)
+    ub = np.random.rand(n)
+    lb[lb < -np.random.rand()] = -np.inf
+    ub[ub > np.random.rand()] = np.inf
+    bounds = np.vstack((lb, ub)).T
+    return c, A_ub, b_ub, A_eq, b_eq, bounds
+
+
+def nontrivial_problem():
+    c = [-1, 8, 4, -6]
+    A_ub = [[-7, -7, 6, 9],
+            [1, -1, -3, 0],
+            [10, -10, -7, 7],
+            [6, -1, 3, 4]]
+    b_ub = [-3, 6, -6, 6]
+    A_eq = [[-10, 1, 1, -8]]
+    b_eq = [-4]
+    x_star = [101 / 1391, 1462 / 1391, 0, 752 / 1391]
+    f_star = 7083 / 1391
+    return c, A_ub, b_ub, A_eq, b_eq, x_star, f_star
+
+
+def l1_regression_prob(seed=0, m=8, d=9, n=100):
+    '''
+    Training data is {(x0, y0), (x1, y1), ..., (xn-1, yn-1)}
+        x in R^d
+        y in R
+    n: number of training samples
+    d: dimension of x, i.e.
x in R^d + phi: feature map R^d -> R^m + m: dimension of feature space + ''' + np.random.seed(seed) + phi = np.random.normal(0, 1, size=(m, d)) # random feature mapping + w_true = np.random.randn(m) + x = np.random.normal(0, 1, size=(d, n)) # features + y = w_true @ (phi @ x) + np.random.normal(0, 1e-5, size=n) # measurements + + # construct the problem + c = np.ones(m+n) + c[:m] = 0 + A_ub = scipy.sparse.lil_matrix((2*n, n+m)) + idx = 0 + for ii in range(n): + A_ub[idx, :m] = phi @ x[:, ii] + A_ub[idx, m+ii] = -1 + A_ub[idx+1, :m] = -1*phi @ x[:, ii] + A_ub[idx+1, m+ii] = -1 + idx += 2 + A_ub = A_ub.tocsc() + b_ub = np.zeros(2*n) + b_ub[0::2] = y + b_ub[1::2] = -y + bnds = [(None, None)]*m + [(0, None)]*n + return c, A_ub, b_ub, bnds + + +def generic_callback_test(self): + # Check that callback is as advertised + last_cb = {} + + def cb(res): + message = res.pop('message') + complete = res.pop('complete') + + assert_(res.pop('phase') in (1, 2)) + assert_(res.pop('status') in range(4)) + assert_(isinstance(res.pop('nit'), int)) + assert_(isinstance(complete, bool)) + assert_(isinstance(message, str)) + + last_cb['x'] = res['x'] + last_cb['fun'] = res['fun'] + last_cb['slack'] = res['slack'] + last_cb['con'] = res['con'] + + c = np.array([-3, -2]) + A_ub = [[2, 1], [1, 1], [1, 0]] + b_ub = [10, 8, 4] + res = linprog(c, A_ub=A_ub, b_ub=b_ub, callback=cb, method=self.method) + + _assert_success(res, desired_fun=-18.0, desired_x=[2, 6]) + assert_allclose(last_cb['fun'], res['fun']) + assert_allclose(last_cb['x'], res['x']) + assert_allclose(last_cb['con'], res['con']) + assert_allclose(last_cb['slack'], res['slack']) + + +def test_unknown_solvers_and_options(): + c = np.array([-3, -2]) + A_ub = [[2, 1], [1, 1], [1, 0]] + b_ub = [10, 8, 4] + + assert_raises(ValueError, linprog, + c, A_ub=A_ub, b_ub=b_ub, method='ekki-ekki-ekki') + assert_raises(ValueError, linprog, + c, A_ub=A_ub, b_ub=b_ub, method='highs-ekki') + message = "Unrecognized options detected: {'rr_method': 'ekki-ekki-ekki'}" + with pytest.warns(OptimizeWarning, match=message): + linprog(c, A_ub=A_ub, b_ub=b_ub, + options={"rr_method": 'ekki-ekki-ekki'}) + + +def test_choose_solver(): + # 'highs' chooses 'dual' + c = np.array([-3, -2]) + A_ub = [[2, 1], [1, 1], [1, 0]] + b_ub = [10, 8, 4] + + res = linprog(c, A_ub, b_ub, method='highs') + _assert_success(res, desired_fun=-18.0, desired_x=[2, 6]) + + +def test_deprecation(): + with pytest.warns(DeprecationWarning): + linprog(1, method='interior-point') + with pytest.warns(DeprecationWarning): + linprog(1, method='revised simplex') + with pytest.warns(DeprecationWarning): + linprog(1, method='simplex') + + +def test_highs_status_message(): + res = linprog(1, method='highs') + msg = "Optimization terminated successfully. (HiGHS Status 7:" + assert res.status == 0 + assert res.message.startswith(msg) + + A, b, c, numbers, M = magic_square(6) + bounds = [(0, 1)] * len(c) + integrality = [1] * len(c) + options = {"time_limit": 0.1} + res = linprog(c=c, A_eq=A, b_eq=b, bounds=bounds, method='highs', + options=options, integrality=integrality) + msg = "Time limit reached. (HiGHS Status 13:" + assert res.status == 1 + assert res.message.startswith(msg) + + options = {"maxiter": 10} + res = linprog(c=c, A_eq=A, b_eq=b, bounds=bounds, method='highs-ds', + options=options) + msg = "Iteration limit reached. (HiGHS Status 14:" + assert res.status == 1 + assert res.message.startswith(msg) + + res = linprog(1, bounds=(1, -1), method='highs') + msg = "The problem is infeasible. 
(HiGHS Status 8:" + assert res.status == 2 + assert res.message.startswith(msg) + + res = linprog(-1, method='highs') + msg = "The problem is unbounded. (HiGHS Status 10:" + assert res.status == 3 + assert res.message.startswith(msg) + + from scipy.optimize._linprog_highs import _highs_to_scipy_status_message + status, message = _highs_to_scipy_status_message(58, "Hello!") + msg = "The HiGHS status code was not recognized. (HiGHS Status 58:" + assert status == 4 + assert message.startswith(msg) + + status, message = _highs_to_scipy_status_message(None, None) + msg = "HiGHS did not provide a status code. (HiGHS Status None: None)" + assert status == 4 + assert message.startswith(msg) + + +def test_bug_17380(): + linprog([1, 1], A_ub=[[-1, 0]], b_ub=[-2.5], integrality=[1, 1]) + + +A_ub = None +b_ub = None +A_eq = None +b_eq = None +bounds = None + +################ +# Common Tests # +################ + + +class LinprogCommonTests: + """ + Base class for `linprog` tests. Generally, each test will be performed + once for every derived class of LinprogCommonTests, each of which will + typically change self.options and/or self.method. Effectively, these tests + are run for many combination of method (simplex, revised simplex, and + interior point) and options (such as pivoting rule or sparse treatment). + """ + + ################## + # Targeted Tests # + ################## + + def test_callback(self): + generic_callback_test(self) + + def test_disp(self): + # test that display option does not break anything. + A, b, c = lpgen_2d(20, 20) + res = linprog(c, A_ub=A, b_ub=b, method=self.method, + options={"disp": True}) + _assert_success(res, desired_fun=-64.049494229) + + def test_docstring_example(self): + # Example from linprog docstring. + c = [-1, 4] + A = [[-3, 1], [1, 2]] + b = [6, 4] + x0_bounds = (None, None) + x1_bounds = (-3, None) + res = linprog(c, A_ub=A, b_ub=b, bounds=(x0_bounds, x1_bounds), + options=self.options, method=self.method) + _assert_success(res, desired_fun=-22) + + def test_type_error(self): + # (presumably) checks that linprog recognizes type errors + # This is tested more carefully in test__linprog_clean_inputs.py + c = [1] + A_eq = [[1]] + b_eq = "hello" + assert_raises(TypeError, linprog, + c, A_eq=A_eq, b_eq=b_eq, + method=self.method, options=self.options) + + def test_aliasing_b_ub(self): + # (presumably) checks that linprog does not modify b_ub + # This is tested more carefully in test__linprog_clean_inputs.py + c = np.array([1.0]) + A_ub = np.array([[1.0]]) + b_ub_orig = np.array([3.0]) + b_ub = b_ub_orig.copy() + bounds = (-4.0, np.inf) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=-4, desired_x=[-4]) + assert_allclose(b_ub_orig, b_ub) + + def test_aliasing_b_eq(self): + # (presumably) checks that linprog does not modify b_eq + # This is tested more carefully in test__linprog_clean_inputs.py + c = np.array([1.0]) + A_eq = np.array([[1.0]]) + b_eq_orig = np.array([3.0]) + b_eq = b_eq_orig.copy() + bounds = (-4.0, np.inf) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=3, desired_x=[3]) + assert_allclose(b_eq_orig, b_eq) + + def test_non_ndarray_args(self): + # (presumably) checks that linprog accepts list in place of arrays + # This is tested more carefully in test__linprog_clean_inputs.py + c = [1.0] + A_ub = [[1.0]] + b_ub = [3.0] + A_eq = [[1.0]] + b_eq = [2.0] + bounds = (-1.0, 10.0) + res = 
linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=2, desired_x=[2]) + + def test_unknown_options(self): + c = np.array([-3, -2]) + A_ub = [[2, 1], [1, 1], [1, 0]] + b_ub = [10, 8, 4] + + def f(c, A_ub=None, b_ub=None, A_eq=None, + b_eq=None, bounds=None, options={}): + linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=options) + + o = {key: self.options[key] for key in self.options} + o['spam'] = 42 + + assert_warns(OptimizeWarning, f, + c, A_ub=A_ub, b_ub=b_ub, options=o) + + def test_integrality_without_highs(self): + # ensure that using `integrality` parameter without `method='highs'` + # raises warning and produces correct solution to relaxed problem + # source: https://en.wikipedia.org/wiki/Integer_programming#Example + A_ub = np.array([[-1, 1], [3, 2], [2, 3]]) + b_ub = np.array([1, 12, 12]) + c = -np.array([0, 1]) + + bounds = [(0, np.inf)] * len(c) + integrality = [1] * len(c) + + with np.testing.assert_warns(OptimizeWarning): + res = linprog(c=c, A_ub=A_ub, b_ub=b_ub, bounds=bounds, + method=self.method, integrality=integrality) + + np.testing.assert_allclose(res.x, [1.8, 2.8]) + np.testing.assert_allclose(res.fun, -2.8) + + def test_invalid_inputs(self): + + def f(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None): + linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + + # Test ill-formatted bounds + assert_raises(ValueError, f, [1, 2, 3], bounds=[(1, 2), (3, 4)]) + with np.testing.suppress_warnings() as sup: + sup.filter(VisibleDeprecationWarning, "Creating an ndarray from ragged") + assert_raises(ValueError, f, [1, 2, 3], bounds=[(1, 2), (3, 4), (3, 4, 5)]) + assert_raises(ValueError, f, [1, 2, 3], bounds=[(1, -2), (1, 2)]) + + # Test other invalid inputs + assert_raises(ValueError, f, [1, 2], A_ub=[[1, 2]], b_ub=[1, 2]) + assert_raises(ValueError, f, [1, 2], A_ub=[[1]], b_ub=[1]) + assert_raises(ValueError, f, [1, 2], A_eq=[[1, 2]], b_eq=[1, 2]) + assert_raises(ValueError, f, [1, 2], A_eq=[[1]], b_eq=[1]) + assert_raises(ValueError, f, [1, 2], A_eq=[1], b_eq=1) + + # this last check doesn't make sense for sparse presolve + if ("_sparse_presolve" in self.options and + self.options["_sparse_presolve"]): + return + # there aren't 3-D sparse matrices + + assert_raises(ValueError, f, [1, 2], A_ub=np.zeros((1, 1, 3)), b_eq=1) + + def test_sparse_constraints(self): + # gh-13559: improve error message for sparse inputs when unsupported + def f(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None): + linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + + np.random.seed(0) + m = 100 + n = 150 + A_eq = scipy.sparse.rand(m, n, 0.5) + x_valid = np.random.randn(n) + c = np.random.randn(n) + ub = x_valid + np.random.rand(n) + lb = x_valid - np.random.rand(n) + bounds = np.column_stack((lb, ub)) + b_eq = A_eq * x_valid + + if self.method in {'simplex', 'revised simplex'}: + # simplex and revised simplex should raise error + with assert_raises(ValueError, match=f"Method '{self.method}' " + "does not support sparse constraint matrices."): + linprog(c=c, A_eq=A_eq, b_eq=b_eq, bounds=bounds, + method=self.method, options=self.options) + else: + # other methods should succeed + options = {**self.options} + if self.method in {'interior-point'}: + options['sparse'] = True + + res = linprog(c=c, A_eq=A_eq, b_eq=b_eq, bounds=bounds, + method=self.method, options=options) + assert res.success + + def test_maxiter(self): + # 
test iteration limit w/ Enzo example + c = [4, 8, 3, 0, 0, 0] + A = [ + [2, 5, 3, -1, 0, 0], + [3, 2.5, 8, 0, -1, 0], + [8, 10, 4, 0, 0, -1]] + b = [185, 155, 600] + np.random.seed(0) + maxiter = 3 + res = linprog(c, A_eq=A, b_eq=b, method=self.method, + options={"maxiter": maxiter}) + _assert_iteration_limit_reached(res, maxiter) + assert_equal(res.nit, maxiter) + + def test_bounds_fixed(self): + + # Test fixed bounds (upper equal to lower) + # If presolve option True, test if solution found in presolve (i.e. + # number of iterations is 0). + do_presolve = self.options.get('presolve', True) + + res = linprog([1], bounds=(1, 1), + method=self.method, options=self.options) + _assert_success(res, 1, 1) + if do_presolve: + assert_equal(res.nit, 0) + + res = linprog([1, 2, 3], bounds=[(5, 5), (-1, -1), (3, 3)], + method=self.method, options=self.options) + _assert_success(res, 12, [5, -1, 3]) + if do_presolve: + assert_equal(res.nit, 0) + + res = linprog([1, 1], bounds=[(1, 1), (1, 3)], + method=self.method, options=self.options) + _assert_success(res, 2, [1, 1]) + if do_presolve: + assert_equal(res.nit, 0) + + res = linprog([1, 1, 2], A_eq=[[1, 0, 0], [0, 1, 0]], b_eq=[1, 7], + bounds=[(-5, 5), (0, 10), (3.5, 3.5)], + method=self.method, options=self.options) + _assert_success(res, 15, [1, 7, 3.5]) + if do_presolve: + assert_equal(res.nit, 0) + + def test_bounds_infeasible(self): + + # Test ill-valued bounds (upper less than lower) + # If presolve option True, test if solution found in presolve (i.e. + # number of iterations is 0). + do_presolve = self.options.get('presolve', True) + + res = linprog([1], bounds=(1, -2), method=self.method, options=self.options) + _assert_infeasible(res) + if do_presolve: + assert_equal(res.nit, 0) + + res = linprog([1], bounds=[(1, -2)], method=self.method, options=self.options) + _assert_infeasible(res) + if do_presolve: + assert_equal(res.nit, 0) + + res = linprog([1, 2, 3], bounds=[(5, 0), (1, 2), (3, 4)], + method=self.method, options=self.options) + _assert_infeasible(res) + if do_presolve: + assert_equal(res.nit, 0) + + def test_bounds_infeasible_2(self): + + # Test ill-valued bounds (lower inf, upper -inf) + # If presolve option True, test if solution found in presolve (i.e. + # number of iterations is 0). + # For the simplex method, the cases do not result in an + # infeasible status, but in a RuntimeWarning. This is a + # consequence of having _presolve() take care of feasibility + # checks. See issue gh-11618. 
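A minimal standalone sketch of the infeasible-bounds behavior these tests exercise, assuming the default 'highs' method of a recent SciPy (the deprecated solvers this class parametrizes over behave the same way when presolve is on):

    from scipy.optimize import linprog

    # A lower bound above the upper bound is trivially infeasible;
    # presolve reports status 2 without any solver iterations.
    res = linprog(c=[1], bounds=[(1, -1)], method="highs")
    assert res.status == 2 and not res.success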
+ do_presolve = self.options.get('presolve', True) + simplex_without_presolve = not do_presolve and self.method == 'simplex' + + c = [1, 2, 3] + bounds_1 = [(1, 2), (np.inf, np.inf), (3, 4)] + bounds_2 = [(1, 2), (-np.inf, -np.inf), (3, 4)] + + if simplex_without_presolve: + def g(c, bounds): + res = linprog(c, bounds=bounds, + method=self.method, options=self.options) + return res + + with pytest.warns(RuntimeWarning): + with pytest.raises(IndexError): + g(c, bounds=bounds_1) + + with pytest.warns(RuntimeWarning): + with pytest.raises(IndexError): + g(c, bounds=bounds_2) + else: + res = linprog(c=c, bounds=bounds_1, + method=self.method, options=self.options) + _assert_infeasible(res) + if do_presolve: + assert_equal(res.nit, 0) + res = linprog(c=c, bounds=bounds_2, + method=self.method, options=self.options) + _assert_infeasible(res) + if do_presolve: + assert_equal(res.nit, 0) + + def test_empty_constraint_1(self): + c = [-1, -2] + res = linprog(c, method=self.method, options=self.options) + _assert_unbounded(res) + + def test_empty_constraint_2(self): + c = [-1, 1, -1, 1] + bounds = [(0, np.inf), (-np.inf, 0), (-1, 1), (-1, 1)] + res = linprog(c, bounds=bounds, + method=self.method, options=self.options) + _assert_unbounded(res) + # Unboundedness detected in presolve requires no iterations + if self.options.get('presolve', True): + assert_equal(res.nit, 0) + + def test_empty_constraint_3(self): + c = [1, -1, 1, -1] + bounds = [(0, np.inf), (-np.inf, 0), (-1, 1), (-1, 1)] + res = linprog(c, bounds=bounds, + method=self.method, options=self.options) + _assert_success(res, desired_x=[0, 0, -1, 1], desired_fun=-2) + + def test_inequality_constraints(self): + # Minimize linear function subject to linear inequality constraints. + # http://www.dam.brown.edu/people/huiwang/classes/am121/Archive/simplex_121_c.pdf + c = np.array([3, 2]) * -1 # maximize + A_ub = [[2, 1], + [1, 1], + [1, 0]] + b_ub = [10, 8, 4] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=-18, desired_x=[2, 6]) + + def test_inequality_constraints2(self): + # Minimize linear function subject to linear inequality constraints. 
+ # http://www.statslab.cam.ac.uk/~ff271/teaching/opt/notes/notes8.pdf + # (dead link) + c = [6, 3] + A_ub = [[0, 3], + [-1, -1], + [-2, 1]] + b_ub = [2, -1, -1] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=5, desired_x=[2 / 3, 1 / 3]) + + def test_bounds_simple(self): + c = [1, 2] + bounds = (1, 2) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_x=[1, 1]) + + bounds = [(1, 2), (1, 2)] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_x=[1, 1]) + + def test_bounded_below_only_1(self): + c = np.array([1.0]) + A_eq = np.array([[1.0]]) + b_eq = np.array([3.0]) + bounds = (1.0, None) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=3, desired_x=[3]) + + def test_bounded_below_only_2(self): + c = np.ones(3) + A_eq = np.eye(3) + b_eq = np.array([1, 2, 3]) + bounds = (0.5, np.inf) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_x=b_eq, desired_fun=np.sum(b_eq)) + + def test_bounded_above_only_1(self): + c = np.array([1.0]) + A_eq = np.array([[1.0]]) + b_eq = np.array([3.0]) + bounds = (None, 10.0) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=3, desired_x=[3]) + + def test_bounded_above_only_2(self): + c = np.ones(3) + A_eq = np.eye(3) + b_eq = np.array([1, 2, 3]) + bounds = (-np.inf, 4) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_x=b_eq, desired_fun=np.sum(b_eq)) + + def test_bounds_infinity(self): + c = np.ones(3) + A_eq = np.eye(3) + b_eq = np.array([1, 2, 3]) + bounds = (-np.inf, np.inf) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_x=b_eq, desired_fun=np.sum(b_eq)) + + def test_bounds_mixed(self): + # Problem has one unbounded variable and + # another with a negative lower bound. + c = np.array([-1, 4]) * -1 # maximize + A_ub = np.array([[-3, 1], + [1, 2]], dtype=np.float64) + b_ub = [6, 4] + x0_bounds = (-np.inf, np.inf) + x1_bounds = (-3, np.inf) + bounds = (x0_bounds, x1_bounds) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=-80 / 7, desired_x=[-8 / 7, 18 / 7]) + + def test_bounds_equal_but_infeasible(self): + c = [-4, 1] + A_ub = [[7, -2], [0, 1], [2, -2]] + b_ub = [14, 0, 3] + bounds = [(2, 2), (0, None)] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_infeasible(res) + + def test_bounds_equal_but_infeasible2(self): + c = [-4, 1] + A_eq = [[7, -2], [0, 1], [2, -2]] + b_eq = [14, 0, 3] + bounds = [(2, 2), (0, None)] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_infeasible(res) + + def test_bounds_equal_no_presolve(self): + # There was a bug when a lower and upper bound were equal but + # presolve was not on to eliminate the variable. The bound + # was being converted to an equality constraint, but the bound + # was not eliminated, leading to issues in postprocessing. 
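The bounds-to-equality conversion described above can be sketched on a simplified version of the problem below; an illustrative equivalence check, not part of the test suite (both formulations are infeasible, so both should report status 2 under the default method):

    from scipy.optimize import linprog

    # x1 pinned to 2 through its bounds ...
    res_bounds = linprog([1, 2], A_ub=[[1, 2]], b_ub=[4],
                         bounds=[(1, 2), (2, 2)])
    # ... versus the same restriction written as an equality constraint.
    res_eq = linprog([1, 2], A_ub=[[1, 2]], b_ub=[4],
                     A_eq=[[0, 1]], b_eq=[2],
                     bounds=[(1, 2), (None, None)])
    assert res_bounds.status == res_eq.status == 2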
+ c = [1, 2] + A_ub = [[1, 2], [1.1, 2.2]] + b_ub = [4, 8] + bounds = [(1, 2), (2, 2)] + + o = {key: self.options[key] for key in self.options} + o["presolve"] = False + + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=o) + _assert_infeasible(res) + + def test_zero_column_1(self): + m, n = 3, 4 + np.random.seed(0) + c = np.random.rand(n) + c[1] = 1 + A_eq = np.random.rand(m, n) + A_eq[:, 1] = 0 + b_eq = np.random.rand(m) + A_ub = [[1, 0, 1, 1]] + b_ub = 3 + bounds = [(-10, 10), (-10, 10), (-10, None), (None, None)] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=-9.7087836730413404) + + def test_zero_column_2(self): + if self.method in {'highs-ds', 'highs-ipm'}: + # See upstream issue https://github.com/ERGO-Code/HiGHS/issues/648 + pytest.xfail() + + np.random.seed(0) + m, n = 2, 4 + c = np.random.rand(n) + c[1] = -1 + A_eq = np.random.rand(m, n) + A_eq[:, 1] = 0 + b_eq = np.random.rand(m) + + A_ub = np.random.rand(m, n) + A_ub[:, 1] = 0 + b_ub = np.random.rand(m) + bounds = (None, None) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_unbounded(res) + # Unboundedness detected in presolve + if self.options.get('presolve', True) and "highs" not in self.method: + # HiGHS detects unboundedness or infeasibility in presolve + # It needs an iteration of simplex to be sure of unboundedness + # Other solvers report that the problem is unbounded if feasible + assert_equal(res.nit, 0) + + def test_zero_row_1(self): + c = [1, 2, 3] + A_eq = [[0, 0, 0], [1, 1, 1], [0, 0, 0]] + b_eq = [0, 3, 0] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=3) + + def test_zero_row_2(self): + A_ub = [[0, 0, 0], [1, 1, 1], [0, 0, 0]] + b_ub = [0, 3, 0] + c = [1, 2, 3] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=0) + + def test_zero_row_3(self): + m, n = 2, 4 + c = np.random.rand(n) + A_eq = np.random.rand(m, n) + A_eq[0, :] = 0 + b_eq = np.random.rand(m) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_infeasible(res) + + # Infeasibility detected in presolve + if self.options.get('presolve', True): + assert_equal(res.nit, 0) + + def test_zero_row_4(self): + m, n = 2, 4 + c = np.random.rand(n) + A_ub = np.random.rand(m, n) + A_ub[0, :] = 0 + b_ub = -np.random.rand(m) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_infeasible(res) + + # Infeasibility detected in presolve + if self.options.get('presolve', True): + assert_equal(res.nit, 0) + + def test_singleton_row_eq_1(self): + c = [1, 1, 1, 2] + A_eq = [[1, 0, 0, 0], [0, 2, 0, 0], [1, 0, 0, 0], [1, 1, 1, 1]] + b_eq = [1, 2, 2, 4] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_infeasible(res) + + # Infeasibility detected in presolve + if self.options.get('presolve', True): + assert_equal(res.nit, 0) + + def test_singleton_row_eq_2(self): + c = [1, 1, 1, 2] + A_eq = [[1, 0, 0, 0], [0, 2, 0, 0], [1, 0, 0, 0], [1, 1, 1, 1]] + b_eq = [1, 2, 1, 4] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=4) + + def test_singleton_row_ub_1(self): + c = [1, 1, 1, 2] + A_ub = [[1, 0, 0, 0], [0, 2, 0, 0], [-1, 
0, 0, 0], [1, 1, 1, 1]] + b_ub = [1, 2, -2, 4] + bounds = [(None, None), (0, None), (0, None), (0, None)] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_infeasible(res) + + # Infeasibility detected in presolve + if self.options.get('presolve', True): + assert_equal(res.nit, 0) + + def test_singleton_row_ub_2(self): + c = [1, 1, 1, 2] + A_ub = [[1, 0, 0, 0], [0, 2, 0, 0], [-1, 0, 0, 0], [1, 1, 1, 1]] + b_ub = [1, 2, -0.5, 4] + bounds = [(None, None), (0, None), (0, None), (0, None)] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=0.5) + + def test_infeasible(self): + # Test linprog response to an infeasible problem + c = [-1, -1] + A_ub = [[1, 0], + [0, 1], + [-1, -1]] + b_ub = [2, 2, -5] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_infeasible(res) + + def test_infeasible_inequality_bounds(self): + c = [1] + A_ub = [[2]] + b_ub = 4 + bounds = (5, 6) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_infeasible(res) + + # Infeasibility detected in presolve + if self.options.get('presolve', True): + assert_equal(res.nit, 0) + + def test_unbounded(self): + # Test linprog response to an unbounded problem + c = np.array([1, 1]) * -1 # maximize + A_ub = [[-1, 1], + [-1, -1]] + b_ub = [-1, -2] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_unbounded(res) + + def test_unbounded_below_no_presolve_corrected(self): + c = [1] + bounds = [(None, 1)] + + o = {key: self.options[key] for key in self.options} + o["presolve"] = False + + res = linprog(c=c, bounds=bounds, + method=self.method, + options=o) + if self.method == "revised simplex": + # Revised simplex has a special pathway for no constraints. + assert_equal(res.status, 5) + else: + _assert_unbounded(res) + + def test_unbounded_no_nontrivial_constraints_1(self): + """ + Test whether presolve pathway for detecting unboundedness after + constraint elimination is working. + """ + c = np.array([0, 0, 0, 1, -1, -1]) + A_ub = np.array([[1, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, -1]]) + b_ub = np.array([2, -2, 0]) + bounds = [(None, None), (None, None), (None, None), + (-1, 1), (-1, 1), (0, None)] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_unbounded(res) + if not self.method.lower().startswith("highs"): + assert_equal(res.x[-1], np.inf) + assert_equal(res.message[:36], + "The problem is (trivially) unbounded") + + def test_unbounded_no_nontrivial_constraints_2(self): + """ + Test whether presolve pathway for detecting unboundedness after + constraint elimination is working. 
+ """ + c = np.array([0, 0, 0, 1, -1, 1]) + A_ub = np.array([[1, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1]]) + b_ub = np.array([2, -2, 0]) + bounds = [(None, None), (None, None), (None, None), + (-1, 1), (-1, 1), (None, 0)] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_unbounded(res) + if not self.method.lower().startswith("highs"): + assert_equal(res.x[-1], -np.inf) + assert_equal(res.message[:36], + "The problem is (trivially) unbounded") + + def test_cyclic_recovery(self): + # Test linprogs recovery from cycling using the Klee-Minty problem + # Klee-Minty https://www.math.ubc.ca/~israel/m340/kleemin3.pdf + c = np.array([100, 10, 1]) * -1 # maximize + A_ub = [[1, 0, 0], + [20, 1, 0], + [200, 20, 1]] + b_ub = [1, 100, 10000] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_x=[0, 0, 10000], atol=5e-6, rtol=1e-7) + + def test_cyclic_bland(self): + # Test the effect of Bland's rule on a cycling problem + c = np.array([-10, 57, 9, 24.]) + A_ub = np.array([[0.5, -5.5, -2.5, 9], + [0.5, -1.5, -0.5, 1], + [1, 0, 0, 0]]) + b_ub = [0, 0, 1] + + # copy the existing options dictionary but change maxiter + maxiter = 100 + o = {key: val for key, val in self.options.items()} + o['maxiter'] = maxiter + + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=o) + + if self.method == 'simplex' and not self.options.get('bland'): + # simplex cycles without Bland's rule + _assert_iteration_limit_reached(res, o['maxiter']) + else: + # other methods, including simplex with Bland's rule, succeed + _assert_success(res, desired_x=[1, 0, 1, 0]) + # note that revised simplex skips this test because it may or may not + # cycle depending on the initial basis + + def test_remove_redundancy_infeasibility(self): + # mostly a test of redundancy removal, which is carefully tested in + # test__remove_redundancy.py + m, n = 10, 10 + c = np.random.rand(n) + A_eq = np.random.rand(m, n) + b_eq = np.random.rand(m) + A_eq[-1, :] = 2 * A_eq[-2, :] + b_eq[-1] *= -1 + with suppress_warnings() as sup: + sup.filter(OptimizeWarning, "A_eq does not appear...") + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_infeasible(res) + + ################# + # General Tests # + ################# + + def test_nontrivial_problem(self): + # Problem involves all constraint types, + # negative resource limits, and rounding issues. + c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem() + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=f_star, desired_x=x_star) + + def test_lpgen_problem(self): + # Test linprog with a rather large problem (400 variables, + # 40 constraints) generated by https://gist.github.com/denis-bz/8647461 + A_ub, b_ub, c = lpgen_2d(20, 20) + + with suppress_warnings() as sup: + sup.filter(OptimizeWarning, "Solving system with option 'sym_pos'") + sup.filter(RuntimeWarning, "invalid value encountered") + sup.filter(LinAlgWarning) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=-64.049494229) + + def test_network_flow(self): + # A network flow problem with supply and demand at nodes + # and with costs along directed edges. 
+ # https://www.princeton.edu/~rvdb/542/lectures/lec10.pdf + c = [2, 4, 9, 11, 4, 3, 8, 7, 0, 15, 16, 18] + n, p = -1, 1 + A_eq = [ + [n, n, p, 0, p, 0, 0, 0, 0, p, 0, 0], + [p, 0, 0, p, 0, p, 0, 0, 0, 0, 0, 0], + [0, 0, n, n, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, p, p, 0, 0, p, 0], + [0, 0, 0, 0, n, n, n, 0, p, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, n, n, 0, 0, p], + [0, 0, 0, 0, 0, 0, 0, 0, 0, n, n, n]] + b_eq = [0, 19, -16, 33, 0, 0, -36] + with suppress_warnings() as sup: + sup.filter(LinAlgWarning) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=755, atol=1e-6, rtol=1e-7) + + def test_network_flow_limited_capacity(self): + # A network flow problem with supply and demand at nodes + # and with costs and capacities along directed edges. + # http://blog.sommer-forst.de/2013/04/10/ + c = [2, 2, 1, 3, 1] + bounds = [ + [0, 4], + [0, 2], + [0, 2], + [0, 3], + [0, 5]] + n, p = -1, 1 + A_eq = [ + [n, n, 0, 0, 0], + [p, 0, n, n, 0], + [0, p, p, 0, n], + [0, 0, 0, p, p]] + b_eq = [-4, 0, 0, 4] + + with suppress_warnings() as sup: + # this is an UmfpackWarning but I had trouble importing it + if has_umfpack: + sup.filter(UmfpackWarning) + sup.filter(RuntimeWarning, "scipy.linalg.solve\nIll...") + sup.filter(OptimizeWarning, "A_eq does not appear...") + sup.filter(OptimizeWarning, "Solving system with option...") + sup.filter(LinAlgWarning) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=14) + + def test_simplex_algorithm_wikipedia_example(self): + # https://en.wikipedia.org/wiki/Simplex_algorithm#Example + c = [-2, -3, -4] + A_ub = [ + [3, 2, 1], + [2, 5, 3]] + b_ub = [10, 15] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=-20) + + def test_enzo_example(self): + # https://github.com/scipy/scipy/issues/1779 lp2.py + # + # Translated from Octave code at: + # http://www.ecs.shimane-u.ac.jp/~kyoshida/lpeng.htm + # and placed under MIT licence by Enzo Michelangeli + # with permission explicitly granted by the original author, + # Prof. 
Kazunobu Yoshida + c = [4, 8, 3, 0, 0, 0] + A_eq = [ + [2, 5, 3, -1, 0, 0], + [3, 2.5, 8, 0, -1, 0], + [8, 10, 4, 0, 0, -1]] + b_eq = [185, 155, 600] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=317.5, + desired_x=[66.25, 0, 17.5, 0, 183.75, 0], + atol=6e-6, rtol=1e-7) + + def test_enzo_example_b(self): + # rescued from https://github.com/scipy/scipy/pull/218 + c = [2.8, 6.3, 10.8, -2.8, -6.3, -10.8] + A_eq = [[-1, -1, -1, 0, 0, 0], + [0, 0, 0, 1, 1, 1], + [1, 0, 0, 1, 0, 0], + [0, 1, 0, 0, 1, 0], + [0, 0, 1, 0, 0, 1]] + b_eq = [-0.5, 0.4, 0.3, 0.3, 0.3] + + with suppress_warnings() as sup: + sup.filter(OptimizeWarning, "A_eq does not appear...") + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=-1.77, + desired_x=[0.3, 0.2, 0.0, 0.0, 0.1, 0.3]) + + def test_enzo_example_c_with_degeneracy(self): + # rescued from https://github.com/scipy/scipy/pull/218 + m = 20 + c = -np.ones(m) + tmp = 2 * np.pi * np.arange(1, m + 1) / (m + 1) + A_eq = np.vstack((np.cos(tmp) - 1, np.sin(tmp))) + b_eq = [0, 0] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=0, desired_x=np.zeros(m)) + + def test_enzo_example_c_with_unboundedness(self): + # rescued from https://github.com/scipy/scipy/pull/218 + m = 50 + c = -np.ones(m) + tmp = 2 * np.pi * np.arange(m) / (m + 1) + # This test relies on `cos(0) -1 == sin(0)`, so ensure that's true + # (SIMD code or -ffast-math may cause spurious failures otherwise) + row0 = np.cos(tmp) - 1 + row0[0] = 0.0 + row1 = np.sin(tmp) + row1[0] = 0.0 + A_eq = np.vstack((row0, row1)) + b_eq = [0, 0] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_unbounded(res) + + def test_enzo_example_c_with_infeasibility(self): + # rescued from https://github.com/scipy/scipy/pull/218 + m = 50 + c = -np.ones(m) + tmp = 2 * np.pi * np.arange(m) / (m + 1) + A_eq = np.vstack((np.cos(tmp) - 1, np.sin(tmp))) + b_eq = [1, 1] + + o = {key: self.options[key] for key in self.options} + o["presolve"] = False + + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=o) + _assert_infeasible(res) + + def test_basic_artificial_vars(self): + # Problem is chosen to test two phase simplex methods when at the end + # of phase 1 some artificial variables remain in the basis. + # Also, for `method='simplex'`, the row in the tableau corresponding + # with the artificial variables is not all zero. 
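The two-phase construction referred to above can be sketched in a few lines; a simplified illustration of phase 1 (minimize the sum of artificial variables s subject to A x + s = b, x, s >= 0), not the solver's actual implementation, and it assumes b >= 0, which holds for the A_eq/b_eq used below:

    import numpy as np
    from scipy.optimize import linprog

    A = np.array([[1.0, 0, -1, 1, -1, 1],
                  [0, -1.0, -1, 1, -1, 1]])  # the A_eq of this test
    b = np.array([0.0, 0.0])
    m, n = A.shape
    c1 = np.concatenate([np.zeros(n), np.ones(m)])  # cost only on artificials
    A1 = np.hstack([A, np.eye(m)])
    res = linprog(c1, A_eq=A1, b_eq=b, bounds=(0, None))
    assert abs(res.fun) < 1e-9  # phase-1 optimum 0 => a feasible basis exists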
+ c = np.array([-0.1, -0.07, 0.004, 0.004, 0.004, 0.004]) + A_ub = np.array([[1.0, 0, 0, 0, 0, 0], [-1.0, 0, 0, 0, 0, 0], + [0, -1.0, 0, 0, 0, 0], [0, 1.0, 0, 0, 0, 0], + [1.0, 1.0, 0, 0, 0, 0]]) + b_ub = np.array([3.0, 3.0, 3.0, 3.0, 20.0]) + A_eq = np.array([[1.0, 0, -1, 1, -1, 1], [0, -1.0, -1, 1, -1, 1]]) + b_eq = np.array([0, 0]) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=0, desired_x=np.zeros_like(c), + atol=2e-6) + + def test_optimize_result(self): + # check all fields in OptimizeResult + c, A_ub, b_ub, A_eq, b_eq, bounds = very_random_gen(0) + res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, + bounds=bounds, method=self.method, options=self.options) + assert_(res.success) + assert_(res.nit) + assert_(not res.status) + if 'highs' not in self.method: + # HiGHS status/message tested separately + assert_(res.message == "Optimization terminated successfully.") + assert_allclose(c @ res.x, res.fun) + assert_allclose(b_eq - A_eq @ res.x, res.con, atol=1e-11) + assert_allclose(b_ub - A_ub @ res.x, res.slack, atol=1e-11) + for key in ['eqlin', 'ineqlin', 'lower', 'upper']: + if key in res.keys(): + assert isinstance(res[key]['marginals'], np.ndarray) + assert isinstance(res[key]['residual'], np.ndarray) + + ################# + # Bug Fix Tests # + ################# + + def test_bug_5400(self): + # https://github.com/scipy/scipy/issues/5400 + bounds = [ + (0, None), + (0, 100), (0, 100), (0, 100), (0, 100), (0, 100), (0, 100), + (0, 900), (0, 900), (0, 900), (0, 900), (0, 900), (0, 900), + (0, None), (0, None), (0, None), (0, None), (0, None), (0, None)] + + f = 1 / 9 + g = -1e4 + h = -3.1 + A_ub = np.array([ + [1, -2.99, 0, 0, -3, 0, 0, 0, -1, -1, 0, -1, -1, 1, 1, 0, 0, 0, 0], + [1, 0, -2.9, h, 0, -3, 0, -1, 0, 0, -1, 0, -1, 0, 0, 1, 1, 0, 0], + [1, 0, 0, h, 0, 0, -3, -1, -1, 0, -1, -1, 0, 0, 0, 0, 0, 1, 1], + [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1], + [0, 1.99, -1, -1, 0, 0, 0, -1, f, f, 0, 0, 0, g, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 2, -1, -1, 0, 0, 0, -1, f, f, 0, g, 0, 0, 0, 0], + [0, -1, 1.9, 2.1, 0, 0, 0, f, -1, -1, 0, 0, 0, 0, 0, g, 0, 0, 0], + [0, 0, 0, 0, -1, 2, -1, 0, 0, 0, f, -1, f, 0, 0, 0, g, 0, 0], + [0, -1, -1, 2.1, 0, 0, 0, f, f, -1, 0, 0, 0, 0, 0, 0, 0, g, 0], + [0, 0, 0, 0, -1, -1, 2, 0, 0, 0, f, f, -1, 0, 0, 0, 0, 0, g]]) + + b_ub = np.array([ + 0.0, 0, 0, 100, 100, 100, 100, 100, 100, 900, 900, 900, 900, 900, + 900, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0]) + + c = np.array([-1.0, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]) + with suppress_warnings() as sup: + sup.filter(OptimizeWarning, + "Solving system with option 'sym_pos'") + sup.filter(RuntimeWarning, "invalid value encountered") + sup.filter(LinAlgWarning) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=-106.63507541835018) + + def test_bug_6139(self): + # linprog(method='simplex') fails to find a basic feasible solution + # if phase 1 pseudo-objective function is outside the provided tol. + # https://github.com/scipy/scipy/issues/6139 + + # Note: This is not strictly a bug as the default tolerance determines + # if a result is "close enough" to zero and should not be expected + # to work for all cases. + + c = np.array([1, 1, 1]) + A_eq = np.array([[1., 0., 0.], [-1000., 0., - 1000.]]) + b_eq = np.array([5.00000000e+00, -1.00000000e+04]) + A_ub = -np.array([[0., 1000000., 1010000.]]) + b_ub = -np.array([10000000.]) + bounds = (None, None) + + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + + _assert_success(res, desired_fun=14.95, + desired_x=np.array([5, 4.95, 5])) + + def test_bug_6690(self): + # linprog simplex used to violate bound constraint despite reporting + # success. + # https://github.com/scipy/scipy/issues/6690 + + A_eq = np.array([[0, 0, 0, 0.93, 0, 0.65, 0, 0, 0.83, 0]]) + b_eq = np.array([0.9626]) + A_ub = np.array([ + [0, 0, 0, 1.18, 0, 0, 0, -0.2, 0, -0.22], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0.43, 0, 0, 0, 0, 0, 0], + [0, -1.22, -0.25, 0, 0, 0, -2.06, 0, 0, 1.37], + [0, 0, 0, 0, 0, 0, 0, -0.25, 0, 0] + ]) + b_ub = np.array([0.615, 0, 0.172, -0.869, -0.022]) + bounds = np.array([ + [-0.84, -0.97, 0.34, 0.4, -0.33, -0.74, 0.47, 0.09, -1.45, -0.73], + [0.37, 0.02, 2.86, 0.86, 1.18, 0.5, 1.76, 0.17, 0.32, -0.15] + ]).T + c = np.array([ + -1.64, 0.7, 1.8, -1.06, -1.16, 0.26, 2.13, 1.53, 0.66, 0.28 + ]) + + with suppress_warnings() as sup: + if has_umfpack: + sup.filter(UmfpackWarning) + sup.filter(OptimizeWarning, + "Solving system with option 'cholesky'") + sup.filter(OptimizeWarning, "Solving system with option 'sym_pos'") + sup.filter(RuntimeWarning, "invalid value encountered") + sup.filter(LinAlgWarning) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + + desired_fun = -1.19099999999 + desired_x = np.array([0.3700, -0.9700, 0.3400, 0.4000, 1.1800, + 0.5000, 0.4700, 0.0900, 0.3200, -0.7300]) + _assert_success(res, desired_fun=desired_fun, desired_x=desired_x) + + # Add small tol value to ensure arrays are less than or equal. + atol = 1e-6 + assert_array_less(bounds[:, 0] - atol, res.x) + assert_array_less(res.x, bounds[:, 1] + atol) + + def test_bug_7044(self): + # linprog simplex failed to "identify correct constraints" (?) + # leading to a non-optimal solution if A is rank-deficient. 
+ # https://github.com/scipy/scipy/issues/7044 + + A_eq, b_eq, c, _, _ = magic_square(3) + with suppress_warnings() as sup: + sup.filter(OptimizeWarning, "A_eq does not appear...") + sup.filter(RuntimeWarning, "invalid value encountered") + sup.filter(LinAlgWarning) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + + desired_fun = 1.730550597 + _assert_success(res, desired_fun=desired_fun) + assert_allclose(A_eq.dot(res.x), b_eq) + assert_array_less(np.zeros(res.x.size) - 1e-5, res.x) + + def test_bug_7237(self): + # https://github.com/scipy/scipy/issues/7237 + # linprog simplex "explodes" when the pivot value is very + # close to zero. + + c = np.array([-1, 0, 0, 0, 0, 0, 0, 0, 0]) + A_ub = np.array([ + [1., -724., 911., -551., -555., -896., 478., -80., -293.], + [1., 566., 42., 937., 233., 883., 392., -909., 57.], + [1., -208., -894., 539., 321., 532., -924., 942., 55.], + [1., 857., -859., 83., 462., -265., -971., 826., 482.], + [1., 314., -424., 245., -424., 194., -443., -104., -429.], + [1., 540., 679., 361., 149., -827., 876., 633., 302.], + [0., -1., -0., -0., -0., -0., -0., -0., -0.], + [0., -0., -1., -0., -0., -0., -0., -0., -0.], + [0., -0., -0., -1., -0., -0., -0., -0., -0.], + [0., -0., -0., -0., -1., -0., -0., -0., -0.], + [0., -0., -0., -0., -0., -1., -0., -0., -0.], + [0., -0., -0., -0., -0., -0., -1., -0., -0.], + [0., -0., -0., -0., -0., -0., -0., -1., -0.], + [0., -0., -0., -0., -0., -0., -0., -0., -1.], + [0., 1., 0., 0., 0., 0., 0., 0., 0.], + [0., 0., 1., 0., 0., 0., 0., 0., 0.], + [0., 0., 0., 1., 0., 0., 0., 0., 0.], + [0., 0., 0., 0., 1., 0., 0., 0., 0.], + [0., 0., 0., 0., 0., 1., 0., 0., 0.], + [0., 0., 0., 0., 0., 0., 1., 0., 0.], + [0., 0., 0., 0., 0., 0., 0., 1., 0.], + [0., 0., 0., 0., 0., 0., 0., 0., 1.] + ]) + b_ub = np.array([ + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1.]) + A_eq = np.array([[0., 1., 1., 1., 1., 1., 1., 1., 1.]]) + b_eq = np.array([[1.]]) + bounds = [(None, None)] * 9 + + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=108.568535, atol=1e-6) + + def test_bug_8174(self): + # https://github.com/scipy/scipy/issues/8174 + # The simplex method sometimes "explodes" if the pivot value is very + # close to zero. + A_ub = np.array([ + [22714, 1008, 13380, -2713.5, -1116], + [-4986, -1092, -31220, 17386.5, 684], + [-4986, 0, 0, -2713.5, 0], + [22714, 0, 0, 17386.5, 0]]) + b_ub = np.zeros(A_ub.shape[0]) + c = -np.ones(A_ub.shape[1]) + bounds = [(0, 1)] * A_ub.shape[1] + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "invalid value encountered") + sup.filter(LinAlgWarning) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + + if self.options.get('tol', 1e-9) < 1e-10 and self.method == 'simplex': + _assert_unable_to_find_basic_feasible_sol(res) + else: + _assert_success(res, desired_fun=-2.0080717488789235, atol=1e-6) + + def test_bug_8174_2(self): + # Test supplementary example from issue 8174. 
+ # https://github.com/scipy/scipy/issues/8174 + # https://stackoverflow.com/questions/47717012/linprog-in-scipy-optimize-checking-solution + c = np.array([1, 0, 0, 0, 0, 0, 0]) + A_ub = -np.identity(7) + b_ub = np.array([[-2], [-2], [-2], [-2], [-2], [-2], [-2]]) + A_eq = np.array([ + [1, 1, 1, 1, 1, 1, 0], + [0.3, 1.3, 0.9, 0, 0, 0, -1], + [0.3, 0, 0, 0, 0, 0, -2/3], + [0, 0.65, 0, 0, 0, 0, -1/15], + [0, 0, 0.3, 0, 0, 0, -1/15] + ]) + b_eq = np.array([[100], [0], [0], [0], [0]]) + + with suppress_warnings() as sup: + if has_umfpack: + sup.filter(UmfpackWarning) + sup.filter(OptimizeWarning, "A_eq does not appear...") + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=43.3333333331385) + + def test_bug_8561(self): + # Test that pivot row is chosen correctly when using Bland's rule + # This was originally written for the simplex method with + # Bland's rule only, but it doesn't hurt to test all methods/options + # https://github.com/scipy/scipy/issues/8561 + c = np.array([7, 0, -4, 1.5, 1.5]) + A_ub = np.array([ + [4, 5.5, 1.5, 1.0, -3.5], + [1, -2.5, -2, 2.5, 0.5], + [3, -0.5, 4, -12.5, -7], + [-1, 4.5, 2, -3.5, -2], + [5.5, 2, -4.5, -1, 9.5]]) + b_ub = np.array([0, 0, 0, 0, 1]) + res = linprog(c, A_ub=A_ub, b_ub=b_ub, options=self.options, + method=self.method) + _assert_success(res, desired_x=[0, 0, 19, 16/3, 29/3]) + + def test_bug_8662(self): + # linprog simplex used to report incorrect optimal results + # https://github.com/scipy/scipy/issues/8662 + c = [-10, 10, 6, 3] + A_ub = [[8, -8, -4, 6], + [-8, 8, 4, -6], + [-4, 4, 8, -4], + [3, -3, -3, -10]] + b_ub = [9, -9, -9, -4] + bounds = [(0, None), (0, None), (0, None), (0, None)] + desired_fun = 36.0000000000 + + with suppress_warnings() as sup: + if has_umfpack: + sup.filter(UmfpackWarning) + sup.filter(RuntimeWarning, "invalid value encountered") + sup.filter(LinAlgWarning) + res1 = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + + # Set boundary condition as a constraint + A_ub.append([0, 0, -1, 0]) + b_ub.append(0) + bounds[2] = (None, None) + + with suppress_warnings() as sup: + if has_umfpack: + sup.filter(UmfpackWarning) + sup.filter(RuntimeWarning, "invalid value encountered") + sup.filter(LinAlgWarning) + res2 = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + rtol = 1e-5 + _assert_success(res1, desired_fun=desired_fun, rtol=rtol) + _assert_success(res2, desired_fun=desired_fun, rtol=rtol) + + def test_bug_8663(self): + # exposed a bug in presolve + # https://github.com/scipy/scipy/issues/8663 + c = [1, 5] + A_eq = [[0, -7]] + b_eq = [-6] + bounds = [(0, None), (None, None)] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_x=[0, 6./7], desired_fun=5*6./7) + + def test_bug_8664(self): + # interior-point has trouble with this when presolve is off + # tested for interior-point with presolve off in TestLinprogIPSpecific + # https://github.com/scipy/scipy/issues/8664 + c = [4] + A_ub = [[2], [5]] + b_ub = [4, 4] + A_eq = [[0], [-8], [9]] + b_eq = [3, 2, 10] + with suppress_warnings() as sup: + sup.filter(RuntimeWarning) + sup.filter(OptimizeWarning, "Solving system with option...") + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_infeasible(res) + + def test_bug_8973(self): + """ + Test whether bug described at: + 
https://github.com/scipy/scipy/issues/8973 + was fixed. + """ + c = np.array([0, 0, 0, 1, -1]) + A_ub = np.array([[1, 0, 0, 0, 0], [0, 1, 0, 0, 0]]) + b_ub = np.array([2, -2]) + bounds = [(None, None), (None, None), (None, None), (-1, 1), (-1, 1)] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + # solution vector x is not unique + _assert_success(res, desired_fun=-2) + # HiGHS IPM had an issue where the following wasn't true! + assert_equal(c @ res.x, res.fun) + + def test_bug_8973_2(self): + """ + Additional test for: + https://github.com/scipy/scipy/issues/8973 + suggested in + https://github.com/scipy/scipy/pull/8985 + review by @antonior92 + """ + c = np.zeros(1) + A_ub = np.array([[1]]) + b_ub = np.array([-2]) + bounds = (None, None) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_x=[-2], desired_fun=0) + + def test_bug_10124(self): + """ + Test for linprog docstring problem + 'disp'=True caused revised simplex failure + """ + c = np.zeros(1) + A_ub = np.array([[1]]) + b_ub = np.array([-2]) + bounds = (None, None) + c = [-1, 4] + A_ub = [[-3, 1], [1, 2]] + b_ub = [6, 4] + bounds = [(None, None), (-3, None)] + o = {"disp": True} + o.update(self.options) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=o) + _assert_success(res, desired_x=[10, -3], desired_fun=-22) + + def test_bug_10349(self): + """ + Test for redundancy removal tolerance issue + https://github.com/scipy/scipy/issues/10349 + """ + A_eq = np.array([[1, 1, 0, 0, 0, 0], + [0, 0, 1, 1, 0, 0], + [0, 0, 0, 0, 1, 1], + [1, 0, 1, 0, 0, 0], + [0, 0, 0, 1, 1, 0], + [0, 1, 0, 0, 0, 1]]) + b_eq = np.array([221, 210, 10, 141, 198, 102]) + c = np.concatenate((0, 1, np.zeros(4)), axis=None) + with suppress_warnings() as sup: + sup.filter(OptimizeWarning, "A_eq does not appear...") + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_x=[129, 92, 12, 198, 0, 10], desired_fun=92) + + @pytest.mark.skipif(sys.platform == 'darwin', + reason=("Failing on some local macOS builds, " + "see gh-13846")) + def test_bug_10466(self): + """ + Test that autoscale fixes poorly-scaled problem + """ + c = [-8., -0., -8., -0., -8., -0., -0., -0., -0., -0., -0., -0., -0.] 
+ A_eq = [[1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], + [0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.], + [0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0.], + [1., 0., 1., 0., 1., 0., -1., 0., 0., 0., 0., 0., 0.], + [1., 0., 1., 0., 1., 0., 0., 1., 0., 0., 0., 0., 0.], + [1., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0.], + [1., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0.], + [1., 0., 1., 0., 1., 0., 0., 0., 0., 0., 1., 0., 0.], + [0., 0., 1., 0., 1., 0., 0., 0., 0., 0., 0., 1., 0.], + [0., 0., 1., 0., 1., 0., 0., 0., 0., 0., 0., 0., 1.]] + + b_eq = [3.14572800e+08, 4.19430400e+08, 5.24288000e+08, + 1.00663296e+09, 1.07374182e+09, 1.07374182e+09, + 1.07374182e+09, 1.07374182e+09, 1.07374182e+09, + 1.07374182e+09] + + o = {} + # HiGHS methods don't use autoscale option + if not self.method.startswith("highs"): + o = {"autoscale": True} + o.update(self.options) + + with suppress_warnings() as sup: + sup.filter(OptimizeWarning, "Solving system with option...") + if has_umfpack: + sup.filter(UmfpackWarning) + sup.filter(RuntimeWarning, "scipy.linalg.solve\nIll...") + sup.filter(RuntimeWarning, "divide by zero encountered...") + sup.filter(RuntimeWarning, "overflow encountered...") + sup.filter(RuntimeWarning, "invalid value encountered...") + sup.filter(LinAlgWarning, "Ill-conditioned matrix...") + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=o) + assert_allclose(res.fun, -8589934560) + +######################### +# Method-specific Tests # +######################### + + +@pytest.mark.filterwarnings("ignore::DeprecationWarning") +class LinprogSimplexTests(LinprogCommonTests): + method = "simplex" + + +@pytest.mark.filterwarnings("ignore::DeprecationWarning") +class LinprogIPTests(LinprogCommonTests): + method = "interior-point" + + def test_bug_10466(self): + pytest.skip("Test is failing, but solver is deprecated.") + + +@pytest.mark.filterwarnings("ignore::DeprecationWarning") +class LinprogRSTests(LinprogCommonTests): + method = "revised simplex" + + # Revised simplex does not reliably solve these problems. + # Failure is intermittent due to the random choice of elements to complete + # the basis after phase 1 terminates. In any case, linprog exits + # gracefully, reporting numerical difficulties. I do not think this should + # prevent revised simplex from being merged, as it solves the problems + # most of the time and solves a broader range of problems than the existing + # simplex implementation. + # I believe that the root cause is the same for all three and that this + # same issue prevents revised simplex from solving many other problems + # reliably. Somehow the pivoting rule allows the algorithm to pivot into + # a singular basis. I haven't been able to find a reference that + # acknowledges this possibility, suggesting that there is a bug. On the + # other hand, the pivoting rule is quite simple, and I can't find a + # mistake, which suggests that this is an inherent possibility with the + # pivoting rule. Hopefully, a better pivoting rule will fix the issue.
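One way to probe the singular-basis failure mode described above is to check the conditioning of the candidate basis matrix B = A[:, basis]; the helper below is hypothetical, not part of SciPy or this test suite:

    import numpy as np

    def basis_is_singular(A, basis, tol=1e12):
        """Return True if the basis columns of A are numerically singular."""
        return np.linalg.cond(A[:, basis]) > tol

    # e.g. basis_is_singular(np.eye(3), [0, 1, 2]) -> False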
+ + def test_bug_5400(self): + pytest.skip("Intermittent failure acceptable.") + + def test_bug_8662(self): + pytest.skip("Intermittent failure acceptable.") + + def test_network_flow(self): + pytest.skip("Intermittent failure acceptable.") + + +class LinprogHiGHSTests(LinprogCommonTests): + def test_callback(self): + # this is the problem from test_callback + def cb(res): + return None + c = np.array([-3, -2]) + A_ub = [[2, 1], [1, 1], [1, 0]] + b_ub = [10, 8, 4] + assert_raises(NotImplementedError, linprog, c, A_ub=A_ub, b_ub=b_ub, + callback=cb, method=self.method) + res = linprog(c, A_ub=A_ub, b_ub=b_ub, method=self.method) + _assert_success(res, desired_fun=-18.0, desired_x=[2, 6]) + + @pytest.mark.parametrize("options", + [{"maxiter": -1}, + {"disp": -1}, + {"presolve": -1}, + {"time_limit": -1}, + {"dual_feasibility_tolerance": -1}, + {"primal_feasibility_tolerance": -1}, + {"ipm_optimality_tolerance": -1}, + {"simplex_dual_edge_weight_strategy": "ekki"}, + ]) + def test_invalid_option_values(self, options): + def f(options): + linprog(1, method=self.method, options=options) + options.update(self.options) + assert_warns(OptimizeWarning, f, options=options) + + def test_crossover(self): + A_eq, b_eq, c, _, _ = magic_square(4) + bounds = (0, 1) + res = linprog(c, A_eq=A_eq, b_eq=b_eq, + bounds=bounds, method=self.method, options=self.options) + # there should be nonzero crossover iterations for IPM (only) + assert_equal(res.crossover_nit == 0, self.method != "highs-ipm") + + def test_marginals(self): + # Ensure lagrange multipliers are correct by comparing the derivative + # w.r.t. b_ub/b_eq/ub/lb to the reported duals. + c, A_ub, b_ub, A_eq, b_eq, bounds = very_random_gen(seed=0) + res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, + bounds=bounds, method=self.method, options=self.options) + lb, ub = bounds.T + + # sensitivity w.r.t. b_ub + def f_bub(x): + return linprog(c, A_ub, x, A_eq, b_eq, bounds, + method=self.method).fun + + dfdbub = approx_derivative(f_bub, b_ub, method='3-point', f0=res.fun) + assert_allclose(res.ineqlin.marginals, dfdbub) + + # sensitivity w.r.t. b_eq + def f_beq(x): + return linprog(c, A_ub, b_ub, A_eq, x, bounds, + method=self.method).fun + + dfdbeq = approx_derivative(f_beq, b_eq, method='3-point', f0=res.fun) + assert_allclose(res.eqlin.marginals, dfdbeq) + + # sensitivity w.r.t. lb + def f_lb(x): + bounds = np.array([x, ub]).T + return linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method).fun + + with np.errstate(invalid='ignore'): + # approx_derivative has trouble where lb is infinite + dfdlb = approx_derivative(f_lb, lb, method='3-point', f0=res.fun) + dfdlb[~np.isfinite(lb)] = 0 + + assert_allclose(res.lower.marginals, dfdlb) + + # sensitivity w.r.t. 
ub + def f_ub(x): + bounds = np.array([lb, x]).T + return linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method).fun + + with np.errstate(invalid='ignore'): + dfdub = approx_derivative(f_ub, ub, method='3-point', f0=res.fun) + dfdub[~np.isfinite(ub)] = 0 + + assert_allclose(res.upper.marginals, dfdub) + + def test_dual_feasibility(self): + # Ensure solution is dual feasible using marginals + c, A_ub, b_ub, A_eq, b_eq, bounds = very_random_gen(seed=42) + res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, + bounds=bounds, method=self.method, options=self.options) + + # KKT dual feasibility equation from Theorem 1 of + # http://www.personal.psu.edu/cxg286/LPKKT.pdf + resid = (-c + A_ub.T @ res.ineqlin.marginals + + A_eq.T @ res.eqlin.marginals + + res.upper.marginals + + res.lower.marginals) + assert_allclose(resid, 0, atol=1e-12) + + def test_complementary_slackness(self): + # Ensure that the complementary slackness condition is satisfied. + c, A_ub, b_ub, A_eq, b_eq, bounds = very_random_gen(seed=42) + res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, + bounds=bounds, method=self.method, options=self.options) + + # KKT complementary slackness equation from Theorem 1 of + # http://www.personal.psu.edu/cxg286/LPKKT.pdf modified for + # non-zero RHS + assert np.allclose(res.ineqlin.marginals @ (b_ub - A_ub @ res.x), 0) + + +################################ +# Simplex Option-Specific Tests# +################################ + + +class TestLinprogSimplexDefault(LinprogSimplexTests): + + def setup_method(self): + self.options = {} + + def test_bug_5400(self): + pytest.skip("Simplex fails on this problem.") + + def test_bug_7237_low_tol(self): + # Fails if the tolerance is too strict. Simplex fails on this problem + # outright, so the test is skipped. + pytest.skip("Simplex fails on this problem.") + + def test_bug_8174_low_tol(self): + # Fails if the tolerance is too strict. Here, we test that + # even if the solution is wrong, the appropriate warning is issued. + self.options.update({'tol': 1e-12}) + with pytest.warns(OptimizeWarning): + super().test_bug_8174() + + +class TestLinprogSimplexBland(LinprogSimplexTests): + + def setup_method(self): + self.options = {'bland': True} + + def test_bug_5400(self): + pytest.skip("Simplex fails on this problem.") + + def test_bug_8174_low_tol(self): + # Fails if the tolerance is too strict. Here, we test that + # even if the solution is wrong, the appropriate error is raised. + self.options.update({'tol': 1e-12}) + with pytest.raises(AssertionError): + with pytest.warns(OptimizeWarning): + super().test_bug_8174() + + +class TestLinprogSimplexNoPresolve(LinprogSimplexTests): + + def setup_method(self): + self.options = {'presolve': False} + + is_32_bit = np.intp(0).itemsize < 8 + is_linux = sys.platform.startswith('linux') + + @pytest.mark.xfail( + condition=is_32_bit and is_linux, + reason='Fails with warning on 32-bit linux') + def test_bug_5400(self): + super().test_bug_5400() + + def test_bug_6139_low_tol(self): + # linprog(method='simplex') fails to find a basic feasible solution + # if the phase 1 pseudo-objective value is outside the provided tol. + # https://github.com/scipy/scipy/issues/6139 + # Without ``presolve`` eliminating such rows the result is incorrect.
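+        # (status 4 means the solver encountered numerical difficulties)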
+ self.options.update({'tol': 1e-12}) + with pytest.raises(AssertionError, match='linprog status 4'): + return super().test_bug_6139() + + def test_bug_7237_low_tol(self): + pytest.skip("Simplex fails on this problem.") + + def test_bug_8174_low_tol(self): + # Fails if the tolerance is too strict. Here, we test that + # even if the solution is wrong, the appropriate warning is issued. + self.options.update({'tol': 1e-12}) + with pytest.warns(OptimizeWarning): + super().test_bug_8174() + + def test_unbounded_no_nontrivial_constraints_1(self): + pytest.skip("Tests behavior specific to presolve") + + def test_unbounded_no_nontrivial_constraints_2(self): + pytest.skip("Tests behavior specific to presolve") + + +####################################### +# Interior-Point Option-Specific Tests# +####################################### + + +class TestLinprogIPDense(LinprogIPTests): + options = {"sparse": False} + + # see https://github.com/scipy/scipy/issues/20216 for skip reason + @pytest.mark.skipif( + sys.platform == 'darwin', + reason="Fails on some macOS builds for reason not relevant to test" + ) + def test_bug_6139(self): + super().test_bug_6139() + +if has_cholmod: + class TestLinprogIPSparseCholmod(LinprogIPTests): + options = {"sparse": True, "cholesky": True} + + +if has_umfpack: + class TestLinprogIPSparseUmfpack(LinprogIPTests): + options = {"sparse": True, "cholesky": False} + + def test_network_flow_limited_capacity(self): + pytest.skip("Failing due to numerical issues on some platforms.") + + +class TestLinprogIPSparse(LinprogIPTests): + options = {"sparse": True, "cholesky": False, "sym_pos": False} + + @pytest.mark.xfail_on_32bit("This test is sensitive to machine epsilon level " + "perturbations in linear system solution in " + "_linprog_ip._sym_solve.") + def test_bug_6139(self): + super().test_bug_6139() + + @pytest.mark.xfail(reason='Fails with ATLAS, see gh-7877') + def test_bug_6690(self): + # Test defined in base class, but can't mark as xfail there + super().test_bug_6690() + + def test_magic_square_sparse_no_presolve(self): + # test linprog with a problem with a rank-deficient A_eq matrix + A_eq, b_eq, c, _, _ = magic_square(3) + bounds = (0, 1) + + with suppress_warnings() as sup: + if has_umfpack: + sup.filter(UmfpackWarning) + sup.filter(MatrixRankWarning, "Matrix is exactly singular") + sup.filter(OptimizeWarning, "Solving system with option...") + + o = {key: self.options[key] for key in self.options} + o["presolve"] = False + + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=o) + _assert_success(res, desired_fun=1.730550597) + + def test_sparse_solve_options(self): + # checking that problem is solved with all column permutation options + A_eq, b_eq, c, _, _ = magic_square(3) + with suppress_warnings() as sup: + sup.filter(OptimizeWarning, "A_eq does not appear...") + sup.filter(OptimizeWarning, "Invalid permc_spec option") + o = {key: self.options[key] for key in self.options} + permc_specs = ('NATURAL', 'MMD_ATA', 'MMD_AT_PLUS_A', + 'COLAMD', 'ekki-ekki-ekki') + # 'ekki-ekki-ekki' raises warning about invalid permc_spec option + # and uses default + for permc_spec in permc_specs: + o["permc_spec"] = permc_spec + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=o) + _assert_success(res, desired_fun=1.730550597) + + +class TestLinprogIPSparsePresolve(LinprogIPTests): + options = {"sparse": True, "_sparse_presolve": True} + + @pytest.mark.xfail_on_32bit("This test is sensitive to machine epsilon level " + 
"perturbations in linear system solution in " + "_linprog_ip._sym_solve.") + def test_bug_6139(self): + super().test_bug_6139() + + def test_enzo_example_c_with_infeasibility(self): + pytest.skip('_sparse_presolve=True incompatible with presolve=False') + + @pytest.mark.xfail(reason='Fails with ATLAS, see gh-7877') + def test_bug_6690(self): + # Test defined in base class, but can't mark as xfail there + super().test_bug_6690() + + +@pytest.mark.filterwarnings("ignore::DeprecationWarning") +class TestLinprogIPSpecific: + method = "interior-point" + # the following tests don't need to be performed separately for + # sparse presolve, sparse after presolve, and dense + + def test_solver_select(self): + # check that default solver is selected as expected + if has_cholmod: + options = {'sparse': True, 'cholesky': True} + elif has_umfpack: + options = {'sparse': True, 'cholesky': False} + else: + options = {'sparse': True, 'cholesky': False, 'sym_pos': False} + A, b, c = lpgen_2d(20, 20) + res1 = linprog(c, A_ub=A, b_ub=b, method=self.method, options=options) + res2 = linprog(c, A_ub=A, b_ub=b, method=self.method) # default solver + assert_allclose(res1.fun, res2.fun, + err_msg="linprog default solver unexpected result", + rtol=2e-15, atol=1e-15) + + def test_unbounded_below_no_presolve_original(self): + # formerly caused segfault in TravisCI w/ "cholesky":True + c = [-1] + bounds = [(None, 1)] + res = linprog(c=c, bounds=bounds, + method=self.method, + options={"presolve": False, "cholesky": True}) + _assert_success(res, desired_fun=-1) + + def test_cholesky(self): + # use cholesky factorization and triangular solves + A, b, c = lpgen_2d(20, 20) + res = linprog(c, A_ub=A, b_ub=b, method=self.method, + options={"cholesky": True}) # only for dense + _assert_success(res, desired_fun=-64.049494229) + + def test_alternate_initial_point(self): + # use "improved" initial point + A, b, c = lpgen_2d(20, 20) + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "scipy.linalg.solve\nIll...") + sup.filter(OptimizeWarning, "Solving system with option...") + sup.filter(LinAlgWarning, "Ill-conditioned matrix...") + res = linprog(c, A_ub=A, b_ub=b, method=self.method, + options={"ip": True, "disp": True}) + # ip code is independent of sparse/dense + _assert_success(res, desired_fun=-64.049494229) + + def test_bug_8664(self): + # interior-point has trouble with this when presolve is off + c = [4] + A_ub = [[2], [5]] + b_ub = [4, 4] + A_eq = [[0], [-8], [9]] + b_eq = [3, 2, 10] + with suppress_warnings() as sup: + sup.filter(RuntimeWarning) + sup.filter(OptimizeWarning, "Solving system with option...") + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options={"presolve": False}) + assert_(not res.success, "Incorrectly reported success") + + +######################################## +# Revised Simplex Option-Specific Tests# +######################################## + + +class TestLinprogRSCommon(LinprogRSTests): + options = {} + + def test_cyclic_bland(self): + pytest.skip("Intermittent failure acceptable.") + + def test_nontrivial_problem_with_guess(self): + c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem() + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options, x0=x_star) + _assert_success(res, desired_fun=f_star, desired_x=x_star) + assert_equal(res.nit, 0) + + def test_nontrivial_problem_with_unbounded_variables(self): + c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem() + bounds = [(None, None), (None, 
None), (0, None), (None, None)] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options, x0=x_star) + _assert_success(res, desired_fun=f_star, desired_x=x_star) + assert_equal(res.nit, 0) + + def test_nontrivial_problem_with_bounded_variables(self): + c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem() + bounds = [(None, 1), (1, None), (0, None), (.4, .6)] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options, x0=x_star) + _assert_success(res, desired_fun=f_star, desired_x=x_star) + assert_equal(res.nit, 0) + + def test_nontrivial_problem_with_negative_unbounded_variable(self): + c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem() + b_eq = [4] + x_star = np.array([-219/385, 582/385, 0, 4/10]) + f_star = 3951/385 + bounds = [(None, None), (1, None), (0, None), (.4, .6)] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options, x0=x_star) + _assert_success(res, desired_fun=f_star, desired_x=x_star) + assert_equal(res.nit, 0) + + def test_nontrivial_problem_with_bad_guess(self): + c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem() + bad_guess = [1, 2, 3, .5] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options, x0=bad_guess) + assert_equal(res.status, 6) + + def test_redundant_constraints_with_guess(self): + A, b, c, _, _ = magic_square(3) + p = np.random.rand(*c.shape) + with suppress_warnings() as sup: + sup.filter(OptimizeWarning, "A_eq does not appear...") + sup.filter(RuntimeWarning, "invalid value encountered") + sup.filter(LinAlgWarning) + res = linprog(c, A_eq=A, b_eq=b, method=self.method) + res2 = linprog(c, A_eq=A, b_eq=b, method=self.method, x0=res.x) + res3 = linprog(c + p, A_eq=A, b_eq=b, method=self.method, x0=res.x) + _assert_success(res2, desired_fun=1.730550597) + assert_equal(res2.nit, 0) + _assert_success(res3) + assert_(res3.nit < res.nit) # hot start reduces iterations + + +class TestLinprogRSBland(LinprogRSTests): + options = {"pivot": "bland"} + + +############################################ +# HiGHS-Simplex-Dual Option-Specific Tests # +############################################ + + +class TestLinprogHiGHSSimplexDual(LinprogHiGHSTests): + method = "highs-ds" + options = {} + + def test_lad_regression(self): + ''' + The scaled model should be reported optimal; scaling must not cause + the unscaled model to be declared infeasible. See + https://github.com/ERGO-Code/HiGHS/issues/494.
+ ''' + # Test to ensure gh-13610 is resolved (mismatch between HiGHS scaled + # and unscaled model statuses) + c, A_ub, b_ub, bnds = l1_regression_prob() + res = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=bnds, + method=self.method, options=self.options) + assert_equal(res.status, 0) + assert_(res.x is not None) + assert_(np.all(res.slack > -1e-6)) + assert_(np.all(res.x <= [np.inf if ub is None else ub + for lb, ub in bnds])) + assert_(np.all(res.x >= [-np.inf if lb is None else lb - 1e-7 + for lb, ub in bnds])) + + +################################### +# HiGHS-IPM Option-Specific Tests # +################################### + + +class TestLinprogHiGHSIPM(LinprogHiGHSTests): + method = "highs-ipm" + options = {} + + +################################### +# HiGHS-MIP Option-Specific Tests # +################################### + + +class TestLinprogHiGHSMIP: + method = "highs" + options = {} + + @pytest.mark.xfail(condition=(sys.maxsize < 2 ** 32 and + platform.system() == "Linux"), + run=False, + reason="gh-16347") + def test_mip1(self): + # solve non-relaxed magic square problem (finally!) + # also check that values are all integers - they don't always + # come out of HiGHS that way + n = 4 + A, b, c, numbers, M = magic_square(n) + bounds = [(0, 1)] * len(c) + integrality = [1] * len(c) + + res = linprog(c=c*0, A_eq=A, b_eq=b, bounds=bounds, + method=self.method, integrality=integrality) + + s = (numbers.flatten() * res.x).reshape(n**2, n, n) + square = np.sum(s, axis=0) + np.testing.assert_allclose(square.sum(axis=0), M) + np.testing.assert_allclose(square.sum(axis=1), M) + np.testing.assert_allclose(np.diag(square).sum(), M) + np.testing.assert_allclose(np.diag(square[:, ::-1]).sum(), M) + + np.testing.assert_allclose(res.x, np.round(res.x), atol=1e-12) + + def test_mip2(self): + # solve MIP with inequality constraints and all integer constraints + # source: slide 5, + # https://www.cs.upc.edu/~erodri/webpage/cps/theory/lp/milp/slides.pdf + + # use all array inputs to test gh-16681 (integrality couldn't be array) + A_ub = np.array([[2, -2], [-8, 10]]) + b_ub = np.array([-1, 13]) + c = -np.array([1, 1]) + + bounds = np.array([(0, np.inf)] * len(c)) + integrality = np.ones_like(c) + + res = linprog(c=c, A_ub=A_ub, b_ub=b_ub, bounds=bounds, + method=self.method, integrality=integrality) + + np.testing.assert_allclose(res.x, [1, 2]) + np.testing.assert_allclose(res.fun, -3) + + def test_mip3(self): + # solve MIP with inequality constraints and all integer constraints + # source: https://en.wikipedia.org/wiki/Integer_programming#Example + A_ub = np.array([[-1, 1], [3, 2], [2, 3]]) + b_ub = np.array([1, 12, 12]) + c = -np.array([0, 1]) + + bounds = [(0, np.inf)] * len(c) + integrality = [1] * len(c) + + res = linprog(c=c, A_ub=A_ub, b_ub=b_ub, bounds=bounds, + method=self.method, integrality=integrality) + + np.testing.assert_allclose(res.fun, -2) + # two optimal solutions possible, just need one of them + assert np.allclose(res.x, [1, 2]) or np.allclose(res.x, [2, 2]) + + def test_mip4(self): + # solve MIP with inequality constraints and only one integer constraint + # source: https://www.mathworks.com/help/optim/ug/intlinprog.html + A_ub = np.array([[-1, -2], [-4, -1], [2, 1]]) + b_ub = np.array([14, -33, 20]) + c = np.array([8, 1]) + + bounds = [(0, np.inf)] * len(c) + integrality = [0, 1] + + res = linprog(c=c, A_ub=A_ub, b_ub=b_ub, bounds=bounds, + method=self.method, integrality=integrality) + + np.testing.assert_allclose(res.x, [6.5, 7]) + np.testing.assert_allclose(res.fun, 59) + + def 
test_mip5(self): + # solve MIP with inequality and equality constraints + # source: https://www.mathworks.com/help/optim/ug/intlinprog.html + A_ub = np.array([[1, 1, 1]]) + b_ub = np.array([7]) + A_eq = np.array([[4, 2, 1]]) + b_eq = np.array([12]) + c = np.array([-3, -2, -1]) + + bounds = [(0, np.inf), (0, np.inf), (0, 1)] + integrality = [0, 1, 0] + + res = linprog(c=c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, + bounds=bounds, method=self.method, + integrality=integrality) + + np.testing.assert_allclose(res.x, [0, 6, 0]) + np.testing.assert_allclose(res.fun, -12) + + # gh-16897: these fields were not present, ensure that they are now + assert res.get("mip_node_count", None) is not None + assert res.get("mip_dual_bound", None) is not None + assert res.get("mip_gap", None) is not None + + @pytest.mark.slow + @pytest.mark.timeout(120) # prerelease_deps_coverage_64bit_blas job + def test_mip6(self): + # solve a larger MIP with only equality constraints + # source: https://www.mathworks.com/help/optim/ug/intlinprog.html + A_eq = np.array([[22, 13, 26, 33, 21, 3, 14, 26], + [39, 16, 22, 28, 26, 30, 23, 24], + [18, 14, 29, 27, 30, 38, 26, 26], + [41, 26, 28, 36, 18, 38, 16, 26]]) + b_eq = np.array([7872, 10466, 11322, 12058]) + c = np.array([2, 10, 13, 17, 7, 5, 7, 3]) + + bounds = [(0, np.inf)]*8 + integrality = [1]*8 + + res = linprog(c=c, A_eq=A_eq, b_eq=b_eq, bounds=bounds, + method=self.method, integrality=integrality) + + np.testing.assert_allclose(res.fun, 1854) + + @pytest.mark.xslow + def test_mip_rel_gap_passdown(self): + # MIP taken from test_mip6, solved with different values of mip_rel_gap + # solve a larger MIP with only equality constraints + # source: https://www.mathworks.com/help/optim/ug/intlinprog.html + A_eq = np.array([[22, 13, 26, 33, 21, 3, 14, 26], + [39, 16, 22, 28, 26, 30, 23, 24], + [18, 14, 29, 27, 30, 38, 26, 26], + [41, 26, 28, 36, 18, 38, 16, 26]]) + b_eq = np.array([7872, 10466, 11322, 12058]) + c = np.array([2, 10, 13, 17, 7, 5, 7, 3]) + + bounds = [(0, np.inf)]*8 + integrality = [1]*8 + + mip_rel_gaps = [0.5, 0.25, 0.01, 0.001] + sol_mip_gaps = [] + for mip_rel_gap in mip_rel_gaps: + res = linprog(c=c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, + bounds=bounds, method=self.method, + integrality=integrality, + options={"mip_rel_gap": mip_rel_gap}) + final_mip_gap = res["mip_gap"] + # assert that the solution actually has mip_gap lower than the + # required mip_rel_gap supplied + assert final_mip_gap <= mip_rel_gap + sol_mip_gaps.append(final_mip_gap) + + # make sure that the mip_rel_gap parameter is actually doing something + # check that the achieved solution gaps decline monotonically with the + # mip_rel_gap parameter. np.diff computes x[i+1] - x[i], so flip the + # (decreasing) array of solution gaps before differencing so that the + # differences come out nonnegative + gap_diffs = np.diff(np.flip(sol_mip_gaps)) + assert np.all(gap_diffs >= 0) + assert not np.all(gap_diffs == 0) + + def test_semi_continuous(self): + # See issue #18106. This tests whether the solution is being + # checked correctly (status is 0) when integrality > 1: + # values are allowed to be 0 even if 0 is out of bounds.
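+        # (With HiGHS, integrality=2 marks a variable as semi-continuous and
+        # integrality=3 as semi-integer: each may take the value 0 or a value
+        # (an integer, for 3) within its bounds, which is why [0, 0, 1.5, 1]
+        # is feasible below despite the lower bounds of 0.5.)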
+ + c = np.array([1., 1., -1, -1]) + bounds = np.array([[0.5, 1.5], [0.5, 1.5], [0.5, 1.5], [0.5, 1.5]]) + integrality = np.array([2, 3, 2, 3]) + + res = linprog(c, bounds=bounds, + integrality=integrality, method='highs') + + np.testing.assert_allclose(res.x, [0, 0, 1.5, 1]) + assert res.status == 0 + + +########################### +# Autoscale-Specific Tests# +########################### + + +@pytest.mark.filterwarnings("ignore::DeprecationWarning") +class AutoscaleTests: + options = {"autoscale": True} + + test_bug_6139 = LinprogCommonTests.test_bug_6139 + test_bug_6690 = LinprogCommonTests.test_bug_6690 + test_bug_7237 = LinprogCommonTests.test_bug_7237 + + +class TestAutoscaleIP(AutoscaleTests): + method = "interior-point" + + def test_bug_6139(self): + self.options['tol'] = 1e-10 + return AutoscaleTests.test_bug_6139(self) + + +class TestAutoscaleSimplex(AutoscaleTests): + method = "simplex" + + +class TestAutoscaleRS(AutoscaleTests): + method = "revised simplex" + + def test_nontrivial_problem_with_guess(self): + c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem() + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options, x0=x_star) + _assert_success(res, desired_fun=f_star, desired_x=x_star) + assert_equal(res.nit, 0) + + def test_nontrivial_problem_with_bad_guess(self): + c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem() + bad_guess = [1, 2, 3, .5] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options, x0=bad_guess) + assert_equal(res.status, 6) + + +########################### +# Redundancy Removal Tests# +########################### + + +@pytest.mark.filterwarnings("ignore::DeprecationWarning") +class RRTests: + method = "interior-point" + LCT = LinprogCommonTests + # these are a few of the existing tests that have redundancy + test_RR_infeasibility = LCT.test_remove_redundancy_infeasibility + test_bug_10349 = LCT.test_bug_10349 + test_bug_7044 = LCT.test_bug_7044 + test_NFLC = LCT.test_network_flow_limited_capacity + test_enzo_example_b = LCT.test_enzo_example_b + + +class TestRRSVD(RRTests): + options = {"rr_method": "SVD"} + + +class TestRRPivot(RRTests): + options = {"rr_method": "pivot"} + + +class TestRRID(RRTests): + options = {"rr_method": "ID"} diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_lsq_common.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_lsq_common.py new file mode 100644 index 0000000000000000000000000000000000000000..650deedce88b6babd8a3f2b62a5839f1a6cb966c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_lsq_common.py @@ -0,0 +1,297 @@ +from numpy.testing import assert_, assert_allclose, assert_equal +from pytest import raises as assert_raises +import numpy as np + +from scipy.optimize._lsq.common import ( + step_size_to_bound, find_active_constraints, make_strictly_feasible, + CL_scaling_vector, intersect_trust_region, build_quadratic_1d, + minimize_quadratic_1d, evaluate_quadratic, reflective_transformation, + left_multiplied_operator, right_multiplied_operator) + + +class TestBounds: + def test_step_size_to_bounds(self): + lb = np.array([-1.0, 2.5, 10.0]) + ub = np.array([1.0, 5.0, 100.0]) + x = np.array([0.0, 2.5, 12.0]) + + s = np.array([0.1, 0.0, 0.0]) + step, hits = step_size_to_bound(x, s, lb, ub) + assert_equal(step, 10) + assert_equal(hits, [1, 0, 0]) + + s = np.array([0.01, 0.05, -1.0]) + step, hits = step_size_to_bound(x, s, lb, ub) + 
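+        # x[2] = 12 moves toward lb[2] = 10 at rate -1.0 and hits it after a
+        # step of 2; components 0 and 1 would need steps of 100 and 50 to
+        # reach their upper bounds, so the binding hit is [0, 0, -1]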
assert_equal(step, 2) + assert_equal(hits, [0, 0, -1]) + + s = np.array([10.0, -0.0001, 100.0]) + step, hits = step_size_to_bound(x, s, lb, ub) + assert_equal(step, np.array(-0)) + assert_equal(hits, [0, -1, 0]) + + s = np.array([1.0, 0.5, -2.0]) + step, hits = step_size_to_bound(x, s, lb, ub) + assert_equal(step, 1.0) + assert_equal(hits, [1, 0, -1]) + + s = np.zeros(3) + step, hits = step_size_to_bound(x, s, lb, ub) + assert_equal(step, np.inf) + assert_equal(hits, [0, 0, 0]) + + def test_find_active_constraints(self): + lb = np.array([0.0, -10.0, 1.0]) + ub = np.array([1.0, 0.0, 100.0]) + + x = np.array([0.5, -5.0, 2.0]) + active = find_active_constraints(x, lb, ub) + assert_equal(active, [0, 0, 0]) + + x = np.array([0.0, 0.0, 10.0]) + active = find_active_constraints(x, lb, ub) + assert_equal(active, [-1, 1, 0]) + + active = find_active_constraints(x, lb, ub, rtol=0) + assert_equal(active, [-1, 1, 0]) + + x = np.array([1e-9, -1e-8, 100 - 1e-9]) + active = find_active_constraints(x, lb, ub) + assert_equal(active, [0, 0, 1]) + + active = find_active_constraints(x, lb, ub, rtol=1.5e-9) + assert_equal(active, [-1, 0, 1]) + + lb = np.array([1.0, -np.inf, -np.inf]) + ub = np.array([np.inf, 10.0, np.inf]) + + x = np.ones(3) + active = find_active_constraints(x, lb, ub) + assert_equal(active, [-1, 0, 0]) + + # Handles out-of-bound cases. + x = np.array([0.0, 11.0, 0.0]) + active = find_active_constraints(x, lb, ub) + assert_equal(active, [-1, 1, 0]) + + active = find_active_constraints(x, lb, ub, rtol=0) + assert_equal(active, [-1, 1, 0]) + + def test_make_strictly_feasible(self): + lb = np.array([-0.5, -0.8, 2.0]) + ub = np.array([0.8, 1.0, 3.0]) + + x = np.array([-0.5, 0.0, 2 + 1e-10]) + + x_new = make_strictly_feasible(x, lb, ub, rstep=0) + assert_(x_new[0] > -0.5) + assert_equal(x_new[1:], x[1:]) + + x_new = make_strictly_feasible(x, lb, ub, rstep=1e-4) + assert_equal(x_new, [-0.5 + 1e-4, 0.0, 2 * (1 + 1e-4)]) + + x = np.array([-0.5, -1, 3.1]) + x_new = make_strictly_feasible(x, lb, ub) + assert_(np.all((x_new >= lb) & (x_new <= ub))) + + x_new = make_strictly_feasible(x, lb, ub, rstep=0) + assert_(np.all((x_new >= lb) & (x_new <= ub))) + + lb = np.array([-1, 100.0]) + ub = np.array([1, 100.0 + 1e-10]) + x = np.array([0, 100.0]) + x_new = make_strictly_feasible(x, lb, ub, rstep=1e-8) + assert_equal(x_new, [0, 100.0 + 0.5e-10]) + + def test_scaling_vector(self): + lb = np.array([-np.inf, -5.0, 1.0, -np.inf]) + ub = np.array([1.0, np.inf, 10.0, np.inf]) + x = np.array([0.5, 2.0, 5.0, 0.0]) + g = np.array([1.0, 0.1, -10.0, 0.0]) + v, dv = CL_scaling_vector(x, g, lb, ub) + assert_equal(v, [1.0, 7.0, 5.0, 1.0]) + assert_equal(dv, [0.0, 1.0, -1.0, 0.0]) + + +class TestQuadraticFunction: + def setup_method(self): + self.J = np.array([ + [0.1, 0.2], + [-1.0, 1.0], + [0.5, 0.2]]) + self.g = np.array([0.8, -2.0]) + self.diag = np.array([1.0, 2.0]) + + def test_build_quadratic_1d(self): + s = np.zeros(2) + a, b = build_quadratic_1d(self.J, self.g, s) + assert_equal(a, 0) + assert_equal(b, 0) + + a, b = build_quadratic_1d(self.J, self.g, s, diag=self.diag) + assert_equal(a, 0) + assert_equal(b, 0) + + s = np.array([1.0, -1.0]) + a, b = build_quadratic_1d(self.J, self.g, s) + assert_equal(a, 2.05) + assert_equal(b, 2.8) + + a, b = build_quadratic_1d(self.J, self.g, s, diag=self.diag) + assert_equal(a, 3.55) + assert_equal(b, 2.8) + + s0 = np.array([0.5, 0.5]) + a, b, c = build_quadratic_1d(self.J, self.g, s, diag=self.diag, s0=s0) + assert_equal(a, 3.55) + assert_allclose(b, 2.39) + assert_allclose(c, 
-0.1525) + + def test_minimize_quadratic_1d(self): + a = 5 + b = -1 + + t, y = minimize_quadratic_1d(a, b, 1, 2) + assert_equal(t, 1) + assert_allclose(y, a * t**2 + b * t, rtol=1e-15) + + t, y = minimize_quadratic_1d(a, b, -2, -1) + assert_equal(t, -1) + assert_allclose(y, a * t**2 + b * t, rtol=1e-15) + + t, y = minimize_quadratic_1d(a, b, -1, 1) + assert_equal(t, 0.1) + assert_allclose(y, a * t**2 + b * t, rtol=1e-15) + + c = 10 + t, y = minimize_quadratic_1d(a, b, -1, 1, c=c) + assert_equal(t, 0.1) + assert_allclose(y, a * t**2 + b * t + c, rtol=1e-15) + + t, y = minimize_quadratic_1d(a, b, -np.inf, np.inf, c=c) + assert_equal(t, 0.1) + assert_allclose(y, a * t ** 2 + b * t + c, rtol=1e-15) + + t, y = minimize_quadratic_1d(a, b, 0, np.inf, c=c) + assert_equal(t, 0.1) + assert_allclose(y, a * t ** 2 + b * t + c, rtol=1e-15) + + t, y = minimize_quadratic_1d(a, b, -np.inf, 0, c=c) + assert_equal(t, 0) + assert_allclose(y, a * t ** 2 + b * t + c, rtol=1e-15) + + a = -1 + b = 0.2 + t, y = minimize_quadratic_1d(a, b, -np.inf, np.inf) + assert_equal(y, -np.inf) + + t, y = minimize_quadratic_1d(a, b, 0, np.inf) + assert_equal(t, np.inf) + assert_equal(y, -np.inf) + + t, y = minimize_quadratic_1d(a, b, -np.inf, 0) + assert_equal(t, -np.inf) + assert_equal(y, -np.inf) + + def test_evaluate_quadratic(self): + s = np.array([1.0, -1.0]) + + value = evaluate_quadratic(self.J, self.g, s) + assert_equal(value, 4.85) + + value = evaluate_quadratic(self.J, self.g, s, diag=self.diag) + assert_equal(value, 6.35) + + s = np.array([[1.0, -1.0], + [1.0, 1.0], + [0.0, 0.0]]) + + values = evaluate_quadratic(self.J, self.g, s) + assert_allclose(values, [4.85, -0.91, 0.0]) + + values = evaluate_quadratic(self.J, self.g, s, diag=self.diag) + assert_allclose(values, [6.35, 0.59, 0.0]) + + +class TestTrustRegion: + def test_intersect(self): + Delta = 1.0 + + x = np.zeros(3) + s = np.array([1.0, 0.0, 0.0]) + t_neg, t_pos = intersect_trust_region(x, s, Delta) + assert_equal(t_neg, -1) + assert_equal(t_pos, 1) + + s = np.array([-1.0, 1.0, -1.0]) + t_neg, t_pos = intersect_trust_region(x, s, Delta) + assert_allclose(t_neg, -3**-0.5) + assert_allclose(t_pos, 3**-0.5) + + x = np.array([0.5, -0.5, 0]) + s = np.array([0, 0, 1.0]) + t_neg, t_pos = intersect_trust_region(x, s, Delta) + assert_allclose(t_neg, -2**-0.5) + assert_allclose(t_pos, 2**-0.5) + + x = np.ones(3) + assert_raises(ValueError, intersect_trust_region, x, s, Delta) + + x = np.zeros(3) + s = np.zeros(3) + assert_raises(ValueError, intersect_trust_region, x, s, Delta) + + +def test_reflective_transformation(): + lb = np.array([-1, -2], dtype=float) + ub = np.array([5, 3], dtype=float) + + y = np.array([0, 0]) + x, g = reflective_transformation(y, lb, ub) + assert_equal(x, y) + assert_equal(g, np.ones(2)) + + y = np.array([-4, 4], dtype=float) + + x, g = reflective_transformation(y, lb, np.array([np.inf, np.inf])) + assert_equal(x, [2, 4]) + assert_equal(g, [-1, 1]) + + x, g = reflective_transformation(y, np.array([-np.inf, -np.inf]), ub) + assert_equal(x, [-4, 2]) + assert_equal(g, [1, -1]) + + x, g = reflective_transformation(y, lb, ub) + assert_equal(x, [2, 2]) + assert_equal(g, [-1, -1]) + + lb = np.array([-np.inf, -2]) + ub = np.array([5, np.inf]) + y = np.array([10, 10], dtype=float) + x, g = reflective_transformation(y, lb, ub) + assert_equal(x, [0, 10]) + assert_equal(g, [-1, 1]) + + +def test_linear_operators(): + A = np.arange(6).reshape((3, 2)) + + d_left = np.array([-1, 2, 5]) + DA = np.diag(d_left).dot(A) + J_left = left_multiplied_operator(A, 
d_left) + + d_right = np.array([5, 10]) + AD = A.dot(np.diag(d_right)) + J_right = right_multiplied_operator(A, d_right) + + x = np.array([-2, 3]) + X = -2 * np.arange(2, 8).reshape((2, 3)) + xt = np.array([0, -2, 15]) + + assert_allclose(DA.dot(x), J_left.matvec(x)) + assert_allclose(DA.dot(X), J_left.matmat(X)) + assert_allclose(DA.T.dot(xt), J_left.rmatvec(xt)) + + assert_allclose(AD.dot(x), J_right.matvec(x)) + assert_allclose(AD.dot(X), J_right.matmat(X)) + assert_allclose(AD.T.dot(xt), J_right.rmatvec(xt)) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_lsq_linear.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_lsq_linear.py new file mode 100644 index 0000000000000000000000000000000000000000..348f4fc1350c0c92f3652a594c61818c19e2cde2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_lsq_linear.py @@ -0,0 +1,284 @@ +import pytest + +import numpy as np +from numpy.linalg import lstsq +from numpy.testing import assert_allclose, assert_equal, assert_ + +from scipy.sparse import rand, coo_matrix +from scipy.sparse.linalg import aslinearoperator +from scipy.optimize import lsq_linear +from scipy.optimize._minimize import Bounds + + +A = np.array([ + [0.171, -0.057], + [-0.049, -0.248], + [-0.166, 0.054], +]) +b = np.array([0.074, 1.014, -0.383]) + + +class BaseMixin: + def setup_method(self): + self.rnd = np.random.RandomState(0) + + def test_dense_no_bounds(self): + for lsq_solver in self.lsq_solvers: + res = lsq_linear(A, b, method=self.method, lsq_solver=lsq_solver) + assert_allclose(res.x, lstsq(A, b, rcond=-1)[0]) + assert_allclose(res.x, res.unbounded_sol[0]) + + def test_dense_bounds(self): + # Solutions for comparison are taken from MATLAB. + lb = np.array([-1, -10]) + ub = np.array([1, 0]) + unbounded_sol = lstsq(A, b, rcond=-1)[0] + for lsq_solver in self.lsq_solvers: + res = lsq_linear(A, b, (lb, ub), method=self.method, + lsq_solver=lsq_solver) + assert_allclose(res.x, lstsq(A, b, rcond=-1)[0]) + assert_allclose(res.unbounded_sol[0], unbounded_sol) + + lb = np.array([0.0, -np.inf]) + for lsq_solver in self.lsq_solvers: + res = lsq_linear(A, b, (lb, np.inf), method=self.method, + lsq_solver=lsq_solver) + assert_allclose(res.x, np.array([0.0, -4.084174437334673]), + atol=1e-6) + assert_allclose(res.unbounded_sol[0], unbounded_sol) + + lb = np.array([-1, 0]) + for lsq_solver in self.lsq_solvers: + res = lsq_linear(A, b, (lb, np.inf), method=self.method, + lsq_solver=lsq_solver) + assert_allclose(res.x, np.array([0.448427311733504, 0]), + atol=1e-15) + assert_allclose(res.unbounded_sol[0], unbounded_sol) + + ub = np.array([np.inf, -5]) + for lsq_solver in self.lsq_solvers: + res = lsq_linear(A, b, (-np.inf, ub), method=self.method, + lsq_solver=lsq_solver) + assert_allclose(res.x, np.array([-0.105560998682388, -5])) + assert_allclose(res.unbounded_sol[0], unbounded_sol) + + ub = np.array([-1, np.inf]) + for lsq_solver in self.lsq_solvers: + res = lsq_linear(A, b, (-np.inf, ub), method=self.method, + lsq_solver=lsq_solver) + assert_allclose(res.x, np.array([-1, -4.181102129483254])) + assert_allclose(res.unbounded_sol[0], unbounded_sol) + + lb = np.array([0, -4]) + ub = np.array([1, 0]) + for lsq_solver in self.lsq_solvers: + res = lsq_linear(A, b, (lb, ub), method=self.method, + lsq_solver=lsq_solver) + assert_allclose(res.x, np.array([0.005236663400791, -4])) + assert_allclose(res.unbounded_sol[0], unbounded_sol) + + def test_bounds_variants(self): + x = np.array([1, 3]) + A = 
self.rnd.uniform(size=(2, 2)) + b = A@x + lb = np.array([1, 1]) + ub = np.array([2, 2]) + bounds_old = (lb, ub) + bounds_new = Bounds(lb, ub) + res_old = lsq_linear(A, b, bounds_old) + res_new = lsq_linear(A, b, bounds_new) + assert not np.allclose(res_new.x, res_new.unbounded_sol[0]) + assert_allclose(res_old.x, res_new.x) + + def test_np_matrix(self): + # gh-10711 + with np.testing.suppress_warnings() as sup: + sup.filter(PendingDeprecationWarning) + A = np.matrix([[20, -4, 0, 2, 3], [10, -2, 1, 0, -1]]) + k = np.array([20, 15]) + lsq_linear(A, k) + + def test_dense_rank_deficient(self): + A = np.array([[-0.307, -0.184]]) + b = np.array([0.773]) + lb = [-0.1, -0.1] + ub = [0.1, 0.1] + for lsq_solver in self.lsq_solvers: + res = lsq_linear(A, b, (lb, ub), method=self.method, + lsq_solver=lsq_solver) + assert_allclose(res.x, [-0.1, -0.1]) + assert_allclose(res.unbounded_sol[0], lstsq(A, b, rcond=-1)[0]) + + A = np.array([ + [0.334, 0.668], + [-0.516, -1.032], + [0.192, 0.384], + ]) + b = np.array([-1.436, 0.135, 0.909]) + lb = [0, -1] + ub = [1, -0.5] + for lsq_solver in self.lsq_solvers: + res = lsq_linear(A, b, (lb, ub), method=self.method, + lsq_solver=lsq_solver) + assert_allclose(res.optimality, 0, atol=1e-11) + assert_allclose(res.unbounded_sol[0], lstsq(A, b, rcond=-1)[0]) + + def test_full_result(self): + lb = np.array([0, -4]) + ub = np.array([1, 0]) + res = lsq_linear(A, b, (lb, ub), method=self.method) + + assert_allclose(res.x, [0.005236663400791, -4]) + assert_allclose(res.unbounded_sol[0], lstsq(A, b, rcond=-1)[0]) + + r = A.dot(res.x) - b + assert_allclose(res.cost, 0.5 * np.dot(r, r)) + assert_allclose(res.fun, r) + + assert_allclose(res.optimality, 0.0, atol=1e-12) + assert_equal(res.active_mask, [0, -1]) + assert_(res.nit < 15) + assert_(res.status == 1 or res.status == 3) + assert_(isinstance(res.message, str)) + assert_(res.success) + + # This is a test for issue #9982. 
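+    # Although A below is nearly singular, lsq_linear should still drive the
+    # cost essentially to zero.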
+ def test_almost_singular(self): + A = np.array( + [[0.8854232310355122, 0.0365312146937765, 0.0365312146836789], + [0.3742460132129041, 0.0130523214078376, 0.0130523214077873], + [0.9680633871281361, 0.0319366128718639, 0.0319366128718388]]) + + b = np.array( + [0.0055029366538097, 0.0026677442422208, 0.0066612514782381]) + + result = lsq_linear(A, b, method=self.method) + assert_(result.cost < 1.1e-8) + + @pytest.mark.xslow + def test_large_rank_deficient(self): + np.random.seed(0) + n, m = np.sort(np.random.randint(2, 1000, size=2)) + m *= 2 # make m >> n + A = 1.0 * np.random.randint(-99, 99, size=[m, n]) + b = 1.0 * np.random.randint(-99, 99, size=[m]) + bounds = 1.0 * np.sort(np.random.randint(-99, 99, size=(2, n)), axis=0) + bounds[1, :] += 1.0 # ensure ub > lb + + # Make the A matrix strongly rank deficient by replicating some columns + w = np.random.choice(n, n) # Select random columns with duplicates + A = A[:, w] + + x_bvls = lsq_linear(A, b, bounds=bounds, method='bvls').x + x_trf = lsq_linear(A, b, bounds=bounds, method='trf').x + + cost_bvls = np.sum((A @ x_bvls - b)**2) + cost_trf = np.sum((A @ x_trf - b)**2) + + assert_(abs(cost_bvls - cost_trf) < cost_trf*1e-10) + + def test_convergence_small_matrix(self): + A = np.array([[49.0, 41.0, -32.0], + [-19.0, -32.0, -8.0], + [-13.0, 10.0, 69.0]]) + b = np.array([-41.0, -90.0, 47.0]) + bounds = np.array([[31.0, -44.0, 26.0], + [54.0, -32.0, 28.0]]) + + x_bvls = lsq_linear(A, b, bounds=bounds, method='bvls').x + x_trf = lsq_linear(A, b, bounds=bounds, method='trf').x + + cost_bvls = np.sum((A @ x_bvls - b)**2) + cost_trf = np.sum((A @ x_trf - b)**2) + + assert_(abs(cost_bvls - cost_trf) < cost_trf*1e-10) + + +class SparseMixin: + def test_sparse_and_LinearOperator(self): + m = 5000 + n = 1000 + A = rand(m, n, random_state=0) + b = self.rnd.randn(m) + res = lsq_linear(A, b) + assert_allclose(res.optimality, 0, atol=1e-6) + + A = aslinearoperator(A) + res = lsq_linear(A, b) + assert_allclose(res.optimality, 0, atol=1e-6) + + def test_sparse_bounds(self): + m = 5000 + n = 1000 + A = rand(m, n, random_state=0) + b = self.rnd.randn(m) + lb = self.rnd.randn(n) + ub = lb + 1 + res = lsq_linear(A, b, (lb, ub)) + assert_allclose(res.optimality, 0.0, atol=1e-6) + + res = lsq_linear(A, b, (lb, ub), lsmr_tol=1e-13, + lsmr_maxiter=1500) + assert_allclose(res.optimality, 0.0, atol=1e-6) + + res = lsq_linear(A, b, (lb, ub), lsmr_tol='auto') + assert_allclose(res.optimality, 0.0, atol=1e-6) + + def test_sparse_ill_conditioned(self): + # Sparse matrix with condition number of ~4 million + data = np.array([1., 1., 1., 1.
+ 1e-6, 1.]) + row = np.array([0, 0, 1, 2, 2]) + col = np.array([0, 2, 1, 0, 2]) + A = coo_matrix((data, (row, col)), shape=(3, 3)) + + # Get the exact solution + exact_sol = lsq_linear(A.toarray(), b, lsq_solver='exact') + + # Default lsmr arguments should not fully converge the solution + default_lsmr_sol = lsq_linear(A, b, lsq_solver='lsmr') + with pytest.raises(AssertionError, match=""): + assert_allclose(exact_sol.x, default_lsmr_sol.x) + + # By increasing the maximum lsmr iters, it will converge + conv_lsmr = lsq_linear(A, b, lsq_solver='lsmr', lsmr_maxiter=10) + assert_allclose(exact_sol.x, conv_lsmr.x) + + +class TestTRF(BaseMixin, SparseMixin): + method = 'trf' + lsq_solvers = ['exact', 'lsmr'] + + +class TestBVLS(BaseMixin): + method = 'bvls' + lsq_solvers = ['exact'] + + +class TestErrorChecking: + def test_option_lsmr_tol(self): + # Should work with a positive float, string equal to 'auto', or None + _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_tol=1e-2) + _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_tol='auto') + _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_tol=None) + + # Should raise error with negative float, strings + # other than 'auto', and integers + err_message = "`lsmr_tol` must be None, 'auto', or positive float." + with pytest.raises(ValueError, match=err_message): + _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_tol=-0.1) + with pytest.raises(ValueError, match=err_message): + _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_tol='foo') + with pytest.raises(ValueError, match=err_message): + _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_tol=1) + + def test_option_lsmr_maxiter(self): + # Should work with positive integers or None + _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_maxiter=1) + _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_maxiter=None) + + # Should raise error with 0 or negative max iter + err_message = "`lsmr_maxiter` must be None or positive integer." + with pytest.raises(ValueError, match=err_message): + _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_maxiter=0) + with pytest.raises(ValueError, match=err_message): + _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_maxiter=-1) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_milp.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_milp.py new file mode 100644 index 0000000000000000000000000000000000000000..0970a15a8bccc3deb7bc67f7b62763947c1b237c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_milp.py @@ -0,0 +1,385 @@ +""" +Unit test for Mixed Integer Linear Programming +""" +import re + +import numpy as np +from numpy.testing import assert_allclose, assert_array_equal +import pytest + +from .test_linprog import magic_square +from scipy.optimize import milp, Bounds, LinearConstraint +from scipy import sparse + + +def test_milp_iv(): + + message = "`c` must be a dense array" + with pytest.raises(ValueError, match=message): + milp(sparse.coo_array([0, 0])) + + message = "`c` must be a one-dimensional array of finite numbers with" + with pytest.raises(ValueError, match=message): + milp(np.zeros((3, 4))) + with pytest.raises(ValueError, match=message): + milp([]) + with pytest.raises(ValueError, match=message): + milp(None) + + message = "`bounds` must be convertible into an instance of..." 
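+    # (note: pytest's `match` argument is a regex applied with re.search, so
+    # the literal '...' above acts as a three-character wildcard rather than
+    # an ellipsis)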
+ with pytest.raises(ValueError, match=message): + milp(1, bounds=10) + + message = "`constraints` (or each element within `constraints`) must be" + with pytest.raises(ValueError, match=re.escape(message)): + milp(1, constraints=10) + with pytest.raises(ValueError, match=re.escape(message)): + milp(np.zeros(3), constraints=([[1, 2, 3]], [2, 3], [2, 3])) + with pytest.raises(ValueError, match=re.escape(message)): + milp(np.zeros(2), constraints=([[1, 2]], [2], sparse.coo_array([2]))) + + message = "The shape of `A` must be (len(b_l), len(c))." + with pytest.raises(ValueError, match=re.escape(message)): + milp(np.zeros(3), constraints=([[1, 2]], [2], [2])) + + message = "`integrality` must be a dense array" + with pytest.raises(ValueError, match=message): + milp([1, 2], integrality=sparse.coo_array([1, 2])) + + message = ("`integrality` must contain integers 0-3 and be broadcastable " + "to `c.shape`.") + with pytest.raises(ValueError, match=message): + milp([1, 2, 3], integrality=[1, 2]) + with pytest.raises(ValueError, match=message): + milp([1, 2, 3], integrality=[1, 5, 3]) + + message = "Lower and upper bounds must be dense arrays." + with pytest.raises(ValueError, match=message): + milp([1, 2, 3], bounds=([1, 2], sparse.coo_array([3, 4]))) + + message = "`lb`, `ub`, and `keep_feasible` must be broadcastable." + with pytest.raises(ValueError, match=message): + milp([1, 2, 3], bounds=([1, 2], [3, 4, 5])) + with pytest.raises(ValueError, match=message): + milp([1, 2, 3], bounds=([1, 2, 3], [4, 5])) + + message = "`bounds.lb` and `bounds.ub` must contain reals and..." + with pytest.raises(ValueError, match=message): + milp([1, 2, 3], bounds=([1, 2], [3, 4])) + with pytest.raises(ValueError, match=message): + milp([1, 2, 3], bounds=([1, 2, 3], ["3+4", 4, 5])) + with pytest.raises(ValueError, match=message): + milp([1, 2, 3], bounds=([1, 2, 3], [set(), 4, 5])) + + +@pytest.mark.xfail(run=False, + reason="Needs to be fixed in `_highs_wrapper`") +def test_milp_options(capsys): + # run=False now because of gh-16347 + message = "Unrecognized options detected: {'ekki'}..." + options = {'ekki': True} + with pytest.warns(RuntimeWarning, match=message): + milp(1, options=options) + + A, b, c, numbers, M = magic_square(3) + options = {"disp": True, "presolve": False, "time_limit": 0.05} + res = milp(c=c, constraints=(A, b, b), bounds=(0, 1), integrality=1, + options=options) + + captured = capsys.readouterr() + assert "Presolve is switched off" in captured.out + assert "Time Limit Reached" in captured.out + assert not res.success + + +def test_result(): + A, b, c, numbers, M = magic_square(3) + res = milp(c=c, constraints=(A, b, b), bounds=(0, 1), integrality=1) + assert res.status == 0 + assert res.success + msg = "Optimization terminated successfully. (HiGHS Status 7:" + assert res.message.startswith(msg) + assert isinstance(res.x, np.ndarray) + assert isinstance(res.fun, float) + assert isinstance(res.mip_node_count, int) + assert isinstance(res.mip_dual_bound, float) + assert isinstance(res.mip_gap, float) + + A, b, c, numbers, M = magic_square(6) + res = milp(c=c*0, constraints=(A, b, b), bounds=(0, 1), integrality=1, + options={'time_limit': 0.05}) + assert res.status == 1 + assert not res.success + msg = "Time limit reached. (HiGHS Status 13:" + assert res.message.startswith(msg) + assert (res.fun is res.mip_dual_bound is res.mip_gap + is res.mip_node_count is res.x is None) + + res = milp(1, bounds=(1, -1)) + assert res.status == 2 + assert not res.success + msg = "The problem is infeasible. 
(HiGHS Status 8:" + assert res.message.startswith(msg) + assert (res.fun is res.mip_dual_bound is res.mip_gap + is res.mip_node_count is res.x is None) + + res = milp(-1) + assert res.status == 3 + assert not res.success + msg = "The problem is unbounded. (HiGHS Status 10:" + assert res.message.startswith(msg) + assert (res.fun is res.mip_dual_bound is res.mip_gap + is res.mip_node_count is res.x is None) + + +def test_milp_optional_args(): + # check that arguments other than `c` are indeed optional + res = milp(1) + assert res.fun == 0 + assert_array_equal(res.x, [0]) + + +def test_milp_1(): + # solve magic square problem + n = 3 + A, b, c, numbers, M = magic_square(n) + A = sparse.csc_array(A) # confirm that sparse arrays are accepted + res = milp(c=c*0, constraints=(A, b, b), bounds=(0, 1), integrality=1) + + # check that solution is a magic square + x = np.round(res.x) + s = (numbers.flatten() * x).reshape(n**2, n, n) + square = np.sum(s, axis=0) + np.testing.assert_allclose(square.sum(axis=0), M) + np.testing.assert_allclose(square.sum(axis=1), M) + np.testing.assert_allclose(np.diag(square).sum(), M) + np.testing.assert_allclose(np.diag(square[:, ::-1]).sum(), M) + + +def test_milp_2(): + # solve MIP with inequality constraints and all integer constraints + # source: slide 5, + # https://www.cs.upc.edu/~erodri/webpage/cps/theory/lp/milp/slides.pdf + # also check that `milp` accepts all valid ways of specifying constraints + c = -np.ones(2) + A = [[-2, 2], [-8, 10]] + b_l = [1, -np.inf] + b_u = [np.inf, 13] + linear_constraint = LinearConstraint(A, b_l, b_u) + + # solve original problem + res1 = milp(c=c, constraints=(A, b_l, b_u), integrality=True) + res2 = milp(c=c, constraints=linear_constraint, integrality=True) + res3 = milp(c=c, constraints=[(A, b_l, b_u)], integrality=True) + res4 = milp(c=c, constraints=[linear_constraint], integrality=True) + res5 = milp(c=c, integrality=True, + constraints=[(A[:1], b_l[:1], b_u[:1]), + (A[1:], b_l[1:], b_u[1:])]) + res6 = milp(c=c, integrality=True, + constraints=[LinearConstraint(A[:1], b_l[:1], b_u[:1]), + LinearConstraint(A[1:], b_l[1:], b_u[1:])]) + res7 = milp(c=c, integrality=True, + constraints=[(A[:1], b_l[:1], b_u[:1]), + LinearConstraint(A[1:], b_l[1:], b_u[1:])]) + xs = np.array([res1.x, res2.x, res3.x, res4.x, res5.x, res6.x, res7.x]) + funs = np.array([res1.fun, res2.fun, res3.fun, + res4.fun, res5.fun, res6.fun, res7.fun]) + np.testing.assert_allclose(xs, np.broadcast_to([1, 2], xs.shape)) + np.testing.assert_allclose(funs, -3) + + # solve relaxed problem + res = milp(c=c, constraints=(A, b_l, b_u)) + np.testing.assert_allclose(res.x, [4, 4.5]) + np.testing.assert_allclose(res.fun, -8.5) + + +def test_milp_3(): + # solve MIP with inequality constraints and all integer constraints + # source: https://en.wikipedia.org/wiki/Integer_programming#Example + c = [0, -1] + A = [[-1, 1], [3, 2], [2, 3]] + b_u = [1, 12, 12] + b_l = np.full_like(b_u, -np.inf, dtype=np.float64) + constraints = LinearConstraint(A, b_l, b_u) + + integrality = np.ones_like(c) + + # solve original problem + res = milp(c=c, constraints=constraints, integrality=integrality) + assert_allclose(res.fun, -2) + # two optimal solutions possible, just need one of them + assert np.allclose(res.x, [1, 2]) or np.allclose(res.x, [2, 2]) + + # solve relaxed problem + res = milp(c=c, constraints=constraints) + assert_allclose(res.fun, -2.8) + assert_allclose(res.x, [1.8, 2.8]) + + +def test_milp_4(): + # solve MIP with inequality constraints and only one integer constraint + 
# source: https://www.mathworks.com/help/optim/ug/intlinprog.html + c = [8, 1] + integrality = [0, 1] + A = [[1, 2], [-4, -1], [2, 1]] + b_l = [-14, -np.inf, -np.inf] + b_u = [np.inf, -33, 20] + constraints = LinearConstraint(A, b_l, b_u) + bounds = Bounds(-np.inf, np.inf) + + res = milp(c, integrality=integrality, bounds=bounds, + constraints=constraints) + assert_allclose(res.fun, 59) + assert_allclose(res.x, [6.5, 7]) + + +def test_milp_5(): + # solve MIP with inequality and equality constraints + # source: https://www.mathworks.com/help/optim/ug/intlinprog.html + c = [-3, -2, -1] + integrality = [0, 0, 1] + lb = [0, 0, 0] + ub = [np.inf, np.inf, 1] + bounds = Bounds(lb, ub) + A = [[1, 1, 1], [4, 2, 1]] + b_l = [-np.inf, 12] + b_u = [7, 12] + constraints = LinearConstraint(A, b_l, b_u) + + res = milp(c, integrality=integrality, bounds=bounds, + constraints=constraints) + # there are multiple solutions + assert_allclose(res.fun, -12) + + +@pytest.mark.slow +@pytest.mark.timeout(120) # prerelease_deps_coverage_64bit_blas job +def test_milp_6(): + # solve a larger MIP with only equality constraints + # source: https://www.mathworks.com/help/optim/ug/intlinprog.html + integrality = 1 + A_eq = np.array([[22, 13, 26, 33, 21, 3, 14, 26], + [39, 16, 22, 28, 26, 30, 23, 24], + [18, 14, 29, 27, 30, 38, 26, 26], + [41, 26, 28, 36, 18, 38, 16, 26]]) + b_eq = np.array([7872, 10466, 11322, 12058]) + c = np.array([2, 10, 13, 17, 7, 5, 7, 3]) + + res = milp(c=c, constraints=(A_eq, b_eq, b_eq), integrality=integrality) + + np.testing.assert_allclose(res.fun, 1854) + + +def test_infeasible_prob_16609(): + # Ensure presolve does not mark trivially infeasible problems + # as Optimal -- see gh-16609 + c = [1.0, 0.0] + integrality = [0, 1] + + lb = [0, -np.inf] + ub = [np.inf, np.inf] + bounds = Bounds(lb, ub) + + A_eq = [[0.0, 1.0]] + b_eq = [0.5] + constraints = LinearConstraint(A_eq, b_eq, b_eq) + + res = milp(c, integrality=integrality, bounds=bounds, + constraints=constraints) + np.testing.assert_equal(res.status, 2) + + +_msg_time = "Time limit reached. (HiGHS Status 13:" +_msg_iter = "Iteration limit reached. (HiGHS Status 14:" + + +@pytest.mark.skipif(np.intp(0).itemsize < 8, + reason="Unhandled 32-bit GCC FP bug") +@pytest.mark.slow +@pytest.mark.parametrize(["options", "msg"], [({"time_limit": 0.1}, _msg_time), + ({"node_limit": 1}, _msg_iter)]) +def test_milp_timeout_16545(options, msg): + # Ensure solution is not thrown away if MILP solver times out + # -- see gh-16545 + rng = np.random.default_rng(5123833489170494244) + A = rng.integers(0, 5, size=(100, 100)) + b_lb = np.full(100, fill_value=-np.inf) + b_ub = np.full(100, fill_value=25) + constraints = LinearConstraint(A, b_lb, b_ub) + variable_lb = np.zeros(100) + variable_ub = np.ones(100) + variable_bounds = Bounds(variable_lb, variable_ub) + integrality = np.ones(100) + c_vector = -np.ones(100) + res = milp( + c_vector, + integrality=integrality, + bounds=variable_bounds, + constraints=constraints, + options=options, + ) + + assert res.message.startswith(msg) + assert res["x"] is not None + + # ensure solution is feasible + x = res["x"] + tol = 1e-8 # sometimes needed due to finite numerical precision + assert np.all(b_lb - tol <= A @ x) and np.all(A @ x <= b_ub + tol) + assert np.all(variable_lb - tol <= x) and np.all(x <= variable_ub + tol) + assert np.allclose(x, np.round(x)) + + +def test_three_constraints_16878(): + # `milp` failed when exactly three constraints were passed + # Ensure that this is no longer the case. 
+ rng = np.random.default_rng(5123833489170494244) + A = rng.integers(0, 5, size=(6, 6)) + bl = np.full(6, fill_value=-np.inf) + bu = np.full(6, fill_value=10) + constraints = [LinearConstraint(A[:2], bl[:2], bu[:2]), + LinearConstraint(A[2:4], bl[2:4], bu[2:4]), + LinearConstraint(A[4:], bl[4:], bu[4:])] + constraints2 = [(A[:2], bl[:2], bu[:2]), + (A[2:4], bl[2:4], bu[2:4]), + (A[4:], bl[4:], bu[4:])] + lb = np.zeros(6) + ub = np.ones(6) + variable_bounds = Bounds(lb, ub) + c = -np.ones(6) + res1 = milp(c, bounds=variable_bounds, constraints=constraints) + res2 = milp(c, bounds=variable_bounds, constraints=constraints2) + ref = milp(c, bounds=variable_bounds, constraints=(A, bl, bu)) + assert res1.success and res2.success + assert_allclose(res1.x, ref.x) + assert_allclose(res2.x, ref.x) + + +@pytest.mark.xslow +def test_mip_rel_gap_passdown(): + # Solve problem with decreasing mip_rel_gap to make sure mip_gap decreases + # Adapted from test_linprog::TestLinprogHiGHSMIP::test_mip_rel_gap_passdown + # MIP taken from test_milp_6 above + A_eq = np.array([[22, 13, 26, 33, 21, 3, 14, 26], + [39, 16, 22, 28, 26, 30, 23, 24], + [18, 14, 29, 27, 30, 38, 26, 26], + [41, 26, 28, 36, 18, 38, 16, 26]]) + b_eq = np.array([7872, 10466, 11322, 12058]) + c = np.array([2, 10, 13, 17, 7, 5, 7, 3]) + + mip_rel_gaps = [0.25, 0.01, 0.001] + sol_mip_gaps = [] + for mip_rel_gap in mip_rel_gaps: + res = milp(c=c, bounds=(0, np.inf), constraints=(A_eq, b_eq, b_eq), + integrality=True, options={"mip_rel_gap": mip_rel_gap}) + # assert that the solution actually has mip_gap lower than the + # required mip_rel_gap supplied + assert res.mip_gap <= mip_rel_gap + # check that `res.mip_gap` is as defined in the documentation + assert res.mip_gap == (res.fun - res.mip_dual_bound)/res.fun + sol_mip_gaps.append(res.mip_gap) + + # make sure that the mip_rel_gap parameter is actually doing something + # check that differences between solution gaps are declining + # monotonically with the mip_rel_gap parameter.
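+    # mip_rel_gaps is ordered from loose to tight, so the achieved gaps
+    # should decrease as well, making every np.diff entry negative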
+ assert np.all(np.diff(sol_mip_gaps) < 0) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_minimize_constrained.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_minimize_constrained.py new file mode 100644 index 0000000000000000000000000000000000000000..6dad4bad5a3965c6c414be4a1cd0cb097ccaff1c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_minimize_constrained.py @@ -0,0 +1,808 @@ +import numpy as np +import pytest +from scipy.linalg import block_diag +from scipy.sparse import csc_matrix +from numpy.testing import (TestCase, assert_array_almost_equal, + assert_array_less, assert_, assert_allclose, + suppress_warnings) +from scipy.optimize import (NonlinearConstraint, + LinearConstraint, + Bounds, + minimize, + BFGS, + SR1) + + +class Maratos: + """Problem 15.4 from Nocedal and Wright + + The following optimization problem: + minimize 2*(x[0]**2 + x[1]**2 - 1) - x[0] + Subject to: x[0]**2 + x[1]**2 - 1 = 0 + """ + + def __init__(self, degrees=60, constr_jac=None, constr_hess=None): + rads = degrees/180*np.pi + self.x0 = [np.cos(rads), np.sin(rads)] + self.x_opt = np.array([1.0, 0.0]) + self.constr_jac = constr_jac + self.constr_hess = constr_hess + self.bounds = None + + def fun(self, x): + return 2*(x[0]**2 + x[1]**2 - 1) - x[0] + + def grad(self, x): + return np.array([4*x[0]-1, 4*x[1]]) + + def hess(self, x): + return 4*np.eye(2) + + @property + def constr(self): + def fun(x): + return x[0]**2 + x[1]**2 + + if self.constr_jac is None: + def jac(x): + return [[2*x[0], 2*x[1]]] + else: + jac = self.constr_jac + + if self.constr_hess is None: + def hess(x, v): + return 2*v[0]*np.eye(2) + else: + hess = self.constr_hess + + return NonlinearConstraint(fun, 1, 1, jac, hess) + + +class MaratosTestArgs: + """Problem 15.4 from Nocedal and Wright + + The following optimization problem: + minimize 2*(x[0]**2 + x[1]**2 - 1) - x[0] + Subject to: x[0]**2 + x[1]**2 - 1 = 0 + """ + + def __init__(self, a, b, degrees=60, constr_jac=None, constr_hess=None): + rads = degrees/180*np.pi + self.x0 = [np.cos(rads), np.sin(rads)] + self.x_opt = np.array([1.0, 0.0]) + self.constr_jac = constr_jac + self.constr_hess = constr_hess + self.a = a + self.b = b + self.bounds = None + + def _test_args(self, a, b): + if self.a != a or self.b != b: + raise ValueError() + + def fun(self, x, a, b): + self._test_args(a, b) + return 2*(x[0]**2 + x[1]**2 - 1) - x[0] + + def grad(self, x, a, b): + self._test_args(a, b) + return np.array([4*x[0]-1, 4*x[1]]) + + def hess(self, x, a, b): + self._test_args(a, b) + return 4*np.eye(2) + + @property + def constr(self): + def fun(x): + return x[0]**2 + x[1]**2 + + if self.constr_jac is None: + def jac(x): + return [[2*x[0], 2*x[1]]] + else: + jac = self.constr_jac + + if self.constr_hess is None: + def hess(x, v): + return 2*v[0]*np.eye(2) + else: + hess = self.constr_hess + + return NonlinearConstraint(fun, 1, 1, jac, hess) + + +class MaratosGradInFunc: + """Problem 15.4 from Nocedal and Wright + + The following optimization problem: + minimize 2*(x[0]**2 + x[1]**2 - 1) - x[0] + Subject to: x[0]**2 + x[1]**2 - 1 = 0 + """ + + def __init__(self, degrees=60, constr_jac=None, constr_hess=None): + rads = degrees/180*np.pi + self.x0 = [np.cos(rads), np.sin(rads)] + self.x_opt = np.array([1.0, 0.0]) + self.constr_jac = constr_jac + self.constr_hess = constr_hess + self.bounds = None + + def fun(self, x): + return (2*(x[0]**2 + x[1]**2 - 1) - x[0], + np.array([4*x[0]-1, 4*x[1]])) + + @property +
def grad(self): + return True + + def hess(self, x): + return 4*np.eye(2) + + @property + def constr(self): + def fun(x): + return x[0]**2 + x[1]**2 + + if self.constr_jac is None: + def jac(x): + return [[4*x[0], 4*x[1]]] + else: + jac = self.constr_jac + + if self.constr_hess is None: + def hess(x, v): + return 2*v[0]*np.eye(2) + else: + hess = self.constr_hess + + return NonlinearConstraint(fun, 1, 1, jac, hess) + + +class HyperbolicIneq: + """Problem 15.1 from Nocedal and Wright + + The following optimization problem: + minimize 1/2*(x[0] - 2)**2 + 1/2*(x[1] - 1/2)**2 + Subject to: 1/(x[0] + 1) - x[1] >= 1/4 + x[0] >= 0 + x[1] >= 0 + """ + def __init__(self, constr_jac=None, constr_hess=None): + self.x0 = [0, 0] + self.x_opt = [1.952823, 0.088659] + self.constr_jac = constr_jac + self.constr_hess = constr_hess + self.bounds = Bounds(0, np.inf) + + def fun(self, x): + return 1/2*(x[0] - 2)**2 + 1/2*(x[1] - 1/2)**2 + + def grad(self, x): + return [x[0] - 2, x[1] - 1/2] + + def hess(self, x): + return np.eye(2) + + @property + def constr(self): + def fun(x): + return 1/(x[0] + 1) - x[1] + + if self.constr_jac is None: + def jac(x): + return [[-1/(x[0] + 1)**2, -1]] + else: + jac = self.constr_jac + + if self.constr_hess is None: + def hess(x, v): + return 2*v[0]*np.array([[1/(x[0] + 1)**3, 0], + [0, 0]]) + else: + hess = self.constr_hess + + return NonlinearConstraint(fun, 0.25, np.inf, jac, hess) + + +class Rosenbrock: + """Rosenbrock function. + + The following optimization problem: + minimize sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0) + """ + + def __init__(self, n=2, random_state=0): + rng = np.random.RandomState(random_state) + self.x0 = rng.uniform(-1, 1, n) + self.x_opt = np.ones(n) + self.bounds = None + + def fun(self, x): + x = np.asarray(x) + r = np.sum(100.0 * (x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0, + axis=0) + return r + + def grad(self, x): + x = np.asarray(x) + xm = x[1:-1] + xm_m1 = x[:-2] + xm_p1 = x[2:] + der = np.zeros_like(x) + der[1:-1] = (200 * (xm - xm_m1**2) - + 400 * (xm_p1 - xm**2) * xm - 2 * (1 - xm)) + der[0] = -400 * x[0] * (x[1] - x[0]**2) - 2 * (1 - x[0]) + der[-1] = 200 * (x[-1] - x[-2]**2) + return der + + def hess(self, x): + x = np.atleast_1d(x) + H = np.diag(-400 * x[:-1], 1) - np.diag(400 * x[:-1], -1) + diagonal = np.zeros(len(x), dtype=x.dtype) + diagonal[0] = 1200 * x[0]**2 - 400 * x[1] + 2 + diagonal[-1] = 200 + diagonal[1:-1] = 202 + 1200 * x[1:-1]**2 - 400 * x[2:] + H = H + np.diag(diagonal) + return H + + @property + def constr(self): + return () + + +class IneqRosenbrock(Rosenbrock): + """Rosenbrock subject to inequality constraints. + + The following optimization problem: + minimize sum(100.0*(x[1] - x[0]**2)**2.0 + (1 - x[0])**2) + subject to: x[0] + 2 x[1] <= 1 + + Taken from matlab ``fmincon`` documentation. + """ + def __init__(self, random_state=0): + Rosenbrock.__init__(self, 2, random_state) + self.x0 = [-1, -0.5] + self.x_opt = [0.5022, 0.2489] + self.bounds = None + + @property + def constr(self): + A = [[1, 2]] + b = 1 + return LinearConstraint(A, -np.inf, b) + + +class BoundedRosenbrock(Rosenbrock): + """Rosenbrock subject to inequality constraints. + + The following optimization problem: + minimize sum(100.0*(x[1] - x[0]**2)**2.0 + (1 - x[0])**2) + subject to: -2 <= x[0] <= 0 + 0 <= x[1] <= 2 + + Taken from matlab ``fmincon`` documentation. 
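+
+    Note that only bound constraints are imposed in this problem:
+    ``constr`` is the empty tuple inherited from `Rosenbrock`.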
+    """
+    def __init__(self, random_state=0):
+        Rosenbrock.__init__(self, 2, random_state)
+        self.x0 = [-0.2, 0.2]
+        self.x_opt = None
+        self.bounds = Bounds([-2, 0], [0, 2])
+
+
+class EqIneqRosenbrock(Rosenbrock):
+    """Rosenbrock subject to equality and inequality constraints.
+
+    The following optimization problem:
+        minimize sum(100.0*(x[1] - x[0]**2)**2.0 + (1 - x[0])**2)
+        subject to: x[0] + 2 x[1] <= 1
+                    2 x[0] + x[1] = 1
+
+    Taken from matlab ``fmincon`` documentation.
+    """
+    def __init__(self, random_state=0):
+        Rosenbrock.__init__(self, 2, random_state)
+        self.x0 = [-1, -0.5]
+        self.x_opt = [0.41494, 0.17011]
+        self.bounds = None
+
+    @property
+    def constr(self):
+        A_ineq = [[1, 2]]
+        b_ineq = 1
+        A_eq = [[2, 1]]
+        b_eq = 1
+        return (LinearConstraint(A_ineq, -np.inf, b_ineq),
+                LinearConstraint(A_eq, b_eq, b_eq))
+
+
+class Elec:
+    """Distribution of electrons on a sphere.
+
+    Problem no. 2 from COPS collection [1]_. Find
+    the equilibrium state distribution (of minimal
+    potential) of the electrons positioned on a
+    conducting sphere.
+
+    References
+    ----------
+    .. [1] E. D. Dolan, J. J. Mor\'{e}, and T. S. Munson,
+           "Benchmarking optimization software with COPS 3.0.",
+           Argonne National Lab., Argonne, IL (US), 2004.
+    """
+    def __init__(self, n_electrons=200, random_state=0,
+                 constr_jac=None, constr_hess=None):
+        self.n_electrons = n_electrons
+        self.rng = np.random.RandomState(random_state)
+        # Initial Guess
+        phi = self.rng.uniform(0, 2 * np.pi, self.n_electrons)
+        theta = self.rng.uniform(-np.pi, np.pi, self.n_electrons)
+        x = np.cos(theta) * np.cos(phi)
+        y = np.cos(theta) * np.sin(phi)
+        z = np.sin(theta)
+        self.x0 = np.hstack((x, y, z))
+        self.x_opt = None
+        self.constr_jac = constr_jac
+        self.constr_hess = constr_hess
+        self.bounds = None
+
+    def _get_cordinates(self, x):
+        x_coord = x[:self.n_electrons]
+        y_coord = x[self.n_electrons:2 * self.n_electrons]
+        z_coord = x[2 * self.n_electrons:]
+        return x_coord, y_coord, z_coord
+
+    def _compute_coordinate_deltas(self, x):
+        x_coord, y_coord, z_coord = self._get_cordinates(x)
+        dx = x_coord[:, None] - x_coord
+        dy = y_coord[:, None] - y_coord
+        dz = z_coord[:, None] - z_coord
+        return dx, dy, dz
+
+    def fun(self, x):
+        dx, dy, dz = self._compute_coordinate_deltas(x)
+        with np.errstate(divide='ignore'):
+            dm1 = (dx**2 + dy**2 + dz**2) ** -0.5
+        dm1[np.diag_indices_from(dm1)] = 0
+        return 0.5 * np.sum(dm1)
+
+    def grad(self, x):
+        dx, dy, dz = self._compute_coordinate_deltas(x)
+
+        with np.errstate(divide='ignore'):
+            dm3 = (dx**2 + dy**2 + dz**2) ** -1.5
+        dm3[np.diag_indices_from(dm3)] = 0
+
+        grad_x = -np.sum(dx * dm3, axis=1)
+        grad_y = -np.sum(dy * dm3, axis=1)
+        grad_z = -np.sum(dz * dm3, axis=1)
+
+        return np.hstack((grad_x, grad_y, grad_z))
+
+    def hess(self, x):
+        dx, dy, dz = self._compute_coordinate_deltas(x)
+        d = (dx**2 + dy**2 + dz**2) ** 0.5
+
+        with np.errstate(divide='ignore'):
+            dm3 = d ** -3
+            dm5 = d ** -5
+
+        i = np.arange(self.n_electrons)
+        dm3[i, i] = 0
+        dm5[i, i] = 0
+
+        Hxx = dm3 - 3 * dx**2 * dm5
+        Hxx[i, i] = -np.sum(Hxx, axis=1)
+
+        Hxy = -3 * dx * dy * dm5
+        Hxy[i, i] = -np.sum(Hxy, axis=1)
+
+        Hxz = -3 * dx * dz * dm5
+        Hxz[i, i] = -np.sum(Hxz, axis=1)
+
+        Hyy = dm3 - 3 * dy**2 * dm5
+        Hyy[i, i] = -np.sum(Hyy, axis=1)
+
+        Hyz = -3 * dy * dz * dm5
+        Hyz[i, i] = -np.sum(Hyz, axis=1)
+
+        Hzz = dm3 - 3 * dz**2 * dm5
+        Hzz[i, i] = -np.sum(Hzz, axis=1)
+
+        H = np.vstack((
+            np.hstack((Hxx, Hxy, Hxz)),
+            np.hstack((Hxy, Hyy, Hyz)),
+            np.hstack((Hxz, Hyz, Hzz))
+        ))
+
+        return H
+
+    @property
+    def
constr(self): + def fun(x): + x_coord, y_coord, z_coord = self._get_cordinates(x) + return x_coord**2 + y_coord**2 + z_coord**2 - 1 + + if self.constr_jac is None: + def jac(x): + x_coord, y_coord, z_coord = self._get_cordinates(x) + Jx = 2 * np.diag(x_coord) + Jy = 2 * np.diag(y_coord) + Jz = 2 * np.diag(z_coord) + return csc_matrix(np.hstack((Jx, Jy, Jz))) + else: + jac = self.constr_jac + + if self.constr_hess is None: + def hess(x, v): + D = 2 * np.diag(v) + return block_diag(D, D, D) + else: + hess = self.constr_hess + + return NonlinearConstraint(fun, -np.inf, 0, jac, hess) + + +class TestTrustRegionConstr(TestCase): + + @pytest.mark.slow + def test_list_of_problems(self): + list_of_problems = [Maratos(), + Maratos(constr_hess='2-point'), + Maratos(constr_hess=SR1()), + Maratos(constr_jac='2-point', constr_hess=SR1()), + MaratosGradInFunc(), + HyperbolicIneq(), + HyperbolicIneq(constr_hess='3-point'), + HyperbolicIneq(constr_hess=BFGS()), + HyperbolicIneq(constr_jac='3-point', + constr_hess=BFGS()), + Rosenbrock(), + IneqRosenbrock(), + EqIneqRosenbrock(), + BoundedRosenbrock(), + Elec(n_electrons=2), + Elec(n_electrons=2, constr_hess='2-point'), + Elec(n_electrons=2, constr_hess=SR1()), + Elec(n_electrons=2, constr_jac='3-point', + constr_hess=SR1())] + + for prob in list_of_problems: + for grad in (prob.grad, '3-point', False): + for hess in (prob.hess, + '3-point', + SR1(), + BFGS(exception_strategy='damp_update'), + BFGS(exception_strategy='skip_update')): + + # Remove exceptions + if grad in ('2-point', '3-point', 'cs', False) and \ + hess in ('2-point', '3-point', 'cs'): + continue + if prob.grad is True and grad in ('3-point', False): + continue + with suppress_warnings() as sup: + sup.filter(UserWarning, "delta_grad == 0.0") + result = minimize(prob.fun, prob.x0, + method='trust-constr', + jac=grad, hess=hess, + bounds=prob.bounds, + constraints=prob.constr) + + if prob.x_opt is not None: + assert_array_almost_equal(result.x, prob.x_opt, + decimal=5) + # gtol + if result.status == 1: + assert_array_less(result.optimality, 1e-8) + # xtol + if result.status == 2: + assert_array_less(result.tr_radius, 1e-8) + + if result.method == "tr_interior_point": + assert_array_less(result.barrier_parameter, 1e-8) + # max iter + if result.status in (0, 3): + raise RuntimeError("Invalid termination condition.") + + def test_default_jac_and_hess(self): + def fun(x): + return (x - 1) ** 2 + bounds = [(-2, 2)] + res = minimize(fun, x0=[-1.5], bounds=bounds, method='trust-constr') + assert_array_almost_equal(res.x, 1, decimal=5) + + def test_default_hess(self): + def fun(x): + return (x - 1) ** 2 + bounds = [(-2, 2)] + res = minimize(fun, x0=[-1.5], bounds=bounds, method='trust-constr', + jac='2-point') + assert_array_almost_equal(res.x, 1, decimal=5) + + def test_no_constraints(self): + prob = Rosenbrock() + result = minimize(prob.fun, prob.x0, + method='trust-constr', + jac=prob.grad, hess=prob.hess) + result1 = minimize(prob.fun, prob.x0, + method='L-BFGS-B', + jac='2-point') + + result2 = minimize(prob.fun, prob.x0, + method='L-BFGS-B', + jac='3-point') + assert_array_almost_equal(result.x, prob.x_opt, decimal=5) + assert_array_almost_equal(result1.x, prob.x_opt, decimal=5) + assert_array_almost_equal(result2.x, prob.x_opt, decimal=5) + + def test_hessp(self): + prob = Maratos() + + def hessp(x, p): + H = prob.hess(x) + return H.dot(p) + + result = minimize(prob.fun, prob.x0, + method='trust-constr', + jac=prob.grad, hessp=hessp, + bounds=prob.bounds, + constraints=prob.constr) + + if 
prob.x_opt is not None: + assert_array_almost_equal(result.x, prob.x_opt, decimal=2) + + # gtol + if result.status == 1: + assert_array_less(result.optimality, 1e-8) + # xtol + if result.status == 2: + assert_array_less(result.tr_radius, 1e-8) + + if result.method == "tr_interior_point": + assert_array_less(result.barrier_parameter, 1e-8) + # max iter + if result.status in (0, 3): + raise RuntimeError("Invalid termination condition.") + + def test_args(self): + prob = MaratosTestArgs("a", 234) + + result = minimize(prob.fun, prob.x0, ("a", 234), + method='trust-constr', + jac=prob.grad, hess=prob.hess, + bounds=prob.bounds, + constraints=prob.constr) + + if prob.x_opt is not None: + assert_array_almost_equal(result.x, prob.x_opt, decimal=2) + + # gtol + if result.status == 1: + assert_array_less(result.optimality, 1e-8) + # xtol + if result.status == 2: + assert_array_less(result.tr_radius, 1e-8) + if result.method == "tr_interior_point": + assert_array_less(result.barrier_parameter, 1e-8) + # max iter + if result.status in (0, 3): + raise RuntimeError("Invalid termination condition.") + + def test_raise_exception(self): + prob = Maratos() + message = "Whenever the gradient is estimated via finite-differences" + with pytest.raises(ValueError, match=message): + minimize(prob.fun, prob.x0, method='trust-constr', jac='2-point', + hess='2-point', constraints=prob.constr) + + def test_issue_9044(self): + # https://github.com/scipy/scipy/issues/9044 + # Test the returned `OptimizeResult` contains keys consistent with + # other solvers. + + def callback(x, info): + assert_('nit' in info) + assert_('niter' in info) + + result = minimize(lambda x: x**2, [0], jac=lambda x: 2*x, + hess=lambda x: 2, callback=callback, + method='trust-constr') + assert_(result.get('success')) + assert_(result.get('nit', -1) == 1) + + # Also check existence of the 'niter' attribute, for backward + # compatibility + assert_(result.get('niter', -1) == 1) + + def test_issue_15093(self): + # scipy docs define bounds as inclusive, so it shouldn't be + # an issue to set x0 on the bounds even if keep_feasible is + # True. Previously, trust-constr would treat bounds as + # exclusive. + + x0 = np.array([0., 0.5]) + + def obj(x): + x1 = x[0] + x2 = x[1] + return x1 ** 2 + x2 ** 2 + + bounds = Bounds(np.array([0., 0.]), np.array([1., 1.]), + keep_feasible=True) + + with suppress_warnings() as sup: + sup.filter(UserWarning, "delta_grad == 0.0") + result = minimize( + method='trust-constr', + fun=obj, + x0=x0, + bounds=bounds) + + assert result['success'] + +class TestEmptyConstraint(TestCase): + """ + Here we minimize x^2+y^2 subject to x^2-y^2>1. + The actual minimum is at (0, 0) which fails the constraint. + Therefore we will find a minimum on the boundary at (+/-1, 0). + + When minimizing on the boundary, optimize uses a set of + constraints that removes the constraint that sets that + boundary. In our case, there's only one constraint, so + the result is an empty constraint. + + This tests that the empty constraint works. 
+ """ + def test_empty_constraint(self): + + def function(x): + return x[0]**2 + x[1]**2 + + def functionjacobian(x): + return np.array([2.*x[0], 2.*x[1]]) + + def functionhvp(x, v): + return 2.*v + + def constraint(x): + return np.array([x[0]**2 - x[1]**2]) + + def constraintjacobian(x): + return np.array([[2*x[0], -2*x[1]]]) + + def constraintlcoh(x, v): + return np.array([[2., 0.], [0., -2.]]) * v[0] + + constraint = NonlinearConstraint(constraint, 1., np.inf, + constraintjacobian, constraintlcoh) + + startpoint = [1., 2.] + + bounds = Bounds([-np.inf, -np.inf], [np.inf, np.inf]) + + result = minimize( + function, + startpoint, + method='trust-constr', + jac=functionjacobian, + hessp=functionhvp, + constraints=[constraint], + bounds=bounds, + ) + + assert_array_almost_equal(abs(result.x), np.array([1, 0]), decimal=4) + + +def test_bug_11886(): + def opt(x): + return x[0]**2+x[1]**2 + + with np.testing.suppress_warnings() as sup: + sup.filter(PendingDeprecationWarning) + A = np.matrix(np.diag([1, 1])) + lin_cons = LinearConstraint(A, -1, np.inf) + # just checking that there are no errors + minimize(opt, 2*[1], constraints = lin_cons) + + +# Remove xfail when gh-11649 is resolved +@pytest.mark.xfail(reason="Known bug in trust-constr; see gh-11649.", + strict=True) +def test_gh11649(): + bnds = Bounds(lb=[-1, -1], ub=[1, 1], keep_feasible=True) + + def assert_inbounds(x): + assert np.all(x >= bnds.lb) + assert np.all(x <= bnds.ub) + + def obj(x): + assert_inbounds(x) + return np.exp(x[0])*(4*x[0]**2 + 2*x[1]**2 + 4*x[0]*x[1] + 2*x[1] + 1) + + def nce(x): + assert_inbounds(x) + return x[0]**2 + x[1] + + def nci(x): + assert_inbounds(x) + return x[0]*x[1] + + x0 = np.array((0.99, -0.99)) + nlcs = [NonlinearConstraint(nci, -10, np.inf), + NonlinearConstraint(nce, 1, 1)] + + res = minimize(fun=obj, x0=x0, method='trust-constr', + bounds=bnds, constraints=nlcs) + assert res.success + assert_inbounds(res.x) + assert nlcs[0].lb < nlcs[0].fun(res.x) < nlcs[0].ub + assert_allclose(nce(res.x), nlcs[1].ub) + + ref = minimize(fun=obj, x0=x0, method='slsqp', + bounds=bnds, constraints=nlcs) + assert_allclose(res.fun, ref.fun) + + +class TestBoundedNelderMead: + + @pytest.mark.parametrize('bounds, x_opt', + [(Bounds(-np.inf, np.inf), Rosenbrock().x_opt), + (Bounds(-np.inf, -0.8), [-0.8, -0.8]), + (Bounds(3.0, np.inf), [3.0, 9.0]), + (Bounds([3.0, 1.0], [4.0, 5.0]), [3., 5.]), + ]) + def test_rosen_brock_with_bounds(self, bounds, x_opt): + prob = Rosenbrock() + with suppress_warnings() as sup: + sup.filter(UserWarning, "Initial guess is not within " + "the specified bounds") + result = minimize(prob.fun, [-10, -10], + method='Nelder-Mead', + bounds=bounds) + assert np.less_equal(bounds.lb, result.x).all() + assert np.less_equal(result.x, bounds.ub).all() + assert np.allclose(prob.fun(result.x), result.fun) + assert np.allclose(result.x, x_opt, atol=1.e-3) + + def test_equal_all_bounds(self): + prob = Rosenbrock() + bounds = Bounds([4.0, 5.0], [4.0, 5.0]) + with suppress_warnings() as sup: + sup.filter(UserWarning, "Initial guess is not within " + "the specified bounds") + result = minimize(prob.fun, [-10, 8], + method='Nelder-Mead', + bounds=bounds) + assert np.allclose(result.x, [4.0, 5.0]) + + def test_equal_one_bounds(self): + prob = Rosenbrock() + bounds = Bounds([4.0, 5.0], [4.0, 20.0]) + with suppress_warnings() as sup: + sup.filter(UserWarning, "Initial guess is not within " + "the specified bounds") + result = minimize(prob.fun, [-10, 8], + method='Nelder-Mead', + bounds=bounds) + assert 
np.allclose(result.x, [4.0, 16.0]) + + def test_invalid_bounds(self): + prob = Rosenbrock() + message = 'An upper bound is less than the corresponding lower bound.' + with pytest.raises(ValueError, match=message): + bounds = Bounds([-np.inf, 1.0], [4.0, -5.0]) + minimize(prob.fun, [-10, 3], + method='Nelder-Mead', + bounds=bounds) + + @pytest.mark.xfail(reason="Failing on Azure Linux and macOS builds, " + "see gh-13846") + def test_outside_bounds_warning(self): + prob = Rosenbrock() + message = "Initial guess is not within the specified bounds" + with pytest.warns(UserWarning, match=message): + bounds = Bounds([-np.inf, 1.0], [4.0, 5.0]) + minimize(prob.fun, [-10, 8], + method='Nelder-Mead', + bounds=bounds) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_minpack.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_minpack.py new file mode 100644 index 0000000000000000000000000000000000000000..4b2ad1e3528400e0812a2d60c64a9724dea7fa6c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_minpack.py @@ -0,0 +1,1099 @@ +""" +Unit tests for optimization routines from minpack.py. +""" +import warnings +import pytest + +from numpy.testing import (assert_, assert_almost_equal, assert_array_equal, + assert_array_almost_equal, assert_allclose, + assert_warns, suppress_warnings) +from pytest import raises as assert_raises +import numpy as np +from numpy import array, float64 +from multiprocessing.pool import ThreadPool + +from scipy import optimize, linalg +from scipy.special import lambertw +from scipy.optimize._minpack_py import leastsq, curve_fit, fixed_point +from scipy.optimize import OptimizeWarning +from scipy.optimize._minimize import Bounds + + +class ReturnShape: + """This class exists to create a callable that does not have a '__name__' attribute. + + __init__ takes the argument 'shape', which should be a tuple of ints. + When an instance is called with a single argument 'x', it returns numpy.ones(shape). + """ + + def __init__(self, shape): + self.shape = shape + + def __call__(self, x): + return np.ones(self.shape) + + +def dummy_func(x, shape): + """A function that returns an array of ones of the given shape. + `x` is ignored. + """ + return np.ones(shape) + + +def sequence_parallel(fs): + with ThreadPool(len(fs)) as pool: + return pool.map(lambda f: f(), fs) + + +# Function and Jacobian for tests of solvers for systems of nonlinear +# equations + + +def pressure_network(flow_rates, Qtot, k): + """Evaluate non-linear equation system representing + the pressures and flows in a system of n parallel pipes:: + + f_i = P_i - P_0, for i = 1..n + f_0 = sum(Q_i) - Qtot + + where Q_i is the flow rate in pipe i and P_i the pressure in that pipe. + Pressure is modeled as a P=kQ**2 where k is a valve coefficient and + Q is the flow rate. + + Parameters + ---------- + flow_rates : float + A 1-D array of n flow rates [kg/s]. + k : float + A 1-D array of n valve coefficients [1/kg m]. + Qtot : float + A scalar, the total input flow rate [kg/s]. + + Returns + ------- + F : float + A 1-D array, F[i] == f_i. + + """ + P = k * flow_rates**2 + F = np.hstack((P[1:] - P[0], flow_rates.sum() - Qtot)) + return F + + +def pressure_network_jacobian(flow_rates, Qtot, k): + """Return the jacobian of the equation system F(flow_rates) + computed by `pressure_network` with respect to + *flow_rates*. See `pressure_network` for the detailed + description of parameters. 
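+
+    Analytically, each pressure residual ``P_i - P_0`` depends only on
+    ``Q_i`` (derivative ``2*k_i*Q_i``) and on ``Q_0`` (derivative
+    ``-2*k_0*Q_0``), while the flow-balance residual has derivative 1
+    with respect to every flow rate; the matrix assembled below follows
+    this layout row by row.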
+
+    Returns
+    -------
+    jac : float
+        *n* by *n* matrix ``df_i/dQ_i`` where ``n = len(flow_rates)``
+        and *f_i* and *Q_i* are described in the doc for `pressure_network`
+    """
+    n = len(flow_rates)
+    # d(P_i - P_0)/dQ_i = 2*k_i*Q_i for the pressure residuals, i = 1..n-1
+    pdiff = np.diag(flow_rates[1:] * 2 * k[1:])
+
+    jac = np.empty((n, n))
+    jac[:n-1, 1:] = pdiff
+    # each pressure residual also depends on Q_0: d(P_i - P_0)/dQ_0
+    jac[:n-1, 0] = -2 * flow_rates[0] * k[0]
+    # the flow-balance residual sum(Q) - Qtot has unit derivatives
+    jac[n-1, :] = np.ones(n)
+
+    return jac
+
+
+def pressure_network_fun_and_grad(flow_rates, Qtot, k):
+    return (pressure_network(flow_rates, Qtot, k),
+            pressure_network_jacobian(flow_rates, Qtot, k))
+
+
+class TestFSolve:
+    def test_pressure_network_no_gradient(self):
+        # fsolve without gradient, equal pipes -> equal flows.
+        k = np.full(4, 0.5)
+        Qtot = 4
+        initial_guess = array([2., 0., 2., 0.])
+        final_flows, info, ier, mesg = optimize.fsolve(
+            pressure_network, initial_guess, args=(Qtot, k),
+            full_output=True)
+        assert_array_almost_equal(final_flows, np.ones(4))
+        assert_(ier == 1, mesg)
+
+    def test_pressure_network_with_gradient(self):
+        # fsolve with gradient, equal pipes -> equal flows
+        k = np.full(4, 0.5)
+        Qtot = 4
+        initial_guess = array([2., 0., 2., 0.])
+        final_flows = optimize.fsolve(
+            pressure_network, initial_guess, args=(Qtot, k),
+            fprime=pressure_network_jacobian)
+        assert_array_almost_equal(final_flows, np.ones(4))
+
+    def test_wrong_shape_func_callable(self):
+        func = ReturnShape(1)
+        # x0 is a list of two elements, but func will return an array with
+        # length 1, so this should result in a TypeError.
+        x0 = [1.5, 2.0]
+        assert_raises(TypeError, optimize.fsolve, func, x0)
+
+    def test_wrong_shape_func_function(self):
+        # x0 is a list of two elements, but func will return an array with
+        # length 1, so this should result in a TypeError.
+        x0 = [1.5, 2.0]
+        assert_raises(TypeError, optimize.fsolve, dummy_func, x0, args=((1,),))
+
+    def test_wrong_shape_fprime_callable(self):
+        func = ReturnShape(1)
+        deriv_func = ReturnShape((2,2))
+        assert_raises(TypeError, optimize.fsolve, func, x0=[0,1], fprime=deriv_func)
+
+    def test_wrong_shape_fprime_function(self):
+        def func(x):
+            return dummy_func(x, (2,))
+        def deriv_func(x):
+            return dummy_func(x, (3, 3))
+        assert_raises(TypeError, optimize.fsolve, func, x0=[0,1], fprime=deriv_func)
+
+    def test_func_can_raise(self):
+        def func(*args):
+            raise ValueError('I raised')
+
+        with assert_raises(ValueError, match='I raised'):
+            optimize.fsolve(func, x0=[0])
+
+    def test_Dfun_can_raise(self):
+        def func(x):
+            return x - np.array([10])
+
+        def deriv_func(*args):
+            raise ValueError('I raised')
+
+        with assert_raises(ValueError, match='I raised'):
+            optimize.fsolve(func, x0=[0], fprime=deriv_func)
+
+    def test_float32(self):
+        def func(x):
+            return np.array([x[0] - 100, x[1] - 1000], dtype=np.float32) ** 2
+        p = optimize.fsolve(func, np.array([1, 1], np.float32))
+        assert_allclose(func(p), [0, 0], atol=1e-3)
+
+    def test_reentrant_func(self):
+        def func(*args):
+            self.test_pressure_network_no_gradient()
+            return pressure_network(*args)
+
+        # fsolve without gradient, equal pipes -> equal flows.
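+        # (The nested call to test_pressure_network_no_gradient inside
+        # func above makes this a reentrancy check: the underlying MINPACK
+        # wrappers historically relied on global state, so invoking fsolve
+        # from within a callback is a meaningful regression test.)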
+ k = np.full(4, 0.5) + Qtot = 4 + initial_guess = array([2., 0., 2., 0.]) + final_flows, info, ier, mesg = optimize.fsolve( + func, initial_guess, args=(Qtot, k), + full_output=True) + assert_array_almost_equal(final_flows, np.ones(4)) + assert_(ier == 1, mesg) + + def test_reentrant_Dfunc(self): + def deriv_func(*args): + self.test_pressure_network_with_gradient() + return pressure_network_jacobian(*args) + + # fsolve with gradient, equal pipes -> equal flows + k = np.full(4, 0.5) + Qtot = 4 + initial_guess = array([2., 0., 2., 0.]) + final_flows = optimize.fsolve( + pressure_network, initial_guess, args=(Qtot, k), + fprime=deriv_func) + assert_array_almost_equal(final_flows, np.ones(4)) + + def test_concurrent_no_gradient(self): + v = sequence_parallel([self.test_pressure_network_no_gradient] * 10) + assert all([result is None for result in v]) + + def test_concurrent_with_gradient(self): + v = sequence_parallel([self.test_pressure_network_with_gradient] * 10) + assert all([result is None for result in v]) + + +class TestRootHybr: + def test_pressure_network_no_gradient(self): + # root/hybr without gradient, equal pipes -> equal flows + k = np.full(4, 0.5) + Qtot = 4 + initial_guess = array([2., 0., 2., 0.]) + final_flows = optimize.root(pressure_network, initial_guess, + method='hybr', args=(Qtot, k)).x + assert_array_almost_equal(final_flows, np.ones(4)) + + def test_pressure_network_with_gradient(self): + # root/hybr with gradient, equal pipes -> equal flows + k = np.full(4, 0.5) + Qtot = 4 + initial_guess = array([[2., 0., 2., 0.]]) + final_flows = optimize.root(pressure_network, initial_guess, + args=(Qtot, k), method='hybr', + jac=pressure_network_jacobian).x + assert_array_almost_equal(final_flows, np.ones(4)) + + def test_pressure_network_with_gradient_combined(self): + # root/hybr with gradient and function combined, equal pipes -> equal + # flows + k = np.full(4, 0.5) + Qtot = 4 + initial_guess = array([2., 0., 2., 0.]) + final_flows = optimize.root(pressure_network_fun_and_grad, + initial_guess, args=(Qtot, k), + method='hybr', jac=True).x + assert_array_almost_equal(final_flows, np.ones(4)) + + +class TestRootLM: + def test_pressure_network_no_gradient(self): + # root/lm without gradient, equal pipes -> equal flows + k = np.full(4, 0.5) + Qtot = 4 + initial_guess = array([2., 0., 2., 0.]) + final_flows = optimize.root(pressure_network, initial_guess, + method='lm', args=(Qtot, k)).x + assert_array_almost_equal(final_flows, np.ones(4)) + + +class TestLeastSq: + def setup_method(self): + x = np.linspace(0, 10, 40) + a,b,c = 3.1, 42, -304.2 + self.x = x + self.abc = a,b,c + y_true = a*x**2 + b*x + c + np.random.seed(0) + self.y_meas = y_true + 0.01*np.random.standard_normal(y_true.shape) + + def residuals(self, p, y, x): + a,b,c = p + err = y-(a*x**2 + b*x + c) + return err + + def residuals_jacobian(self, _p, _y, x): + return -np.vstack([x**2, x, np.ones_like(x)]).T + + def test_basic(self): + p0 = array([0,0,0]) + params_fit, ier = leastsq(self.residuals, p0, + args=(self.y_meas, self.x)) + assert_(ier in (1,2,3,4), 'solution not found (ier=%d)' % ier) + # low precision due to random + assert_array_almost_equal(params_fit, self.abc, decimal=2) + + def test_basic_with_gradient(self): + p0 = array([0,0,0]) + params_fit, ier = leastsq(self.residuals, p0, + args=(self.y_meas, self.x), + Dfun=self.residuals_jacobian) + assert_(ier in (1,2,3,4), 'solution not found (ier=%d)' % ier) + # low precision due to random + assert_array_almost_equal(params_fit, self.abc, decimal=2) + + def 
test_full_output(self): + p0 = array([[0,0,0]]) + full_output = leastsq(self.residuals, p0, + args=(self.y_meas, self.x), + full_output=True) + params_fit, cov_x, infodict, mesg, ier = full_output + assert_(ier in (1,2,3,4), 'solution not found: %s' % mesg) + + def test_input_untouched(self): + p0 = array([0,0,0],dtype=float64) + p0_copy = array(p0, copy=True) + full_output = leastsq(self.residuals, p0, + args=(self.y_meas, self.x), + full_output=True) + params_fit, cov_x, infodict, mesg, ier = full_output + assert_(ier in (1,2,3,4), 'solution not found: %s' % mesg) + assert_array_equal(p0, p0_copy) + + def test_wrong_shape_func_callable(self): + func = ReturnShape(1) + # x0 is a list of two elements, but func will return an array with + # length 1, so this should result in a TypeError. + x0 = [1.5, 2.0] + assert_raises(TypeError, optimize.leastsq, func, x0) + + def test_wrong_shape_func_function(self): + # x0 is a list of two elements, but func will return an array with + # length 1, so this should result in a TypeError. + x0 = [1.5, 2.0] + assert_raises(TypeError, optimize.leastsq, dummy_func, x0, args=((1,),)) + + def test_wrong_shape_Dfun_callable(self): + func = ReturnShape(1) + deriv_func = ReturnShape((2,2)) + assert_raises(TypeError, optimize.leastsq, func, x0=[0,1], Dfun=deriv_func) + + def test_wrong_shape_Dfun_function(self): + def func(x): + return dummy_func(x, (2,)) + def deriv_func(x): + return dummy_func(x, (3, 3)) + assert_raises(TypeError, optimize.leastsq, func, x0=[0,1], Dfun=deriv_func) + + def test_float32(self): + # Regression test for gh-1447 + def func(p,x,y): + q = p[0]*np.exp(-(x-p[1])**2/(2.0*p[2]**2))+p[3] + return q - y + + x = np.array([1.475,1.429,1.409,1.419,1.455,1.519,1.472, 1.368,1.286, + 1.231], dtype=np.float32) + y = np.array([0.0168,0.0193,0.0211,0.0202,0.0171,0.0151,0.0185,0.0258, + 0.034,0.0396], dtype=np.float32) + p0 = np.array([1.0,1.0,1.0,1.0]) + p1, success = optimize.leastsq(func, p0, args=(x,y)) + + assert_(success in [1,2,3,4]) + assert_((func(p1,x,y)**2).sum() < 1e-4 * (func(p0,x,y)**2).sum()) + + def test_func_can_raise(self): + def func(*args): + raise ValueError('I raised') + + with assert_raises(ValueError, match='I raised'): + optimize.leastsq(func, x0=[0]) + + def test_Dfun_can_raise(self): + def func(x): + return x - np.array([10]) + + def deriv_func(*args): + raise ValueError('I raised') + + with assert_raises(ValueError, match='I raised'): + optimize.leastsq(func, x0=[0], Dfun=deriv_func) + + def test_reentrant_func(self): + def func(*args): + self.test_basic() + return self.residuals(*args) + + p0 = array([0,0,0]) + params_fit, ier = leastsq(func, p0, + args=(self.y_meas, self.x)) + assert_(ier in (1,2,3,4), 'solution not found (ier=%d)' % ier) + # low precision due to random + assert_array_almost_equal(params_fit, self.abc, decimal=2) + + def test_reentrant_Dfun(self): + def deriv_func(*args): + self.test_basic() + return self.residuals_jacobian(*args) + + p0 = array([0,0,0]) + params_fit, ier = leastsq(self.residuals, p0, + args=(self.y_meas, self.x), + Dfun=deriv_func) + assert_(ier in (1,2,3,4), 'solution not found (ier=%d)' % ier) + # low precision due to random + assert_array_almost_equal(params_fit, self.abc, decimal=2) + + def test_concurrent_no_gradient(self): + v = sequence_parallel([self.test_basic] * 10) + assert all([result is None for result in v]) + + def test_concurrent_with_gradient(self): + v = sequence_parallel([self.test_basic_with_gradient] * 10) + assert all([result is None for result in v]) + + def 
test_func_input_output_length_check(self): + + def func(x): + return 2 * (x[0] - 3) ** 2 + 1 + + with assert_raises(TypeError, + match='Improper input: func input vector length N='): + optimize.leastsq(func, x0=[0, 1]) + + +class TestCurveFit: + def setup_method(self): + self.y = array([1.0, 3.2, 9.5, 13.7]) + self.x = array([1.0, 2.0, 3.0, 4.0]) + + def test_one_argument(self): + def func(x,a): + return x**a + popt, pcov = curve_fit(func, self.x, self.y) + assert_(len(popt) == 1) + assert_(pcov.shape == (1,1)) + assert_almost_equal(popt[0], 1.9149, decimal=4) + assert_almost_equal(pcov[0,0], 0.0016, decimal=4) + + # Test if we get the same with full_output. Regression test for #1415. + # Also test if check_finite can be turned off. + res = curve_fit(func, self.x, self.y, + full_output=1, check_finite=False) + (popt2, pcov2, infodict, errmsg, ier) = res + assert_array_almost_equal(popt, popt2) + + def test_two_argument(self): + def func(x, a, b): + return b*x**a + popt, pcov = curve_fit(func, self.x, self.y) + assert_(len(popt) == 2) + assert_(pcov.shape == (2,2)) + assert_array_almost_equal(popt, [1.7989, 1.1642], decimal=4) + assert_array_almost_equal(pcov, [[0.0852, -0.1260], [-0.1260, 0.1912]], + decimal=4) + + def test_func_is_classmethod(self): + class test_self: + """This class tests if curve_fit passes the correct number of + arguments when the model function is a class instance method. + """ + + def func(self, x, a, b): + return b * x**a + + test_self_inst = test_self() + popt, pcov = curve_fit(test_self_inst.func, self.x, self.y) + assert_(pcov.shape == (2,2)) + assert_array_almost_equal(popt, [1.7989, 1.1642], decimal=4) + assert_array_almost_equal(pcov, [[0.0852, -0.1260], [-0.1260, 0.1912]], + decimal=4) + + def test_regression_2639(self): + # This test fails if epsfcn in leastsq is too large. 
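+        # Background: epsfcn is forwarded to MINPACK's lmdif, where the
+        # forward-difference step is roughly sqrt(epsfcn) * |x|; epsfcn
+        # defaults to the machine epsilon (~2.2e-16 for float64, giving a
+        # relative step of ~1.5e-8). Too large a step smears out the
+        # narrow Gaussian peaks in the data below.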
+ x = [574.14200000000005, 574.154, 574.16499999999996, + 574.17700000000002, 574.18799999999999, 574.19899999999996, + 574.21100000000001, 574.22199999999998, 574.23400000000004, + 574.245] + y = [859.0, 997.0, 1699.0, 2604.0, 2013.0, 1964.0, 2435.0, + 1550.0, 949.0, 841.0] + guess = [574.1861428571428, 574.2155714285715, 1302.0, 1302.0, + 0.0035019999999983615, 859.0] + good = [5.74177150e+02, 5.74209188e+02, 1.74187044e+03, 1.58646166e+03, + 1.0068462e-02, 8.57450661e+02] + + def f_double_gauss(x, x0, x1, A0, A1, sigma, c): + return (A0*np.exp(-(x-x0)**2/(2.*sigma**2)) + + A1*np.exp(-(x-x1)**2/(2.*sigma**2)) + c) + popt, pcov = curve_fit(f_double_gauss, x, y, guess, maxfev=10000) + assert_allclose(popt, good, rtol=1e-5) + + def test_pcov(self): + xdata = np.array([0, 1, 2, 3, 4, 5]) + ydata = np.array([1, 1, 5, 7, 8, 12]) + sigma = np.array([1, 2, 1, 2, 1, 2]) + + def f(x, a, b): + return a*x + b + + for method in ['lm', 'trf', 'dogbox']: + popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=sigma, + method=method) + perr_scaled = np.sqrt(np.diag(pcov)) + assert_allclose(perr_scaled, [0.20659803, 0.57204404], rtol=1e-3) + + popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=3*sigma, + method=method) + perr_scaled = np.sqrt(np.diag(pcov)) + assert_allclose(perr_scaled, [0.20659803, 0.57204404], rtol=1e-3) + + popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=sigma, + absolute_sigma=True, method=method) + perr = np.sqrt(np.diag(pcov)) + assert_allclose(perr, [0.30714756, 0.85045308], rtol=1e-3) + + popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=3*sigma, + absolute_sigma=True, method=method) + perr = np.sqrt(np.diag(pcov)) + assert_allclose(perr, [3*0.30714756, 3*0.85045308], rtol=1e-3) + + # infinite variances + + def f_flat(x, a, b): + return a*x + + pcov_expected = np.array([np.inf]*4).reshape(2, 2) + + with suppress_warnings() as sup: + sup.filter(OptimizeWarning, + "Covariance of the parameters could not be estimated") + popt, pcov = curve_fit(f_flat, xdata, ydata, p0=[2, 0], sigma=sigma) + popt1, pcov1 = curve_fit(f, xdata[:2], ydata[:2], p0=[2, 0]) + + assert_(pcov.shape == (2, 2)) + assert_array_equal(pcov, pcov_expected) + + assert_(pcov1.shape == (2, 2)) + assert_array_equal(pcov1, pcov_expected) + + def test_array_like(self): + # Test sequence input. Regression test for gh-3037. 
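+        # Plain lists are accepted because curve_fit coerces xdata/ydata
+        # to float arrays internally (np.asarray_chkfinite when
+        # check_finite=True, plain np.asarray otherwise), so no explicit
+        # conversion is needed on the caller's side.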
+ def f_linear(x, a, b): + return a*x + b + + x = [1, 2, 3, 4] + y = [3, 5, 7, 9] + assert_allclose(curve_fit(f_linear, x, y)[0], [2, 1], atol=1e-10) + + def test_indeterminate_covariance(self): + # Test that a warning is returned when pcov is indeterminate + xdata = np.array([1, 2, 3, 4, 5, 6]) + ydata = np.array([1, 2, 3, 4, 5.5, 6]) + assert_warns(OptimizeWarning, curve_fit, + lambda x, a, b: a*x, xdata, ydata) + + def test_NaN_handling(self): + # Test for correct handling of NaNs in input data: gh-3422 + + # create input with NaNs + xdata = np.array([1, np.nan, 3]) + ydata = np.array([1, 2, 3]) + + assert_raises(ValueError, curve_fit, + lambda x, a, b: a*x + b, xdata, ydata) + assert_raises(ValueError, curve_fit, + lambda x, a, b: a*x + b, ydata, xdata) + + assert_raises(ValueError, curve_fit, lambda x, a, b: a*x + b, + xdata, ydata, **{"check_finite": True}) + + @staticmethod + def _check_nan_policy(f, xdata_with_nan, xdata_without_nan, + ydata_with_nan, ydata_without_nan, method): + kwargs = {'f': f, 'xdata': xdata_with_nan, 'ydata': ydata_with_nan, + 'method': method, 'check_finite': False} + # propagate test + error_msg = ("`nan_policy='propagate'` is not supported " + "by this function.") + with assert_raises(ValueError, match=error_msg): + curve_fit(**kwargs, nan_policy="propagate", maxfev=2000) + + # raise test + with assert_raises(ValueError, match="The input contains nan"): + curve_fit(**kwargs, nan_policy="raise") + + # omit test + result_with_nan, _ = curve_fit(**kwargs, nan_policy="omit") + kwargs['xdata'] = xdata_without_nan + kwargs['ydata'] = ydata_without_nan + result_without_nan, _ = curve_fit(**kwargs) + assert_allclose(result_with_nan, result_without_nan) + + # not valid policy test + error_msg = ("nan_policy must be one of " + "{'None', 'raise', 'omit'}") + with assert_raises(ValueError, match=error_msg): + curve_fit(**kwargs, nan_policy="hi") + + @pytest.mark.parametrize('method', ["lm", "trf", "dogbox"]) + def test_nan_policy_1d(self, method): + def f(x, a, b): + return a*x + b + + xdata_with_nan = np.array([2, 3, np.nan, 4, 4, np.nan]) + ydata_with_nan = np.array([1, 2, 5, 3, np.nan, 7]) + xdata_without_nan = np.array([2, 3, 4]) + ydata_without_nan = np.array([1, 2, 3]) + + self._check_nan_policy(f, xdata_with_nan, xdata_without_nan, + ydata_with_nan, ydata_without_nan, method) + + @pytest.mark.parametrize('method', ["lm", "trf", "dogbox"]) + def test_nan_policy_2d(self, method): + def f(x, a, b): + x1 = x[0, :] + x2 = x[1, :] + return a*x1 + b + x2 + + xdata_with_nan = np.array([[2, 3, np.nan, 4, 4, np.nan, 5], + [2, 3, np.nan, np.nan, 4, np.nan, 7]]) + ydata_with_nan = np.array([1, 2, 5, 3, np.nan, 7, 10]) + xdata_without_nan = np.array([[2, 3, 5], [2, 3, 7]]) + ydata_without_nan = np.array([1, 2, 10]) + + self._check_nan_policy(f, xdata_with_nan, xdata_without_nan, + ydata_with_nan, ydata_without_nan, method) + + @pytest.mark.parametrize('n', [2, 3]) + @pytest.mark.parametrize('method', ["lm", "trf", "dogbox"]) + def test_nan_policy_2_3d(self, n, method): + def f(x, a, b): + x1 = x[..., 0, :].squeeze() + x2 = x[..., 1, :].squeeze() + return a*x1 + b + x2 + + xdata_with_nan = np.array([[[2, 3, np.nan, 4, 4, np.nan, 5], + [2, 3, np.nan, np.nan, 4, np.nan, 7]]]) + xdata_with_nan = xdata_with_nan.squeeze() if n == 2 else xdata_with_nan + ydata_with_nan = np.array([1, 2, 5, 3, np.nan, 7, 10]) + xdata_without_nan = np.array([[[2, 3, 5], [2, 3, 7]]]) + ydata_without_nan = np.array([1, 2, 10]) + + self._check_nan_policy(f, xdata_with_nan, xdata_without_nan, + 
ydata_with_nan, ydata_without_nan, method) + + def test_empty_inputs(self): + # Test both with and without bounds (regression test for gh-9864) + assert_raises(ValueError, curve_fit, lambda x, a: a*x, [], []) + assert_raises(ValueError, curve_fit, lambda x, a: a*x, [], [], + bounds=(1, 2)) + assert_raises(ValueError, curve_fit, lambda x, a: a*x, [1], []) + assert_raises(ValueError, curve_fit, lambda x, a: a*x, [2], [], + bounds=(1, 2)) + + def test_function_zero_params(self): + # Fit args is zero, so "Unable to determine number of fit parameters." + assert_raises(ValueError, curve_fit, lambda x: x, [1, 2], [3, 4]) + + def test_None_x(self): # Added in GH10196 + popt, pcov = curve_fit(lambda _, a: a * np.arange(10), + None, 2 * np.arange(10)) + assert_allclose(popt, [2.]) + + def test_method_argument(self): + def f(x, a, b): + return a * np.exp(-b*x) + + xdata = np.linspace(0, 1, 11) + ydata = f(xdata, 2., 2.) + + for method in ['trf', 'dogbox', 'lm', None]: + popt, pcov = curve_fit(f, xdata, ydata, method=method) + assert_allclose(popt, [2., 2.]) + + assert_raises(ValueError, curve_fit, f, xdata, ydata, method='unknown') + + def test_full_output(self): + def f(x, a, b): + return a * np.exp(-b * x) + + xdata = np.linspace(0, 1, 11) + ydata = f(xdata, 2., 2.) + + for method in ['trf', 'dogbox', 'lm', None]: + popt, pcov, infodict, errmsg, ier = curve_fit( + f, xdata, ydata, method=method, full_output=True) + assert_allclose(popt, [2., 2.]) + assert "nfev" in infodict + assert "fvec" in infodict + if method == 'lm' or method is None: + assert "fjac" in infodict + assert "ipvt" in infodict + assert "qtf" in infodict + assert isinstance(errmsg, str) + assert ier in (1, 2, 3, 4) + + def test_bounds(self): + def f(x, a, b): + return a * np.exp(-b*x) + + xdata = np.linspace(0, 1, 11) + ydata = f(xdata, 2., 2.) + + # The minimum w/out bounds is at [2., 2.], + # and with bounds it's at [1.5, smth]. + lb = [1., 0] + ub = [1.5, 3.] + + # Test that both variants of the bounds yield the same result + bounds = (lb, ub) + bounds_class = Bounds(lb, ub) + for method in [None, 'trf', 'dogbox']: + popt, pcov = curve_fit(f, xdata, ydata, bounds=bounds, + method=method) + assert_allclose(popt[0], 1.5) + + popt_class, pcov_class = curve_fit(f, xdata, ydata, + bounds=bounds_class, + method=method) + assert_allclose(popt_class, popt) + + # With bounds, the starting estimate is feasible. + popt, pcov = curve_fit(f, xdata, ydata, method='trf', + bounds=([0., 0], [0.6, np.inf])) + assert_allclose(popt[0], 0.6) + + # method='lm' doesn't support bounds. + assert_raises(ValueError, curve_fit, f, xdata, ydata, bounds=bounds, + method='lm') + + def test_bounds_p0(self): + # This test is for issue #5719. The problem was that an initial guess + # was ignored when 'trf' or 'dogbox' methods were invoked. + def f(x, a): + return np.sin(x + a) + + xdata = np.linspace(-2*np.pi, 2*np.pi, 40) + ydata = np.sin(xdata) + bounds = (-3 * np.pi, 3 * np.pi) + for method in ['trf', 'dogbox']: + popt_1, _ = curve_fit(f, xdata, ydata, p0=2.1*np.pi) + popt_2, _ = curve_fit(f, xdata, ydata, p0=2.1*np.pi, + bounds=bounds, method=method) + + # If the initial guess is ignored, then popt_2 would be close 0. + assert_allclose(popt_1, popt_2) + + def test_jac(self): + # Test that Jacobian callable is handled correctly and + # weighted if sigma is provided. + def f(x, a, b): + return a * np.exp(-b*x) + + def jac(x, a, b): + e = np.exp(-b*x) + return np.vstack((e, -a * x * e)).T + + xdata = np.linspace(0, 1, 11) + ydata = f(xdata, 2., 2.) 
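+
+        # Shape sketch (extra sanity check, not part of the original test):
+        # an analytic Jacobian passed to curve_fit must be (m, k) --
+        # m observations by k parameters, here (11, 2).
+        assert jac(xdata, 2., 2.).shape == (xdata.size, 2)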
+ + # Test numerical options for least_squares backend. + for method in ['trf', 'dogbox']: + for scheme in ['2-point', '3-point', 'cs']: + popt, pcov = curve_fit(f, xdata, ydata, jac=scheme, + method=method) + assert_allclose(popt, [2, 2]) + + # Test the analytic option. + for method in ['lm', 'trf', 'dogbox']: + popt, pcov = curve_fit(f, xdata, ydata, method=method, jac=jac) + assert_allclose(popt, [2, 2]) + + # Now add an outlier and provide sigma. + ydata[5] = 100 + sigma = np.ones(xdata.shape[0]) + sigma[5] = 200 + for method in ['lm', 'trf', 'dogbox']: + popt, pcov = curve_fit(f, xdata, ydata, sigma=sigma, method=method, + jac=jac) + # Still the optimization process is influenced somehow, + # have to set rtol=1e-3. + assert_allclose(popt, [2, 2], rtol=1e-3) + + def test_maxfev_and_bounds(self): + # gh-6340: with no bounds, curve_fit accepts parameter maxfev (via leastsq) + # but with bounds, the parameter is `max_nfev` (via least_squares) + x = np.arange(0, 10) + y = 2*x + popt1, _ = curve_fit(lambda x,p: p*x, x, y, bounds=(0, 3), maxfev=100) + popt2, _ = curve_fit(lambda x,p: p*x, x, y, bounds=(0, 3), max_nfev=100) + + assert_allclose(popt1, 2, atol=1e-14) + assert_allclose(popt2, 2, atol=1e-14) + + def test_curvefit_simplecovariance(self): + + def func(x, a, b): + return a * np.exp(-b*x) + + def jac(x, a, b): + e = np.exp(-b*x) + return np.vstack((e, -a * x * e)).T + + np.random.seed(0) + xdata = np.linspace(0, 4, 50) + y = func(xdata, 2.5, 1.3) + ydata = y + 0.2 * np.random.normal(size=len(xdata)) + + sigma = np.zeros(len(xdata)) + 0.2 + covar = np.diag(sigma**2) + + for jac1, jac2 in [(jac, jac), (None, None)]: + for absolute_sigma in [False, True]: + popt1, pcov1 = curve_fit(func, xdata, ydata, sigma=sigma, + jac=jac1, absolute_sigma=absolute_sigma) + popt2, pcov2 = curve_fit(func, xdata, ydata, sigma=covar, + jac=jac2, absolute_sigma=absolute_sigma) + + assert_allclose(popt1, popt2, atol=1e-14) + assert_allclose(pcov1, pcov2, atol=1e-14) + + def test_curvefit_covariance(self): + + def funcp(x, a, b): + rotn = np.array([[1./np.sqrt(2), -1./np.sqrt(2), 0], + [1./np.sqrt(2), 1./np.sqrt(2), 0], + [0, 0, 1.0]]) + return rotn.dot(a * np.exp(-b*x)) + + def jacp(x, a, b): + rotn = np.array([[1./np.sqrt(2), -1./np.sqrt(2), 0], + [1./np.sqrt(2), 1./np.sqrt(2), 0], + [0, 0, 1.0]]) + e = np.exp(-b*x) + return rotn.dot(np.vstack((e, -a * x * e)).T) + + def func(x, a, b): + return a * np.exp(-b*x) + + def jac(x, a, b): + e = np.exp(-b*x) + return np.vstack((e, -a * x * e)).T + + np.random.seed(0) + xdata = np.arange(1, 4) + y = func(xdata, 2.5, 1.0) + ydata = y + 0.2 * np.random.normal(size=len(xdata)) + sigma = np.zeros(len(xdata)) + 0.2 + covar = np.diag(sigma**2) + # Get a rotation matrix, and obtain ydatap = R ydata + # Chisq = ydata^T C^{-1} ydata + # = ydata^T R^T R C^{-1} R^T R ydata + # = ydatap^T Cp^{-1} ydatap + # Cp^{-1} = R C^{-1} R^T + # Cp = R C R^T, since R^-1 = R^T + rotn = np.array([[1./np.sqrt(2), -1./np.sqrt(2), 0], + [1./np.sqrt(2), 1./np.sqrt(2), 0], + [0, 0, 1.0]]) + ydatap = rotn.dot(ydata) + covarp = rotn.dot(covar).dot(rotn.T) + + for jac1, jac2 in [(jac, jacp), (None, None)]: + for absolute_sigma in [False, True]: + popt1, pcov1 = curve_fit(func, xdata, ydata, sigma=sigma, + jac=jac1, absolute_sigma=absolute_sigma) + popt2, pcov2 = curve_fit(funcp, xdata, ydatap, sigma=covarp, + jac=jac2, absolute_sigma=absolute_sigma) + + assert_allclose(popt1, popt2, rtol=1.2e-7, atol=1e-14) + assert_allclose(pcov1, pcov2, rtol=1.2e-7, atol=1e-14) + + 
@pytest.mark.parametrize("absolute_sigma", [False, True]) + def test_curvefit_scalar_sigma(self, absolute_sigma): + def func(x, a, b): + return a * x + b + + x, y = self.x, self.y + _, pcov1 = curve_fit(func, x, y, sigma=2, absolute_sigma=absolute_sigma) + # Explicitly building the sigma 1D array + _, pcov2 = curve_fit( + func, x, y, sigma=np.full_like(y, 2), absolute_sigma=absolute_sigma + ) + assert np.all(pcov1 == pcov2) + + def test_dtypes(self): + # regression test for gh-9581: curve_fit fails if x and y dtypes differ + x = np.arange(-3, 5) + y = 1.5*x + 3.0 + 0.5*np.sin(x) + + def func(x, a, b): + return a*x + b + + for method in ['lm', 'trf', 'dogbox']: + for dtx in [np.float32, np.float64]: + for dty in [np.float32, np.float64]: + x = x.astype(dtx) + y = y.astype(dty) + + with warnings.catch_warnings(): + warnings.simplefilter("error", OptimizeWarning) + p, cov = curve_fit(func, x, y, method=method) + + assert np.isfinite(cov).all() + assert not np.allclose(p, 1) # curve_fit's initial value + + def test_dtypes2(self): + # regression test for gh-7117: curve_fit fails if + # both inputs are float32 + def hyperbola(x, s_1, s_2, o_x, o_y, c): + b_2 = (s_1 + s_2) / 2 + b_1 = (s_2 - s_1) / 2 + return o_y + b_1*(x-o_x) + b_2*np.sqrt((x-o_x)**2 + c**2/4) + + min_fit = np.array([-3.0, 0.0, -2.0, -10.0, 0.0]) + max_fit = np.array([0.0, 3.0, 3.0, 0.0, 10.0]) + guess = np.array([-2.5/3.0, 4/3.0, 1.0, -4.0, 0.5]) + + params = [-2, .4, -1, -5, 9.5] + xdata = np.array([-32, -16, -8, 4, 4, 8, 16, 32]) + ydata = hyperbola(xdata, *params) + + # run optimization twice, with xdata being float32 and float64 + popt_64, _ = curve_fit(f=hyperbola, xdata=xdata, ydata=ydata, p0=guess, + bounds=(min_fit, max_fit)) + + xdata = xdata.astype(np.float32) + ydata = hyperbola(xdata, *params) + + popt_32, _ = curve_fit(f=hyperbola, xdata=xdata, ydata=ydata, p0=guess, + bounds=(min_fit, max_fit)) + + assert_allclose(popt_32, popt_64, atol=2e-5) + + def test_broadcast_y(self): + xdata = np.arange(10) + target = 4.7 * xdata ** 2 + 3.5 * xdata + np.random.rand(len(xdata)) + def fit_func(x, a, b): + return a * x ** 2 + b * x - target + for method in ['lm', 'trf', 'dogbox']: + popt0, pcov0 = curve_fit(fit_func, + xdata=xdata, + ydata=np.zeros_like(xdata), + method=method) + popt1, pcov1 = curve_fit(fit_func, + xdata=xdata, + ydata=0, + method=method) + assert_allclose(pcov0, pcov1) + + def test_args_in_kwargs(self): + # Ensure that `args` cannot be passed as keyword argument to `curve_fit` + + def func(x, a, b): + return a * x + b + + with assert_raises(ValueError): + curve_fit(func, + xdata=[1, 2, 3, 4], + ydata=[5, 9, 13, 17], + p0=[1], + args=(1,)) + + def test_data_point_number_validation(self): + def func(x, a, b, c, d, e): + return a * np.exp(-b * x) + c + d + e + + with assert_raises(TypeError, match="The number of func parameters="): + curve_fit(func, + xdata=[1, 2, 3, 4], + ydata=[5, 9, 13, 17]) + + @pytest.mark.filterwarnings('ignore::RuntimeWarning') + def test_gh4555(self): + # gh-4555 reported that covariance matrices returned by `leastsq` + # can have negative diagonal elements and eigenvalues. (In fact, + # they can also be asymmetric.) This shows up in the output of + # `scipy.optimize.curve_fit`. 
Check that it has been resolved.
+        def f(x, a, b, c, d, e):
+            return a*np.log(x + 1 + b) + c*np.log(x + 1 + d) + e
+
+        rng = np.random.default_rng(408113519974467917)
+        n = 100
+        x = np.arange(n)
+        y = np.linspace(2, 7, n) + rng.random(n)
+        p, cov = optimize.curve_fit(f, x, y, maxfev=100000)
+        assert np.all(np.diag(cov) > 0)
+        eigs = linalg.eigh(cov)[0]  # separate line for debugging
+        # some platforms see a small negative eigenvalue
+        assert np.all(eigs > -1e-2)
+        assert_allclose(cov, cov.T)
+
+    def test_gh4555b(self):
+        # check that PR gh-17247 did not significantly change covariance matrix
+        # for simple cases
+        rng = np.random.default_rng(408113519974467917)
+
+        def func(x, a, b, c):
+            return a * np.exp(-b * x) + c
+
+        xdata = np.linspace(0, 4, 50)
+        y = func(xdata, 2.5, 1.3, 0.5)
+        y_noise = 0.2 * rng.normal(size=xdata.size)
+        ydata = y + y_noise
+        _, res = curve_fit(func, xdata, ydata)
+        # reference from commit 1d80a2f254380d2b45733258ca42eb6b55c8755b
+        ref = [[+0.0158972536486215, 0.0069207183284242, -0.0007474400714749],
+               [+0.0069207183284242, 0.0205057958128679, +0.0053997711275403],
+               [-0.0007474400714749, 0.0053997711275403, +0.0027833930320877]]
+        # Linux_Python_38_32bit_full fails with default tolerance
+        assert_allclose(res, ref, 2e-7)
+
+    def test_gh13670(self):
+        # gh-13670 reported that `curve_fit` executes callables
+        # with the same values of the parameters at the beginning of
+        # optimization. Check that this has been resolved.
+
+        rng = np.random.default_rng(8250058582555444926)
+        x = np.linspace(0, 3, 101)
+        y = 2 * x + 1 + rng.normal(size=101) * 0.5
+
+        def line(x, *p):
+            assert not np.all(line.last_p == p)
+            line.last_p = p
+            return x * p[0] + p[1]
+
+        def jac(x, *p):
+            assert not np.all(jac.last_p == p)
+            jac.last_p = p
+            return np.array([x, np.ones_like(x)]).T
+
+        line.last_p = None
+        jac.last_p = None
+        p0 = np.array([1.0, 5.0])
+        curve_fit(line, x, y, p0, method='lm', jac=jac)
+
+
+class TestFixedPoint:
+
+    def test_scalar_trivial(self):
+        # f(x) = 2x; fixed point should be x=0
+        def func(x):
+            return 2.0*x
+        x0 = 1.0
+        x = fixed_point(func, x0)
+        assert_almost_equal(x, 0.0)
+
+    def test_scalar_basic1(self):
+        # f(x) = x**2; x0=1.05; fixed point should be x=1
+        def func(x):
+            return x**2
+        x0 = 1.05
+        x = fixed_point(func, x0)
+        assert_almost_equal(x, 1.0)
+
+    def test_scalar_basic2(self):
+        # f(x) = x**0.5; x0=1.05; fixed point should be x=1
+        def func(x):
+            return x**0.5
+        x0 = 1.05
+        x = fixed_point(func, x0)
+        assert_almost_equal(x, 1.0)
+
+    def test_array_trivial(self):
+        def func(x):
+            return 2.0*x
+        x0 = [0.3, 0.15]
+        with np.errstate(all='ignore'):
+            x = fixed_point(func, x0)
+        assert_almost_equal(x, [0.0, 0.0])
+
+    def test_array_basic1(self):
+        # f(x) = c * x**2; fixed point should be x=1/c
+        def func(x, c):
+            return c * x**2
+        c = array([0.75, 1.0, 1.25])
+        x0 = [1.1, 1.15, 0.9]
+        with np.errstate(all='ignore'):
+            x = fixed_point(func, x0, args=(c,))
+        assert_almost_equal(x, 1.0/c)
+
+    def test_array_basic2(self):
+        # f(x) = c * x**0.5; fixed point should be x=c**2
+        def func(x, c):
+            return c * x**0.5
+        c = array([0.75, 1.0, 1.25])
+        x0 = [0.8, 1.1, 1.1]
+        x = fixed_point(func, x0, args=(c,))
+        assert_almost_equal(x, c**2)
+
+    def test_lambertw(self):
+        # python-list/2010-December/594592.html
+        xxroot = fixed_point(lambda xx: np.exp(-2.0*xx)/2.0, 1.0,
+                             args=(), xtol=1e-12, maxiter=500)
+        assert_allclose(xxroot, np.exp(-2.0*xxroot)/2.0)
+        assert_allclose(xxroot, lambertw(1)/2)
+
+    def test_no_acceleration(self):
+        # github issue 5460
+        ks = 2
+
kl = 6 + m = 1.3 + n0 = 1.001 + i0 = ((m-1)/m)*(kl/ks/m)**(1/(m-1)) + + def func(n): + return np.log(kl/ks/n) / np.log(i0*n/(n - 1)) + 1 + + n = fixed_point(func, n0, method='iteration') + assert_allclose(n, m) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_nnls.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_nnls.py new file mode 100644 index 0000000000000000000000000000000000000000..69b0f186a5afcb194bf64502f0c0c5bbd2af5561 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_nnls.py @@ -0,0 +1,318 @@ +import numpy as np +from numpy.testing import assert_allclose +from pytest import raises as assert_raises +from scipy.optimize import nnls + + +class TestNNLS: + def setup_method(self): + self.rng = np.random.default_rng(1685225766635251) + + def test_nnls(self): + a = np.arange(25.0).reshape(-1, 5) + x = np.arange(5.0) + y = a @ x + x, res = nnls(a, y) + assert res < 1e-7 + assert np.linalg.norm((a @ x) - y) < 1e-7 + + def test_nnls_tall(self): + a = self.rng.uniform(low=-10, high=10, size=[50, 10]) + x = np.abs(self.rng.uniform(low=-2, high=2, size=[10])) + x[::2] = 0 + b = a @ x + xact, rnorm = nnls(a, b, atol=500*np.linalg.norm(a, 1)*np.spacing(1.)) + assert_allclose(xact, x, rtol=0., atol=1e-10) + assert rnorm < 1e-12 + + def test_nnls_wide(self): + # If too wide then problem becomes too ill-conditioned ans starts + # emitting warnings, hence small m, n difference. + a = self.rng.uniform(low=-10, high=10, size=[100, 120]) + x = np.abs(self.rng.uniform(low=-2, high=2, size=[120])) + x[::2] = 0 + b = a @ x + xact, rnorm = nnls(a, b, atol=500*np.linalg.norm(a, 1)*np.spacing(1.)) + assert_allclose(xact, x, rtol=0., atol=1e-10) + assert rnorm < 1e-12 + + def test_maxiter(self): + # test that maxiter argument does stop iterations + a = self.rng.uniform(size=(5, 10)) + b = self.rng.uniform(size=5) + with assert_raises(RuntimeError): + nnls(a, b, maxiter=1) + + def test_nnls_inner_loop_case1(self): + # See gh-20168 + n = np.array( + [3, 2, 0, 1, 1, 1, 3, 8, 14, 16, 29, 23, 41, 47, 53, 57, 67, 76, + 103, 89, 97, 94, 85, 95, 78, 78, 78, 77, 73, 50, 50, 56, 68, 98, + 95, 112, 134, 145, 158, 172, 213, 234, 222, 215, 216, 216, 206, + 183, 135, 156, 110, 92, 63, 60, 52, 29, 20, 16, 12, 5, 5, 5, 1, 2, + 3, 0, 2]) + k = np.array( + [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0.7205812007860187, 0., 1.4411624015720375, + 0.7205812007860187, 2.882324803144075, 5.76464960628815, + 5.76464960628815, 12.249880413362318, 15.132205216506394, + 20.176273622008523, 27.382085629868712, 48.27894045266326, + 47.558359251877235, 68.45521407467177, 97.99904330689854, + 108.0871801179028, 135.46926574777152, 140.51333415327366, + 184.4687874012208, 171.49832578707245, 205.36564222401535, + 244.27702706646033, 214.01261663344755, 228.42424064916793, + 232.02714665309804, 205.36564222401535, 172.9394881886445, + 191.67459940908097, 162.1307701768542, 153.48379576742198, + 110.96950492104689, 103.04311171240067, 86.46974409432225, + 60.528820866025576, 43.234872047161126, 23.779179625938617, + 24.499760826724636, 17.29394881886445, 11.5292992125763, + 5.76464960628815, 5.044068405502131, 3.6029060039300935, 0., + 2.882324803144075, 0., 0., 0.]) + d = np.array( + [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0.003889242101538, 0., 0.007606268390096, 0., + 0.025457371599973, 0.036952882091577, 0., 0.08518359183449, + 0.048201126400243, 0.196234990022205, 
0.144116240157247, + 0.171145134062442, 0., 0., 0.269555036538714, 0., 0., 0., + 0.010893241091872, 0., 0., 0., 0., 0., 0., 0., 0., + 0.048167058272886, 0.011238724891049, 0., 0., 0.055162603456078, + 0., 0., 0., 0., 0.027753339088588, 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0.]) + # The following code sets up a system of equations such that + # $k_i-p_i*n_i$ is minimized for $p_i$ with weights $n_i$ and + # monotonicity constraints on $p_i$. This translates to a system of + # equations of the form $k_i - (d_1 + ... + d_i) * n_i$ and + # non-negativity constraints on the $d_i$. If $n_i$ is zero the + # system is modified such that $d_i - d_{i+1}$ is then minimized. + N = len(n) + A = np.diag(n) @ np.tril(np.ones((N, N))) + w = n ** 0.5 + + nz = (n == 0).nonzero()[0] + A[nz, nz] = 1 + A[nz, np.minimum(nz + 1, N - 1)] = -1 + w[nz] = 1 + k[nz] = 0 + W = np.diag(w) + + # Small perturbations can already make the infinite loop go away (just + # uncomment the next line) + # k = k + 1e-10 * np.random.normal(size=N) + dact, _ = nnls(W @ A, W @ k) + assert_allclose(dact, d, rtol=0., atol=1e-10) + + def test_nnls_inner_loop_case2(self): + # See gh-20168 + n = np.array( + [1, 0, 1, 2, 2, 2, 3, 3, 5, 4, 14, 14, 19, 26, 36, 42, 36, 64, 64, + 64, 81, 85, 85, 95, 95, 95, 75, 76, 69, 81, 62, 59, 68, 64, 71, 67, + 74, 78, 118, 135, 153, 159, 210, 195, 218, 243, 236, 215, 196, 175, + 185, 149, 144, 103, 104, 75, 56, 40, 32, 26, 17, 9, 12, 8, 2, 1, 1, + 1]) + k = np.array( + [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0.7064355064917867, 0., 0., 2.11930651947536, + 0.7064355064917867, 0., 3.5321775324589333, 7.064355064917867, + 11.302968103868587, 16.95445215580288, 20.486629688261814, + 20.486629688261814, 37.44108184406469, 55.808405012851146, + 78.41434122058831, 103.13958394780086, 105.965325973768, + 125.74552015553803, 149.057891869767, 176.60887662294667, + 197.09550631120848, 211.930651947536, 204.86629688261814, + 233.8301526487814, 221.1143135319292, 195.6826352982249, + 197.80194181770025, 191.4440222592742, 187.91184472681525, + 144.11284332432447, 131.39700420747232, 116.5618585711448, + 93.24948685691584, 89.01087381796512, 53.68909849337579, + 45.211872415474346, 31.083162285638615, 24.72524272721253, + 16.95445215580288, 9.890097090885014, 9.890097090885014, + 2.8257420259671466, 2.8257420259671466, 1.4128710129835733, + 0.7064355064917867, 1.4128710129835733]) + d = np.array( + [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0.0021916146355674473, 0., 0., + 0.011252740799789484, 0., 0., 0.037746623295934395, + 0.03602328132946222, 0.09509167709829734, 0.10505765870204821, + 0.01391037014274718, 0.0188296228752321, 0.20723559202324254, + 0.3056220879462608, 0.13304643490426477, 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0.043185876949706214, 0.0037266261379722554, + 0., 0., 0., 0., 0., 0.094797899357143, 0., 0., 0., 0., 0., 0., 0., + 0., 0.23450935613672663, 0., 0., 0.07064355064917871]) + # The following code sets up a system of equations such that + # $k_i-p_i*n_i$ is minimized for $p_i$ with weights $n_i$ and + # monotonicity constraints on $p_i$. This translates to a system of + # equations of the form $k_i - (d_1 + ... + d_i) * n_i$ and + # non-negativity constraints on the $d_i$. If $n_i$ is zero the + # system is modified such that $d_i - d_{i+1}$ is then minimized. 
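+        # Tiny instance of the same construction (illustrative numbers,
+        # not used by the test): with n = [2, 3],
+        #     A = np.diag(n) @ np.tril(np.ones((2, 2))) = [[2, 0],
+        #                                                  [3, 3]],
+        # so (A @ d)[i] = n[i] * (d[0] + ... + d[i]), i.e. the fitted
+        # p = cumsum(d) is automatically monotone whenever d >= 0.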
+ N = len(n) + A = np.diag(n) @ np.tril(np.ones((N, N))) + w = n ** 0.5 + + nz = (n == 0).nonzero()[0] + A[nz, nz] = 1 + A[nz, np.minimum(nz + 1, N - 1)] = -1 + w[nz] = 1 + k[nz] = 0 + W = np.diag(w) + + dact, _ = nnls(W @ A, W @ k, atol=1e-7) + + p = np.cumsum(dact) + assert np.all(dact >= 0) + assert np.linalg.norm(k - n * p, ord=np.inf) < 28 + assert_allclose(dact, d, rtol=0., atol=1e-10) + + def test_nnls_gh20302(self): + # See gh-20302 + A = np.array( + [0.33408569134321575, 0.11136189711440525, 0.049140798007949286, + 0.03712063237146841, 0.055680948557202625, 0.16642814595936478, + 0.11095209730624318, 0.09791993030943345, 0.14793612974165757, + 0.44380838922497273, 0.11099502671044059, 0.11099502671044059, + 0.14693672599330593, 0.3329850801313218, 1.498432860590948, + 0.0832374225132955, 0.11098323001772734, 0.19589481249472837, + 0.5919105600945457, 3.5514633605672747, 0.06658716751427037, + 0.11097861252378394, 0.24485832778293645, 0.9248217710315328, + 6.936163282736496, 0.05547609388181014, 0.11095218776362029, + 0.29376003042571264, 1.3314262531634435, 11.982836278470993, + 0.047506113282944136, 0.11084759766020298, 0.3423969672933396, + 1.8105107617833156, 19.010362998724812, 0.041507335004505576, + 0.11068622667868154, 0.39074115283013344, 2.361306169145206, + 28.335674029742474, 0.03682846280947718, 0.11048538842843154, + 0.4387861797121048, 2.9831054875676517, 40.2719240821633, + 0.03311278164362387, 0.11037593881207958, 0.4870572300443105, + 3.6791979604026523, 55.187969406039784, 0.030079304092299915, + 0.11029078167176636, 0.5353496017200152, 4.448394860761242, + 73.3985152025605, 0.02545939709595835, 0.11032405408248619, + 0.6328767609778363, 6.214921713313388, 121.19097340961108, + 0.022080881724881523, 0.11040440862440762, 0.7307742886903428, + 8.28033064683057, 186.30743955368786, 0.020715838214945492, + 0.1104844704797093, 0.7800578384588346, 9.42800814760186, + 226.27219554244465, 0.01843179728340054, 0.11059078370040323, + 0.8784095015912599, 11.94380463964355, 322.48272527037585, + 0.015812787653789077, 0.11068951357652354, 1.0257259848595766, + 16.27135849574896, 512.5477926160922, 0.014438550529330062, + 0.11069555405819713, 1.1234754801775881, 19.519316032262093, + 673.4164031130423, 0.012760770585072577, 0.110593345070629, + 1.2688431112524712, 24.920367089248398, 971.8943164806875, + 0.011427556646114315, 0.11046638091243838, 1.413623342459821, + 30.967408782453557, 1347.0822820367298, 0.010033330264470307, + 0.11036663290917338, 1.6071533470570285, 40.063087746029936, + 1983.122843428482, 0.008950061496507258, 0.11038409179025618, + 1.802244865119193, 50.37194055362024, 2795.642700725923, + 0.008071078821135658, 0.11030474388885401, 1.9956465761433504, + 61.80742482572119, 3801.1566267818534, 0.007191031207777556, + 0.11026247851925586, 2.238160187262168, 77.7718015155818, + 5366.2543045751445, 0.00636834224248, 0.11038459886965334, + 2.5328963107984297, 99.49331844784753, 7760.4788389321075, + 0.005624259098118485, 0.11061042892966355, 2.879742607664547, + 128.34496770138628, 11358.529641572684, 0.0050354270614989555, + 0.11077939535297703, 3.2263279459292575, 160.85168205252265, + 15924.316523199741, 0.0044997853165982555, 0.1109947044760903, + 3.6244287189055613, 202.60233390369015, 22488.859063309606, + 0.004023601950058174, 0.1113196539516095, 4.07713905729421, + 255.6270320242126, 31825.565487014468, 0.0036024117873727094, + 0.111674765408554, 4.582933773135057, 321.9583486728612, + 44913.18963986413, 0.003201503089582304, 0.11205260813538065, + 
5.191786833370116, 411.79333489752383, 64857.45024636, + 0.0028633044552448853, 0.11262330857296549, 5.864295861648949, + 522.7223161899905, 92521.84996562831, 0.0025691897303891965, + 0.11304434813712465, 6.584584405106342, 656.5615739804199, + 129999.19164812315, 0.0022992911894424675, 0.11343169867916175, + 7.4080129906658305, 828.2026426227864, 183860.98666225857, + 0.0020449922071108764, 0.11383789952917212, 8.388975556433872, + 1058.2750599896935, 265097.9025274183, 0.001831274615120854, + 0.11414945100919989, 9.419351803810935, 1330.564050780237, + 373223.2162438565, 0.0016363333454631633, 0.11454333418242145, + 10.6143816579462, 1683.787012481595, 530392.9089317025, + 0.0014598610433380044, 0.11484240207592301, 11.959688127956882, + 2132.0874753402027, 754758.9662704318, 0.0012985240015312626, + 0.11513579480243862, 13.514425358573531, 2715.5160990137824, + 1083490.9235064993, 0.0011614735761289934, 0.11537304189548002, + 15.171418602667567, 3415.195870828736, 1526592.554260445, + 0.0010347472698811352, 0.11554677847006009, 17.080800985009617, + 4322.412404600832, 2172012.2333119176, 0.0009232988811258664, + 0.1157201264344419, 19.20004861829407, 5453.349531598553, + 3075689.135821584, 0.0008228871862975205, 0.11602709326795038, + 21.65735242414206, 6920.203923780365, 4390869.389638642, + 0.00073528900066722, 0.11642075843897651, 24.40223571298994, + 8755.811207598026, 6238515.485413593, 0.0006602764384729194, + 0.11752920604817965, 27.694443541914293, 11171.386093291572, + 8948280.260726549, 0.0005935538977939806, 0.11851292825953147, + 31.325508920763063, 14174.185724149384, 12735505.873148222, + 0.0005310755355633124, 0.11913794514470308, 35.381052949627765, + 17987.010118815077, 18157886.71494382, 0.00047239949671590953, + 0.1190446731724092, 39.71342528048061, 22679.438775422022, + 25718483.571328573, 0.00041829129789387623, 0.11851586773659825, + 44.45299332965028, 28542.57147989741, 36391778.63686921, + 0.00037321512015419886, 0.11880681324908665, 50.0668539579632, + 36118.26128449941, 51739409.29004541, 0.0003315539616702064, + 0.1184752823034871, 56.04387059062639, 45383.29960621684, + 72976345.76679668, 0.00029456064937920213, 0.11831519416731286, + 62.91195073220101, 57265.53993693082, 103507463.43600245, + 0.00026301867496859703, 0.11862142241083726, 70.8217262087034, + 72383.14781936012, 146901598.49939138, 0.00023618734450420032, + 0.11966825454879482, 80.26535457124461, 92160.51176984518, + 210125966.835247, 0.00021165918071578316, 0.12043407382728061, + 90.7169587544247, 116975.56852918258, 299515943.218972, + 0.00018757727511329545, 0.11992440455576689, 101.49899864101785, + 147056.26174166967, 423080865.0307836, 0.00016654469159895833, + 0.11957908856805206, 113.65970431102812, 184937.67016486943, + 597533612.3026931, 0.00014717439179415048, 0.11872067604728138, + 126.77899683346702, 231758.58906776624, 841283678.3159915, + 0.00012868496382376066, 0.1166314722122684, 139.93635237349534, + 287417.30847929465, 1172231492.6328032, 0.00011225559452625302, + 0.11427619522772557, 154.0034283704458, 355281.4912295324, + 1627544511.322488, 9.879511142981067e-05, 0.11295574406808354, + 170.96532050841535, 442971.0111288653, 2279085852.2580123, + 8.71257780313587e-05, 0.11192758284428547, 190.35067416684697, + 554165.2523674504, 3203629323.93623, 7.665069027765277e-05, + 0.11060694607065294, 211.28835951100046, 690933.608546013, + 4486577387.093535, 6.734021094824451e-05, 0.10915848194710433, + 234.24338803525194, 860487.9079859136, 6276829044.8032465, + 
5.9191625040287665e-05, 0.10776821865668373, 259.7454711820425, + 1071699.0387579766, 8780430224.544102, 5.1856803674907676e-05, + 0.10606444911641115, 287.1843540288165, 1331126.3723998806, + 12251687131.5685, 4.503421404759231e-05, 0.10347361247668461, + 314.7338642485931, 1638796.0697522392, 16944331963.203278, + 3.90470387455642e-05, 0.1007804070023012, 344.3427560918527, + 2014064.4865519698, 23392351979.057854, 3.46557661636393e-05, + 0.10046706610839032, 385.56603915081587, 2533036.2523656, + 33044724430.235435, 3.148745865254635e-05, 0.1025441570117926, + 442.09038234164746, 3262712.3882769793, 47815050050.199135, + 2.9790762078715404e-05, 0.1089845379379672, 527.8068231298969, + 4375751.903321453, 72035815708.42941, 2.8772639817606534e-05, + 0.11823636789048445, 643.2048194503195, 5989838.001888927, + 110764084330.93005, 2.7951691815106586e-05, 0.12903432664913705, + 788.5500418523591, 8249371.000613411, 171368308481.2427, + 2.6844392423114212e-05, 0.1392060709754626, 955.6296403631383, + 11230229.319931043, 262063016295.25085, 2.499458273851386e-05, + 0.14559344445184325, 1122.7022399726002, 14820229.698461473, + 388475270970.9214, 2.337386729019776e-05, 0.15294300496886065, + 1324.8158105672455, 19644861.137128454, 578442936182.7473, + 2.0081014872174113e-05, 0.14760215298210377, 1436.2385042492353, + 23923681.729276657, 791311658718.4193, 1.773374462991839e-05, + 0.14642752940923615, 1600.5596278736678, 29949429.82503553, + 1112815989293.9326, 1.5303115839590797e-05, 0.14194150045081785, + 1742.873058605698, 36634451.931305364, 1529085389160.7544, + 1.3148448731163076e-05, 0.13699368732998807, 1889.5284359054356, + 44614279.74469635, 2091762812969.9607, 1.1739194407590062e-05, + 0.13739553134643406, 2128.794599579694, 56462810.11822766, + 2973783283306.8145, 1.0293367506254706e-05, 0.13533033372723272, + 2355.372854690074, 70176508.28667311, 4151852759764.441, + 9.678312586863569e-06, 0.14293577249119244, 2794.531827932675, + 93528671.31952812, 6215821967224.52, -1.174086323572049e-05, + 0.1429501325944908, 3139.4804810720925, 118031680.16618933, + -6466892421886.174, -2.1188265307407812e-05, 0.1477108290912869, + 3644.1133424610953, 153900132.62392554, -4828013117542.036, + -8.614483025123122e-05, 0.16037100755883044, 4444.386620899393, + 210846007.89660168, -1766340937974.433, 4.981445776141726e-05, + 0.16053420251962536, 4997.558254401547, 266327328.4755411, + 3862250287024.725, 1.8500019169456637e-05, 0.15448417164977674, + 5402.289867444643, 323399508.1475582, 12152445411933.408, + -5.647882376069748e-05, 0.1406372975946189, 5524.633133597753, + 371512945.9909363, -4162951345292.1514, 2.8048523486337994e-05, + 0.13183417571186926, 5817.462495763679, 439447252.3728975, + 9294740538175.03]).reshape(89, 5) + b = np.ones(89, dtype=np.float64) + sol, rnorm = nnls(A, b) + assert_allclose(sol, np.array([0.61124315, 8.22262829, 0., 0., 0.])) + assert_allclose(rnorm, 1.0556460808977297) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_nonlin.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_nonlin.py new file mode 100644 index 0000000000000000000000000000000000000000..d65a86198972df00842455462d5713924b44f182 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_nonlin.py @@ -0,0 +1,534 @@ +""" Unit tests for nonlinear solvers +Author: Ondrej Certik +May 2007 +""" +from numpy.testing import assert_ +import pytest + +from scipy.optimize import _nonlin as nonlin, root +from scipy.sparse import 
csr_array +from numpy import diag, dot +from numpy.linalg import inv +import numpy as np +import scipy + +from .test_minpack import pressure_network + +SOLVERS = {'anderson': nonlin.anderson, + 'diagbroyden': nonlin.diagbroyden, + 'linearmixing': nonlin.linearmixing, + 'excitingmixing': nonlin.excitingmixing, + 'broyden1': nonlin.broyden1, + 'broyden2': nonlin.broyden2, + 'krylov': nonlin.newton_krylov} +MUST_WORK = {'anderson': nonlin.anderson, 'broyden1': nonlin.broyden1, + 'broyden2': nonlin.broyden2, 'krylov': nonlin.newton_krylov} + +# ---------------------------------------------------------------------------- +# Test problems +# ---------------------------------------------------------------------------- + + +def F(x): + x = np.asarray(x).T + d = diag([3, 2, 1.5, 1, 0.5]) + c = 0.01 + f = -d @ x - c * float(x.T @ x) * x + return f + + +F.xin = [1, 1, 1, 1, 1] +F.KNOWN_BAD = {} +F.JAC_KSP_BAD = {} +F.ROOT_JAC_KSP_BAD = {} + + +def F2(x): + return x + + +F2.xin = [1, 2, 3, 4, 5, 6] +F2.KNOWN_BAD = {'linearmixing': nonlin.linearmixing, + 'excitingmixing': nonlin.excitingmixing} +F2.JAC_KSP_BAD = {} +F2.ROOT_JAC_KSP_BAD = {} + + +def F2_lucky(x): + return x + + +F2_lucky.xin = [0, 0, 0, 0, 0, 0] +F2_lucky.KNOWN_BAD = {} +F2_lucky.JAC_KSP_BAD = {} +F2_lucky.ROOT_JAC_KSP_BAD = {} + + +def F3(x): + A = np.array([[-2, 1, 0.], [1, -2, 1], [0, 1, -2]]) + b = np.array([1, 2, 3.]) + return A @ x - b + + +F3.xin = [1, 2, 3] +F3.KNOWN_BAD = {} +F3.JAC_KSP_BAD = {} +F3.ROOT_JAC_KSP_BAD = {} + + +def F4_powell(x): + A = 1e4 + return [A*x[0]*x[1] - 1, np.exp(-x[0]) + np.exp(-x[1]) - (1 + 1/A)] + + +F4_powell.xin = [-1, -2] +F4_powell.KNOWN_BAD = {'linearmixing': nonlin.linearmixing, + 'excitingmixing': nonlin.excitingmixing, + 'diagbroyden': nonlin.diagbroyden} +# In the extreme case, it does not converge for the nonlinear problem solved +# by MINRES and the root problem solved by GMRES/BiCGStab/CGS/MINRES/TFQMR +# when using the Krylov method to approximate the Jacobian +F4_powell.JAC_KSP_BAD = {'minres'} +F4_powell.ROOT_JAC_KSP_BAD = {'gmres', 'bicgstab', 'cgs', 'minres', 'tfqmr'} + + +def F5(x): + return pressure_network(x, 4, np.array([.5, .5, .5, .5])) + + +F5.xin = [2., 0, 2, 0] +F5.KNOWN_BAD = {'excitingmixing': nonlin.excitingmixing, + 'linearmixing': nonlin.linearmixing, + 'diagbroyden': nonlin.diagbroyden} +# In the extreme case, the Jacobian inversion yields a zero vector for the +# nonlinear problem solved by CGS/MINRES, and it does not converge for the +# root problem solved by MINRES, when using the Krylov method to approximate +# the Jacobian +F5.JAC_KSP_BAD = {'cgs', 'minres'} +F5.ROOT_JAC_KSP_BAD = {'minres'} + + +def F6(x): + x1, x2 = x + J0 = np.array([[-4.256, 14.7], + [0.8394989, 0.59964207]]) + v = np.array([(x1 + 3) * (x2**5 - 7) + 3*6, + np.sin(x2 * np.exp(x1) - 1)]) + return -np.linalg.solve(J0, v) + + +F6.xin = [-0.5, 1.4] +F6.KNOWN_BAD = {'excitingmixing': nonlin.excitingmixing, + 'linearmixing': nonlin.linearmixing, + 'diagbroyden': nonlin.diagbroyden} +F6.JAC_KSP_BAD = {} +F6.ROOT_JAC_KSP_BAD = {} + + +# ---------------------------------------------------------------------------- +# Tests +# ---------------------------------------------------------------------------- + + +class TestNonlin: + """ + Check the Broyden methods for a few test problems. + + broyden1, broyden2, and newton_krylov must succeed for + all functions. Some of the others don't -- tests in KNOWN_BAD are skipped.
+ + """ + + def _check_nonlin_func(self, f, func, f_tol=1e-2): + # Test all methods mentioned in the class `KrylovJacobian` + if func == SOLVERS['krylov']: + for method in ['gmres', 'bicgstab', 'cgs', 'minres', 'tfqmr']: + if method in f.JAC_KSP_BAD: + continue + + x = func(f, f.xin, method=method, line_search=None, + f_tol=f_tol, maxiter=200, verbose=0) + assert_(np.absolute(f(x)).max() < f_tol) + + x = func(f, f.xin, f_tol=f_tol, maxiter=200, verbose=0) + assert_(np.absolute(f(x)).max() < f_tol) + + def _check_root(self, f, method, f_tol=1e-2): + # Test Krylov methods + if method == 'krylov': + for jac_method in ['gmres', 'bicgstab', 'cgs', 'minres', 'tfqmr']: + if jac_method in f.ROOT_JAC_KSP_BAD: + continue + + res = root(f, f.xin, method=method, + options={'ftol': f_tol, 'maxiter': 200, + 'disp': 0, + 'jac_options': {'method': jac_method}}) + assert_(np.absolute(res.fun).max() < f_tol) + + res = root(f, f.xin, method=method, + options={'ftol': f_tol, 'maxiter': 200, 'disp': 0}) + assert_(np.absolute(res.fun).max() < f_tol) + + @pytest.mark.xfail + def _check_func_fail(self, *a, **kw): + pass + + @pytest.mark.filterwarnings('ignore::DeprecationWarning') + def test_problem_nonlin(self): + for f in [F, F2, F2_lucky, F3, F4_powell, F5, F6]: + for func in SOLVERS.values(): + if func in f.KNOWN_BAD.values(): + if func in MUST_WORK.values(): + self._check_func_fail(f, func) + continue + self._check_nonlin_func(f, func) + + @pytest.mark.filterwarnings('ignore::DeprecationWarning') + @pytest.mark.parametrize("method", ['lgmres', 'gmres', 'bicgstab', 'cgs', + 'minres', 'tfqmr']) + def test_tol_norm_called(self, method): + # Check that supplying tol_norm keyword to nonlin_solve works + self._tol_norm_used = False + + def local_norm_func(x): + self._tol_norm_used = True + return np.absolute(x).max() + + nonlin.newton_krylov(F, F.xin, method=method, f_tol=1e-2, + maxiter=200, verbose=0, + tol_norm=local_norm_func) + assert_(self._tol_norm_used) + + @pytest.mark.filterwarnings('ignore::DeprecationWarning') + def test_problem_root(self): + for f in [F, F2, F2_lucky, F3, F4_powell, F5, F6]: + for meth in SOLVERS: + if meth in f.KNOWN_BAD: + if meth in MUST_WORK: + self._check_func_fail(f, meth) + continue + self._check_root(f, meth) + + def test_no_convergence(self): + def wont_converge(x): + return 1e3 + x + + with pytest.raises(scipy.optimize.NoConvergence): + nonlin.newton_krylov(wont_converge, xin=[0], maxiter=1) + + +class TestSecant: + """Check that some Jacobian approximations satisfy the secant condition""" + + xs = [np.array([1., 2., 3., 4., 5.]), + np.array([2., 3., 4., 5., 1.]), + np.array([3., 4., 5., 1., 2.]), + np.array([4., 5., 1., 2., 3.]), + np.array([9., 1., 9., 1., 3.]), + np.array([0., 1., 9., 1., 3.]), + np.array([5., 5., 7., 1., 1.]), + np.array([1., 2., 7., 5., 1.]),] + fs = [x**2 - 1 for x in xs] + + def _check_secant(self, jac_cls, npoints=1, **kw): + """ + Check that the given Jacobian approximation satisfies secant + conditions for last `npoints` points. 
+ """ + jac = jac_cls(**kw) + jac.setup(self.xs[0], self.fs[0], None) + for j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])): + jac.update(x, f) + + for k in range(min(npoints, j+1)): + dx = self.xs[j-k+1] - self.xs[j-k] + df = self.fs[j-k+1] - self.fs[j-k] + assert_(np.allclose(dx, jac.solve(df))) + + # Check that the `npoints` secant bound is strict + if j >= npoints: + dx = self.xs[j-npoints+1] - self.xs[j-npoints] + df = self.fs[j-npoints+1] - self.fs[j-npoints] + assert_(not np.allclose(dx, jac.solve(df))) + + def test_broyden1(self): + self._check_secant(nonlin.BroydenFirst) + + def test_broyden2(self): + self._check_secant(nonlin.BroydenSecond) + + def test_broyden1_update(self): + # Check that BroydenFirst update works as for a dense matrix + jac = nonlin.BroydenFirst(alpha=0.1) + jac.setup(self.xs[0], self.fs[0], None) + + B = np.identity(5) * (-1/0.1) + + for last_j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])): + df = f - self.fs[last_j] + dx = x - self.xs[last_j] + B += (df - dot(B, dx))[:, None] * dx[None, :] / dot(dx, dx) + jac.update(x, f) + assert_(np.allclose(jac.todense(), B, rtol=1e-10, atol=1e-13)) + + def test_broyden2_update(self): + # Check that BroydenSecond update works as for a dense matrix + jac = nonlin.BroydenSecond(alpha=0.1) + jac.setup(self.xs[0], self.fs[0], None) + + H = np.identity(5) * (-0.1) + + for last_j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])): + df = f - self.fs[last_j] + dx = x - self.xs[last_j] + H += (dx - dot(H, df))[:, None] * df[None, :] / dot(df, df) + jac.update(x, f) + assert_(np.allclose(jac.todense(), inv(H), rtol=1e-10, atol=1e-13)) + + def test_anderson(self): + # Anderson mixing (with w0=0) satisfies secant conditions + # for the last M iterates, see [Ey]_ + # + # .. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996). 
+ self._check_secant(nonlin.Anderson, M=3, w0=0, npoints=3) + + +class TestLinear: + """Solve a linear equation; + some methods find the exact solution in a finite number of steps""" + + def _check(self, jac, N, maxiter, complex=False, **kw): + np.random.seed(123) + + A = np.random.randn(N, N) + if complex: + A = A + 1j*np.random.randn(N, N) + b = np.random.randn(N) + if complex: + b = b + 1j*np.random.randn(N) + + def func(x): + return dot(A, x) - b + + sol = nonlin.nonlin_solve(func, np.zeros(N), jac, maxiter=maxiter, + f_tol=1e-6, line_search=None, verbose=0) + assert_(np.allclose(dot(A, sol), b, atol=1e-6)) + + def test_broyden1(self): + # Broyden methods solve linear systems exactly in 2*N steps + self._check(nonlin.BroydenFirst(alpha=1.0), 20, 41, False) + self._check(nonlin.BroydenFirst(alpha=1.0), 20, 41, True) + + def test_broyden2(self): + # Broyden methods solve linear systems exactly in 2*N steps + self._check(nonlin.BroydenSecond(alpha=1.0), 20, 41, False) + self._check(nonlin.BroydenSecond(alpha=1.0), 20, 41, True) + + def test_anderson(self): + # Anderson is rather similar to Broyden, if given enough storage space + self._check(nonlin.Anderson(M=50, alpha=1.0), 20, 29, False) + self._check(nonlin.Anderson(M=50, alpha=1.0), 20, 29, True) + + def test_krylov(self): + # Krylov methods solve linear systems exactly in N inner steps + self._check(nonlin.KrylovJacobian, 20, 2, False, inner_m=10) + self._check(nonlin.KrylovJacobian, 20, 2, True, inner_m=10) + + def _check_autojac(self, A, b): + def func(x): + return A.dot(x) - b + + def jac(v): + return A + + sol = nonlin.nonlin_solve(func, np.zeros(b.shape[0]), jac, maxiter=2, + f_tol=1e-6, line_search=None, verbose=0) + np.testing.assert_allclose(A @ sol, b, atol=1e-6) + # test jac input as array -- not a function + sol = nonlin.nonlin_solve(func, np.zeros(b.shape[0]), A, maxiter=2, + f_tol=1e-6, line_search=None, verbose=0) + np.testing.assert_allclose(A @ sol, b, atol=1e-6) + + def test_jac_sparse(self): + A = csr_array([[1, 2], [2, 1]]) + b = np.array([1, -1]) + self._check_autojac(A, b) + self._check_autojac((1 + 2j) * A, (2 + 2j) * b) + + def test_jac_ndarray(self): + A = np.array([[1, 2], [2, 1]]) + b = np.array([1, -1]) + self._check_autojac(A, b) + self._check_autojac((1 + 2j) * A, (2 + 2j) * b) + + +class TestJacobianDotSolve: + """ + Check that solve/dot methods in Jacobian approximations are consistent + """ + + def _func(self, x): + return x**2 - 1 + np.dot(self.A, x) + + def _check_dot(self, jac_cls, complex=False, tol=1e-6, **kw): + np.random.seed(123) + + N = 7 + + def rand(*a): + q = np.random.rand(*a) + if complex: + q = q + 1j*np.random.rand(*a) + return q + + def assert_close(a, b, msg): + d = abs(a - b).max() + f = tol + abs(b).max()*tol + if d > f: + raise AssertionError(f'{msg}: err {d:g}') + + self.A = rand(N, N) + + # initialize + x0 = np.random.rand(N) + jac = jac_cls(**kw) + jac.setup(x0, self._func(x0), self._func) + + # check consistency + for k in range(2*N): + v = rand(N) + + if hasattr(jac, '__array__'): + Jd = np.array(jac) + if hasattr(jac, 'solve'): + Gv = jac.solve(v) + Gv2 = np.linalg.solve(Jd, v) + assert_close(Gv, Gv2, 'solve vs array') + if hasattr(jac, 'rsolve'): + Gv = jac.rsolve(v) + Gv2 = np.linalg.solve(Jd.T.conj(), v) + assert_close(Gv, Gv2, 'rsolve vs array') + if hasattr(jac, 'matvec'): + Jv = jac.matvec(v) + Jv2 = np.dot(Jd, v) + assert_close(Jv, Jv2, 'dot vs array') + if hasattr(jac, 'rmatvec'): + Jv = jac.rmatvec(v) + Jv2 = np.dot(Jd.T.conj(), v) + assert_close(Jv, Jv2, 'rmatvec vs 
array') + + if hasattr(jac, 'matvec') and hasattr(jac, 'solve'): + Jv = jac.matvec(v) + Jv2 = jac.solve(jac.matvec(Jv)) + assert_close(Jv, Jv2, 'dot vs solve') + + if hasattr(jac, 'rmatvec') and hasattr(jac, 'rsolve'): + Jv = jac.rmatvec(v) + Jv2 = jac.rmatvec(jac.rsolve(Jv)) + assert_close(Jv, Jv2, 'rmatvec vs rsolve') + + x = rand(N) + jac.update(x, self._func(x)) + + def test_broyden1(self): + self._check_dot(nonlin.BroydenFirst, complex=False) + self._check_dot(nonlin.BroydenFirst, complex=True) + + def test_broyden2(self): + self._check_dot(nonlin.BroydenSecond, complex=False) + self._check_dot(nonlin.BroydenSecond, complex=True) + + def test_anderson(self): + self._check_dot(nonlin.Anderson, complex=False) + self._check_dot(nonlin.Anderson, complex=True) + + def test_diagbroyden(self): + self._check_dot(nonlin.DiagBroyden, complex=False) + self._check_dot(nonlin.DiagBroyden, complex=True) + + def test_linearmixing(self): + self._check_dot(nonlin.LinearMixing, complex=False) + self._check_dot(nonlin.LinearMixing, complex=True) + + def test_excitingmixing(self): + self._check_dot(nonlin.ExcitingMixing, complex=False) + self._check_dot(nonlin.ExcitingMixing, complex=True) + + def test_krylov(self): + self._check_dot(nonlin.KrylovJacobian, complex=False, tol=1e-3) + self._check_dot(nonlin.KrylovJacobian, complex=True, tol=1e-3) + + +class TestNonlinOldTests: + """ Test case for a simple constrained entropy maximization problem + (the machine translation example of Berger et al in + Computational Linguistics, vol 22, num 1, pp 39--72, 1996.) + """ + + def test_broyden1(self): + x = nonlin.broyden1(F, F.xin, iter=12, alpha=1) + assert_(nonlin.norm(x) < 1e-9) + assert_(nonlin.norm(F(x)) < 1e-9) + + def test_broyden2(self): + x = nonlin.broyden2(F, F.xin, iter=12, alpha=1) + assert_(nonlin.norm(x) < 1e-9) + assert_(nonlin.norm(F(x)) < 1e-9) + + def test_anderson(self): + x = nonlin.anderson(F, F.xin, iter=12, alpha=0.03, M=5) + assert_(nonlin.norm(x) < 0.33) + + def test_linearmixing(self): + x = nonlin.linearmixing(F, F.xin, iter=60, alpha=0.5) + assert_(nonlin.norm(x) < 1e-7) + assert_(nonlin.norm(F(x)) < 1e-7) + + def test_exciting(self): + x = nonlin.excitingmixing(F, F.xin, iter=20, alpha=0.5) + assert_(nonlin.norm(x) < 1e-5) + assert_(nonlin.norm(F(x)) < 1e-5) + + def test_diagbroyden(self): + x = nonlin.diagbroyden(F, F.xin, iter=11, alpha=1) + assert_(nonlin.norm(x) < 1e-8) + assert_(nonlin.norm(F(x)) < 1e-8) + + def test_root_broyden1(self): + res = root(F, F.xin, method='broyden1', + options={'nit': 12, 'jac_options': {'alpha': 1}}) + assert_(nonlin.norm(res.x) < 1e-9) + assert_(nonlin.norm(res.fun) < 1e-9) + + def test_root_broyden2(self): + res = root(F, F.xin, method='broyden2', + options={'nit': 12, 'jac_options': {'alpha': 1}}) + assert_(nonlin.norm(res.x) < 1e-9) + assert_(nonlin.norm(res.fun) < 1e-9) + + def test_root_anderson(self): + res = root(F, F.xin, method='anderson', + options={'nit': 12, + 'jac_options': {'alpha': 0.03, 'M': 5}}) + assert_(nonlin.norm(res.x) < 0.33) + + def test_root_linearmixing(self): + res = root(F, F.xin, method='linearmixing', + options={'nit': 60, + 'jac_options': {'alpha': 0.5}}) + assert_(nonlin.norm(res.x) < 1e-7) + assert_(nonlin.norm(res.fun) < 1e-7) + + def test_root_excitingmixing(self): + res = root(F, F.xin, method='excitingmixing', + options={'nit': 20, + 'jac_options': {'alpha': 0.5}}) + assert_(nonlin.norm(res.x) < 1e-5) + assert_(nonlin.norm(res.fun) < 1e-5) + + def test_root_diagbroyden(self): + res = root(F, F.xin, 
method='diagbroyden', + options={'nit': 11, + 'jac_options': {'alpha': 1}}) + assert_(nonlin.norm(res.x) < 1e-8) + assert_(nonlin.norm(res.fun) < 1e-8) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_optimize.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_optimize.py new file mode 100644 index 0000000000000000000000000000000000000000..84a3d8115aa067d46a2e10d8581165dc838c4d6b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_optimize.py @@ -0,0 +1,3163 @@ +""" +Unit tests for optimization routines from optimize.py + +Authors: + Ed Schofield, Nov 2005 + Andrew Straw, April 2008 + +To run it in its simplest form:: + nosetests test_optimize.py + +""" +import itertools +import platform +import numpy as np +from numpy.testing import (assert_allclose, assert_equal, + assert_almost_equal, + assert_no_warnings, assert_warns, + assert_array_less, suppress_warnings) +import pytest +from pytest import raises as assert_raises + +from scipy import optimize +from scipy.optimize._minimize import Bounds, NonlinearConstraint +from scipy.optimize._minimize import (MINIMIZE_METHODS, + MINIMIZE_METHODS_NEW_CB, + MINIMIZE_SCALAR_METHODS) +from scipy.optimize._linprog import LINPROG_METHODS +from scipy.optimize._root import ROOT_METHODS +from scipy.optimize._root_scalar import ROOT_SCALAR_METHODS +from scipy.optimize._qap import QUADRATIC_ASSIGNMENT_METHODS +from scipy.optimize._differentiable_functions import ScalarFunction, FD_METHODS +from scipy.optimize._optimize import MemoizeJac, show_options, OptimizeResult +from scipy.optimize import rosen, rosen_der, rosen_hess + +from scipy.sparse import (coo_matrix, csc_matrix, csr_matrix, coo_array, + csr_array, csc_array) + +def test_check_grad(): + # Verify if check_grad is able to estimate the derivative of the + # expit (logistic sigmoid) function. + + def expit(x): + return 1 / (1 + np.exp(-x)) + + def der_expit(x): + return np.exp(-x) / (1 + np.exp(-x))**2 + + x0 = np.array([1.5]) + + r = optimize.check_grad(expit, der_expit, x0) + assert_almost_equal(r, 0) + r = optimize.check_grad(expit, der_expit, x0, + direction='random', seed=1234) + assert_almost_equal(r, 0) + + r = optimize.check_grad(expit, der_expit, x0, epsilon=1e-6) + assert_almost_equal(r, 0) + r = optimize.check_grad(expit, der_expit, x0, epsilon=1e-6, + direction='random', seed=1234) + assert_almost_equal(r, 0) + + # Check if the epsilon parameter is being considered. + r = abs(optimize.check_grad(expit, der_expit, x0, epsilon=1e-1) - 0) + assert r > 1e-7 + r = abs(optimize.check_grad(expit, der_expit, x0, epsilon=1e-1, + direction='random', seed=1234) - 0) + assert r > 1e-7 + + def x_sinx(x): + return (x*np.sin(x)).sum() + + def der_x_sinx(x): + return np.sin(x) + x*np.cos(x) + + x0 = np.arange(0, 2, 0.2) + + r = optimize.check_grad(x_sinx, der_x_sinx, x0, + direction='random', seed=1234) + assert_almost_equal(r, 0) + + assert_raises(ValueError, optimize.check_grad, + x_sinx, der_x_sinx, x0, + direction='random_projection', seed=1234) + + # checking can be done for derivatives of vector valued functions + r = optimize.check_grad(himmelblau_grad, himmelblau_hess, himmelblau_x0, + direction='all', seed=1234) + assert r < 5e-7 + + +class CheckOptimize: + """ Base test case for a simple constrained entropy maximization problem + (the machine translation example of Berger et al in + Computational Linguistics, vol 22, num 1, pp 39--72, 1996.) 
+ """ + + def setup_method(self): + self.F = np.array([[1, 1, 1], + [1, 1, 0], + [1, 0, 1], + [1, 0, 0], + [1, 0, 0]]) + self.K = np.array([1., 0.3, 0.5]) + self.startparams = np.zeros(3, np.float64) + self.solution = np.array([0., -0.524869316, 0.487525860]) + self.maxiter = 1000 + self.funccalls = 0 + self.gradcalls = 0 + self.trace = [] + + def func(self, x): + self.funccalls += 1 + if self.funccalls > 6000: + raise RuntimeError("too many iterations in optimization routine") + log_pdot = np.dot(self.F, x) + logZ = np.log(sum(np.exp(log_pdot))) + f = logZ - np.dot(self.K, x) + self.trace.append(np.copy(x)) + return f + + def grad(self, x): + self.gradcalls += 1 + log_pdot = np.dot(self.F, x) + logZ = np.log(sum(np.exp(log_pdot))) + p = np.exp(log_pdot - logZ) + return np.dot(self.F.transpose(), p) - self.K + + def hess(self, x): + log_pdot = np.dot(self.F, x) + logZ = np.log(sum(np.exp(log_pdot))) + p = np.exp(log_pdot - logZ) + return np.dot(self.F.T, + np.dot(np.diag(p), self.F - np.dot(self.F.T, p))) + + def hessp(self, x, p): + return np.dot(self.hess(x), p) + + +class CheckOptimizeParameterized(CheckOptimize): + + def test_cg(self): + # conjugate gradient optimization routine + if self.use_wrapper: + opts = {'maxiter': self.maxiter, 'disp': self.disp, + 'return_all': False} + res = optimize.minimize(self.func, self.startparams, args=(), + method='CG', jac=self.grad, + options=opts) + params, fopt, func_calls, grad_calls, warnflag = \ + res['x'], res['fun'], res['nfev'], res['njev'], res['status'] + else: + retval = optimize.fmin_cg(self.func, self.startparams, + self.grad, (), maxiter=self.maxiter, + full_output=True, disp=self.disp, + retall=False) + (params, fopt, func_calls, grad_calls, warnflag) = retval + + assert_allclose(self.func(params), self.func(self.solution), + atol=1e-6) + + # Ensure that function call counts are 'known good'; these are from + # SciPy 0.7.0. Don't allow them to increase. + assert self.funccalls == 9, self.funccalls + assert self.gradcalls == 7, self.gradcalls + + # Ensure that the function behaves the same; this is from SciPy 0.7.0 + assert_allclose(self.trace[2:4], + [[0, -0.5, 0.5], + [0, -5.05700028e-01, 4.95985862e-01]], + atol=1e-14, rtol=1e-7) + + def test_cg_cornercase(self): + def f(r): + return 2.5 * (1 - np.exp(-1.5*(r - 0.5)))**2 + + # Check several initial guesses. (Too far away from the + # minimum, the function ends up in the flat region of exp.) + for x0 in np.linspace(-0.75, 3, 71): + sol = optimize.minimize(f, [x0], method='CG') + assert sol.success + assert_allclose(sol.x, [0.5], rtol=1e-5) + + def test_bfgs(self): + # Broyden-Fletcher-Goldfarb-Shanno optimization routine + if self.use_wrapper: + opts = {'maxiter': self.maxiter, 'disp': self.disp, + 'return_all': False} + res = optimize.minimize(self.func, self.startparams, + jac=self.grad, method='BFGS', args=(), + options=opts) + + params, fopt, gopt, Hopt, func_calls, grad_calls, warnflag = ( + res['x'], res['fun'], res['jac'], res['hess_inv'], + res['nfev'], res['njev'], res['status']) + else: + retval = optimize.fmin_bfgs(self.func, self.startparams, self.grad, + args=(), maxiter=self.maxiter, + full_output=True, disp=self.disp, + retall=False) + (params, fopt, gopt, Hopt, + func_calls, grad_calls, warnflag) = retval + + assert_allclose(self.func(params), self.func(self.solution), + atol=1e-6) + + # Ensure that function call counts are 'known good'; these are from + # SciPy 0.7.0. Don't allow them to increase. 
+ assert self.funccalls == 10, self.funccalls + assert self.gradcalls == 8, self.gradcalls + + # Ensure that the function behaves the same; this is from SciPy 0.7.0 + assert_allclose(self.trace[6:8], + [[0, -5.25060743e-01, 4.87748473e-01], + [0, -5.24885582e-01, 4.87530347e-01]], + atol=1e-14, rtol=1e-7) + + def test_bfgs_hess_inv0_neg(self): + # Ensure that BFGS does not accept neg. def. initial inverse + # Hessian estimate. + with pytest.raises(ValueError, match="'hess_inv0' matrix isn't " + "positive definite."): + x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2]) + opts = {'disp': self.disp, 'hess_inv0': -np.eye(5)} + optimize.minimize(optimize.rosen, x0=x0, method='BFGS', args=(), + options=opts) + + def test_bfgs_hess_inv0_semipos(self): + # Ensure that BFGS does not accept semi pos. def. initial inverse + # Hessian estimate. + with pytest.raises(ValueError, match="'hess_inv0' matrix isn't " + "positive definite."): + x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2]) + hess_inv0 = np.eye(5) + hess_inv0[0, 0] = 0 + opts = {'disp': self.disp, 'hess_inv0': hess_inv0} + optimize.minimize(optimize.rosen, x0=x0, method='BFGS', args=(), + options=opts) + + def test_bfgs_hess_inv0_sanity(self): + # Ensure that BFGS handles `hess_inv0` parameter correctly. + fun = optimize.rosen + x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2]) + opts = {'disp': self.disp, 'hess_inv0': 1e-2 * np.eye(5)} + res = optimize.minimize(fun, x0=x0, method='BFGS', args=(), + options=opts) + res_true = optimize.minimize(fun, x0=x0, method='BFGS', args=(), + options={'disp': self.disp}) + assert_allclose(res.fun, res_true.fun, atol=1e-6) + + @pytest.mark.filterwarnings('ignore::UserWarning') + def test_bfgs_infinite(self): + # Test corner case where -Inf is the minimum. See gh-2019. + def func(x): + return -np.e ** (-x) + def fprime(x): + return -func(x) + x0 = [0] + with np.errstate(over='ignore'): + if self.use_wrapper: + opts = {'disp': self.disp} + x = optimize.minimize(func, x0, jac=fprime, method='BFGS', + args=(), options=opts)['x'] + else: + x = optimize.fmin_bfgs(func, x0, fprime, disp=self.disp) + assert not np.isfinite(func(x)) + + def test_bfgs_xrtol(self): + # test for #17345 to test xrtol parameter + x0 = [1.3, 0.7, 0.8, 1.9, 1.2] + res = optimize.minimize(optimize.rosen, + x0, method='bfgs', options={'xrtol': 1e-3}) + ref = optimize.minimize(optimize.rosen, + x0, method='bfgs', options={'gtol': 1e-3}) + assert res.nit != ref.nit + + def test_bfgs_c1(self): + # test for #18977 insufficiently low value of c1 leads to precision loss + # for poor starting parameters + x0 = [10.3, 20.7, 10.8, 1.9, -1.2] + res_c1_small = optimize.minimize(optimize.rosen, + x0, method='bfgs', options={'c1': 1e-8}) + res_c1_big = optimize.minimize(optimize.rosen, + x0, method='bfgs', options={'c1': 1e-1}) + + assert res_c1_small.nfev > res_c1_big.nfev + + def test_bfgs_c2(self): + # test that modification of c2 parameter + # results in different number of iterations + x0 = [1.3, 0.7, 0.8, 1.9, 1.2] + res_default = optimize.minimize(optimize.rosen, + x0, method='bfgs', options={'c2': .9}) + res_mod = optimize.minimize(optimize.rosen, + x0, method='bfgs', options={'c2': 1e-2}) + assert res_default.nit > res_mod.nit + + @pytest.mark.parametrize(["c1", "c2"], [[0.5, 2], + [-0.1, 0.1], + [0.2, 0.1]]) + def test_invalid_c1_c2(self, c1, c2): + with pytest.raises(ValueError, match="'c1' and 'c2'"): + x0 = [10.3, 20.7, 10.8, 1.9, -1.2] + optimize.minimize(optimize.rosen, x0, method='cg', + options={'c1': c1, 'c2': c2}) + + def test_powell(self): + # Powell 
(direction set) optimization routine + if self.use_wrapper: + opts = {'maxiter': self.maxiter, 'disp': self.disp, + 'return_all': False} + res = optimize.minimize(self.func, self.startparams, args=(), + method='Powell', options=opts) + params, fopt, direc, numiter, func_calls, warnflag = ( + res['x'], res['fun'], res['direc'], res['nit'], + res['nfev'], res['status']) + else: + retval = optimize.fmin_powell(self.func, self.startparams, + args=(), maxiter=self.maxiter, + full_output=True, disp=self.disp, + retall=False) + (params, fopt, direc, numiter, func_calls, warnflag) = retval + + assert_allclose(self.func(params), self.func(self.solution), + atol=1e-6) + # params[0] does not affect the objective function + assert_allclose(params[1:], self.solution[1:], atol=5e-6) + + # Ensure that function call counts are 'known good'; these are from + # SciPy 0.7.0. Don't allow them to increase. + # + # However, some leeway must be added: the exact evaluation + # count is sensitive to numerical error, and floating-point + # computations are not bit-for-bit reproducible across + # machines, and when using e.g., MKL, data alignment + # etc., affect the rounding error. + # + assert self.funccalls <= 116 + 20, self.funccalls + assert self.gradcalls == 0, self.gradcalls + + @pytest.mark.xfail(reason="This part of test_powell fails on some " + "platforms, but the solution returned by powell is " + "still valid.") + def test_powell_gh14014(self): + # This part of test_powell started failing on some CI platforms; + # see gh-14014. Since the solution is still correct and the comments + # in test_powell suggest that small differences in the bits are known + # to change the "trace" of the solution, seems safe to xfail to get CI + # green now and investigate later. + + # Powell (direction set) optimization routine + if self.use_wrapper: + opts = {'maxiter': self.maxiter, 'disp': self.disp, + 'return_all': False} + res = optimize.minimize(self.func, self.startparams, args=(), + method='Powell', options=opts) + params, fopt, direc, numiter, func_calls, warnflag = ( + res['x'], res['fun'], res['direc'], res['nit'], + res['nfev'], res['status']) + else: + retval = optimize.fmin_powell(self.func, self.startparams, + args=(), maxiter=self.maxiter, + full_output=True, disp=self.disp, + retall=False) + (params, fopt, direc, numiter, func_calls, warnflag) = retval + + # Ensure that the function behaves the same; this is from SciPy 0.7.0 + assert_allclose(self.trace[34:39], + [[0.72949016, -0.44156936, 0.47100962], + [0.72949016, -0.44156936, 0.48052496], + [1.45898031, -0.88313872, 0.95153458], + [0.72949016, -0.44156936, 0.47576729], + [1.72949016, -0.44156936, 0.47576729]], + atol=1e-14, rtol=1e-7) + + def test_powell_bounded(self): + # Powell (direction set) optimization routine + # same as test_powell above, but with bounds + bounds = [(-np.pi, np.pi) for _ in self.startparams] + if self.use_wrapper: + opts = {'maxiter': self.maxiter, 'disp': self.disp, + 'return_all': False} + res = optimize.minimize(self.func, self.startparams, args=(), + bounds=bounds, + method='Powell', options=opts) + params, func_calls = (res['x'], res['nfev']) + + assert func_calls == self.funccalls + assert_allclose(self.func(params), self.func(self.solution), + atol=1e-6, rtol=1e-5) + + # The exact evaluation count is sensitive to numerical error, and + # floating-point computations are not bit-for-bit reproducible + # across machines, and when using e.g. MKL, data alignment etc. + # affect the rounding error. 
+ # It takes 155 calls on my machine, but we can add the same +20 + # margin as is used in `test_powell` + assert self.funccalls <= 155 + 20 + assert self.gradcalls == 0 + + def test_neldermead(self): + # Nelder-Mead simplex algorithm + if self.use_wrapper: + opts = {'maxiter': self.maxiter, 'disp': self.disp, + 'return_all': False} + res = optimize.minimize(self.func, self.startparams, args=(), + method='Nelder-mead', options=opts) + params, fopt, numiter, func_calls, warnflag = ( + res['x'], res['fun'], res['nit'], res['nfev'], + res['status']) + else: + retval = optimize.fmin(self.func, self.startparams, + args=(), maxiter=self.maxiter, + full_output=True, disp=self.disp, + retall=False) + (params, fopt, numiter, func_calls, warnflag) = retval + + assert_allclose(self.func(params), self.func(self.solution), + atol=1e-6) + + # Ensure that function call counts are 'known good'; these are from + # SciPy 0.7.0. Don't allow them to increase. + assert self.funccalls == 167, self.funccalls + assert self.gradcalls == 0, self.gradcalls + + # Ensure that the function behaves the same; this is from SciPy 0.7.0 + assert_allclose(self.trace[76:78], + [[0.1928968, -0.62780447, 0.35166118], + [0.19572515, -0.63648426, 0.35838135]], + atol=1e-14, rtol=1e-7) + + def test_neldermead_initial_simplex(self): + # Nelder-Mead simplex algorithm + simplex = np.zeros((4, 3)) + simplex[...] = self.startparams + for j in range(3): + simplex[j+1, j] += 0.1 + + if self.use_wrapper: + opts = {'maxiter': self.maxiter, 'disp': False, + 'return_all': True, 'initial_simplex': simplex} + res = optimize.minimize(self.func, self.startparams, args=(), + method='Nelder-mead', options=opts) + params, fopt, numiter, func_calls, warnflag = (res['x'], + res['fun'], + res['nit'], + res['nfev'], + res['status']) + assert_allclose(res['allvecs'][0], simplex[0]) + else: + retval = optimize.fmin(self.func, self.startparams, + args=(), maxiter=self.maxiter, + full_output=True, disp=False, retall=False, + initial_simplex=simplex) + + (params, fopt, numiter, func_calls, warnflag) = retval + + assert_allclose(self.func(params), self.func(self.solution), + atol=1e-6) + + # Ensure that function call counts are 'known good'; these are from + # SciPy 0.17.0. Don't allow them to increase. + assert self.funccalls == 100, self.funccalls + assert self.gradcalls == 0, self.gradcalls + + # Ensure that the function behaves the same; this is from SciPy 0.15.0 + assert_allclose(self.trace[50:52], + [[0.14687474, -0.5103282, 0.48252111], + [0.14474003, -0.5282084, 0.48743951]], + atol=1e-14, rtol=1e-7) + + def test_neldermead_initial_simplex_bad(self): + # Check that it fails with bad simplices + bad_simplices = [] + + simplex = np.zeros((3, 2)) + simplex[...]
= self.startparams[:2] + for j in range(2): + simplex[j+1, j] += 0.1 + bad_simplices.append(simplex) + + simplex = np.zeros((3, 3)) + bad_simplices.append(simplex) + + for simplex in bad_simplices: + if self.use_wrapper: + opts = {'maxiter': self.maxiter, 'disp': False, + 'return_all': False, 'initial_simplex': simplex} + assert_raises(ValueError, + optimize.minimize, + self.func, + self.startparams, + args=(), + method='Nelder-mead', + options=opts) + else: + assert_raises(ValueError, optimize.fmin, + self.func, self.startparams, + args=(), maxiter=self.maxiter, + full_output=True, disp=False, retall=False, + initial_simplex=simplex) + + def test_neldermead_x0_ub(self): + # checks whether minimisation occurs correctly for entries where + # x0 == ub + # gh19991 + def quad(x): + return np.sum(x**2) + + res = optimize.minimize( + quad, + [1], + bounds=[(0, 1.)], + method='nelder-mead' + ) + assert_allclose(res.x, [0]) + + res = optimize.minimize( + quad, + [1, 2], + bounds=[(0, 1.), (1, 3.)], + method='nelder-mead' + ) + assert_allclose(res.x, [0, 1]) + + def test_ncg_negative_maxiter(self): + # Regression test for gh-8241 + opts = {'maxiter': -1} + result = optimize.minimize(self.func, self.startparams, + method='Newton-CG', jac=self.grad, + args=(), options=opts) + assert result.status == 1 + + def test_ncg_zero_xtol(self): + # Regression test for gh-20214 + def cosine(x): + return np.cos(x[0]) + + def jac(x): + return -np.sin(x[0]) + + x0 = [0.1] + xtol = 0 + result = optimize.minimize(cosine, + x0=x0, + jac=jac, + method="newton-cg", + options=dict(xtol=xtol)) + assert result.status == 0 + assert_almost_equal(result.x[0], np.pi) + + def test_ncg(self): + # line-search Newton conjugate gradient optimization routine + if self.use_wrapper: + opts = {'maxiter': self.maxiter, 'disp': self.disp, + 'return_all': False} + retval = optimize.minimize(self.func, self.startparams, + method='Newton-CG', jac=self.grad, + args=(), options=opts)['x'] + else: + retval = optimize.fmin_ncg(self.func, self.startparams, self.grad, + args=(), maxiter=self.maxiter, + full_output=False, disp=self.disp, + retall=False) + + params = retval + + assert_allclose(self.func(params), self.func(self.solution), + atol=1e-6) + + # Ensure that function call counts are 'known good'; these are from + # SciPy 0.7.0. Don't allow them to increase. + assert self.funccalls == 7, self.funccalls + assert self.gradcalls <= 22, self.gradcalls # 0.13.0 + # assert self.gradcalls <= 18, self.gradcalls # 0.9.0 + # assert self.gradcalls == 18, self.gradcalls # 0.8.0 + # assert self.gradcalls == 22, self.gradcalls # 0.7.0 + + # Ensure that the function behaves the same; this is from SciPy 0.7.0 + assert_allclose(self.trace[3:5], + [[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01], + [-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]], + atol=1e-6, rtol=1e-7) + + def test_ncg_hess(self): + # Newton conjugate gradient with Hessian + if self.use_wrapper: + opts = {'maxiter': self.maxiter, 'disp': self.disp, + 'return_all': False} + retval = optimize.minimize(self.func, self.startparams, + method='Newton-CG', jac=self.grad, + hess=self.hess, + args=(), options=opts)['x'] + else: + retval = optimize.fmin_ncg(self.func, self.startparams, self.grad, + fhess=self.hess, + args=(), maxiter=self.maxiter, + full_output=False, disp=self.disp, + retall=False) + + params = retval + + assert_allclose(self.func(params), self.func(self.solution), + atol=1e-6) + + # Ensure that function call counts are 'known good'; these are from + # SciPy 0.7.0. 
Don't allow them to increase. + assert self.funccalls <= 7, self.funccalls # gh10673 + assert self.gradcalls <= 18, self.gradcalls # 0.9.0 + # assert self.gradcalls == 18, self.gradcalls # 0.8.0 + # assert self.gradcalls == 22, self.gradcalls # 0.7.0 + + # Ensure that the function behaves the same; this is from SciPy 0.7.0 + assert_allclose(self.trace[3:5], + [[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01], + [-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]], + atol=1e-6, rtol=1e-7) + + def test_ncg_hessp(self): + # Newton conjugate gradient with Hessian times a vector p. + if self.use_wrapper: + opts = {'maxiter': self.maxiter, 'disp': self.disp, + 'return_all': False} + retval = optimize.minimize(self.func, self.startparams, + method='Newton-CG', jac=self.grad, + hessp=self.hessp, + args=(), options=opts)['x'] + else: + retval = optimize.fmin_ncg(self.func, self.startparams, self.grad, + fhess_p=self.hessp, + args=(), maxiter=self.maxiter, + full_output=False, disp=self.disp, + retall=False) + + params = retval + + assert_allclose(self.func(params), self.func(self.solution), + atol=1e-6) + + # Ensure that function call counts are 'known good'; these are from + # SciPy 0.7.0. Don't allow them to increase. + assert self.funccalls <= 7, self.funccalls # gh10673 + assert self.gradcalls <= 18, self.gradcalls # 0.9.0 + # assert self.gradcalls == 18, self.gradcalls # 0.8.0 + # assert self.gradcalls == 22, self.gradcalls # 0.7.0 + + # Ensure that the function behaves the same; this is from SciPy 0.7.0 + assert_allclose(self.trace[3:5], + [[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01], + [-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]], + atol=1e-6, rtol=1e-7) + + +def test_maxfev_test(): + rng = np.random.default_rng(271707100830272976862395227613146332411) + + def cost(x): + return rng.random(1) * 1000 # a problem that never converges + + for imaxfev in [1, 10, 50]: + # "TNC" and "L-BFGS-B" also support max function evaluation, but + # these may violate the limit because of evaluating gradients + # by numerical differentiation. See the discussion in PR #14805. + for method in ['Powell', 'Nelder-Mead']: + result = optimize.minimize(cost, rng.random(10), + method=method, + options={'maxfev': imaxfev}) + assert result["nfev"] == imaxfev + + +def test_wrap_scalar_function_with_validation(): + + def func_(x): + return x + + fcalls, func = optimize._optimize.\ + _wrap_scalar_function_maxfun_validation(func_, np.asarray(1), 5) + + for i in range(5): + func(np.asarray(i)) + assert fcalls[0] == i+1 + + msg = "Too many function calls" + with assert_raises(optimize._optimize._MaxFuncCallError, match=msg): + func(np.asarray(i)) # exceeded maximum function call + + fcalls, func = optimize._optimize.\ + _wrap_scalar_function_maxfun_validation(func_, np.asarray(1), 5) + + msg = "The user-provided objective function must return a scalar value."
+ with assert_raises(ValueError, match=msg): + func(np.array([1, 1])) + + +def test_obj_func_returns_scalar(): + match = ("The user-provided " + "objective function must " + "return a scalar value.") + with assert_raises(ValueError, match=match): + optimize.minimize(lambda x: x, np.array([1, 1]), method='BFGS') + + +def test_neldermead_iteration_num(): + x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2]) + res = optimize._minimize._minimize_neldermead(optimize.rosen, x0, + xatol=1e-8) + assert res.nit <= 339 + + +def test_neldermead_respect_fp(): + # Nelder-Mead should respect the fp type of the input + function + x0 = np.array([5.0, 4.0]).astype(np.float32) + def rosen_(x): + assert x.dtype == np.float32 + return optimize.rosen(x) + + optimize.minimize(rosen_, x0, method='Nelder-Mead') + + +def test_neldermead_xatol_fatol(): + # gh4484 + # test we can call with fatol, xatol specified + def func(x): + return x[0] ** 2 + x[1] ** 2 + + optimize._minimize._minimize_neldermead(func, [1, 1], maxiter=2, + xatol=1e-3, fatol=1e-3) + + +def test_neldermead_adaptive(): + def func(x): + return np.sum(x ** 2) + p0 = [0.15746215, 0.48087031, 0.44519198, 0.4223638, 0.61505159, + 0.32308456, 0.9692297, 0.4471682, 0.77411992, 0.80441652, + 0.35994957, 0.75487856, 0.99973421, 0.65063887, 0.09626474] + + res = optimize.minimize(func, p0, method='Nelder-Mead') + assert_equal(res.success, False) + + res = optimize.minimize(func, p0, method='Nelder-Mead', + options={'adaptive': True}) + assert_equal(res.success, True) + + +def test_bounded_powell_outsidebounds(): + # With the bounded Powell method, if you start outside the bounds the final + # solution should still be within the bounds (provided that the user doesn't + # make a bad choice for the `direc` argument). + def func(x): + return np.sum(x ** 2) + bounds = (-1, 1), (-1, 1), (-1, 1) + x0 = [-4, .5, -.8] + + # we're starting outside the bounds, so we should get a warning + with assert_warns(optimize.OptimizeWarning): + res = optimize.minimize(func, x0, bounds=bounds, method="Powell") + assert_allclose(res.x, np.array([0.] * len(x0)), atol=1e-6) + assert_equal(res.success, True) + assert_equal(res.status, 0) + + # However, now if we change the `direc` argument such that the + # set of vectors does not span the parameter space, then we may + # not end up back within the bounds. Here we see that the first + # parameter cannot be updated! + direc = [[0, 0, 0], [0, 1, 0], [0, 0, 1]] + # we're starting outside the bounds, so we should get a warning + with assert_warns(optimize.OptimizeWarning): + res = optimize.minimize(func, x0, + bounds=bounds, method="Powell", + options={'direc': direc}) + assert_allclose(res.x, np.array([-4., 0, 0]), atol=1e-6) + assert_equal(res.success, False) + assert_equal(res.status, 4) + + +def test_bounded_powell_vs_powell(): + # here we test an example where the bounded Powell method + # will return a different result than the standard Powell + # method. + + # first we test a simple example where the minimum is at + # the origin and the minimum that is within the bounds is + # larger than the minimum at the origin.
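+    # An illustrative aside: for a separable quadratic such as func below,
+    # the box-constrained minimiser is the unconstrained minimiser clipped
+    # into the box coordinate-wise, e.g. np.clip(0., -5., -1.) == -1., the
+    # first entry of the expected point p asserted below.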
+ def func(x): + return np.sum(x ** 2) + bounds = (-5, -1), (-10, -0.1), (1, 9.2), (-4, 7.6), (-15.9, -2) + x0 = [-2.1, -5.2, 1.9, 0, -2] + + options = {'ftol': 1e-10, 'xtol': 1e-10} + + res_powell = optimize.minimize(func, x0, method="Powell", options=options) + assert_allclose(res_powell.x, 0., atol=1e-6) + assert_allclose(res_powell.fun, 0., atol=1e-6) + + res_bounded_powell = optimize.minimize(func, x0, options=options, + bounds=bounds, + method="Powell") + p = np.array([-1, -0.1, 1, 0, -2]) + assert_allclose(res_bounded_powell.x, p, atol=1e-6) + assert_allclose(res_bounded_powell.fun, func(p), atol=1e-6) + + # now we test bounded Powell but with a mix of inf bounds. + bounds = (None, -1), (-np.inf, -.1), (1, np.inf), (-4, None), (-15.9, -2) + res_bounded_powell = optimize.minimize(func, x0, options=options, + bounds=bounds, + method="Powell") + p = np.array([-1, -0.1, 1, 0, -2]) + assert_allclose(res_bounded_powell.x, p, atol=1e-6) + assert_allclose(res_bounded_powell.fun, func(p), atol=1e-6) + + # next we test an example where the global minimum is within + # the bounds, but the bounded Powell method performs better + # than the standard Powell method. + def func(x): + t = np.sin(-x[0]) * np.cos(x[1]) * np.sin(-x[0] * x[1]) * np.cos(x[1]) + t -= np.cos(np.sin(x[1] * x[2]) * np.cos(x[2])) + return t**2 + + bounds = [(-2, 5)] * 3 + x0 = [-0.5, -0.5, -0.5] + + res_powell = optimize.minimize(func, x0, method="Powell") + res_bounded_powell = optimize.minimize(func, x0, + bounds=bounds, + method="Powell") + assert_allclose(res_powell.fun, 0.007136253919761627, atol=1e-6) + assert_allclose(res_bounded_powell.fun, 0, atol=1e-6) + + # next we test the previous example where we provide Powell + # with (-inf, inf) bounds, and compare it to providing Powell + # with no bounds. They should end up the same. + bounds = [(-np.inf, np.inf)] * 3 + + res_bounded_powell = optimize.minimize(func, x0, + bounds=bounds, + method="Powell") + assert_allclose(res_powell.fun, res_bounded_powell.fun, atol=1e-6) + assert_allclose(res_powell.nfev, res_bounded_powell.nfev, atol=1e-6) + assert_allclose(res_powell.x, res_bounded_powell.x, atol=1e-6) + + # now test when x0 starts outside of the bounds. + x0 = [45.46254415, -26.52351498, 31.74830248] + bounds = [(-2, 5)] * 3 + # we're starting outside the bounds, so we should get a warning + with assert_warns(optimize.OptimizeWarning): + res_bounded_powell = optimize.minimize(func, x0, + bounds=bounds, + method="Powell") + assert_allclose(res_bounded_powell.fun, 0, atol=1e-6) + + +def test_onesided_bounded_powell_stability(): + # When the Powell method is bounded on only one side, a + # np.tan transform is done in order to convert it into a + # completely bounded problem. Here we do some simple tests + # of one-sided bounded Powell where the optimal solutions + # are large to test the stability of the transformation. + kwargs = {'method': 'Powell', + 'bounds': [(-np.inf, 1e6)] * 3, + 'options': {'ftol': 1e-8, 'xtol': 1e-8}} + x0 = [1, 1, 1] + + # df/dx is constant. + def f(x): + return -np.sum(x) + res = optimize.minimize(f, x0, **kwargs) + assert_allclose(res.fun, -3e6, atol=1e-4) + + # df/dx gets smaller and smaller. + def f(x): + return -np.abs(np.sum(x)) ** (0.1) * (1 if np.all(x > 0) else -1) + + res = optimize.minimize(f, x0, **kwargs) + assert_allclose(res.fun, -(3e6) ** (0.1)) + + # df/dx gets larger and larger.
+ def f(x): + return -np.abs(np.sum(x)) ** 10 * (1 if np.all(x > 0) else -1) + + res = optimize.minimize(f, x0, **kwargs) + assert_allclose(res.fun, -(3e6) ** 10, rtol=1e-7) + + # df/dx gets larger for some of the variables and smaller for others. + def f(x): + t = -np.abs(np.sum(x[:2])) ** 5 - np.abs(np.sum(x[2:])) ** (0.1) + t *= (1 if np.all(x > 0) else -1) + return t + + kwargs['bounds'] = [(-np.inf, 1e3)] * 3 + res = optimize.minimize(f, x0, **kwargs) + assert_allclose(res.fun, -(2e3) ** 5 - (1e6) ** (0.1), rtol=1e-7) + + +class TestOptimizeWrapperDisp(CheckOptimizeParameterized): + use_wrapper = True + disp = True + + +class TestOptimizeWrapperNoDisp(CheckOptimizeParameterized): + use_wrapper = True + disp = False + + +class TestOptimizeNoWrapperDisp(CheckOptimizeParameterized): + use_wrapper = False + disp = True + + +class TestOptimizeNoWrapperNoDisp(CheckOptimizeParameterized): + use_wrapper = False + disp = False + + +class TestOptimizeSimple(CheckOptimize): + + def test_bfgs_nan(self): + # Test corner case where nan is fed to optimizer. See gh-2067. + def func(x): + return x + def fprime(x): + return np.ones_like(x) + x0 = [np.nan] + with np.errstate(over='ignore', invalid='ignore'): + x = optimize.fmin_bfgs(func, x0, fprime, disp=False) + assert np.isnan(func(x)) + + def test_bfgs_nan_return(self): + # Test corner cases where fun returns NaN. See gh-4793. + + # First case: NaN from first call. + def func(x): + return np.nan + with np.errstate(invalid='ignore'): + result = optimize.minimize(func, 0) + + assert np.isnan(result['fun']) + assert result['success'] is False + + # Second case: NaN from second call. + def func(x): + return 0 if x == 0 else np.nan + def fprime(x): + return np.ones_like(x) # Steer away from zero. + with np.errstate(invalid='ignore'): + result = optimize.minimize(func, 0, jac=fprime) + + assert np.isnan(result['fun']) + assert result['success'] is False + + def test_bfgs_numerical_jacobian(self): + # BFGS with numerical Jacobian and a vector epsilon parameter. + # define the epsilon parameter using a random vector + epsilon = np.sqrt(np.spacing(1.)) * np.random.rand(len(self.solution)) + + params = optimize.fmin_bfgs(self.func, self.startparams, + epsilon=epsilon, args=(), + maxiter=self.maxiter, disp=False) + + assert_allclose(self.func(params), self.func(self.solution), + atol=1e-6) + + def test_finite_differences_jac(self): + methods = ['BFGS', 'CG', 'TNC'] + jacs = ['2-point', '3-point', None] + for method, jac in itertools.product(methods, jacs): + result = optimize.minimize(self.func, self.startparams, + method=method, jac=jac) + assert_allclose(self.func(result.x), self.func(self.solution), + atol=1e-6) + + def test_finite_differences_hess(self): + # test that all the methods that require hess can use finite-difference + # For Newton-CG, trust-ncg, trust-krylov the FD estimated hessian is + # wrapped in a hessp function + # dogleg, trust-exact actually require true hessians at the moment, so + # they're excluded. 
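+        # Sketch of the idea being exercised (illustrative only, not
+        # necessarily the actual SciPy internals): a finite-difference
+        # Hessian-vector product can be formed from two gradient calls,
+        #     hessp(x, p) ~= (grad(x + eps * p) - grad(x)) / eps,
+        # which is how an FD-estimated Hessian can stand in for `hess`.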
+ methods = ['trust-constr', 'Newton-CG', 'trust-ncg', 'trust-krylov'] + hesses = FD_METHODS + (optimize.BFGS,) + for method, hess in itertools.product(methods, hesses): + if hess is optimize.BFGS: + hess = hess() + result = optimize.minimize(self.func, self.startparams, + method=method, jac=self.grad, + hess=hess) + assert result.success + + # check that the methods demand some sort of Hessian specification + # Newton-CG creates its own hessp, and trust-constr doesn't need a hess + # specified either + methods = ['trust-ncg', 'trust-krylov', 'dogleg', 'trust-exact'] + for method in methods: + with pytest.raises(ValueError): + optimize.minimize(self.func, self.startparams, + method=method, jac=self.grad, + hess=None) + + def test_bfgs_gh_2169(self): + def f(x): + if x < 0: + return 1.79769313e+308 + else: + return x + 1./x + xs = optimize.fmin_bfgs(f, [10.], disp=False) + assert_allclose(xs, 1.0, rtol=1e-4, atol=1e-4) + + def test_bfgs_double_evaluations(self): + # check BFGS does not evaluate twice in a row at same point + def f(x): + xp = x[0] + assert xp not in seen + seen.add(xp) + return 10*x**2, 20*x + + seen = set() + optimize.minimize(f, -100, method='bfgs', jac=True, tol=1e-7) + + def test_l_bfgs_b(self): + # limited-memory bound-constrained BFGS algorithm + retval = optimize.fmin_l_bfgs_b(self.func, self.startparams, + self.grad, args=(), + maxiter=self.maxiter) + + (params, fopt, d) = retval + + assert_allclose(self.func(params), self.func(self.solution), + atol=1e-6) + + # Ensure that function call counts are 'known good'; these are from + # SciPy 0.7.0. Don't allow them to increase. + assert self.funccalls == 7, self.funccalls + assert self.gradcalls == 5, self.gradcalls + + # Ensure that the function behaves the same; this is from SciPy 0.7.0 + # test fixed in gh10673 + assert_allclose(self.trace[3:5], + [[8.117083e-16, -5.196198e-01, 4.897617e-01], + [0., -0.52489628, 0.48753042]], + atol=1e-14, rtol=1e-7) + + def test_l_bfgs_b_numjac(self): + # L-BFGS-B with numerical Jacobian + retval = optimize.fmin_l_bfgs_b(self.func, self.startparams, + approx_grad=True, + maxiter=self.maxiter) + + (params, fopt, d) = retval + + assert_allclose(self.func(params), self.func(self.solution), + atol=1e-6) + + def test_l_bfgs_b_funjac(self): + # L-BFGS-B with combined objective function and Jacobian + def fun(x): + return self.func(x), self.grad(x) + + retval = optimize.fmin_l_bfgs_b(fun, self.startparams, + maxiter=self.maxiter) + + (params, fopt, d) = retval + + assert_allclose(self.func(params), self.func(self.solution), + atol=1e-6) + + def test_l_bfgs_b_maxiter(self): + # gh7854 + # Ensure that not more than maxiters are ever run. + class Callback: + def __init__(self): + self.nit = 0 + self.fun = None + self.x = None + + def __call__(self, x): + self.x = x + self.fun = optimize.rosen(x) + self.nit += 1 + + c = Callback() + res = optimize.minimize(optimize.rosen, [0., 0.], method='l-bfgs-b', + callback=c, options={'maxiter': 5}) + + assert_equal(res.nit, 5) + assert_almost_equal(res.x, c.x) + assert_almost_equal(res.fun, c.fun) + assert_equal(res.status, 1) + assert res.success is False + assert_equal(res.message, + 'STOP: TOTAL NO. 
of ITERATIONS REACHED LIMIT') + + def test_minimize_l_bfgs_b(self): + # Minimize with L-BFGS-B method + opts = {'disp': False, 'maxiter': self.maxiter} + r = optimize.minimize(self.func, self.startparams, + method='L-BFGS-B', jac=self.grad, + options=opts) + assert_allclose(self.func(r.x), self.func(self.solution), + atol=1e-6) + assert self.gradcalls == r.njev + + self.funccalls = self.gradcalls = 0 + # approximate jacobian + ra = optimize.minimize(self.func, self.startparams, + method='L-BFGS-B', options=opts) + # check that function evaluations in approximate jacobian are counted + # assert_(ra.nfev > r.nfev) + assert self.funccalls == ra.nfev + assert_allclose(self.func(ra.x), self.func(self.solution), + atol=1e-6) + + self.funccalls = self.gradcalls = 0 + # approximate jacobian + ra = optimize.minimize(self.func, self.startparams, jac='3-point', + method='L-BFGS-B', options=opts) + assert self.funccalls == ra.nfev + assert_allclose(self.func(ra.x), self.func(self.solution), + atol=1e-6) + + def test_minimize_l_bfgs_b_ftol(self): + # Check that the `ftol` parameter in l_bfgs_b works as expected + v0 = None + for tol in [1e-1, 1e-4, 1e-7, 1e-10]: + opts = {'disp': False, 'maxiter': self.maxiter, 'ftol': tol} + sol = optimize.minimize(self.func, self.startparams, + method='L-BFGS-B', jac=self.grad, + options=opts) + v = self.func(sol.x) + + if v0 is None: + v0 = v + else: + assert v < v0 + + assert_allclose(v, self.func(self.solution), rtol=tol) + + def test_minimize_l_bfgs_maxls(self): + # check that the maxls is passed down to the Fortran routine + sol = optimize.minimize(optimize.rosen, np.array([-1.2, 1.0]), + method='L-BFGS-B', jac=optimize.rosen_der, + options={'disp': False, 'maxls': 1}) + assert not sol.success + + def test_minimize_l_bfgs_b_maxfun_interruption(self): + # gh-6162 + f = optimize.rosen + g = optimize.rosen_der + values = [] + x0 = np.full(7, 1000) + + def objfun(x): + value = f(x) + values.append(value) + return value + + # Look for an interesting test case. + # Request a maxfun that stops at a particularly bad function + # evaluation somewhere between 100 and 300 evaluations. + low, medium, high = 30, 100, 300 + optimize.fmin_l_bfgs_b(objfun, x0, fprime=g, maxfun=high) + v, k = max((y, i) for i, y in enumerate(values[medium:])) + maxfun = medium + k + # If the minimization strategy is reasonable, + # the minimize() result should not be worse than the best + # of the first 30 function evaluations. + target = min(values[:low]) + xmin, fmin, d = optimize.fmin_l_bfgs_b(f, x0, fprime=g, maxfun=maxfun) + assert_array_less(fmin, target) + + def test_custom(self): + # This function comes from the documentation example. 
+ def custmin(fun, x0, args=(), maxfev=None, stepsize=0.1, + maxiter=100, callback=None, **options): + bestx = x0 + besty = fun(x0) + funcalls = 1 + niter = 0 + improved = True + stop = False + + while improved and not stop and niter < maxiter: + improved = False + niter += 1 + for dim in range(np.size(x0)): + for s in [bestx[dim] - stepsize, bestx[dim] + stepsize]: + testx = np.copy(bestx) + testx[dim] = s + testy = fun(testx, *args) + funcalls += 1 + if testy < besty: + besty = testy + bestx = testx + improved = True + if callback is not None: + callback(bestx) + if maxfev is not None and funcalls >= maxfev: + stop = True + break + + return optimize.OptimizeResult(fun=besty, x=bestx, nit=niter, + nfev=funcalls, success=(niter > 1)) + + x0 = [1.35, 0.9, 0.8, 1.1, 1.2] + res = optimize.minimize(optimize.rosen, x0, method=custmin, + options=dict(stepsize=0.05)) + assert_allclose(res.x, 1.0, rtol=1e-4, atol=1e-4) + + def test_gh10771(self): + # check that minimize passes bounds and constraints to a custom + # minimizer without altering them. + bounds = [(-2, 2), (0, 3)] + constraints = 'constraints' + + def custmin(fun, x0, **options): + assert options['bounds'] is bounds + assert options['constraints'] is constraints + return optimize.OptimizeResult() + + x0 = [1, 1] + optimize.minimize(optimize.rosen, x0, method=custmin, + bounds=bounds, constraints=constraints) + + def test_minimize_tol_parameter(self): + # Check that the minimize() tol= argument does something + def func(z): + x, y = z + return x**2*y**2 + x**4 + 1 + + def dfunc(z): + x, y = z + return np.array([2*x*y**2 + 4*x**3, 2*x**2*y]) + + for method in ['nelder-mead', 'powell', 'cg', 'bfgs', + 'newton-cg', 'l-bfgs-b', 'tnc', + 'cobyla', 'slsqp']: + if method in ('nelder-mead', 'powell', 'cobyla'): + jac = None + else: + jac = dfunc + + sol1 = optimize.minimize(func, [1, 1], jac=jac, tol=1e-10, + method=method) + sol2 = optimize.minimize(func, [1, 1], jac=jac, tol=1.0, + method=method) + assert func(sol1.x) < func(sol2.x), \ + f"{method}: {func(sol1.x)} vs. 
{func(sol2.x)}" + + @pytest.mark.filterwarnings('ignore::UserWarning') + @pytest.mark.filterwarnings('ignore::RuntimeWarning') # See gh-18547 + @pytest.mark.parametrize('method', + ['fmin', 'fmin_powell', 'fmin_cg', 'fmin_bfgs', + 'fmin_ncg', 'fmin_l_bfgs_b', 'fmin_tnc', + 'fmin_slsqp'] + MINIMIZE_METHODS) + def test_minimize_callback_copies_array(self, method): + # Check that arrays passed to callbacks are not modified + # inplace by the optimizer afterward + + if method in ('fmin_tnc', 'fmin_l_bfgs_b'): + def func(x): + return optimize.rosen(x), optimize.rosen_der(x) + else: + func = optimize.rosen + jac = optimize.rosen_der + hess = optimize.rosen_hess + + x0 = np.zeros(10) + + # Set options + kwargs = {} + if method.startswith('fmin'): + routine = getattr(optimize, method) + if method == 'fmin_slsqp': + kwargs['iter'] = 5 + elif method == 'fmin_tnc': + kwargs['maxfun'] = 100 + elif method in ('fmin', 'fmin_powell'): + kwargs['maxiter'] = 3500 + else: + kwargs['maxiter'] = 5 + else: + def routine(*a, **kw): + kw['method'] = method + return optimize.minimize(*a, **kw) + + if method == 'tnc': + kwargs['options'] = dict(maxfun=100) + else: + kwargs['options'] = dict(maxiter=5) + + if method in ('fmin_ncg',): + kwargs['fprime'] = jac + elif method in ('newton-cg',): + kwargs['jac'] = jac + elif method in ('trust-krylov', 'trust-exact', 'trust-ncg', 'dogleg', + 'trust-constr'): + kwargs['jac'] = jac + kwargs['hess'] = hess + + # Run with callback + results = [] + + def callback(x, *args, **kwargs): + assert not isinstance(x, optimize.OptimizeResult) + results.append((x, np.copy(x))) + + routine(func, x0, callback=callback, **kwargs) + + # Check returned arrays coincide with their copies + # and have no memory overlap + assert len(results) > 2 + assert all(np.all(x == y) for x, y in results) + combinations = itertools.combinations(results, 2) + assert not any(np.may_share_memory(x[0], y[0]) for x, y in combinations) + + @pytest.mark.parametrize('method', ['nelder-mead', 'powell', 'cg', + 'bfgs', 'newton-cg', 'l-bfgs-b', + 'tnc', 'cobyla', 'slsqp']) + def test_no_increase(self, method): + # Check that the solver doesn't return a value worse than the + # initial point. 
+ + def func(x): + return (x - 1)**2 + + def bad_grad(x): + # purposefully invalid gradient function, simulates a case + # where line searches start failing + return 2*(x - 1) * (-1) - 2 + + x0 = np.array([2.0]) + f0 = func(x0) + jac = bad_grad + options = dict(maxfun=20) if method == 'tnc' else dict(maxiter=20) + if method in ['nelder-mead', 'powell', 'cobyla']: + jac = None + sol = optimize.minimize(func, x0, jac=jac, method=method, + options=options) + assert_equal(func(sol.x), sol.fun) + + if method == 'slsqp': + pytest.xfail("SLSQP returns slightly worse") + assert func(sol.x) <= f0 + + def test_slsqp_respect_bounds(self): + # Regression test for gh-3108 + def f(x): + return sum((x - np.array([1., 2., 3., 4.]))**2) + + def cons(x): + a = np.array([[-1, -1, -1, -1], [-3, -3, -2, -1]]) + return np.concatenate([np.dot(a, x) + np.array([5, 10]), x]) + + x0 = np.array([0.5, 1., 1.5, 2.]) + res = optimize.minimize(f, x0, method='slsqp', + constraints={'type': 'ineq', 'fun': cons}) + assert_allclose(res.x, np.array([0., 2, 5, 8])/3, atol=1e-12) + + @pytest.mark.parametrize('method', ['Nelder-Mead', 'Powell', 'CG', 'BFGS', + 'Newton-CG', 'L-BFGS-B', 'SLSQP', + 'trust-constr', 'dogleg', 'trust-ncg', + 'trust-exact', 'trust-krylov']) + def test_respect_maxiter(self, method): + # Check that the number of iterations equals max_iter, assuming + # convergence isn't reached first + MAXITER = 4 + + x0 = np.zeros(10) + + sf = ScalarFunction(optimize.rosen, x0, (), optimize.rosen_der, + optimize.rosen_hess, None, None) + + # Set options + kwargs = {'method': method, 'options': dict(maxiter=MAXITER)} + + if method in ('Newton-CG',): + kwargs['jac'] = sf.grad + elif method in ('trust-krylov', 'trust-exact', 'trust-ncg', 'dogleg', + 'trust-constr'): + kwargs['jac'] = sf.grad + kwargs['hess'] = sf.hess + + sol = optimize.minimize(sf.fun, x0, **kwargs) + assert sol.nit == MAXITER + assert sol.nfev >= sf.nfev + if hasattr(sol, 'njev'): + assert sol.njev >= sf.ngev + + # method specific tests + if method == 'SLSQP': + assert sol.status == 9 # Iteration limit reached + + @pytest.mark.parametrize('method', ['Nelder-Mead', 'Powell', + 'fmin', 'fmin_powell']) + def test_runtime_warning(self, method): + x0 = np.zeros(10) + sf = ScalarFunction(optimize.rosen, x0, (), optimize.rosen_der, + optimize.rosen_hess, None, None) + options = {"maxiter": 1, "disp": True} + with pytest.warns(RuntimeWarning, + match=r'Maximum number of iterations'): + if method.startswith('fmin'): + routine = getattr(optimize, method) + routine(sf.fun, x0, **options) + else: + optimize.minimize(sf.fun, x0, method=method, options=options) + + def test_respect_maxiter_trust_constr_ineq_constraints(self): + # special case of minimization with trust-constr and inequality + # constraints to check maxiter limit is obeyed when using internal + # method 'tr_interior_point' + MAXITER = 4 + f = optimize.rosen + jac = optimize.rosen_der + hess = optimize.rosen_hess + + def fun(x): + return np.array([0.2 * x[0] - 0.4 * x[1] - 0.33 * x[2]]) + cons = ({'type': 'ineq', + 'fun': fun},) + + x0 = np.zeros(10) + sol = optimize.minimize(f, x0, constraints=cons, jac=jac, hess=hess, + method='trust-constr', + options=dict(maxiter=MAXITER)) + assert sol.nit == MAXITER + + def test_minimize_automethod(self): + def f(x): + return x**2 + + def cons(x): + return x - 2 + + x0 = np.array([10.]) + sol_0 = optimize.minimize(f, x0) + sol_1 = optimize.minimize(f, x0, constraints=[{'type': 'ineq', + 'fun': cons}]) + sol_2 = optimize.minimize(f, x0, bounds=[(5, 10)]) +
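+ # (Editorial note: these calls exercise minimize()'s default method + # selection, which is documented to choose one of BFGS, L-BFGS-B or + # SLSQP: SLSQP when constraints are given, L-BFGS-B when only + # bounds are given, and BFGS when neither is given. The asserts + # below check that each automatically chosen method finds the + # correct unconstrained/bounded/constrained minimum.)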
sol_3 = optimize.minimize(f, x0, + constraints=[{'type': 'ineq', 'fun': cons}], + bounds=[(5, 10)]) + sol_4 = optimize.minimize(f, x0, + constraints=[{'type': 'ineq', 'fun': cons}], + bounds=[(1, 10)]) + for sol in [sol_0, sol_1, sol_2, sol_3, sol_4]: + assert sol.success + assert_allclose(sol_0.x, 0, atol=1e-7) + assert_allclose(sol_1.x, 2, atol=1e-7) + assert_allclose(sol_2.x, 5, atol=1e-7) + assert_allclose(sol_3.x, 5, atol=1e-7) + assert_allclose(sol_4.x, 2, atol=1e-7) + + def test_minimize_coerce_args_param(self): + # Regression test for gh-3503 + def Y(x, c): + return np.sum((x-c)**2) + + def dY_dx(x, c=None): + return 2*(x-c) + + c = np.array([3, 1, 4, 1, 5, 9, 2, 6, 5, 3, 5]) + xinit = np.random.randn(len(c)) + optimize.minimize(Y, xinit, jac=dY_dx, args=(c), method="BFGS") + + def test_initial_step_scaling(self): + # Check that the optimizer's initial step is not huge even if the + # function and gradients are + + scales = [1e-50, 1, 1e50] + methods = ['CG', 'BFGS', 'L-BFGS-B', 'Newton-CG'] + + def f(x): + if first_step_size[0] is None and x[0] != x0[0]: + first_step_size[0] = abs(x[0] - x0[0]) + if abs(x).max() > 1e4: + raise AssertionError("Optimization stepped far away!") + return scale*(x[0] - 1)**2 + + def g(x): + return np.array([scale*(x[0] - 1)]) + + for scale, method in itertools.product(scales, methods): + if method in ('CG', 'BFGS'): + options = dict(gtol=scale*1e-8) + else: + options = dict() + + if scale < 1e-10 and method in ('L-BFGS-B', 'Newton-CG'): + # XXX: return initial point if they see small gradient + continue + + x0 = [-1.0] + first_step_size = [None] + res = optimize.minimize(f, x0, jac=g, method=method, + options=options) + + err_msg = f"{method} {scale}: {first_step_size}: {res}" + + assert res.success, err_msg + assert_allclose(res.x, [1.0], err_msg=err_msg) + assert res.nit <= 3, err_msg + + if scale > 1e-10: + if method in ('CG', 'BFGS'): + assert_allclose(first_step_size[0], 1.01, err_msg=err_msg) + else: + # Newton-CG and L-BFGS-B use different logic for the first + # step, but are both scaling invariant with step sizes ~ 1 + assert first_step_size[0] > 0.5 and first_step_size[0] < 3, err_msg + else: + # step size has upper bound of ||grad||, so line + # search makes many small steps + pass + + @pytest.mark.parametrize('method', ['nelder-mead', 'powell', 'cg', 'bfgs', + 'newton-cg', 'l-bfgs-b', 'tnc', + 'cobyla', 'slsqp', 'trust-constr', + 'dogleg', 'trust-ncg', 'trust-exact', + 'trust-krylov']) + def test_nan_values(self, method): + # Check that nan values result in a failed exit status + np.random.seed(1234) + + count = [0] + + def func(x): + return np.nan + + def func2(x): + count[0] += 1 + if count[0] > 2: + return np.nan + else: + return np.random.rand() + + def grad(x): + return np.array([1.0]) + + def hess(x): + return np.array([[1.0]]) + + x0 = np.array([1.0]) + + needs_grad = method in ('newton-cg', 'trust-krylov', 'trust-exact', + 'trust-ncg', 'dogleg') + needs_hess = method in ('trust-krylov', 'trust-exact', 'trust-ncg', + 'dogleg') + + funcs = [func, func2] + grads = [grad] if needs_grad else [grad, None] + hesss = [hess] if needs_hess else [hess, None] + options = dict(maxfun=20) if method == 'tnc' else dict(maxiter=20) + + with np.errstate(invalid='ignore'), suppress_warnings() as sup: + sup.filter(UserWarning, "delta_grad == 0.*") + sup.filter(RuntimeWarning, ".*does not use Hessian.*") + sup.filter(RuntimeWarning, ".*does not use gradient.*") + + for f, g, h in itertools.product(funcs, grads, hesss): + count = [0] + sol = optimize.minimize(f, x0,
jac=g, hess=h, method=method, + options=options) + assert_equal(sol.success, False) + + @pytest.mark.parametrize('method', ['nelder-mead', 'cg', 'bfgs', + 'l-bfgs-b', 'tnc', + 'cobyla', 'slsqp', 'trust-constr', + 'dogleg', 'trust-ncg', 'trust-exact', + 'trust-krylov']) + def test_duplicate_evaluations(self, method): + # check that there are no duplicate evaluations for any methods + jac = hess = None + if method in ('newton-cg', 'trust-krylov', 'trust-exact', + 'trust-ncg', 'dogleg'): + jac = self.grad + if method in ('trust-krylov', 'trust-exact', 'trust-ncg', + 'dogleg'): + hess = self.hess + + with np.errstate(invalid='ignore'), suppress_warnings() as sup: + # for trust-constr + sup.filter(UserWarning, "delta_grad == 0.*") + optimize.minimize(self.func, self.startparams, + method=method, jac=jac, hess=hess) + + for i in range(1, len(self.trace)): + if np.array_equal(self.trace[i - 1], self.trace[i]): + raise RuntimeError( + f"Duplicate evaluations made by {method}") + + @pytest.mark.filterwarnings('ignore::RuntimeWarning') + @pytest.mark.parametrize('method', MINIMIZE_METHODS_NEW_CB) + @pytest.mark.parametrize('new_cb_interface', [0, 1, 2]) + def test_callback_stopiteration(self, method, new_cb_interface): + # Check that if callback raises StopIteration, optimization + # terminates with the same result as if iterations were limited + + def f(x): + f.flag = False # check that f isn't called after StopIteration + return optimize.rosen(x) + f.flag = False + + def g(x): + f.flag = False + return optimize.rosen_der(x) + + def h(x): + f.flag = False + return optimize.rosen_hess(x) + + maxiter = 5 + + if new_cb_interface == 1: + def callback_interface(*, intermediate_result): + assert intermediate_result.fun == f(intermediate_result.x) + callback() + elif new_cb_interface == 2: + class Callback: + def __call__(self, intermediate_result: OptimizeResult): + assert intermediate_result.fun == f(intermediate_result.x) + callback() + callback_interface = Callback() + else: + def callback_interface(xk, *args): # type: ignore[misc] + callback() + + def callback(): + callback.i += 1 + callback.flag = False + if callback.i == maxiter: + callback.flag = True + raise StopIteration() + callback.i = 0 + callback.flag = False + + kwargs = {'x0': [1.1]*5, 'method': method, + 'fun': f, 'jac': g, 'hess': h} + + res = optimize.minimize(**kwargs, callback=callback_interface) + if method == 'nelder-mead': + maxiter = maxiter + 1 # nelder-mead counts differently + ref = optimize.minimize(**kwargs, options={'maxiter': maxiter}) + assert res.fun == ref.fun + assert_equal(res.x, ref.x) + assert res.nit == ref.nit == maxiter + assert res.status == (3 if method == 'trust-constr' else 99) + + def test_ndim_error(self): + msg = "'x0' must only have one dimension." + with assert_raises(ValueError, match=msg): + optimize.minimize(lambda x: x, np.ones((2, 1))) + + @pytest.mark.parametrize('method', ('nelder-mead', 'l-bfgs-b', 'tnc', + 'powell', 'cobyla', 'trust-constr')) + def test_minimize_invalid_bounds(self, method): + def f(x): + return np.sum(x**2) + + bounds = Bounds([1, 2], [3, 4]) + msg = 'The number of bounds is not compatible with the length of `x0`.' + with pytest.raises(ValueError, match=msg): + optimize.minimize(f, x0=[1, 2, 3], method=method, bounds=bounds) + + bounds = Bounds([1, 6, 1], [3, 4, 2]) + msg = 'An upper bound is less than the corresponding lower bound.' 
+ with pytest.raises(ValueError, match=msg): + optimize.minimize(f, x0=[1, 2, 3], method=method, bounds=bounds) + + @pytest.mark.parametrize('method', ['bfgs', 'cg', 'newton-cg', 'powell']) + def test_minimize_warnings_gh1953(self, method): + # test that minimize methods produce warnings rather than just using + # `print`; see gh-1953. + kwargs = {} if method=='powell' else {'jac': optimize.rosen_der} + warning_type = (RuntimeWarning if method=='powell' + else optimize.OptimizeWarning) + + options = {'disp': True, 'maxiter': 10} + with pytest.warns(warning_type, match='Maximum number'): + optimize.minimize(lambda x: optimize.rosen(x), [0, 0], + method=method, options=options, **kwargs) + + options['disp'] = False + optimize.minimize(lambda x: optimize.rosen(x), [0, 0], + method=method, options=options, **kwargs) + + +@pytest.mark.parametrize( + 'method', + ['l-bfgs-b', 'tnc', 'Powell', 'Nelder-Mead'] +) +def test_minimize_with_scalar(method): + # checks that minimize works with a scalar being provided to it. + def f(x): + return np.sum(x ** 2) + + res = optimize.minimize(f, 17, bounds=[(-100, 100)], method=method) + assert res.success + assert_allclose(res.x, [0.0], atol=1e-5) + + +class TestLBFGSBBounds: + def setup_method(self): + self.bounds = ((1, None), (None, None)) + self.solution = (1, 0) + + def fun(self, x, p=2.0): + return 1.0 / p * (x[0]**p + x[1]**p) + + def jac(self, x, p=2.0): + return x**(p - 1) + + def fj(self, x, p=2.0): + return self.fun(x, p), self.jac(x, p) + + def test_l_bfgs_b_bounds(self): + x, f, d = optimize.fmin_l_bfgs_b(self.fun, [0, -1], + fprime=self.jac, + bounds=self.bounds) + assert d['warnflag'] == 0, d['task'] + assert_allclose(x, self.solution, atol=1e-6) + + def test_l_bfgs_b_funjac(self): + # L-BFGS-B with fun and jac combined and extra arguments + x, f, d = optimize.fmin_l_bfgs_b(self.fj, [0, -1], args=(2.0, ), + bounds=self.bounds) + assert d['warnflag'] == 0, d['task'] + assert_allclose(x, self.solution, atol=1e-6) + + def test_minimize_l_bfgs_b_bounds(self): + # Minimize with method='L-BFGS-B' with bounds + res = optimize.minimize(self.fun, [0, -1], method='L-BFGS-B', + jac=self.jac, bounds=self.bounds) + assert res['success'], res['message'] + assert_allclose(res.x, self.solution, atol=1e-6) + + @pytest.mark.parametrize('bounds', [ + ([(10, 1), (1, 10)]), + ([(1, 10), (10, 1)]), + ([(10, 1), (10, 1)]) + ]) + def test_minimize_l_bfgs_b_incorrect_bounds(self, bounds): + with pytest.raises(ValueError, match='.*bound.*'): + optimize.minimize(self.fun, [0, -1], method='L-BFGS-B', + jac=self.jac, bounds=bounds) + + def test_minimize_l_bfgs_b_bounds_FD(self): + # test that initial starting value outside bounds doesn't raise + # an error (done with clipping). 
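+ # (Editorial note: here x0 = [0, -1] violates the lower bound of 1 + # on x[0] from self.bounds, so L-BFGS-B is expected to clip the + # starting point into the feasible region before taking any + # finite-difference steps.)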
+ # test all different finite differences combos, with and without args + + jacs = ['2-point', '3-point', None] + argss = [(2.,), ()] + for jac, args in itertools.product(jacs, argss): + res = optimize.minimize(self.fun, [0, -1], args=args, + method='L-BFGS-B', + jac=jac, bounds=self.bounds, + options={'finite_diff_rel_step': None}) + assert res['success'], res['message'] + assert_allclose(res.x, self.solution, atol=1e-6) + + +class TestOptimizeScalar: + def setup_method(self): + self.solution = 1.5 + + def fun(self, x, a=1.5): + """Objective function""" + return (x - a)**2 - 0.8 + + def test_brent(self): + x = optimize.brent(self.fun) + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.brent(self.fun, brack=(-3, -2)) + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.brent(self.fun, full_output=True) + assert_allclose(x[0], self.solution, atol=1e-6) + + x = optimize.brent(self.fun, brack=(-15, -1, 15)) + assert_allclose(x, self.solution, atol=1e-6) + + message = r"\(f\(xb\) < f\(xa\)\) and \(f\(xb\) < f\(xc\)\)" + with pytest.raises(ValueError, match=message): + optimize.brent(self.fun, brack=(-1, 0, 1)) + + message = r"\(xa < xb\) and \(xb < xc\)" + with pytest.raises(ValueError, match=message): + optimize.brent(self.fun, brack=(0, -1, 1)) + + @pytest.mark.filterwarnings('ignore::UserWarning') + def test_golden(self): + x = optimize.golden(self.fun) + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.golden(self.fun, brack=(-3, -2)) + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.golden(self.fun, full_output=True) + assert_allclose(x[0], self.solution, atol=1e-6) + + x = optimize.golden(self.fun, brack=(-15, -1, 15)) + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.golden(self.fun, tol=0) + assert_allclose(x, self.solution) + + maxiter_test_cases = [0, 1, 5] + for maxiter in maxiter_test_cases: + x0 = optimize.golden(self.fun, maxiter=0, full_output=True) + x = optimize.golden(self.fun, maxiter=maxiter, full_output=True) + nfev0, nfev = x0[2], x[2] + assert_equal(nfev - nfev0, maxiter) + + message = r"\(f\(xb\) < f\(xa\)\) and \(f\(xb\) < f\(xc\)\)" + with pytest.raises(ValueError, match=message): + optimize.golden(self.fun, brack=(-1, 0, 1)) + + message = r"\(xa < xb\) and \(xb < xc\)" + with pytest.raises(ValueError, match=message): + optimize.golden(self.fun, brack=(0, -1, 1)) + + def test_fminbound(self): + x = optimize.fminbound(self.fun, 0, 1) + assert_allclose(x, 1, atol=1e-4) + + x = optimize.fminbound(self.fun, 1, 5) + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.fminbound(self.fun, np.array([1]), np.array([5])) + assert_allclose(x, self.solution, atol=1e-6) + assert_raises(ValueError, optimize.fminbound, self.fun, 5, 1) + + def test_fminbound_scalar(self): + with pytest.raises(ValueError, match='.*must be finite scalars.*'): + optimize.fminbound(self.fun, np.zeros((1, 2)), 1) + + x = optimize.fminbound(self.fun, 1, np.array(5)) + assert_allclose(x, self.solution, atol=1e-6) + + def test_gh11207(self): + def fun(x): + return x**2 + optimize.fminbound(fun, 0, 0) + + def test_minimize_scalar(self): + # combine all tests above for the minimize_scalar wrapper + x = optimize.minimize_scalar(self.fun).x + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.minimize_scalar(self.fun, method='Brent') + assert x.success + + x = optimize.minimize_scalar(self.fun, method='Brent', + options=dict(maxiter=3)) + assert not x.success + + x = optimize.minimize_scalar(self.fun, bracket=(-3, -2), + 
args=(1.5, ), method='Brent').x + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.minimize_scalar(self.fun, method='Brent', + args=(1.5,)).x + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.minimize_scalar(self.fun, bracket=(-15, -1, 15), + args=(1.5, ), method='Brent').x + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.minimize_scalar(self.fun, bracket=(-3, -2), + args=(1.5, ), method='golden').x + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.minimize_scalar(self.fun, method='golden', + args=(1.5,)).x + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.minimize_scalar(self.fun, bracket=(-15, -1, 15), + args=(1.5, ), method='golden').x + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.minimize_scalar(self.fun, bounds=(0, 1), args=(1.5,), + method='Bounded').x + assert_allclose(x, 1, atol=1e-4) + + x = optimize.minimize_scalar(self.fun, bounds=(1, 5), args=(1.5, ), + method='bounded').x + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.minimize_scalar(self.fun, bounds=(np.array([1]), + np.array([5])), + args=(np.array([1.5]), ), + method='bounded').x + assert_allclose(x, self.solution, atol=1e-6) + + assert_raises(ValueError, optimize.minimize_scalar, self.fun, + bounds=(5, 1), method='bounded', args=(1.5, )) + + assert_raises(ValueError, optimize.minimize_scalar, self.fun, + bounds=(np.zeros(2), 1), method='bounded', args=(1.5, )) + + x = optimize.minimize_scalar(self.fun, bounds=(1, np.array(5)), + method='bounded').x + assert_allclose(x, self.solution, atol=1e-6) + + def test_minimize_scalar_custom(self): + # This function comes from the documentation example. + def custmin(fun, bracket, args=(), maxfev=None, stepsize=0.1, + maxiter=100, callback=None, **options): + bestx = (bracket[1] + bracket[0]) / 2.0 + besty = fun(bestx) + funcalls = 1 + niter = 0 + improved = True + stop = False + + while improved and not stop and niter < maxiter: + improved = False + niter += 1 + for testx in [bestx - stepsize, bestx + stepsize]: + testy = fun(testx, *args) + funcalls += 1 + if testy < besty: + besty = testy + bestx = testx + improved = True + if callback is not None: + callback(bestx) + if maxfev is not None and funcalls >= maxfev: + stop = True + break + + return optimize.OptimizeResult(fun=besty, x=bestx, nit=niter, + nfev=funcalls, success=(niter > 1)) + + res = optimize.minimize_scalar(self.fun, bracket=(0, 4), + method=custmin, + options=dict(stepsize=0.05)) + assert_allclose(res.x, self.solution, atol=1e-6) + + def test_minimize_scalar_coerce_args_param(self): + # Regression test for gh-3503 + optimize.minimize_scalar(self.fun, args=1.5) + + @pytest.mark.parametrize('method', ['brent', 'bounded', 'golden']) + def test_disp(self, method): + # test that all minimize_scalar methods accept a disp option. 
+ for disp in [0, 1, 2, 3]: + optimize.minimize_scalar(self.fun, options={"disp": disp}) + + @pytest.mark.parametrize('method', ['brent', 'bounded', 'golden']) + def test_result_attributes(self, method): + kwargs = {"bounds": [-10, 10]} if method == 'bounded' else {} + result = optimize.minimize_scalar(self.fun, method=method, **kwargs) + assert hasattr(result, "x") + assert hasattr(result, "success") + assert hasattr(result, "message") + assert hasattr(result, "fun") + assert hasattr(result, "nfev") + assert hasattr(result, "nit") + + @pytest.mark.filterwarnings('ignore::UserWarning') + @pytest.mark.parametrize('method', ['brent', 'bounded', 'golden']) + def test_nan_values(self, method): + # Check that nan values result in a failed exit status + np.random.seed(1234) + + count = [0] + + def func(x): + count[0] += 1 + if count[0] > 4: + return np.nan + else: + return x**2 + 0.1 * np.sin(x) + + bracket = (-1, 0, 1) + bounds = (-1, 1) + + with np.errstate(invalid='ignore'), suppress_warnings() as sup: + sup.filter(UserWarning, "delta_grad == 0.*") + sup.filter(RuntimeWarning, ".*does not use Hessian.*") + sup.filter(RuntimeWarning, ".*does not use gradient.*") + + count = [0] + + kwargs = {"bounds": bounds} if method == 'bounded' else {} + sol = optimize.minimize_scalar(func, bracket=bracket, + **kwargs, method=method, + options=dict(maxiter=20)) + assert_equal(sol.success, False) + + def test_minimize_scalar_defaults_gh10911(self): + # Previously, bounds were silently ignored unless `method='bounded'` + # was chosen. See gh-10911. Check that this is no longer the case. + def f(x): + return x**2 + + res = optimize.minimize_scalar(f) + assert_allclose(res.x, 0, atol=1e-8) + + res = optimize.minimize_scalar(f, bounds=(1, 100), + options={'xatol': 1e-10}) + assert_allclose(res.x, 1) + + def test_minimize_non_finite_bounds_gh10911(self): + # Previously, minimize_scalar misbehaved with infinite bounds. + # See gh-10911. Check that it now raises an error instead. + msg = "Optimization bounds must be finite scalars." + with pytest.raises(ValueError, match=msg): + optimize.minimize_scalar(np.sin, bounds=(1, np.inf)) + with pytest.raises(ValueError, match=msg): + optimize.minimize_scalar(np.sin, bounds=(np.nan, 1)) + + @pytest.mark.parametrize("method", ['brent', 'golden']) + def test_minimize_unbounded_method_with_bounds_gh10911(self, method): + # Previously, `bounds` were silently ignored when `method='brent'` or + # `method='golden'`. See gh-10911. Check that an error is now raised. + msg = "Use of `bounds` is incompatible with..." + with pytest.raises(ValueError, match=msg): + optimize.minimize_scalar(np.sin, method=method, bounds=(1, 2)) + + @pytest.mark.filterwarnings('ignore::RuntimeWarning') + @pytest.mark.parametrize("method", MINIMIZE_SCALAR_METHODS) + @pytest.mark.parametrize("tol", [1, 1e-6]) + @pytest.mark.parametrize("fshape", [(), (1,), (1, 1)]) + def test_minimize_scalar_dimensionality_gh16196(self, method, tol, fshape): + # gh-16196 reported that the output shape of `minimize_scalar` was not + # consistent when an objective function returned an array. Check that + # `res.fun` and `res.x` are now consistent.
+ def f(x): + return np.array(x**4).reshape(fshape) + + a, b = -0.1, 0.2 + kwargs = (dict(bracket=(a, b)) if method != "bounded" + else dict(bounds=(a, b))) + kwargs.update(dict(method=method, tol=tol)) + + res = optimize.minimize_scalar(f, **kwargs) + assert res.x.shape == res.fun.shape == f(res.x).shape == fshape + + @pytest.mark.parametrize('method', ['bounded', 'brent', 'golden']) + def test_minimize_scalar_warnings_gh1953(self, method): + # test that minimize_scalar methods produce warnings rather than just + # using `print`; see gh-1953. + def f(x): + return (x - 1)**2 + + kwargs = {} + kwd = 'bounds' if method == 'bounded' else 'bracket' + kwargs[kwd] = [-2, 10] + + options = {'disp': True, 'maxiter': 3} + with pytest.warns(optimize.OptimizeWarning, match='Maximum number'): + optimize.minimize_scalar(f, method=method, options=options, + **kwargs) + + options['disp'] = False + optimize.minimize_scalar(f, method=method, options=options, **kwargs) + + +class TestBracket: + + @pytest.mark.filterwarnings('ignore::RuntimeWarning') + def test_errors_and_status_false(self): + # Check that `bracket` raises the errors it is supposed to + def f(x): # gh-14858 + return x**2 if ((-1 < x) & (x < 1)) else 100.0 + + message = "The algorithm terminated without finding a valid bracket." + with pytest.raises(RuntimeError, match=message): + optimize.bracket(f, -1, 1) + with pytest.raises(RuntimeError, match=message): + optimize.bracket(f, -1, np.inf) + with pytest.raises(RuntimeError, match=message): + optimize.brent(f, brack=(-1, 1)) + with pytest.raises(RuntimeError, match=message): + optimize.golden(f, brack=(-1, 1)) + + def f(x): # gh-5899 + return -5 * x**5 + 4 * x**4 - 12 * x**3 + 11 * x**2 - 2 * x + 1 + + message = "No valid bracket was found before the iteration limit..." + with pytest.raises(RuntimeError, match=message): + optimize.bracket(f, -0.5, 0.5, maxiter=10) + + @pytest.mark.parametrize('method', ('brent', 'golden')) + def test_minimize_scalar_success_false(self, method): + # Check that status information from `bracket` gets to minimize_scalar + def f(x): # gh-14858 + return x**2 if ((-1 < x) & (x < 1)) else 100.0 + + message = "The algorithm terminated without finding a valid bracket." 
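+ # (Editorial note: bracket() evaluates the two user-supplied points + # and one golden-ratio extension point before iterating; on the + # flat plateau here all three return 100 and no descent is found, + # so minimize_scalar is expected to report nfev == 3 with nit == 0 + # and fun equal to the plateau value, as asserted below.)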
+ + res = optimize.minimize_scalar(f, bracket=(-1, 1), method=method) + assert not res.success + assert message in res.message + assert res.nfev == 3 + assert res.nit == 0 + assert res.fun == 100 + + +def test_brent_negative_tolerance(): + assert_raises(ValueError, optimize.brent, np.cos, tol=-.01) + + +class TestNewtonCg: + def test_rosenbrock(self): + x0 = np.array([-1.2, 1.0]) + sol = optimize.minimize(optimize.rosen, x0, + jac=optimize.rosen_der, + hess=optimize.rosen_hess, + tol=1e-5, + method='Newton-CG') + assert sol.success, sol.message + assert_allclose(sol.x, np.array([1, 1]), rtol=1e-4) + + def test_himmelblau(self): + x0 = np.array(himmelblau_x0) + sol = optimize.minimize(himmelblau, + x0, + jac=himmelblau_grad, + hess=himmelblau_hess, + method='Newton-CG', + tol=1e-6) + assert sol.success, sol.message + assert_allclose(sol.x, himmelblau_xopt, rtol=1e-4) + assert_allclose(sol.fun, himmelblau_min, atol=1e-4) + + def test_finite_difference(self): + x0 = np.array([-1.2, 1.0]) + sol = optimize.minimize(optimize.rosen, x0, + jac=optimize.rosen_der, + hess='2-point', + tol=1e-5, + method='Newton-CG') + assert sol.success, sol.message + assert_allclose(sol.x, np.array([1, 1]), rtol=1e-4) + + def test_hessian_update_strategy(self): + x0 = np.array([-1.2, 1.0]) + sol = optimize.minimize(optimize.rosen, x0, + jac=optimize.rosen_der, + hess=optimize.BFGS(), + tol=1e-5, + method='Newton-CG') + assert sol.success, sol.message + assert_allclose(sol.x, np.array([1, 1]), rtol=1e-4) + + +def test_line_for_search(): + # _line_for_search is only used in _linesearch_powell, which is also + # tested below. Thus there are more tests of _line_for_search in the + # test_linesearch_powell_bounded function. + + line_for_search = optimize._optimize._line_for_search + # args are x0, alpha, lower_bound, upper_bound + # returns lmin, lmax + + lower_bound = np.array([-5.3, -1, -1.5, -3]) + upper_bound = np.array([1.9, 1, 2.8, 3]) + + # test when starting in the bounds + x0 = np.array([0., 0, 0, 0]) + # and when starting outside of the bounds + x1 = np.array([0., 2, -3, 0]) + + all_tests = ( + (x0, np.array([1., 0, 0, 0]), -5.3, 1.9), + (x0, np.array([0., 1, 0, 0]), -1, 1), + (x0, np.array([0., 0, 1, 0]), -1.5, 2.8), + (x0, np.array([0., 0, 0, 1]), -3, 3), + (x0, np.array([1., 1, 0, 0]), -1, 1), + (x0, np.array([1., 0, -1, 2]), -1.5, 1.5), + (x0, np.array([2., 0, -1, 2]), -1.5, 0.95), + (x1, np.array([1., 0, 0, 0]), -5.3, 1.9), + (x1, np.array([0., 1, 0, 0]), -3, -1), + (x1, np.array([0., 0, 1, 0]), 1.5, 5.8), + (x1, np.array([0., 0, 0, 1]), -3, 3), + (x1, np.array([1., 1, 0, 0]), -3, -1), + (x1, np.array([1., 0, -1, 0]), -5.3, -1.5), + ) + + for x, alpha, lmin, lmax in all_tests: + mi, ma = line_for_search(x, alpha, lower_bound, upper_bound) + assert_allclose(mi, lmin, atol=1e-6) + assert_allclose(ma, lmax, atol=1e-6) + + # now with infinite bounds + lower_bound = np.array([-np.inf, -1, -np.inf, -3]) + upper_bound = np.array([np.inf, 1, 2.8, np.inf]) + + all_tests = ( + (x0, np.array([1., 0, 0, 0]), -np.inf, np.inf), + (x0, np.array([0., 1, 0, 0]), -1, 1), + (x0, np.array([0., 0, 1, 0]), -np.inf, 2.8), + (x0, np.array([0., 0, 0, 1]), -3, np.inf), + (x0, np.array([1., 1, 0, 0]), -1, 1), + (x0, np.array([1., 0, -1, 2]), -1.5, np.inf), + (x1, np.array([1., 0, 0, 0]), -np.inf, np.inf), + (x1, np.array([0., 1, 0, 0]), -3, -1), + (x1, np.array([0., 0, 1, 0]), -np.inf, 5.8), + (x1, np.array([0., 0, 0, 1]), -3, np.inf), + (x1, np.array([1., 1, 0, 0]), -3, -1), + (x1, np.array([1., 0, -1, 0]), -5.8, np.inf), + ) + + 
for x, alpha, lmin, lmax in all_tests: + mi, ma = line_for_search(x, alpha, lower_bound, upper_bound) + assert_allclose(mi, lmin, atol=1e-6) + assert_allclose(ma, lmax, atol=1e-6) + + +def test_linesearch_powell(): + # helper function in optimize.py, not a public function. + linesearch_powell = optimize._optimize._linesearch_powell + # args are func, p, xi, fval, lower_bound=None, upper_bound=None, tol=1e-3 + # returns new_fval, p + direction, direction + def func(x): + return np.sum((x - np.array([-1.0, 2.0, 1.5, -0.4])) ** 2) + p0 = np.array([0., 0, 0, 0]) + fval = func(p0) + lower_bound = np.array([-np.inf] * 4) + upper_bound = np.array([np.inf] * 4) + + all_tests = ( + (np.array([1., 0, 0, 0]), -1), + (np.array([0., 1, 0, 0]), 2), + (np.array([0., 0, 1, 0]), 1.5), + (np.array([0., 0, 0, 1]), -.4), + (np.array([-1., 0, 1, 0]), 1.25), + (np.array([0., 0, 1, 1]), .55), + (np.array([2., 0, -1, 1]), -.65), + ) + + for xi, l in all_tests: + f, p, direction = linesearch_powell(func, p0, xi, + fval=fval, tol=1e-5) + assert_allclose(f, func(l * xi), atol=1e-6) + assert_allclose(p, l * xi, atol=1e-6) + assert_allclose(direction, l * xi, atol=1e-6) + + f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5, + lower_bound=lower_bound, + upper_bound=upper_bound, + fval=fval) + assert_allclose(f, func(l * xi), atol=1e-6) + assert_allclose(p, l * xi, atol=1e-6) + assert_allclose(direction, l * xi, atol=1e-6) + + +def test_linesearch_powell_bounded(): + # helper function in optimize.py, not a public function. + linesearch_powell = optimize._optimize._linesearch_powell + # args are func, p, xi, fval, lower_bound=None, upper_bound=None, tol=1e-3 + # returns new_fval, p+direction, direction + def func(x): + return np.sum((x - np.array([-1.0, 2.0, 1.5, -0.4])) ** 2) + p0 = np.array([0., 0, 0, 0]) + fval = func(p0) + + # first choose bounds such that the same tests from + # test_linesearch_powell should pass. 
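+ # (Editorial note: for this quadratic objective the unconstrained + # minimiser along a direction xi from a start p0 has the closed + # form l = xi @ (c - p0) / (xi @ xi) with c = [-1.0, 2.0, 1.5, -0.4]; + # e.g. p0 = 0 and xi = [-1, 0, 1, 0] gives l = (1 + 1.5) / 2 = 1.25. + # The bounded cases below simply clip this step to the feasible + # interval computed by _line_for_search.)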
+ lower_bound = np.array([-2.]*4) + upper_bound = np.array([2.]*4) + + all_tests = ( + (np.array([1., 0, 0, 0]), -1), + (np.array([0., 1, 0, 0]), 2), + (np.array([0., 0, 1, 0]), 1.5), + (np.array([0., 0, 0, 1]), -.4), + (np.array([-1., 0, 1, 0]), 1.25), + (np.array([0., 0, 1, 1]), .55), + (np.array([2., 0, -1, 1]), -.65), + ) + + for xi, l in all_tests: + f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5, + lower_bound=lower_bound, + upper_bound=upper_bound, + fval=fval) + assert_allclose(f, func(l * xi), atol=1e-6) + assert_allclose(p, l * xi, atol=1e-6) + assert_allclose(direction, l * xi, atol=1e-6) + + # now choose bounds such that unbounded vs bounded gives different results + lower_bound = np.array([-.3]*3 + [-1]) + upper_bound = np.array([.45]*3 + [.9]) + + all_tests = ( + (np.array([1., 0, 0, 0]), -.3), + (np.array([0., 1, 0, 0]), .45), + (np.array([0., 0, 1, 0]), .45), + (np.array([0., 0, 0, 1]), -.4), + (np.array([-1., 0, 1, 0]), .3), + (np.array([0., 0, 1, 1]), .45), + (np.array([2., 0, -1, 1]), -.15), + ) + + for xi, l in all_tests: + f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5, + lower_bound=lower_bound, + upper_bound=upper_bound, + fval=fval) + assert_allclose(f, func(l * xi), atol=1e-6) + assert_allclose(p, l * xi, atol=1e-6) + assert_allclose(direction, l * xi, atol=1e-6) + + # now choose as above but start outside the bounds + p0 = np.array([-1., 0, 0, 2]) + fval = func(p0) + + all_tests = ( + (np.array([1., 0, 0, 0]), .7), + (np.array([0., 1, 0, 0]), .45), + (np.array([0., 0, 1, 0]), .45), + (np.array([0., 0, 0, 1]), -2.4), + ) + + for xi, l in all_tests: + f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5, + lower_bound=lower_bound, + upper_bound=upper_bound, + fval=fval) + assert_allclose(f, func(p0 + l * xi), atol=1e-6) + assert_allclose(p, p0 + l * xi, atol=1e-6) + assert_allclose(direction, l * xi, atol=1e-6) + + # now mix in inf + p0 = np.array([0., 0, 0, 0]) + fval = func(p0) + + # now choose bounds that mix inf + lower_bound = np.array([-.3, -np.inf, -np.inf, -1]) + upper_bound = np.array([np.inf, .45, np.inf, .9]) + + all_tests = ( + (np.array([1., 0, 0, 0]), -.3), + (np.array([0., 1, 0, 0]), .45), + (np.array([0., 0, 1, 0]), 1.5), + (np.array([0., 0, 0, 1]), -.4), + (np.array([-1., 0, 1, 0]), .3), + (np.array([0., 0, 1, 1]), .55), + (np.array([2., 0, -1, 1]), -.15), + ) + + for xi, l in all_tests: + f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5, + lower_bound=lower_bound, + upper_bound=upper_bound, + fval=fval) + assert_allclose(f, func(l * xi), atol=1e-6) + assert_allclose(p, l * xi, atol=1e-6) + assert_allclose(direction, l * xi, atol=1e-6) + + # now choose as above but start outside the bounds + p0 = np.array([-1., 0, 0, 2]) + fval = func(p0) + + all_tests = ( + (np.array([1., 0, 0, 0]), .7), + (np.array([0., 1, 0, 0]), .45), + (np.array([0., 0, 1, 0]), 1.5), + (np.array([0., 0, 0, 1]), -2.4), + ) + + for xi, l in all_tests: + f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5, + lower_bound=lower_bound, + upper_bound=upper_bound, + fval=fval) + assert_allclose(f, func(p0 + l * xi), atol=1e-6) + assert_allclose(p, p0 + l * xi, atol=1e-6) + assert_allclose(direction, l * xi, atol=1e-6) + + +def test_powell_limits(): + # gh15342 - powell was going outside bounds for some function evaluations. 
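+ # (Editorial note: the assert inside fun below is the actual check; + # x0 is placed exactly at the upper corner of the feasible box, + # [0.6, 20] == ub, so any overshoot in Powell's line search would + # immediately violate the bounds and trip the assertion.)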
+ bounds = optimize.Bounds([0, 0], [0.6, 20]) + + def fun(x): + a, b = x + assert (x >= bounds.lb).all() and (x <= bounds.ub).all() + return a ** 2 + b ** 2 + + optimize.minimize(fun, x0=[0.6, 20], method='Powell', bounds=bounds) + + # Another test from the original report - gh-13411 + bounds = optimize.Bounds(lb=[0,], ub=[1,], keep_feasible=[True,]) + + def func(x): + assert x >= 0 and x <= 1 + return np.exp(x) + + optimize.minimize(fun=func, x0=[0.5], method='powell', bounds=bounds) + + +class TestRosen: + + def test_hess(self): + # Compare rosen_hess(x) times p with rosen_hess_prod(x,p). See gh-1775. + x = np.array([3, 4, 5]) + p = np.array([2, 2, 2]) + hp = optimize.rosen_hess_prod(x, p) + dothp = np.dot(optimize.rosen_hess(x), p) + assert_equal(hp, dothp) + + +def himmelblau(p): + """ + R^2 -> R^1 test function for optimization. The function has four local + minima where himmelblau(xopt) == 0. + """ + x, y = p + a = x*x + y - 11 + b = x + y*y - 7 + return a*a + b*b + + +def himmelblau_grad(p): + x, y = p + return np.array([4*x**3 + 4*x*y - 42*x + 2*y**2 - 14, + 2*x**2 + 4*x*y + 4*y**3 - 26*y - 22]) + + +def himmelblau_hess(p): + x, y = p + return np.array([[12*x**2 + 4*y - 42, 4*x + 4*y], + [4*x + 4*y, 4*x + 12*y**2 - 26]]) + + +himmelblau_x0 = [-0.27, -0.9] +himmelblau_xopt = [3, 2] +himmelblau_min = 0.0 + + +def test_minimize_multiple_constraints(): + # Regression test for gh-4240. + def func(x): + return np.array([25 - 0.2 * x[0] - 0.4 * x[1] - 0.33 * x[2]]) + + def func1(x): + return np.array([x[1]]) + + def func2(x): + return np.array([x[2]]) + + cons = ({'type': 'ineq', 'fun': func}, + {'type': 'ineq', 'fun': func1}, + {'type': 'ineq', 'fun': func2}) + + def f(x): + return -1 * (x[0] + x[1] + x[2]) + + res = optimize.minimize(f, [0, 0, 0], method='SLSQP', constraints=cons) + assert_allclose(res.x, [125, 0, 0], atol=1e-10) + + +class TestOptimizeResultAttributes: + # Test that all minimizers return an OptimizeResult containing + # all the OptimizeResult attributes + def setup_method(self): + self.x0 = [5, 5] + self.func = optimize.rosen + self.jac = optimize.rosen_der + self.hess = optimize.rosen_hess + self.hessp = optimize.rosen_hess_prod + self.bounds = [(0., 10.), (0., 10.)] + + def test_attributes_present(self): + attributes = ['nit', 'nfev', 'x', 'success', 'status', 'fun', + 'message'] + skip = {'cobyla': ['nit']} + for method in MINIMIZE_METHODS: + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, + ("Method .+ does not use (gradient|Hessian.*)" + " information")) + res = optimize.minimize(self.func, self.x0, method=method, + jac=self.jac, hess=self.hess, + hessp=self.hessp) + for attribute in attributes: + if method in skip and attribute in skip[method]: + continue + + assert hasattr(res, attribute) + assert attribute in dir(res) + + # gh13001, OptimizeResult.message should be a str + assert isinstance(res.message, str) + + +def f1(z, *params): + x, y = z + a, b, c, d, e, f, g, h, i, j, k, l, scale = params + return (a * x**2 + b * x * y + c * y**2 + d*x + e*y + f) + + +def f2(z, *params): + x, y = z + a, b, c, d, e, f, g, h, i, j, k, l, scale = params + return (-g*np.exp(-((x-h)**2 + (y-i)**2) / scale)) + + +def f3(z, *params): + x, y = z + a, b, c, d, e, f, g, h, i, j, k, l, scale = params + return (-j*np.exp(-((x-k)**2 + (y-l)**2) / scale)) + + +def brute_func(z, *params): + return f1(z, *params) + f2(z, *params) + f3(z, *params) + + +class TestBrute: + # Test the "brute force" method + def setup_method(self): + self.params = (2, 3, 7, 8, 9, 10, 44, -1, 2, 
26, 1, -2, 0.5) + self.rranges = (slice(-4, 4, 0.25), slice(-4, 4, 0.25)) + self.solution = np.array([-1.05665192, 1.80834843]) + + def brute_func(self, z, *params): + # an instance method to optimize + return brute_func(z, *params) + + def test_brute(self): + # test fmin + resbrute = optimize.brute(brute_func, self.rranges, args=self.params, + full_output=True, finish=optimize.fmin) + assert_allclose(resbrute[0], self.solution, atol=1e-3) + assert_allclose(resbrute[1], brute_func(self.solution, *self.params), + atol=1e-3) + + # test minimize + resbrute = optimize.brute(brute_func, self.rranges, args=self.params, + full_output=True, + finish=optimize.minimize) + assert_allclose(resbrute[0], self.solution, atol=1e-3) + assert_allclose(resbrute[1], brute_func(self.solution, *self.params), + atol=1e-3) + + # test that brute can optimize an instance method (the other tests use + # a non-class based function) + resbrute = optimize.brute(self.brute_func, self.rranges, + args=self.params, full_output=True, + finish=optimize.minimize) + assert_allclose(resbrute[0], self.solution, atol=1e-3) + + def test_1D(self): + # test that for a 1-D problem the test function is passed an array, + # not a scalar. + def f(x): + assert len(x.shape) == 1 + assert x.shape[0] == 1 + return x ** 2 + + optimize.brute(f, [(-1, 1)], Ns=3, finish=None) + + def test_workers(self): + # check that parallel evaluation works + resbrute = optimize.brute(brute_func, self.rranges, args=self.params, + full_output=True, finish=None) + + resbrute1 = optimize.brute(brute_func, self.rranges, args=self.params, + full_output=True, finish=None, workers=2) + + assert_allclose(resbrute1[-1], resbrute[-1]) + assert_allclose(resbrute1[0], resbrute[0]) + + def test_runtime_warning(self, capsys): + rng = np.random.default_rng(1234) + + def func(z, *params): + return rng.random(1) * 1000 # a problem that never converges + + msg = "final optimization did not succeed.*|Maximum number of function eval.*" + with pytest.warns(RuntimeWarning, match=msg): + optimize.brute(func, self.rranges, args=self.params, disp=True) + + def test_coerce_args_param(self): + # optimize.brute should coerce non-iterable args to a tuple. + def f(x, *args): + return x ** args[0] + + resbrute = optimize.brute(f, (slice(-4, 4, .25),), args=2) + assert_allclose(resbrute, 0) + + +def test_cobyla_threadsafe(): + + # Verify that cobyla is threadsafe. Will segfault if it is not. + + import concurrent.futures + import time + + def objective1(x): + time.sleep(0.1) + return x[0]**2 + + def objective2(x): + time.sleep(0.1) + return (x[0]-1)**2 + + min_method = "COBYLA" + + def minimizer1(): + return optimize.minimize(objective1, + [0.0], + method=min_method) + + def minimizer2(): + return optimize.minimize(objective2, + [0.0], + method=min_method) + + with concurrent.futures.ThreadPoolExecutor() as pool: + tasks = [] + tasks.append(pool.submit(minimizer1)) + tasks.append(pool.submit(minimizer2)) + for t in tasks: + t.result() + + +class TestIterationLimits: + # Tests that optimisation does not give up before trying the requested + # number of iterations or evaluations, and that it does not succeed + # by exceeding the limits.
+ def setup_method(self): + self.funcalls = 0 + + def slow_func(self, v): + self.funcalls += 1 + r, t = np.sqrt(v[0]**2+v[1]**2), np.arctan2(v[0], v[1]) + return np.sin(r*20 + t)+r*0.5 + + def test_neldermead_limit(self): + self.check_limits("Nelder-Mead", 200) + + def test_powell_limit(self): + self.check_limits("powell", 1000) + + def check_limits(self, method, default_iters): + for start_v in [[0.1, 0.1], [1, 1], [2, 2]]: + for mfev in [50, 500, 5000]: + self.funcalls = 0 + res = optimize.minimize(self.slow_func, start_v, + method=method, + options={"maxfev": mfev}) + assert self.funcalls == res["nfev"] + if res["success"]: + assert res["nfev"] < mfev + else: + assert res["nfev"] >= mfev + for mit in [50, 500, 5000]: + res = optimize.minimize(self.slow_func, start_v, + method=method, + options={"maxiter": mit}) + if res["success"]: + assert res["nit"] <= mit + else: + assert res["nit"] >= mit + for mfev, mit in [[50, 50], [5000, 5000], [5000, np.inf]]: + self.funcalls = 0 + res = optimize.minimize(self.slow_func, start_v, + method=method, + options={"maxiter": mit, + "maxfev": mfev}) + assert self.funcalls == res["nfev"] + if res["success"]: + assert res["nfev"] < mfev and res["nit"] <= mit + else: + assert res["nfev"] >= mfev or res["nit"] >= mit + for mfev, mit in [[np.inf, None], [None, np.inf]]: + self.funcalls = 0 + res = optimize.minimize(self.slow_func, start_v, + method=method, + options={"maxiter": mit, + "maxfev": mfev}) + assert self.funcalls == res["nfev"] + if res["success"]: + if mfev is None: + assert res["nfev"] < default_iters*2 + else: + assert res["nit"] <= default_iters*2 + else: + assert (res["nfev"] >= default_iters*2 + or res["nit"] >= default_iters*2) + + +def test_result_x_shape_when_len_x_is_one(): + def fun(x): + return x * x + + def jac(x): + return 2. 
* x + + def hess(x): + return np.array([[2.]]) + + methods = ['Nelder-Mead', 'Powell', 'CG', 'BFGS', 'L-BFGS-B', 'TNC', + 'COBYLA', 'SLSQP'] + for method in methods: + res = optimize.minimize(fun, np.array([0.1]), method=method) + assert res.x.shape == (1,) + + # use jac + hess + methods = ['trust-constr', 'dogleg', 'trust-ncg', 'trust-exact', + 'trust-krylov', 'Newton-CG'] + for method in methods: + res = optimize.minimize(fun, np.array([0.1]), method=method, jac=jac, + hess=hess) + assert res.x.shape == (1,) + + +class FunctionWithGradient: + def __init__(self): + self.number_of_calls = 0 + + def __call__(self, x): + self.number_of_calls += 1 + return np.sum(x**2), 2 * x + + +@pytest.fixture +def function_with_gradient(): + return FunctionWithGradient() + + +def test_memoize_jac_function_before_gradient(function_with_gradient): + memoized_function = MemoizeJac(function_with_gradient) + + x0 = np.array([1.0, 2.0]) + assert_allclose(memoized_function(x0), 5.0) + assert function_with_gradient.number_of_calls == 1 + + assert_allclose(memoized_function.derivative(x0), 2 * x0) + assert function_with_gradient.number_of_calls == 1, \ + "function is not recomputed " \ + "if gradient is requested after function value" + + assert_allclose( + memoized_function(2 * x0), 20.0, + err_msg="different input triggers new computation") + assert function_with_gradient.number_of_calls == 2, \ + "different input triggers new computation" + + +def test_memoize_jac_gradient_before_function(function_with_gradient): + memoized_function = MemoizeJac(function_with_gradient) + + x0 = np.array([1.0, 2.0]) + assert_allclose(memoized_function.derivative(x0), 2 * x0) + assert function_with_gradient.number_of_calls == 1 + + assert_allclose(memoized_function(x0), 5.0) + assert function_with_gradient.number_of_calls == 1, \ + "function is not recomputed " \ + "if function value is requested after gradient" + + assert_allclose( + memoized_function.derivative(2 * x0), 4 * x0, + err_msg="different input triggers new computation") + assert function_with_gradient.number_of_calls == 2, \ + "different input triggers new computation" + + +def test_memoize_jac_with_bfgs(function_with_gradient): + """ Tests that using MemoizeJac in combination with ScalarFunction + and BFGS does not lead to repeated function evaluations. + Tests changes made in response to GH11868.
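+ (Editorial note: MemoizeJac caches the (value, jacobian) pair for + the most recently evaluated x, so asking ScalarFunction for the + derivative at a point whose value was just computed must not call + the wrapped function again; only a genuinely new x triggers a + fresh evaluation.)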
+ """ + memoized_function = MemoizeJac(function_with_gradient) + jac = memoized_function.derivative + hess = optimize.BFGS() + + x0 = np.array([1.0, 0.5]) + scalar_function = ScalarFunction( + memoized_function, x0, (), jac, hess, None, None) + assert function_with_gradient.number_of_calls == 1 + + scalar_function.fun(x0 + 0.1) + assert function_with_gradient.number_of_calls == 2 + + scalar_function.fun(x0 + 0.2) + assert function_with_gradient.number_of_calls == 3 + + +def test_gh12696(): + # Test that optimize doesn't throw warning gh-12696 + with assert_no_warnings(): + optimize.fminbound( + lambda x: np.array([x**2]), -np.pi, np.pi, disp=False) + + +# --- Test minimize with equal upper and lower bounds --- # + +def setup_test_equal_bounds(): + + np.random.seed(0) + x0 = np.random.rand(4) + lb = np.array([0, 2, -1, -1.0]) + ub = np.array([3, 2, 2, -1.0]) + i_eb = (lb == ub) + + def check_x(x, check_size=True, check_values=True): + if check_size: + assert x.size == 4 + if check_values: + assert_allclose(x[i_eb], lb[i_eb]) + + def func(x): + check_x(x) + return optimize.rosen(x) + + def grad(x): + check_x(x) + return optimize.rosen_der(x) + + def callback(x, *args): + check_x(x) + + def constraint1(x): + check_x(x, check_values=False) + return x[0:1] - 1 + + def jacobian1(x): + check_x(x, check_values=False) + dc = np.zeros_like(x) + dc[0] = 1 + return dc + + def constraint2(x): + check_x(x, check_values=False) + return x[2:3] - 0.5 + + def jacobian2(x): + check_x(x, check_values=False) + dc = np.zeros_like(x) + dc[2] = 1 + return dc + + c1a = NonlinearConstraint(constraint1, -np.inf, 0) + c1b = NonlinearConstraint(constraint1, -np.inf, 0, jacobian1) + c2a = NonlinearConstraint(constraint2, -np.inf, 0) + c2b = NonlinearConstraint(constraint2, -np.inf, 0, jacobian2) + + # test using the three methods that accept bounds, use derivatives, and + # have some trouble when bounds fix variables + methods = ('L-BFGS-B', 'SLSQP', 'TNC') + + # test w/out gradient, w/ gradient, and w/ combined objective/gradient + kwds = ({"fun": func, "jac": False}, + {"fun": func, "jac": grad}, + {"fun": (lambda x: (func(x), grad(x))), + "jac": True}) + + # test with both old- and new-style bounds + bound_types = (lambda lb, ub: list(zip(lb, ub)), + Bounds) + + # Test for many combinations of constraints w/ and w/out jacobian + # Pairs in format: (test constraints, reference constraints) + # (always use analytical jacobian in reference) + constraints = ((None, None), ([], []), + (c1a, c1b), (c2b, c2b), + ([c1b], [c1b]), ([c2a], [c2b]), + ([c1a, c2a], [c1b, c2b]), + ([c1a, c2b], [c1b, c2b]), + ([c1b, c2b], [c1b, c2b])) + + # test with and without callback function + callbacks = (None, callback) + + data = {"methods": methods, "kwds": kwds, "bound_types": bound_types, + "constraints": constraints, "callbacks": callbacks, + "lb": lb, "ub": ub, "x0": x0, "i_eb": i_eb} + + return data + + +eb_data = setup_test_equal_bounds() + + +# This test is about handling fixed variables, not the accuracy of the solvers +@pytest.mark.xfail_on_32bit("Failures due to floating point issues, not logic") +@pytest.mark.parametrize('method', eb_data["methods"]) +@pytest.mark.parametrize('kwds', eb_data["kwds"]) +@pytest.mark.parametrize('bound_type', eb_data["bound_types"]) +@pytest.mark.parametrize('constraints', eb_data["constraints"]) +@pytest.mark.parametrize('callback', eb_data["callbacks"]) +def test_equal_bounds(method, kwds, bound_type, constraints, callback): + """ + Tests that minimizers still work if (bounds.lb == 
bounds.ub).any() + gh12502 - Divide by zero in Jacobian numerical differentiation when + equality bounds constraints are used + """ + # GH-15051; slightly more skips than necessary; hopefully fixed by GH-14882 + if (platform.machine() == 'aarch64' and method == "TNC" + and kwds["jac"] is False and callback is not None): + pytest.skip('Tolerance violation on aarch') + + lb, ub = eb_data["lb"], eb_data["ub"] + x0, i_eb = eb_data["x0"], eb_data["i_eb"] + + test_constraints, reference_constraints = constraints + if test_constraints and not method == 'SLSQP': + pytest.skip('Only SLSQP supports nonlinear constraints') + # reference constraints always have analytical jacobian + # if test constraints are not the same, we'll need finite differences + fd_needed = (test_constraints != reference_constraints) + + bounds = bound_type(lb, ub) # old- or new-style + + kwds.update({"x0": x0, "method": method, "bounds": bounds, + "constraints": test_constraints, "callback": callback}) + res = optimize.minimize(**kwds) + + expected = optimize.minimize(optimize.rosen, x0, method=method, + jac=optimize.rosen_der, bounds=bounds, + constraints=reference_constraints) + + # compare the output of a solution with FD vs that of an analytic grad + assert res.success + assert_allclose(res.fun, expected.fun, rtol=1.5e-6) + assert_allclose(res.x, expected.x, rtol=5e-4) + + if fd_needed or kwds['jac'] is False: + expected.jac[i_eb] = np.nan + assert res.jac.shape[0] == 4 + assert_allclose(res.jac[i_eb], expected.jac[i_eb], rtol=1e-6) + + if not (kwds['jac'] or test_constraints or isinstance(bounds, Bounds)): + # compare the output to an equivalent FD minimization that doesn't + # need factorization + def fun(x): + new_x = np.array([np.nan, 2, np.nan, -1]) + new_x[[0, 2]] = x + return optimize.rosen(new_x) + + fd_res = optimize.minimize(fun, + x0[[0, 2]], + method=method, + bounds=bounds[::2]) + assert_allclose(res.fun, fd_res.fun) + # TODO this test should really be equivalent to factorized version + # above, down to res.nfev. However, testing found that when TNC is + # called with or without a callback the output is different. The two + # should be the same! This indicates that the TNC callback may be + # mutating something when it shouldn't. + assert_allclose(res.x[[0, 2]], fd_res.x, rtol=2e-6) + + +@pytest.mark.parametrize('method', eb_data["methods"]) +def test_all_bounds_equal(method): + # this only tests methods that have parameters factored out when lb==ub + # it does not test other methods that work with bounds + def f(x, p1=1): + return np.linalg.norm(x) + p1 + + bounds = [(1, 1), (2, 2)] + x0 = (1.0, 3.0) + res = optimize.minimize(f, x0, bounds=bounds, method=method) + assert res.success + assert_allclose(res.fun, f([1.0, 2.0])) + assert res.nfev == 1 + assert res.message == 'All independent variables were fixed by bounds.' 
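[Editor's note: the behaviour exercised above is that components with `lb == ub` are factored out before the solver runs, so finite differencing never divides by a zero-width interval (the gh12502 failure mode). A minimal public-API sketch of the user-visible effect:

import numpy as np
from scipy import optimize

lb = np.array([0.0, 2.0, -1.0, -1.0])
ub = np.array([3.0, 2.0, 2.0, -1.0])   # components 1 and 3 are fixed
x0 = np.array([0.5, 2.0, 0.5, -1.0])

res = optimize.minimize(optimize.rosen, x0, method='L-BFGS-B',
                        bounds=optimize.Bounds(lb, ub))
# fixed components come back exactly at their bound values
assert np.allclose(res.x[[1, 3]], [2.0, -1.0])
]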
+ + args = (2,) + res = optimize.minimize(f, x0, bounds=bounds, method=method, args=args) + assert res.success + assert_allclose(res.fun, f([1.0, 2.0], 2)) + + if method.upper() == 'SLSQP': + def con(x): + return np.sum(x) + nlc = NonlinearConstraint(con, -np.inf, 0.0) + res = optimize.minimize( + f, x0, bounds=bounds, method=method, constraints=[nlc] + ) + assert res.success is False + assert_allclose(res.fun, f([1.0, 2.0])) + assert res.nfev == 1 + message = "All independent variables were fixed by bounds, but" + assert res.message.startswith(message) + + nlc = NonlinearConstraint(con, -np.inf, 4) + res = optimize.minimize( + f, x0, bounds=bounds, method=method, constraints=[nlc] + ) + assert res.success is True + assert_allclose(res.fun, f([1.0, 2.0])) + assert res.nfev == 1 + message = "All independent variables were fixed by bounds at values" + assert res.message.startswith(message) + + +def test_eb_constraints(): + # make sure constraint functions aren't overwritten when equal bounds + # are employed, and a parameter is factored out. GH14859 + def f(x): + return x[0]**3 + x[1]**2 + x[2]*x[3] + + def cfun(x): + return x[0] + x[1] + x[2] + x[3] - 40 + + constraints = [{'type': 'ineq', 'fun': cfun}] + + bounds = [(0, 20)] * 4 + bounds[1] = (5, 5) + optimize.minimize( + f, + x0=[1, 2, 3, 4], + method='SLSQP', + bounds=bounds, + constraints=constraints, + ) + assert constraints[0]['fun'] == cfun + + +def test_show_options(): + solver_methods = { + 'minimize': MINIMIZE_METHODS, + 'minimize_scalar': MINIMIZE_SCALAR_METHODS, + 'root': ROOT_METHODS, + 'root_scalar': ROOT_SCALAR_METHODS, + 'linprog': LINPROG_METHODS, + 'quadratic_assignment': QUADRATIC_ASSIGNMENT_METHODS, + } + for solver, methods in solver_methods.items(): + for method in methods: + # testing that `show_options` works without error + show_options(solver, method) + + unknown_solver_method = { + 'minimize': "ekki", # unknown method + 'maximize': "cg", # unknown solver + 'maximize_scalar': "ekki", # unknown solver and method + } + for solver, method in unknown_solver_method.items(): + # testing that `show_options` raises ValueError + assert_raises(ValueError, show_options, solver, method) + + +def test_bounds_with_list(): + # gh13501. Bounds created with lists weren't working for Powell. + bounds = optimize.Bounds(lb=[5., 5.], ub=[10., 10.]) + optimize.minimize( + optimize.rosen, x0=np.array([9, 9]), method='Powell', bounds=bounds + ) + + +def test_x_overwritten_user_function(): + # if the user overwrites the x-array in the user function it's likely + # that the minimizer stops working properly. 
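[Editor's note: the hazard described above is in-place mutation of the array the solver hands to user code. A user-side guard, sketched with a hypothetical `fquad_safe`, is to copy before mutating:

import numpy as np

def fquad_safe(x):
    x = np.array(x, copy=True)  # never mutate the optimizer's own array
    x -= np.arange(x.size)
    return np.sum(x * x)
]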
+ # gh13740 + def fquad(x): + a = np.arange(np.size(x)) + x -= a + x *= x + return np.sum(x) + + def fquad_jac(x): + a = np.arange(np.size(x)) + x *= 2 + x -= 2 * a + return x + + def fquad_hess(x): + return np.eye(np.size(x)) * 2.0 + + meth_jac = [ + 'newton-cg', 'dogleg', 'trust-ncg', 'trust-exact', + 'trust-krylov', 'trust-constr' + ] + meth_hess = [ + 'dogleg', 'trust-ncg', 'trust-exact', 'trust-krylov', 'trust-constr' + ] + + x0 = np.ones(5) * 1.5 + + for meth in MINIMIZE_METHODS: + jac = None + hess = None + if meth in meth_jac: + jac = fquad_jac + if meth in meth_hess: + hess = fquad_hess + res = optimize.minimize(fquad, x0, method=meth, jac=jac, hess=hess) + assert_allclose(res.x, np.arange(np.size(x0)), atol=2e-4) + + +class TestGlobalOptimization: + + def test_optimize_result_attributes(self): + def func(x): + return x ** 2 + + # Note that `brute` solver does not return `OptimizeResult` + results = [optimize.basinhopping(func, x0=1), + optimize.differential_evolution(func, [(-4, 4)]), + optimize.shgo(func, [(-4, 4)]), + optimize.dual_annealing(func, [(-4, 4)]), + optimize.direct(func, [(-4, 4)]), + ] + + for result in results: + assert isinstance(result, optimize.OptimizeResult) + assert hasattr(result, "x") + assert hasattr(result, "success") + assert hasattr(result, "message") + assert hasattr(result, "fun") + assert hasattr(result, "nfev") + assert hasattr(result, "nit") + + +def test_approx_fprime(): + # check that approx_fprime (serviced by approx_derivative) works for + # jac and hess + g = optimize.approx_fprime(himmelblau_x0, himmelblau) + assert_allclose(g, himmelblau_grad(himmelblau_x0), rtol=5e-6) + + h = optimize.approx_fprime(himmelblau_x0, himmelblau_grad) + assert_allclose(h, himmelblau_hess(himmelblau_x0), rtol=5e-6) + + +def test_gh12594(): + # gh-12594 reported an error in `_linesearch_powell` and + # `_line_for_search` when `Bounds` was passed lists instead of arrays. + # Check that results are the same whether the inputs are lists or arrays. + + def f(x): + return x[0]**2 + (x[1] - 1)**2 + + bounds = Bounds(lb=[-10, -10], ub=[10, 10]) + res = optimize.minimize(f, x0=(0, 0), method='Powell', bounds=bounds) + bounds = Bounds(lb=np.array([-10, -10]), ub=np.array([10, 10])) + ref = optimize.minimize(f, x0=(0, 0), method='Powell', bounds=bounds) + + assert_allclose(res.fun, ref.fun) + assert_allclose(res.x, ref.x) + + +@pytest.mark.parametrize('method', ['Newton-CG', 'trust-constr']) +@pytest.mark.parametrize('sparse_type', [coo_matrix, csc_matrix, csr_matrix, + coo_array, csr_array, csc_array]) +def test_sparse_hessian(method, sparse_type): + # gh-8792 reported an error for minimization with `newton_cg` when `hess` + # returns a sparse matrix. Check that results are the same whether `hess` + # returns a dense or sparse matrix for optimization methods that accept + # sparse Hessian matrices. + + def sparse_rosen_hess(x): + return sparse_type(rosen_hess(x)) + + x0 = [2., 2.] 
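[Editor's note: sparse Hessians work for these methods because they only ever apply the Hessian to vectors; supplying a Hessian-vector product via `hessp` is an equivalent route, sketched here with public API:

import numpy as np
from scipy.optimize import minimize, rosen, rosen_der, rosen_hess_prod

res = minimize(rosen, [2.0, 2.0], method='Newton-CG',
               jac=rosen_der, hessp=rosen_hess_prod)
assert np.allclose(res.x, 1.0, atol=1e-4)
]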
+ + res_sparse = optimize.minimize(rosen, x0, method=method, + jac=rosen_der, hess=sparse_rosen_hess) + res_dense = optimize.minimize(rosen, x0, method=method, + jac=rosen_der, hess=rosen_hess) + + assert_allclose(res_dense.fun, res_sparse.fun) + assert_allclose(res_dense.x, res_sparse.x) + assert res_dense.nfev == res_sparse.nfev + assert res_dense.njev == res_sparse.njev + assert res_dense.nhev == res_sparse.nhev diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_quadratic_assignment.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_quadratic_assignment.py new file mode 100644 index 0000000000000000000000000000000000000000..6f476be1604a21573b15b5dedfa3ceb47974e2d8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_quadratic_assignment.py @@ -0,0 +1,431 @@ +import pytest +import numpy as np +from scipy.optimize import quadratic_assignment, OptimizeWarning +from scipy.optimize._qap import _calc_score as _score +from numpy.testing import assert_equal, assert_, assert_warns + + +################ +# Common Tests # +################ + +def chr12c(): + A = [ + [0, 90, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [90, 0, 0, 23, 0, 0, 0, 0, 0, 0, 0, 0], + [10, 0, 0, 0, 43, 0, 0, 0, 0, 0, 0, 0], + [0, 23, 0, 0, 0, 88, 0, 0, 0, 0, 0, 0], + [0, 0, 43, 0, 0, 0, 26, 0, 0, 0, 0, 0], + [0, 0, 0, 88, 0, 0, 0, 16, 0, 0, 0, 0], + [0, 0, 0, 0, 26, 0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 16, 0, 0, 0, 96, 0, 0], + [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 29, 0], + [0, 0, 0, 0, 0, 0, 0, 96, 0, 0, 0, 37], + [0, 0, 0, 0, 0, 0, 0, 0, 29, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 37, 0, 0], + ] + B = [ + [0, 36, 54, 26, 59, 72, 9, 34, 79, 17, 46, 95], + [36, 0, 73, 35, 90, 58, 30, 78, 35, 44, 79, 36], + [54, 73, 0, 21, 10, 97, 58, 66, 69, 61, 54, 63], + [26, 35, 21, 0, 93, 12, 46, 40, 37, 48, 68, 85], + [59, 90, 10, 93, 0, 64, 5, 29, 76, 16, 5, 76], + [72, 58, 97, 12, 64, 0, 96, 55, 38, 54, 0, 34], + [9, 30, 58, 46, 5, 96, 0, 83, 35, 11, 56, 37], + [34, 78, 66, 40, 29, 55, 83, 0, 44, 12, 15, 80], + [79, 35, 69, 37, 76, 38, 35, 44, 0, 64, 39, 33], + [17, 44, 61, 48, 16, 54, 11, 12, 64, 0, 70, 86], + [46, 79, 54, 68, 5, 0, 56, 15, 39, 70, 0, 18], + [95, 36, 63, 85, 76, 34, 37, 80, 33, 86, 18, 0], + ] + A, B = np.array(A), np.array(B) + n = A.shape[0] + + opt_perm = np.array([7, 5, 1, 3, 10, 4, 8, 6, 9, 11, 2, 12]) - [1] * n + + return A, B, opt_perm + + +class QAPCommonTests: + """ + Base class for `quadratic_assignment` tests. 
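[Editor's note: a compact reminder of the interface these tests drive; `quadratic_assignment` defaults to the FAQ method, and the reported objective can be recomputed from the returned permutation, mirroring the `_score` check used below:

import numpy as np
from scipy.optimize import quadratic_assignment

A = np.array([[0, 5, 8, 6], [5, 0, 5, 1], [8, 5, 0, 2], [6, 1, 2, 0]])
B = np.array([[0, 1, 8, 4], [1, 0, 5, 2], [8, 5, 0, 5], [4, 2, 5, 0]])

res = quadratic_assignment(A, B)        # 'faq' is the default method
perm = res.col_ind                      # permutation matching A's nodes to B's
assert res.fun == np.sum(A * B[perm][:, perm])  # objective recomputed by hand
]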
+ """ + def setup_method(self): + np.random.seed(0) + + # Test global optima of problem from Umeyama IVB + # https://pcl.sitehost.iu.edu/rgoldsto/papers/weighted%20graph%20match2.pdf + # Graph matching maximum is in the paper + # QAP minimum determined by brute force + def test_accuracy_1(self): + # besides testing accuracy, check that A and B can be lists + A = [[0, 3, 4, 2], + [0, 0, 1, 2], + [1, 0, 0, 1], + [0, 0, 1, 0]] + + B = [[0, 4, 2, 4], + [0, 0, 1, 0], + [0, 2, 0, 2], + [0, 1, 2, 0]] + + res = quadratic_assignment(A, B, method=self.method, + options={"rng": 0, "maximize": False}) + assert_equal(res.fun, 10) + assert_equal(res.col_ind, np.array([1, 2, 3, 0])) + + res = quadratic_assignment(A, B, method=self.method, + options={"rng": 0, "maximize": True}) + + if self.method == 'faq': + # Global optimum is 40, but FAQ gets 37 + assert_equal(res.fun, 37) + assert_equal(res.col_ind, np.array([0, 2, 3, 1])) + else: + assert_equal(res.fun, 40) + assert_equal(res.col_ind, np.array([0, 3, 1, 2])) + + res = quadratic_assignment(A, B, method=self.method, + options={"rng": 0, "maximize": True}) + + # Test global optima of problem from Umeyama IIIB + # https://pcl.sitehost.iu.edu/rgoldsto/papers/weighted%20graph%20match2.pdf + # Graph matching maximum is in the paper + # QAP minimum determined by brute force + def test_accuracy_2(self): + + A = np.array([[0, 5, 8, 6], + [5, 0, 5, 1], + [8, 5, 0, 2], + [6, 1, 2, 0]]) + + B = np.array([[0, 1, 8, 4], + [1, 0, 5, 2], + [8, 5, 0, 5], + [4, 2, 5, 0]]) + + res = quadratic_assignment(A, B, method=self.method, + options={"rng": 0, "maximize": False}) + if self.method == 'faq': + # Global optimum is 176, but FAQ gets 178 + assert_equal(res.fun, 178) + assert_equal(res.col_ind, np.array([1, 0, 3, 2])) + else: + assert_equal(res.fun, 176) + assert_equal(res.col_ind, np.array([1, 2, 3, 0])) + + res = quadratic_assignment(A, B, method=self.method, + options={"rng": 0, "maximize": True}) + assert_equal(res.fun, 286) + assert_equal(res.col_ind, np.array([2, 3, 0, 1])) + + def test_accuracy_3(self): + + A, B, opt_perm = chr12c() + + # basic minimization + res = quadratic_assignment(A, B, method=self.method, + options={"rng": 0}) + assert_(11156 <= res.fun < 21000) + assert_equal(res.fun, _score(A, B, res.col_ind)) + + # basic maximization + res = quadratic_assignment(A, B, method=self.method, + options={"rng": 0, 'maximize': True}) + assert_(74000 <= res.fun < 85000) + assert_equal(res.fun, _score(A, B, res.col_ind)) + + # check ofv with strictly partial match + seed_cost = np.array([4, 8, 10]) + seed = np.asarray([seed_cost, opt_perm[seed_cost]]).T + res = quadratic_assignment(A, B, method=self.method, + options={'partial_match': seed}) + assert_(11156 <= res.fun < 21000) + assert_equal(res.col_ind[seed_cost], opt_perm[seed_cost]) + + # check performance when partial match is the global optimum + seed = np.asarray([np.arange(len(A)), opt_perm]).T + res = quadratic_assignment(A, B, method=self.method, + options={'partial_match': seed}) + assert_equal(res.col_ind, seed[:, 1].T) + assert_equal(res.fun, 11156) + assert_equal(res.nit, 0) + + # check performance with zero sized matrix inputs + empty = np.empty((0, 0)) + res = quadratic_assignment(empty, empty, method=self.method, + options={"rng": 0}) + assert_equal(res.nit, 0) + assert_equal(res.fun, 0) + + def test_unknown_options(self): + A, B, opt_perm = chr12c() + + def f(): + quadratic_assignment(A, B, method=self.method, + options={"ekki-ekki": True}) + assert_warns(OptimizeWarning, f) + + +class 
TestFAQ(QAPCommonTests): + method = "faq" + + def test_options(self): + # cost and distance matrices of QAPLIB instance chr12c + A, B, opt_perm = chr12c() + n = len(A) + + # check that maxiter is obeyed with a low input value + res = quadratic_assignment(A, B, + options={'maxiter': 5}) + assert_equal(res.nit, 5) + + # test with shuffle + res = quadratic_assignment(A, B, + options={'shuffle_input': True}) + assert_(11156 <= res.fun < 21000) + + # test with randomized init + res = quadratic_assignment(A, B, + options={'rng': 1, 'P0': "randomized"}) + assert_(11156 <= res.fun < 21000) + + # check with specified P0 + K = np.ones((n, n)) / float(n) + K = _doubly_stochastic(K) + res = quadratic_assignment(A, B, + options={'P0': K}) + assert_(11156 <= res.fun < 21000) + + def test_specific_input_validation(self): + + A = np.identity(2) + B = A + + # method is implicitly faq + + # ValueError Checks: making sure single value parameters are of + # correct value + with pytest.raises(ValueError, match="Invalid 'P0' parameter"): + quadratic_assignment(A, B, options={'P0': "random"}) + with pytest.raises( + ValueError, match="'maxiter' must be a positive integer"): + quadratic_assignment(A, B, options={'maxiter': -1}) + with pytest.raises(ValueError, match="'tol' must be a positive float"): + quadratic_assignment(A, B, options={'tol': -1}) + + # TypeError Checks: making sure single value parameters are of + # correct type + with pytest.raises(TypeError): + quadratic_assignment(A, B, options={'maxiter': 1.5}) + + # test P0 matrix input + with pytest.raises( + ValueError, + match="`P0` matrix must have shape m' x m', where m'=n-m"): + quadratic_assignment( + np.identity(4), np.identity(4), + options={'P0': np.ones((3, 3))} + ) + + K = [[0.4, 0.2, 0.3], + [0.3, 0.6, 0.2], + [0.2, 0.2, 0.7]] + # matrix that isn't quite doubly stochastic + with pytest.raises( + ValueError, match="`P0` matrix must be doubly stochastic"): + quadratic_assignment( + np.identity(3), np.identity(3), options={'P0': K} + ) + + +class Test2opt(QAPCommonTests): + method = "2opt" + + def test_deterministic(self): + # np.random.seed(0) executes before every method + n = 20 + + A = np.random.rand(n, n) + B = np.random.rand(n, n) + res1 = quadratic_assignment(A, B, method=self.method) + + np.random.seed(0) + + A = np.random.rand(n, n) + B = np.random.rand(n, n) + res2 = quadratic_assignment(A, B, method=self.method) + + assert_equal(res1.nit, res2.nit) + + def test_partial_guess(self): + n = 5 + A = np.random.rand(n, n) + B = np.random.rand(n, n) + + res1 = quadratic_assignment(A, B, method=self.method, + options={'rng': 0}) + guess = np.array([np.arange(5), res1.col_ind]).T + res2 = quadratic_assignment(A, B, method=self.method, + options={'rng': 0, 'partial_guess': guess}) + fix = [2, 4] + match = np.array([np.arange(5)[fix], res1.col_ind[fix]]).T + res3 = quadratic_assignment(A, B, method=self.method, + options={'rng': 0, 'partial_guess': guess, + 'partial_match': match}) + assert_(res1.nit != n*(n+1)/2) + assert_equal(res2.nit, n*(n+1)/2) # tests each swap exactly once + assert_equal(res3.nit, (n-2)*(n-1)/2) # tests free swaps exactly once + + def test_specific_input_validation(self): + # can't have more seed nodes than cost/dist nodes + _rm = _range_matrix + with pytest.raises( + ValueError, + match="`partial_guess` can have only as many entries as"): + quadratic_assignment(np.identity(3), np.identity(3), + method=self.method, + options={'partial_guess': _rm(5, 2)}) + # test for only two seed columns + with pytest.raises( +
ValueError, match="`partial_guess` must have two columns"): + quadratic_assignment( + np.identity(3), np.identity(3), method=self.method, + options={'partial_guess': _range_matrix(2, 3)} + ) + # test that seed has no more than two dimensions + with pytest.raises( + ValueError, match="`partial_guess` must have exactly two"): + quadratic_assignment( + np.identity(3), np.identity(3), method=self.method, + options={'partial_guess': np.random.rand(3, 2, 2)} + ) + # seeds cannot be negative valued + with pytest.raises( + ValueError, match="`partial_guess` must contain only pos"): + quadratic_assignment( + np.identity(3), np.identity(3), method=self.method, + options={'partial_guess': -1 * _range_matrix(2, 2)} + ) + # seeds can't have values greater than number of nodes + with pytest.raises( + ValueError, + match="`partial_guess` entries must be less than number"): + quadratic_assignment( + np.identity(5), np.identity(5), method=self.method, + options={'partial_guess': 2 * _range_matrix(4, 2)} + ) + # columns of seed matrix must be unique + with pytest.raises( + ValueError, + match="`partial_guess` column entries must be unique"): + quadratic_assignment( + np.identity(3), np.identity(3), method=self.method, + options={'partial_guess': np.ones((2, 2))} + ) + + +class TestQAPOnce: + def setup_method(self): + np.random.seed(0) + + # these don't need to be repeated for each method + def test_common_input_validation(self): + # test that non square matrices return error + with pytest.raises(ValueError, match="`A` must be square"): + quadratic_assignment( + np.random.random((3, 4)), + np.random.random((3, 3)), + ) + with pytest.raises(ValueError, match="`B` must be square"): + quadratic_assignment( + np.random.random((3, 3)), + np.random.random((3, 4)), + ) + # test that cost and dist matrices have no more than two dimensions + with pytest.raises( + ValueError, match="`A` and `B` must have exactly two"): + quadratic_assignment( + np.random.random((3, 3, 3)), + np.random.random((3, 3, 3)), + ) + # test that cost and dist matrices of different sizes return error + with pytest.raises( + ValueError, + match="`A` and `B` matrices must be of equal size"): + quadratic_assignment( + np.random.random((3, 3)), + np.random.random((4, 4)), + ) + # can't have more seed nodes than cost/dist nodes + _rm = _range_matrix + with pytest.raises( + ValueError, + match="`partial_match` can have only as many seeds as"): + quadratic_assignment(np.identity(3), np.identity(3), + options={'partial_match': _rm(5, 2)}) + # test for only two seed columns + with pytest.raises( + ValueError, match="`partial_match` must have two columns"): + quadratic_assignment( + np.identity(3), np.identity(3), + options={'partial_match': _range_matrix(2, 3)} + ) + # test that seed has no more than two dimensions + with pytest.raises( + ValueError, match="`partial_match` must have exactly two"): + quadratic_assignment( + np.identity(3), np.identity(3), + options={'partial_match': np.random.rand(3, 2, 2)} + ) + # seeds cannot be negative valued + with pytest.raises( + ValueError, match="`partial_match` must contain only pos"): + quadratic_assignment( + np.identity(3), np.identity(3), + options={'partial_match': -1 * _range_matrix(2, 2)} + ) + # seeds can't have values greater than number of nodes + with pytest.raises( + ValueError, + match="`partial_match` entries must be less than number"): + quadratic_assignment( + np.identity(5), np.identity(5), + options={'partial_match': 2 * _range_matrix(4, 2)} + ) + # columns of seed matrix must be unique + 
with pytest.raises( + ValueError, + match="`partial_match` column entries must be unique"): + quadratic_assignment( + np.identity(3), np.identity(3), + options={'partial_match': np.ones((2, 2))} + ) + + +def _range_matrix(a, b): + mat = np.zeros((a, b)) + for i in range(b): + mat[:, i] = np.arange(a) + return mat + + +def _doubly_stochastic(P, tol=1e-3): + # cleaner implementation of btaba/sinkhorn_knopp + + max_iter = 1000 + c = 1 / P.sum(axis=0) + r = 1 / (P @ c) + P_eps = P + + for it in range(max_iter): + if ((np.abs(P_eps.sum(axis=1) - 1) < tol).all() and + (np.abs(P_eps.sum(axis=0) - 1) < tol).all()): + # All column/row sums ~= 1 within threshold + break + + c = 1 / (r @ P) + r = 1 / (P @ c) + P_eps = r[:, None] * P * c + + return P_eps diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_regression.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_regression.py new file mode 100644 index 0000000000000000000000000000000000000000..44916ba96293db19756b8222422e76945aa48ebb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_regression.py @@ -0,0 +1,40 @@ +"""Regression tests for optimize. + +""" +import numpy as np +from numpy.testing import assert_almost_equal +from pytest import raises as assert_raises + +import scipy.optimize + + +class TestRegression: + + def test_newton_x0_is_0(self): + # Regression test for gh-1601 + tgt = 1 + res = scipy.optimize.newton(lambda x: x - 1, 0) + assert_almost_equal(res, tgt) + + def test_newton_integers(self): + # Regression test for gh-1741 + root = scipy.optimize.newton(lambda x: x**2 - 1, x0=2, + fprime=lambda x: 2*x) + assert_almost_equal(root, 1.0) + + def test_lmdif_errmsg(self): + # This shouldn't cause a crash on Python 3 + class SomeError(Exception): + pass + counter = [0] + + def func(x): + counter[0] += 1 + if counter[0] < 3: + return x**2 - np.array([9, 10, 11]) + else: + raise SomeError() + assert_raises(SomeError, + scipy.optimize.leastsq, + func, [1, 2, 3]) + diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_slsqp.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_slsqp.py new file mode 100644 index 0000000000000000000000000000000000000000..cab46291b91e53a6b5f55cc0185741ca966ba514 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_slsqp.py @@ -0,0 +1,608 @@ +""" +Unit test for SLSQP optimization. +""" +from numpy.testing import (assert_, assert_array_almost_equal, + assert_allclose, assert_equal) +from pytest import raises as assert_raises +import pytest +import numpy as np + +from scipy.optimize import fmin_slsqp, minimize, Bounds, NonlinearConstraint + + +class MyCallBack: + """pass a custom callback function + + This makes sure it's being used. + """ + def __init__(self): + self.been_called = False + self.ncalls = 0 + + def __call__(self, x): + self.been_called = True + self.ncalls += 1 + + +class TestSLSQP: + """ + Test SLSQP algorithm using Example 14.4 from Numerical Methods for + Engineers by Steven Chapra and Raymond Canale. + This example maximizes the function f(x) = 2*x*y + 2*x - x**2 - 2*y**2, + which has a maximum at x=2, y=1. + """ + def setup_method(self): + self.opts = {'disp': False} + + def fun(self, d, sign=1.0): + """ + Arguments: + d - A list of two elements, where d[0] represents x and d[1] represents y + in the following equation. + sign - A multiplier for f. 
Since we want to optimize it, and the SciPy + optimizers can only minimize functions, we need to multiply it by + -1 to achieve the desired solution + Returns: + 2*x*y + 2*x - x**2 - 2*y**2 + + """ + x = d[0] + y = d[1] + return sign*(2*x*y + 2*x - x**2 - 2*y**2) + + def jac(self, d, sign=1.0): + """ + This is the derivative of fun, returning a NumPy array + representing df/dx and df/dy. + + """ + x = d[0] + y = d[1] + dfdx = sign*(-2*x + 2*y + 2) + dfdy = sign*(2*x - 4*y) + return np.array([dfdx, dfdy], float) + + def fun_and_jac(self, d, sign=1.0): + return self.fun(d, sign), self.jac(d, sign) + + def f_eqcon(self, x, sign=1.0): + """ Equality constraint """ + return np.array([x[0] - x[1]]) + + def fprime_eqcon(self, x, sign=1.0): + """ Equality constraint, derivative """ + return np.array([[1, -1]]) + + def f_eqcon_scalar(self, x, sign=1.0): + """ Scalar equality constraint """ + return self.f_eqcon(x, sign)[0] + + def fprime_eqcon_scalar(self, x, sign=1.0): + """ Scalar equality constraint, derivative """ + return self.fprime_eqcon(x, sign)[0].tolist() + + def f_ieqcon(self, x, sign=1.0): + """ Inequality constraint """ + return np.array([x[0] - x[1] - 1.0]) + + def fprime_ieqcon(self, x, sign=1.0): + """ Inequality constraint, derivative """ + return np.array([[1, -1]]) + + def f_ieqcon2(self, x): + """ Vector inequality constraint """ + return np.asarray(x) + + def fprime_ieqcon2(self, x): + """ Vector inequality constraint, derivative """ + return np.identity(x.shape[0]) + + # minimize + def test_minimize_unbounded_approximated(self): + # Minimize, method='SLSQP': unbounded, approximated jacobian. + jacs = [None, False, '2-point', '3-point'] + for jac in jacs: + res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ), + jac=jac, method='SLSQP', + options=self.opts) + assert_(res['success'], res['message']) + assert_allclose(res.x, [2, 1]) + + def test_minimize_unbounded_given(self): + # Minimize, method='SLSQP': unbounded, given Jacobian. + res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ), + jac=self.jac, method='SLSQP', options=self.opts) + assert_(res['success'], res['message']) + assert_allclose(res.x, [2, 1]) + + def test_minimize_bounded_approximated(self): + # Minimize, method='SLSQP': bounded, approximated jacobian. + jacs = [None, False, '2-point', '3-point'] + for jac in jacs: + with np.errstate(invalid='ignore'): + res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ), + jac=jac, + bounds=((2.5, None), (None, 0.5)), + method='SLSQP', options=self.opts) + assert_(res['success'], res['message']) + assert_allclose(res.x, [2.5, 0.5]) + assert_(2.5 <= res.x[0]) + assert_(res.x[1] <= 0.5) + + def test_minimize_unbounded_combined(self): + # Minimize, method='SLSQP': unbounded, combined function and Jacobian. + res = minimize(self.fun_and_jac, [-1.0, 1.0], args=(-1.0, ), + jac=True, method='SLSQP', options=self.opts) + assert_(res['success'], res['message']) + assert_allclose(res.x, [2, 1]) + + def test_minimize_equality_approximated(self): + # Minimize with method='SLSQP': equality constraint, approx. jacobian. + jacs = [None, False, '2-point', '3-point'] + for jac in jacs: + res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ), + jac=jac, + constraints={'type': 'eq', + 'fun': self.f_eqcon, + 'args': (-1.0, )}, + method='SLSQP', options=self.opts) + assert_(res['success'], res['message']) + assert_allclose(res.x, [1, 1]) + + def test_minimize_equality_given(self): + # Minimize with method='SLSQP': equality constraint, given Jacobian. 
+ res = minimize(self.fun, [-1.0, 1.0], jac=self.jac, + method='SLSQP', args=(-1.0,), + constraints={'type': 'eq', 'fun':self.f_eqcon, + 'args': (-1.0, )}, + options=self.opts) + assert_(res['success'], res['message']) + assert_allclose(res.x, [1, 1]) + + def test_minimize_equality_given2(self): + # Minimize with method='SLSQP': equality constraint, given Jacobian + # for fun and const. + res = minimize(self.fun, [-1.0, 1.0], method='SLSQP', + jac=self.jac, args=(-1.0,), + constraints={'type': 'eq', + 'fun': self.f_eqcon, + 'args': (-1.0, ), + 'jac': self.fprime_eqcon}, + options=self.opts) + assert_(res['success'], res['message']) + assert_allclose(res.x, [1, 1]) + + def test_minimize_equality_given_cons_scalar(self): + # Minimize with method='SLSQP': scalar equality constraint, given + # Jacobian for fun and const. + res = minimize(self.fun, [-1.0, 1.0], method='SLSQP', + jac=self.jac, args=(-1.0,), + constraints={'type': 'eq', + 'fun': self.f_eqcon_scalar, + 'args': (-1.0, ), + 'jac': self.fprime_eqcon_scalar}, + options=self.opts) + assert_(res['success'], res['message']) + assert_allclose(res.x, [1, 1]) + + def test_minimize_inequality_given(self): + # Minimize with method='SLSQP': inequality constraint, given Jacobian. + res = minimize(self.fun, [-1.0, 1.0], method='SLSQP', + jac=self.jac, args=(-1.0, ), + constraints={'type': 'ineq', + 'fun': self.f_ieqcon, + 'args': (-1.0, )}, + options=self.opts) + assert_(res['success'], res['message']) + assert_allclose(res.x, [2, 1], atol=1e-3) + + def test_minimize_inequality_given_vector_constraints(self): + # Minimize with method='SLSQP': vector inequality constraint, given + # Jacobian. + res = minimize(self.fun, [-1.0, 1.0], jac=self.jac, + method='SLSQP', args=(-1.0,), + constraints={'type': 'ineq', + 'fun': self.f_ieqcon2, + 'jac': self.fprime_ieqcon2}, + options=self.opts) + assert_(res['success'], res['message']) + assert_allclose(res.x, [2, 1]) + + def test_minimize_bounded_constraint(self): + # when the constraint makes the solver go up against a parameter + # bound make sure that the numerical differentiation of the + # jacobian doesn't try to exceed that bound using a finite difference. + # gh11403 + def c(x): + assert 0 <= x[0] <= 1 and 0 <= x[1] <= 1, x + return x[0] ** 0.5 + x[1] + + def f(x): + assert 0 <= x[0] <= 1 and 0 <= x[1] <= 1, x + return -x[0] ** 2 + x[1] ** 2 + + cns = [NonlinearConstraint(c, 0, 1.5)] + x0 = np.asarray([0.9, 0.5]) + bnd = Bounds([0., 0.], [1.0, 1.0]) + minimize(f, x0, method='SLSQP', bounds=bnd, constraints=cns) + + def test_minimize_bound_equality_given2(self): + # Minimize with method='SLSQP': bounds, eq. const., given jac. for + # fun. and const. + res = minimize(self.fun, [-1.0, 1.0], method='SLSQP', + jac=self.jac, args=(-1.0, ), + bounds=[(-0.8, 1.), (-1, 0.8)], + constraints={'type': 'eq', + 'fun': self.f_eqcon, + 'args': (-1.0, ), + 'jac': self.fprime_eqcon}, + options=self.opts) + assert_(res['success'], res['message']) + assert_allclose(res.x, [0.8, 0.8], atol=1e-3) + assert_(-0.8 <= res.x[0] <= 1) + assert_(-1 <= res.x[1] <= 0.8) + + # fmin_slsqp + def test_unbounded_approximated(self): + # SLSQP: unbounded, approximated Jacobian. + res = fmin_slsqp(self.fun, [-1.0, 1.0], args=(-1.0, ), + iprint = 0, full_output = 1) + x, fx, its, imode, smode = res + assert_(imode == 0, imode) + assert_array_almost_equal(x, [2, 1]) + + def test_unbounded_given(self): + # SLSQP: unbounded, given Jacobian. 
+ res = fmin_slsqp(self.fun, [-1.0, 1.0], args=(-1.0, ), + fprime = self.jac, iprint = 0, + full_output = 1) + x, fx, its, imode, smode = res + assert_(imode == 0, imode) + assert_array_almost_equal(x, [2, 1]) + + def test_equality_approximated(self): + # SLSQP: equality constraint, approximated Jacobian. + res = fmin_slsqp(self.fun,[-1.0,1.0], args=(-1.0,), + eqcons = [self.f_eqcon], + iprint = 0, full_output = 1) + x, fx, its, imode, smode = res + assert_(imode == 0, imode) + assert_array_almost_equal(x, [1, 1]) + + def test_equality_given(self): + # SLSQP: equality constraint, given Jacobian. + res = fmin_slsqp(self.fun, [-1.0, 1.0], + fprime=self.jac, args=(-1.0,), + eqcons = [self.f_eqcon], iprint = 0, + full_output = 1) + x, fx, its, imode, smode = res + assert_(imode == 0, imode) + assert_array_almost_equal(x, [1, 1]) + + def test_equality_given2(self): + # SLSQP: equality constraint, given Jacobian for fun and const. + res = fmin_slsqp(self.fun, [-1.0, 1.0], + fprime=self.jac, args=(-1.0,), + f_eqcons = self.f_eqcon, + fprime_eqcons = self.fprime_eqcon, + iprint = 0, + full_output = 1) + x, fx, its, imode, smode = res + assert_(imode == 0, imode) + assert_array_almost_equal(x, [1, 1]) + + def test_inequality_given(self): + # SLSQP: inequality constraint, given Jacobian. + res = fmin_slsqp(self.fun, [-1.0, 1.0], + fprime=self.jac, args=(-1.0, ), + ieqcons = [self.f_ieqcon], + iprint = 0, full_output = 1) + x, fx, its, imode, smode = res + assert_(imode == 0, imode) + assert_array_almost_equal(x, [2, 1], decimal=3) + + def test_bound_equality_given2(self): + # SLSQP: bounds, eq. const., given jac. for fun. and const. + res = fmin_slsqp(self.fun, [-1.0, 1.0], + fprime=self.jac, args=(-1.0, ), + bounds = [(-0.8, 1.), (-1, 0.8)], + f_eqcons = self.f_eqcon, + fprime_eqcons = self.fprime_eqcon, + iprint = 0, full_output = 1) + x, fx, its, imode, smode = res + assert_(imode == 0, imode) + assert_array_almost_equal(x, [0.8, 0.8], decimal=3) + assert_(-0.8 <= x[0] <= 1) + assert_(-1 <= x[1] <= 0.8) + + def test_scalar_constraints(self): + # Regression test for gh-2182 + x = fmin_slsqp(lambda z: z**2, [3.], + ieqcons=[lambda z: z[0] - 1], + iprint=0) + assert_array_almost_equal(x, [1.]) + + x = fmin_slsqp(lambda z: z**2, [3.], + f_ieqcons=lambda z: [z[0] - 1], + iprint=0) + assert_array_almost_equal(x, [1.]) + + def test_integer_bounds(self): + # This should not raise an exception + fmin_slsqp(lambda z: z**2 - 1, [0], bounds=[[0, 1]], iprint=0) + + def test_array_bounds(self): + # NumPy used to treat n-dimensional 1-element arrays as scalars + # in some cases. The handling of `bounds` by `fmin_slsqp` still + # supports this behavior. + bounds = [(-np.inf, np.inf), (np.array([2]), np.array([3]))] + x = fmin_slsqp(lambda z: np.sum(z**2 - 1), [2.5, 2.5], bounds=bounds, + iprint=0) + assert_array_almost_equal(x, [0, 2]) + + def test_obj_must_return_scalar(self): + # Regression test for Github Issue #5433 + # If objective function does not return a scalar, raises ValueError + with assert_raises(ValueError): + fmin_slsqp(lambda x: [0, 1], [1, 2, 3]) + + def test_obj_returns_scalar_in_list(self): + # Test for Github Issue #5433 and PR #6691 + # Objective function should be able to return length-1 Python list + # containing the scalar + fmin_slsqp(lambda x: [0], [1, 2, 3], iprint=0) + + def test_callback(self): + # Minimize, method='SLSQP': unbounded, approximated jacobian. 
Check for callback + callback = MyCallBack() + res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ), + method='SLSQP', callback=callback, options=self.opts) + assert_(res['success'], res['message']) + assert_(callback.been_called) + assert_equal(callback.ncalls, res['nit']) + + def test_inconsistent_linearization(self): + # SLSQP must be able to solve this problem, even if the + # linearized problem at the starting point is infeasible. + + # Linearized constraints are + # + # 2*x0[0]*x[0] >= 1 + # + # At x0 = [0, 1], the second constraint is clearly infeasible. + # This triggers a call with n2==1 in the LSQ subroutine. + x = [0, 1] + def f1(x): + return x[0] + x[1] - 2 + def f2(x): + return x[0] ** 2 - 1 + sol = minimize( + lambda x: x[0]**2 + x[1]**2, + x, + constraints=({'type':'eq','fun': f1}, + {'type':'ineq','fun': f2}), + bounds=((0,None), (0,None)), + method='SLSQP') + x = sol.x + + assert_allclose(f1(x), 0, atol=1e-8) + assert_(f2(x) >= -1e-8) + assert_(sol.success, sol) + + def test_regression_5743(self): + # SLSQP must not indicate success for this problem, + # which is infeasible. + x = [1, 2] + sol = minimize( + lambda x: x[0]**2 + x[1]**2, + x, + constraints=({'type':'eq','fun': lambda x: x[0]+x[1]-1}, + {'type':'ineq','fun': lambda x: x[0]-2}), + bounds=((0,None), (0,None)), + method='SLSQP') + assert_(not sol.success, sol) + + def test_gh_6676(self): + def func(x): + return (x[0] - 1)**2 + 2*(x[1] - 1)**2 + 0.5*(x[2] - 1)**2 + + sol = minimize(func, [0, 0, 0], method='SLSQP') + assert_(sol.jac.shape == (3,)) + + def test_invalid_bounds(self): + # Raise correct error when lower bound is greater than upper bound. + # See Github issue 6875. + bounds_list = [ + ((1, 2), (2, 1)), + ((2, 1), (1, 2)), + ((2, 1), (2, 1)), + ((np.inf, 0), (np.inf, 0)), + ((1, -np.inf), (0, 1)), + ] + for bounds in bounds_list: + with assert_raises(ValueError): + minimize(self.fun, [-1.0, 1.0], bounds=bounds, method='SLSQP') + + def test_bounds_clipping(self): + # + # SLSQP returns bogus results for initial guess out of bounds, gh-6859 + # + def f(x): + return (x[0] - 1)**2 + + sol = minimize(f, [10], method='slsqp', bounds=[(None, 0)]) + assert_(sol.success) + assert_allclose(sol.x, 0, atol=1e-10) + + sol = minimize(f, [-10], method='slsqp', bounds=[(2, None)]) + assert_(sol.success) + assert_allclose(sol.x, 2, atol=1e-10) + + sol = minimize(f, [-10], method='slsqp', bounds=[(None, 0)]) + assert_(sol.success) + assert_allclose(sol.x, 0, atol=1e-10) + + sol = minimize(f, [10], method='slsqp', bounds=[(2, None)]) + assert_(sol.success) + assert_allclose(sol.x, 2, atol=1e-10) + + sol = minimize(f, [-0.5], method='slsqp', bounds=[(-1, 0)]) + assert_(sol.success) + assert_allclose(sol.x, 0, atol=1e-10) + + sol = minimize(f, [10], method='slsqp', bounds=[(-1, 0)]) + assert_(sol.success) + assert_allclose(sol.x, 0, atol=1e-10) + + def test_infeasible_initial(self): + # Check SLSQP behavior with infeasible initial point + def f(x): + x, = x + return x*x - 2*x + 1 + + cons_u = [{'type': 'ineq', 'fun': lambda x: 0 - x}] + cons_l = [{'type': 'ineq', 'fun': lambda x: x - 2}] + cons_ul = [{'type': 'ineq', 'fun': lambda x: 0 - x}, + {'type': 'ineq', 'fun': lambda x: x + 1}] + + sol = minimize(f, [10], method='slsqp', constraints=cons_u) + assert_(sol.success) + assert_allclose(sol.x, 0, atol=1e-10) + + sol = minimize(f, [-10], method='slsqp', constraints=cons_l) + assert_(sol.success) + assert_allclose(sol.x, 2, atol=1e-10) + + sol = minimize(f, [-10], method='slsqp', constraints=cons_u) + assert_(sol.success) + 
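[Editor's note: the callback contract checked by `test_callback` above is one invocation per iteration; a sketch of using it to record the iterate trajectory:

import numpy as np
from scipy.optimize import minimize, rosen

trajectory = []
res = minimize(rosen, np.array([1.3, 0.7]), method='SLSQP',
               callback=lambda xk: trajectory.append(np.copy(xk)))
assert len(trajectory) == res.nit  # SLSQP calls the callback once per iteration
]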
assert_allclose(sol.x, 0, atol=1e-10) + + sol = minimize(f, [10], method='slsqp', constraints=cons_l) + assert_(sol.success) + assert_allclose(sol.x, 2, atol=1e-10) + + sol = minimize(f, [-0.5], method='slsqp', constraints=cons_ul) + assert_(sol.success) + assert_allclose(sol.x, 0, atol=1e-10) + + sol = minimize(f, [10], method='slsqp', constraints=cons_ul) + assert_(sol.success) + assert_allclose(sol.x, 0, atol=1e-10) + + def test_inconsistent_inequalities(self): + # gh-7618 + + def cost(x): + return -1 * x[0] + 4 * x[1] + + def ineqcons1(x): + return x[1] - x[0] - 1 + + def ineqcons2(x): + return x[0] - x[1] + + # The inequalities are inconsistent, so no solution can exist: + # + # x1 >= x0 + 1 + # x0 >= x1 + + x0 = (1,5) + bounds = ((-5, 5), (-5, 5)) + cons = (dict(type='ineq', fun=ineqcons1), dict(type='ineq', fun=ineqcons2)) + res = minimize(cost, x0, method='SLSQP', bounds=bounds, constraints=cons) + + assert_(not res.success) + + def test_new_bounds_type(self): + def f(x): + return x[0] ** 2 + x[1] ** 2 + bounds = Bounds([1, 0], [np.inf, np.inf]) + sol = minimize(f, [0, 0], method='slsqp', bounds=bounds) + assert_(sol.success) + assert_allclose(sol.x, [1, 0]) + + def test_nested_minimization(self): + + class NestedProblem: + + def __init__(self): + self.F_outer_count = 0 + + def F_outer(self, x): + self.F_outer_count += 1 + if self.F_outer_count > 1000: + raise Exception("Nested minimization failed to terminate.") + inner_res = minimize(self.F_inner, (3, 4), method="SLSQP") + assert_(inner_res.success) + assert_allclose(inner_res.x, [1, 1]) + return x[0]**2 + x[1]**2 + x[2]**2 + + def F_inner(self, x): + return (x[0] - 1)**2 + (x[1] - 1)**2 + + def solve(self): + outer_res = minimize(self.F_outer, (5, 5, 5), method="SLSQP") + assert_(outer_res.success) + assert_allclose(outer_res.x, [0, 0, 0]) + + problem = NestedProblem() + problem.solve() + + def test_gh1758(self): + # the test suggested in gh1758 + # https://nlopt.readthedocs.io/en/latest/NLopt_Tutorial/ + # implement two equality constraints, in R^2. + def fun(x): + return np.sqrt(x[1]) + + def f_eqcon(x): + """ Equality constraint """ + return x[1] - (2 * x[0]) ** 3 + + def f_eqcon2(x): + """ Equality constraint """ + return x[1] - (-x[0] + 1) ** 3 + + c1 = {'type': 'eq', 'fun': f_eqcon} + c2 = {'type': 'eq', 'fun': f_eqcon2} + + res = minimize(fun, [8, 0.25], method='SLSQP', + constraints=[c1, c2], bounds=[(-0.5, 1), (0, 8)]) + + np.testing.assert_allclose(res.fun, 0.5443310539518) + np.testing.assert_allclose(res.x, [0.33333333, 0.2962963]) + assert res.success + + def test_gh9640(self): + np.random.seed(10) + cons = ({'type': 'ineq', 'fun': lambda x: -x[0] - x[1] - 3}, + {'type': 'ineq', 'fun': lambda x: x[1] + x[2] - 2}) + bnds = ((-2, 2), (-2, 2), (-2, 2)) + + def target(x): + return 1 + x0 = [-1.8869783504471584, -0.640096352696244, -0.8174212253407696] + res = minimize(target, x0, method='SLSQP', bounds=bnds, constraints=cons, + options={'disp':False, 'maxiter':10000}) + + # The problem is infeasible, so it cannot succeed + assert not res.success + + def test_parameters_stay_within_bounds(self): + # gh11403. For some problems the SLSQP Fortran code suggests a step + # outside one of the lower/upper bounds. When this happens + # approx_derivative complains because it's being asked to evaluate + # a gradient outside its domain. 
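[Editor's note: until the warning exercised below is resolved upstream, a pragmatic user-side workaround is to project the incoming point back onto the box before evaluating. This is an illustration, not the library's fix:

import numpy as np

lb, ub = np.array([0.1]), np.array([1.0])

def f_guarded(x):
    x = np.clip(x, lb, ub)  # tolerate tiny finite-difference excursions
    return np.linalg.norm(x)
]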
+ np.random.seed(1) + bounds = Bounds(np.array([0.1]), np.array([1.0])) + n_inputs = len(bounds.lb) + x0 = np.array(bounds.lb + (bounds.ub - bounds.lb) * + np.random.random(n_inputs)) + + def f(x): + assert (x >= bounds.lb).all() + return np.linalg.norm(x) + + with pytest.warns(RuntimeWarning, match='x were outside bounds'): + res = minimize(f, x0, method='SLSQP', bounds=bounds) + assert res.success diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_tnc.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_tnc.py new file mode 100644 index 0000000000000000000000000000000000000000..2cde9837bfd08e62916660a9750d833629b6b547 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_tnc.py @@ -0,0 +1,345 @@ +""" +Unit tests for TNC optimization routine from tnc.py +""" +import pytest +from numpy.testing import assert_allclose, assert_equal + +import numpy as np +from math import pow + +from scipy import optimize + + +class TestTnc: + """TNC non-linear optimization. + + These tests are taken from Prof. K. Schittkowski's test examples + for constrained non-linear programming. + + http://www.uni-bayreuth.de/departments/math/~kschittkowski/home.htm + + """ + def setup_method(self): + # options for minimize + self.opts = {'disp': False, 'maxfun': 200} + + # objective functions and Jacobian for each test + def f1(self, x, a=100.0): + return a * pow((x[1] - pow(x[0], 2)), 2) + pow(1.0 - x[0], 2) + + def g1(self, x, a=100.0): + dif = [0, 0] + dif[1] = 2 * a * (x[1] - pow(x[0], 2)) + dif[0] = -2.0 * (x[0] * (dif[1] - 1.0) + 1.0) + return dif + + def fg1(self, x, a=100.0): + return self.f1(x, a), self.g1(x, a) + + def f3(self, x): + return x[1] + pow(x[1] - x[0], 2) * 1.0e-5 + + def g3(self, x): + dif = [0, 0] + dif[0] = -2.0 * (x[1] - x[0]) * 1.0e-5 + dif[1] = 1.0 - dif[0] + return dif + + def fg3(self, x): + return self.f3(x), self.g3(x) + + def f4(self, x): + return pow(x[0] + 1.0, 3) / 3.0 + x[1] + + def g4(self, x): + dif = [0, 0] + dif[0] = pow(x[0] + 1.0, 2) + dif[1] = 1.0 + return dif + + def fg4(self, x): + return self.f4(x), self.g4(x) + + def f5(self, x): + return np.sin(x[0] + x[1]) + pow(x[0] - x[1], 2) - \ + 1.5 * x[0] + 2.5 * x[1] + 1.0 + + def g5(self, x): + dif = [0, 0] + v1 = np.cos(x[0] + x[1]) + v2 = 2.0*(x[0] - x[1]) + + dif[0] = v1 + v2 - 1.5 + dif[1] = v1 - v2 + 2.5 + return dif + + def fg5(self, x): + return self.f5(x), self.g5(x) + + def f38(self, x): + return (100.0 * pow(x[1] - pow(x[0], 2), 2) + + pow(1.0 - x[0], 2) + 90.0 * pow(x[3] - pow(x[2], 2), 2) + + pow(1.0 - x[2], 2) + 10.1 * (pow(x[1] - 1.0, 2) + + pow(x[3] - 1.0, 2)) + + 19.8 * (x[1] - 1.0) * (x[3] - 1.0)) * 1.0e-5 + + def g38(self, x): + dif = [0, 0, 0, 0] + dif[0] = (-400.0 * x[0] * (x[1] - pow(x[0], 2)) - + 2.0 * (1.0 - x[0])) * 1.0e-5 + dif[1] = (200.0 * (x[1] - pow(x[0], 2)) + 20.2 * (x[1] - 1.0) + + 19.8 * (x[3] - 1.0)) * 1.0e-5 + dif[2] = (- 360.0 * x[2] * (x[3] - pow(x[2], 2)) - + 2.0 * (1.0 - x[2])) * 1.0e-5 + dif[3] = (180.0 * (x[3] - pow(x[2], 2)) + 20.2 * (x[3] - 1.0) + + 19.8 * (x[1] - 1.0)) * 1.0e-5 + return dif + + def fg38(self, x): + return self.f38(x), self.g38(x) + + def f45(self, x): + return 2.0 - x[0] * x[1] * x[2] * x[3] * x[4] / 120.0 + + def g45(self, x): + dif = [0] * 5 + dif[0] = - x[1] * x[2] * x[3] * x[4] / 120.0 + dif[1] = - x[0] * x[2] * x[3] * x[4] / 120.0 + dif[2] = - x[0] * x[1] * x[3] * x[4] / 120.0 + dif[3] = - x[0] * x[1] * x[2] * x[4] / 120.0 + dif[4] = - x[0] * x[1] * x[2] * x[3] / 120.0 + return dif + + 
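[Editor's note: analytic objective/gradient pairs like the `f1`/`g1` ... `f45`/`g45` methods above are easy to get wrong; `scipy.optimize.check_grad` compares a gradient against finite differences. A standalone sketch using the same Rosenbrock-type function:

import numpy as np
from scipy.optimize import check_grad

def f1(x):
    return 100.0 * (x[1] - x[0]**2)**2 + (1.0 - x[0])**2

def g1(x):
    return np.array([-400.0 * x[0] * (x[1] - x[0]**2) - 2.0 * (1.0 - x[0]),
                     200.0 * (x[1] - x[0]**2)])

# a small residual means the analytic gradient matches finite differences
assert check_grad(f1, g1, np.array([-2.0, 1.0])) < 1e-3
]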
def fg45(self, x): + return self.f45(x), self.g45(x) + + # tests + # minimize with method=TNC + def test_minimize_tnc1(self): + x0, bnds = [-2, 1], ([-np.inf, None], [-1.5, None]) + xopt = [1, 1] + iterx = [] # to test callback + + res = optimize.minimize(self.f1, x0, method='TNC', jac=self.g1, + bounds=bnds, options=self.opts, + callback=iterx.append) + assert_allclose(res.fun, self.f1(xopt), atol=1e-8) + assert_equal(len(iterx), res.nit) + + def test_minimize_tnc1b(self): + x0, bnds = np.array([-2, 1]), ([-np.inf, None], [-1.5, None]) + xopt = [1, 1] + x = optimize.minimize(self.f1, x0, method='TNC', + bounds=bnds, options=self.opts).x + assert_allclose(self.f1(x), self.f1(xopt), atol=1e-4) + + def test_minimize_tnc1c(self): + x0, bnds = [-2, 1], ([-np.inf, None],[-1.5, None]) + xopt = [1, 1] + x = optimize.minimize(self.fg1, x0, method='TNC', + jac=True, bounds=bnds, + options=self.opts).x + assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8) + + def test_minimize_tnc2(self): + x0, bnds = [-2, 1], ([-np.inf, None], [1.5, None]) + xopt = [-1.2210262419616387, 1.5] + x = optimize.minimize(self.f1, x0, method='TNC', + jac=self.g1, bounds=bnds, + options=self.opts).x + assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8) + + def test_minimize_tnc3(self): + x0, bnds = [10, 1], ([-np.inf, None], [0.0, None]) + xopt = [0, 0] + x = optimize.minimize(self.f3, x0, method='TNC', + jac=self.g3, bounds=bnds, + options=self.opts).x + assert_allclose(self.f3(x), self.f3(xopt), atol=1e-8) + + def test_minimize_tnc4(self): + x0,bnds = [1.125, 0.125], [(1, None), (0, None)] + xopt = [1, 0] + x = optimize.minimize(self.f4, x0, method='TNC', + jac=self.g4, bounds=bnds, + options=self.opts).x + assert_allclose(self.f4(x), self.f4(xopt), atol=1e-8) + + def test_minimize_tnc5(self): + x0, bnds = [0, 0], [(-1.5, 4),(-3, 3)] + xopt = [-0.54719755119659763, -1.5471975511965976] + x = optimize.minimize(self.f5, x0, method='TNC', + jac=self.g5, bounds=bnds, + options=self.opts).x + assert_allclose(self.f5(x), self.f5(xopt), atol=1e-8) + + def test_minimize_tnc38(self): + x0, bnds = np.array([-3, -1, -3, -1]), [(-10, 10)]*4 + xopt = [1]*4 + x = optimize.minimize(self.f38, x0, method='TNC', + jac=self.g38, bounds=bnds, + options=self.opts).x + assert_allclose(self.f38(x), self.f38(xopt), atol=1e-8) + + def test_minimize_tnc45(self): + x0, bnds = [2] * 5, [(0, 1), (0, 2), (0, 3), (0, 4), (0, 5)] + xopt = [1, 2, 3, 4, 5] + x = optimize.minimize(self.f45, x0, method='TNC', + jac=self.g45, bounds=bnds, + options=self.opts).x + assert_allclose(self.f45(x), self.f45(xopt), atol=1e-8) + + # fmin_tnc + def test_tnc1(self): + fg, x, bounds = self.fg1, [-2, 1], ([-np.inf, None], [-1.5, None]) + xopt = [1, 1] + + x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds, args=(100.0, ), + messages=optimize._tnc.MSG_NONE, + maxfun=200) + + assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8, + err_msg="TNC failed with status: " + + optimize._tnc.RCSTRINGS[rc]) + + def test_tnc1b(self): + x, bounds = [-2, 1], ([-np.inf, None], [-1.5, None]) + xopt = [1, 1] + + x, nf, rc = optimize.fmin_tnc(self.f1, x, approx_grad=True, + bounds=bounds, + messages=optimize._tnc.MSG_NONE, + maxfun=200) + + assert_allclose(self.f1(x), self.f1(xopt), atol=1e-4, + err_msg="TNC failed with status: " + + optimize._tnc.RCSTRINGS[rc]) + + def test_tnc1c(self): + x, bounds = [-2, 1], ([-np.inf, None], [-1.5, None]) + xopt = [1, 1] + + x, nf, rc = optimize.fmin_tnc(self.f1, x, fprime=self.g1, + bounds=bounds, + messages=optimize._tnc.MSG_NONE, + maxfun=200) + 
+ assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8, + err_msg="TNC failed with status: " + + optimize._tnc.RCSTRINGS[rc]) + + def test_tnc2(self): + fg, x, bounds = self.fg1, [-2, 1], ([-np.inf, None], [1.5, None]) + xopt = [-1.2210262419616387, 1.5] + + x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds, + messages=optimize._tnc.MSG_NONE, + maxfun=200) + + assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8, + err_msg="TNC failed with status: " + + optimize._tnc.RCSTRINGS[rc]) + + def test_tnc3(self): + fg, x, bounds = self.fg3, [10, 1], ([-np.inf, None], [0.0, None]) + xopt = [0, 0] + + x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds, + messages=optimize._tnc.MSG_NONE, + maxfun=200) + + assert_allclose(self.f3(x), self.f3(xopt), atol=1e-8, + err_msg="TNC failed with status: " + + optimize._tnc.RCSTRINGS[rc]) + + def test_tnc4(self): + fg, x, bounds = self.fg4, [1.125, 0.125], [(1, None), (0, None)] + xopt = [1, 0] + + x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds, + messages=optimize._tnc.MSG_NONE, + maxfun=200) + + assert_allclose(self.f4(x), self.f4(xopt), atol=1e-8, + err_msg="TNC failed with status: " + + optimize._tnc.RCSTRINGS[rc]) + + def test_tnc5(self): + fg, x, bounds = self.fg5, [0, 0], [(-1.5, 4),(-3, 3)] + xopt = [-0.54719755119659763, -1.5471975511965976] + + x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds, + messages=optimize._tnc.MSG_NONE, + maxfun=200) + + assert_allclose(self.f5(x), self.f5(xopt), atol=1e-8, + err_msg="TNC failed with status: " + + optimize._tnc.RCSTRINGS[rc]) + + def test_tnc38(self): + fg, x, bounds = self.fg38, np.array([-3, -1, -3, -1]), [(-10, 10)]*4 + xopt = [1]*4 + + x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds, + messages=optimize._tnc.MSG_NONE, + maxfun=200) + + assert_allclose(self.f38(x), self.f38(xopt), atol=1e-8, + err_msg="TNC failed with status: " + + optimize._tnc.RCSTRINGS[rc]) + + def test_tnc45(self): + fg, x, bounds = self.fg45, [2] * 5, [(0, 1), (0, 2), (0, 3), + (0, 4), (0, 5)] + xopt = [1, 2, 3, 4, 5] + + x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds, + messages=optimize._tnc.MSG_NONE, + maxfun=200) + + assert_allclose(self.f45(x), self.f45(xopt), atol=1e-8, + err_msg="TNC failed with status: " + + optimize._tnc.RCSTRINGS[rc]) + + def test_raising_exceptions(self): + # tnc was ported to cython from hand-crafted cpython code + # check that Exception handling works. + def myfunc(x): + raise RuntimeError("myfunc") + + def myfunc1(x): + return optimize.rosen(x) + + def callback(x): + raise ValueError("callback") + + with pytest.raises(RuntimeError): + optimize.minimize(myfunc, [0, 1], method="TNC") + + with pytest.raises(ValueError): + optimize.minimize( + myfunc1, [0, 1], method="TNC", callback=callback + ) + + def test_callback_shouldnt_affect_minimization(self): + # gh14879. The output of a TNC minimization was different depending + # on whether a callback was used or not. The two should be equivalent. + # The issue was that TNC was unscaling/scaling x, and this process was + # altering x in the process. Now the callback uses an unscaled + # temporary copy of x. + def callback(x): + pass + + fun = optimize.rosen + bounds = [(0, 10)] * 4 + x0 = [1, 2, 3, 4.] 
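[Editor's note: gh14879, referenced above, is a reminder that a callback should treat the array it receives as read-only; recording iterates safely means storing copies, as sketched:

import numpy as np
from scipy.optimize import minimize, rosen

history = []

def recording_callback(xk):
    history.append(np.array(xk, copy=True))  # keep a snapshot, not a reference

minimize(rosen, [1, 2, 3, 4.], method='TNC',
         bounds=[(0, 10)] * 4, callback=recording_callback)
]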
+ res = optimize.minimize( + fun, x0, bounds=bounds, method="TNC", options={"maxfun": 1000} + ) + res2 = optimize.minimize( + fun, x0, bounds=bounds, method="TNC", options={"maxfun": 1000}, + callback=callback + ) + assert_allclose(res2.x, res.x) + assert_allclose(res2.fun, res.fun) + assert_equal(res2.nfev, res.nfev) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_trustregion.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_trustregion.py new file mode 100644 index 0000000000000000000000000000000000000000..24663f18de817a77a9e63c295b5a2a453115d101 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_trustregion.py @@ -0,0 +1,112 @@ +""" +Unit tests for trust-region optimization routines. + +To run it in its simplest form:: + nosetests test_optimize.py + +""" +import pytest +import numpy as np +from numpy.testing import assert_, assert_equal, assert_allclose +from scipy.optimize import (minimize, rosen, rosen_der, rosen_hess, + rosen_hess_prod) + + +class Accumulator: + """ This is for testing callbacks.""" + def __init__(self): + self.count = 0 + self.accum = None + + def __call__(self, x): + self.count += 1 + if self.accum is None: + self.accum = np.array(x) + else: + self.accum += x + + +class TestTrustRegionSolvers: + + def setup_method(self): + self.x_opt = [1.0, 1.0] + self.easy_guess = [2.0, 2.0] + self.hard_guess = [-1.2, 1.0] + + def test_dogleg_accuracy(self): + # test the accuracy and the return_all option + x0 = self.hard_guess + r = minimize(rosen, x0, jac=rosen_der, hess=rosen_hess, tol=1e-8, + method='dogleg', options={'return_all': True},) + assert_allclose(x0, r['allvecs'][0]) + assert_allclose(r['x'], r['allvecs'][-1]) + assert_allclose(r['x'], self.x_opt) + + def test_dogleg_callback(self): + # test the callback mechanism and the maxiter and return_all options + accumulator = Accumulator() + maxiter = 5 + r = minimize(rosen, self.hard_guess, jac=rosen_der, hess=rosen_hess, + callback=accumulator, method='dogleg', + options={'return_all': True, 'maxiter': maxiter},) + assert_equal(accumulator.count, maxiter) + assert_equal(len(r['allvecs']), maxiter+1) + assert_allclose(r['x'], r['allvecs'][-1]) + assert_allclose(sum(r['allvecs'][1:]), accumulator.accum) + + def test_dogleg_user_warning(self): + with pytest.warns(RuntimeWarning, + match=r'Maximum number of iterations'): + minimize(rosen, self.hard_guess, jac=rosen_der, + hess=rosen_hess, method='dogleg', + options={'disp': True, 'maxiter': 1}, ) + + def test_solver_concordance(self): + # Assert that dogleg uses fewer iterations than ncg on the Rosenbrock + # test function, although this does not necessarily mean + # that dogleg is faster or better than ncg even for this function + # and especially not for other test functions. 
+ f = rosen + g = rosen_der + h = rosen_hess + for x0 in (self.easy_guess, self.hard_guess): + r_dogleg = minimize(f, x0, jac=g, hess=h, tol=1e-8, + method='dogleg', options={'return_all': True}) + r_trust_ncg = minimize(f, x0, jac=g, hess=h, tol=1e-8, + method='trust-ncg', + options={'return_all': True}) + r_trust_krylov = minimize(f, x0, jac=g, hess=h, tol=1e-8, + method='trust-krylov', + options={'return_all': True}) + r_ncg = minimize(f, x0, jac=g, hess=h, tol=1e-8, + method='newton-cg', options={'return_all': True}) + r_iterative = minimize(f, x0, jac=g, hess=h, tol=1e-8, + method='trust-exact', + options={'return_all': True}) + assert_allclose(self.x_opt, r_dogleg['x']) + assert_allclose(self.x_opt, r_trust_ncg['x']) + assert_allclose(self.x_opt, r_trust_krylov['x']) + assert_allclose(self.x_opt, r_ncg['x']) + assert_allclose(self.x_opt, r_iterative['x']) + assert_(len(r_dogleg['allvecs']) < len(r_ncg['allvecs'])) + + def test_trust_ncg_hessp(self): + for x0 in (self.easy_guess, self.hard_guess, self.x_opt): + r = minimize(rosen, x0, jac=rosen_der, hessp=rosen_hess_prod, + tol=1e-8, method='trust-ncg') + assert_allclose(self.x_opt, r['x']) + + def test_trust_ncg_start_in_optimum(self): + r = minimize(rosen, x0=self.x_opt, jac=rosen_der, hess=rosen_hess, + tol=1e-8, method='trust-ncg') + assert_allclose(self.x_opt, r['x']) + + def test_trust_krylov_start_in_optimum(self): + r = minimize(rosen, x0=self.x_opt, jac=rosen_der, hess=rosen_hess, + tol=1e-8, method='trust-krylov') + assert_allclose(self.x_opt, r['x']) + + def test_trust_exact_start_in_optimum(self): + r = minimize(rosen, x0=self.x_opt, jac=rosen_der, hess=rosen_hess, + tol=1e-8, method='trust-exact') + assert_allclose(self.x_opt, r['x']) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_trustregion_exact.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_trustregion_exact.py new file mode 100644 index 0000000000000000000000000000000000000000..ec3e42fd9152728b8290a5703e902a38a6cea3b8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_trustregion_exact.py @@ -0,0 +1,352 @@ +""" +Unit tests for trust-region iterative subproblem. + +To run it in its simplest form:: + nosetests test_trustregion_exact.py + +""" +import numpy as np +from scipy.optimize._trustregion_exact import ( + estimate_smallest_singular_value, + singular_leading_submatrix, + IterativeSubproblem) +from scipy.linalg import (svd, get_lapack_funcs, det, qr, norm) +from numpy.testing import (assert_array_equal, + assert_equal, assert_array_almost_equal) + + +def random_entry(n, min_eig, max_eig, case): + + # Generate random matrix + rand = np.random.uniform(-1, 1, (n, n)) + + # QR decomposition + Q, _, _ = qr(rand, pivoting=True) + + # Generate random eigenvalues + eigvalues = np.random.uniform(min_eig, max_eig, n) + eigvalues = np.sort(eigvalues)[::-1] + + # Generate matrix + Qaux = np.multiply(eigvalues, Q) + A = np.dot(Qaux, Q.T) + + # Generate the gradient vector according + # to the case being tested.
+ if case == 'hard': + g = np.zeros(n) + g[:-1] = np.random.uniform(-1, 1, n-1) + g = np.dot(Q, g) + elif case == 'jac_equal_zero': + g = np.zeros(n) + else: + g = np.random.uniform(-1, 1, n) + + return A, g + + +class TestEstimateSmallestSingularValue: + + def test_for_ill_conditioned_matrix(self): + + # Ill-conditioned triangular matrix + C = np.array([[1, 2, 3, 4], + [0, 0.05, 60, 7], + [0, 0, 0.8, 9], + [0, 0, 0, 10]]) + + # Get SVD decomposition + U, s, Vt = svd(C) + + # Get smallest singular value and corresponding right singular vector. + smin_svd = s[-1] + zmin_svd = Vt[-1, :] + + # Estimate smallest singular value + smin, zmin = estimate_smallest_singular_value(C) + + # Check the estimation + assert_array_almost_equal(smin, smin_svd, decimal=8) + assert_array_almost_equal(abs(zmin), abs(zmin_svd), decimal=8) + + +class TestSingularLeadingSubmatrix: + + def test_for_already_singular_leading_submatrix(self): + + # Define test matrix A. + # Note that the leading 2x2 submatrix is singular. + A = np.array([[1, 2, 3], + [2, 4, 5], + [3, 5, 6]]) + + # Get Cholesky from lapack functions + cholesky, = get_lapack_funcs(('potrf',), (A,)) + + # Compute Cholesky decomposition + c, k = cholesky(A, lower=False, overwrite_a=False, clean=True) + + delta, v = singular_leading_submatrix(A, c, k) + + A[k-1, k-1] += delta + + # Check that the leading submatrix is now singular. + assert_array_almost_equal(det(A[:k, :k]), 0) + + # Check that `v` fulfils the specified properties + quadratic_term = np.dot(v, np.dot(A, v)) + assert_array_almost_equal(quadratic_term, 0) + + def test_for_symmetric_indefinite_matrix(self): + + # Define test matrix A. + # Note that the leading 5x5 submatrix is indefinite. + A = np.asarray([[1, 2, 3, 7, 8], + [2, 5, 5, 9, 0], + [3, 5, 11, 1, 2], + [7, 9, 1, 7, 5], + [8, 0, 2, 5, 8]]) + + # Get Cholesky from lapack functions + cholesky, = get_lapack_funcs(('potrf',), (A,)) + + # Compute Cholesky decomposition + c, k = cholesky(A, lower=False, overwrite_a=False, clean=True) + + delta, v = singular_leading_submatrix(A, c, k) + + A[k-1, k-1] += delta + + # Check that the leading submatrix is now singular. + assert_array_almost_equal(det(A[:k, :k]), 0) + + # Check that `v` fulfils the specified properties + quadratic_term = np.dot(v, np.dot(A, v)) + assert_array_almost_equal(quadratic_term, 0) + + def test_for_first_element_equal_to_zero(self): + + # Define test matrix A. + # Note that the leading 2x2 submatrix is singular. + A = np.array([[0, 3, 11], + [3, 12, 5], + [11, 5, 6]]) + + # Get Cholesky from lapack functions + cholesky, = get_lapack_funcs(('potrf',), (A,)) + + # Compute Cholesky decomposition + c, k = cholesky(A, lower=False, overwrite_a=False, clean=True) + + delta, v = singular_leading_submatrix(A, c, k) + + A[k-1, k-1] += delta + + # Check that the leading submatrix is now singular. + assert_array_almost_equal(det(A[:k, :k]), 0) + + # Check that `v` fulfils the specified properties + quadratic_term = np.dot(v, np.dot(A, v)) + assert_array_almost_equal(quadratic_term, 0) + + +class TestIterativeSubproblem: + + def test_for_the_easy_case(self): + + # `H` is chosen such that `g` is not orthogonal to the + # eigenvector associated with the smallest eigenvalue `s`.
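+ # (Roughly: in the easy case some lam >= 0 makes H + lam*I positive + # definite while the solution of (H + lam*I)p = -g has ||p|| equal to + # the trust radius; the hard case, exercised further below, arises when + # g is orthogonal to that eigenvector and no such lam exists, so a step + # along the eigenvector has to be added.)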
+ H = [[10, 2, 3, 4], + [2, 1, 7, 1], + [3, 7, 1, 7], + [4, 1, 7, 2]] + g = [1, 1, 1, 1] + + # Trust Radius + trust_radius = 1 + + # Solve Subproblem + subprob = IterativeSubproblem(x=0, + fun=lambda x: 0, + jac=lambda x: np.array(g), + hess=lambda x: np.array(H), + k_easy=1e-10, + k_hard=1e-10) + p, hits_boundary = subprob.solve(trust_radius) + + assert_array_almost_equal(p, [0.00393332, -0.55260862, + 0.67065477, -0.49480341]) + assert_array_almost_equal(hits_boundary, True) + + def test_for_the_hard_case(self): + + # `H` is chosen such that `g` is orthogonal to the + # eigenvector associated with the smallest eigenvalue `s`. + H = [[10, 2, 3, 4], + [2, 1, 7, 1], + [3, 7, 1, 7], + [4, 1, 7, 2]] + g = [6.4852641521327437, 1, 1, 1] + s = -8.2151519874416614 + + # Trust Radius + trust_radius = 1 + + # Solve Subproblem + subprob = IterativeSubproblem(x=0, + fun=lambda x: 0, + jac=lambda x: np.array(g), + hess=lambda x: np.array(H), + k_easy=1e-10, + k_hard=1e-10) + p, hits_boundary = subprob.solve(trust_radius) + + assert_array_almost_equal(-s, subprob.lambda_current) + + def test_for_interior_convergence(self): + + H = [[1.812159, 0.82687265, 0.21838879, -0.52487006, 0.25436988], + [0.82687265, 2.66380283, 0.31508988, -0.40144163, 0.08811588], + [0.21838879, 0.31508988, 2.38020726, -0.3166346, 0.27363867], + [-0.52487006, -0.40144163, -0.3166346, 1.61927182, -0.42140166], + [0.25436988, 0.08811588, 0.27363867, -0.42140166, 1.33243101]] + + g = [0.75798952, 0.01421945, 0.33847612, 0.83725004, -0.47909534] + + # Solve Subproblem + subprob = IterativeSubproblem(x=0, + fun=lambda x: 0, + jac=lambda x: np.array(g), + hess=lambda x: np.array(H)) + p, hits_boundary = subprob.solve(1.1) + + assert_array_almost_equal(p, [-0.68585435, 0.1222621, -0.22090999, + -0.67005053, 0.31586769]) + assert_array_almost_equal(hits_boundary, False) + assert_array_almost_equal(subprob.lambda_current, 0) + assert_array_almost_equal(subprob.niter, 1) + + def test_for_jac_equal_zero(self): + + H = [[0.88547534, 2.90692271, 0.98440885, -0.78911503, -0.28035809], + [2.90692271, -0.04618819, 0.32867263, -0.83737945, 0.17116396], + [0.98440885, 0.32867263, -0.87355957, -0.06521957, -1.43030957], + [-0.78911503, -0.83737945, -0.06521957, -1.645709, -0.33887298], + [-0.28035809, 0.17116396, -1.43030957, -0.33887298, -1.68586978]] + + g = [0, 0, 0, 0, 0] + + # Solve Subproblem + subprob = IterativeSubproblem(x=0, + fun=lambda x: 0, + jac=lambda x: np.array(g), + hess=lambda x: np.array(H), + k_easy=1e-10, + k_hard=1e-10) + p, hits_boundary = subprob.solve(1.1) + + assert_array_almost_equal(p, [0.06910534, -0.01432721, + -0.65311947, -0.23815972, + -0.84954934]) + assert_array_almost_equal(hits_boundary, True) + + def test_for_jac_very_close_to_zero(self): + + H = [[0.88547534, 2.90692271, 0.98440885, -0.78911503, -0.28035809], + [2.90692271, -0.04618819, 0.32867263, -0.83737945, 0.17116396], + [0.98440885, 0.32867263, -0.87355957, -0.06521957, -1.43030957], + [-0.78911503, -0.83737945, -0.06521957, -1.645709, -0.33887298], + [-0.28035809, 0.17116396, -1.43030957, -0.33887298, -1.68586978]] + + g = [0, 0, 0, 0, 1e-15] + + # Solve Subproblem + subprob = IterativeSubproblem(x=0, + fun=lambda x: 0, + jac=lambda x: np.array(g), + hess=lambda x: np.array(H), + k_easy=1e-10, + k_hard=1e-10) + p, hits_boundary = subprob.solve(1.1) + + assert_array_almost_equal(p, [0.06910534, -0.01432721, + -0.65311947, -0.23815972, + -0.84954934]) + assert_array_almost_equal(hits_boundary, True) + + def test_for_random_entries(self): + # Seed + 
np.random.seed(1) + + # Dimension + n = 5 + + for case in ('easy', 'hard', 'jac_equal_zero'): + + eig_limits = [(-20, -15), + (-10, -5), + (-10, 0), + (-5, 5), + (-10, 10), + (0, 10), + (5, 10), + (15, 20)] + + for min_eig, max_eig in eig_limits: + # Generate random symmetric matrix H with + # eigenvalues between min_eig and max_eig. + H, g = random_entry(n, min_eig, max_eig, case) + + # Trust radius + trust_radius_list = [0.1, 0.3, 0.6, 0.8, 1, 1.2, 3.3, 5.5, 10] + + for trust_radius in trust_radius_list: + # Solve subproblem with very high accuracy + subprob_ac = IterativeSubproblem(0, + lambda x: 0, + lambda x: g, + lambda x: H, + k_easy=1e-10, + k_hard=1e-10) + + p_ac, hits_boundary_ac = subprob_ac.solve(trust_radius) + + # Compute objective function value + J_ac = 1/2*np.dot(p_ac, np.dot(H, p_ac))+np.dot(g, p_ac) + + stop_criteria = [(0.1, 2), + (0.5, 1.1), + (0.9, 1.01)] + + for k_opt, k_trf in stop_criteria: + + # k_easy and k_hard computed as functions + # of k_opt and k_trf according to + # Conn, A. R., Gould, N. I., & Toint, P. L. (2000). + # "Trust region methods". SIAM. p. 197. + k_easy = min(k_trf-1, + 1-np.sqrt(k_opt)) + k_hard = 1-k_opt + + # Solve subproblem + subprob = IterativeSubproblem(0, + lambda x: 0, + lambda x: g, + lambda x: H, + k_easy=k_easy, + k_hard=k_hard) + p, hits_boundary = subprob.solve(trust_radius) + + # Compute objective function value + J = 1/2*np.dot(p, np.dot(H, p))+np.dot(g, p) + + # Check that it respects k_trf + if hits_boundary: + assert_array_equal(np.abs(norm(p)-trust_radius) <= + (k_trf-1)*trust_radius, True) + else: + assert_equal(norm(p) <= trust_radius, True) + + # Check that it respects k_opt + assert_equal(J <= k_opt*J_ac, True) + diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_trustregion_krylov.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_trustregion_krylov.py new file mode 100644 index 0000000000000000000000000000000000000000..b130362323c9ba4a126019fb13974a37b35d7a28 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_trustregion_krylov.py @@ -0,0 +1,171 @@ +""" +Unit tests for Krylov space trust-region subproblem solver. + +To run it in its simplest form:: + pytest test_trustregion_krylov.py + +""" +import numpy as np +from scipy.optimize._trlib import (get_trlib_quadratic_subproblem) +from numpy.testing import (assert_, + assert_almost_equal, + assert_equal, assert_array_almost_equal) + +KrylovQP = get_trlib_quadratic_subproblem(tol_rel_i=1e-8, tol_rel_b=1e-6) +KrylovQP_disp = get_trlib_quadratic_subproblem(tol_rel_i=1e-8, tol_rel_b=1e-6, + disp=True) + +class TestKrylovQuadraticSubproblem: + + def test_for_the_easy_case(self): + + # `H` is chosen such that `g` is not orthogonal to the + # eigenvector associated with the smallest eigenvalue.
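+ # The assertions below check the subproblem's KKT conditions directly: + # (H + lam*I)p = -g with lam >= 0, and lam*(||p|| - trust_radius) = 0 + # by complementary slackness, so ||p|| must equal the trust radius + # whenever lam > 0.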
+ H = np.array([[1.0, 0.0, 4.0], + [0.0, 2.0, 0.0], + [4.0, 0.0, 3.0]]) + g = np.array([5.0, 0.0, 4.0]) + + # Trust Radius + trust_radius = 1.0 + + # Solve Subproblem + subprob = KrylovQP(x=0, + fun=lambda x: 0, + jac=lambda x: g, + hess=lambda x: None, + hessp=lambda x, y: H.dot(y)) + p, hits_boundary = subprob.solve(trust_radius) + + assert_array_almost_equal(p, np.array([-1.0, 0.0, 0.0])) + assert_equal(hits_boundary, True) + # check kkt satisfaction + assert_almost_equal( + np.linalg.norm(H.dot(p) + subprob.lam * p + g), + 0.0) + # check trust region constraint + assert_almost_equal(np.linalg.norm(p), trust_radius) + + trust_radius = 0.5 + p, hits_boundary = subprob.solve(trust_radius) + + assert_array_almost_equal(p, + np.array([-0.46125446, 0., -0.19298788])) + assert_equal(hits_boundary, True) + # check kkt satisfaction + assert_almost_equal( + np.linalg.norm(H.dot(p) + subprob.lam * p + g), + 0.0) + # check trust region constraint + assert_almost_equal(np.linalg.norm(p), trust_radius) + + def test_for_the_hard_case(self): + + # `H` is chosen such that `g` is orthogonal to the + # eigenvector associated with the smallest eigenvalue. + H = np.array([[1.0, 0.0, 4.0], + [0.0, 2.0, 0.0], + [4.0, 0.0, 3.0]]) + g = np.array([0.0, 2.0, 0.0]) + + # Trust Radius + trust_radius = 1.0 + + # Solve Subproblem + subprob = KrylovQP(x=0, + fun=lambda x: 0, + jac=lambda x: g, + hess=lambda x: None, + hessp=lambda x, y: H.dot(y)) + p, hits_boundary = subprob.solve(trust_radius) + + assert_array_almost_equal(p, np.array([0.0, -1.0, 0.0])) + # check kkt satisfaction + assert_almost_equal( + np.linalg.norm(H.dot(p) + subprob.lam * p + g), + 0.0) + # check trust region constraint + assert_almost_equal(np.linalg.norm(p), trust_radius) + + trust_radius = 0.5 + p, hits_boundary = subprob.solve(trust_radius) + + assert_array_almost_equal(p, np.array([0.0, -0.5, 0.0])) + # check kkt satisfaction + assert_almost_equal( + np.linalg.norm(H.dot(p) + subprob.lam * p + g), + 0.0) + # check trust region constraint + assert_almost_equal(np.linalg.norm(p), trust_radius) + + def test_for_interior_convergence(self): + + H = np.array([[1.812159, 0.82687265, 0.21838879, -0.52487006, 0.25436988], + [0.82687265, 2.66380283, 0.31508988, -0.40144163, 0.08811588], + [0.21838879, 0.31508988, 2.38020726, -0.3166346, 0.27363867], + [-0.52487006, -0.40144163, -0.3166346, 1.61927182, -0.42140166], + [0.25436988, 0.08811588, 0.27363867, -0.42140166, 1.33243101]]) + g = np.array([0.75798952, 0.01421945, 0.33847612, 0.83725004, -0.47909534]) + trust_radius = 1.1 + + # Solve Subproblem + subprob = KrylovQP(x=0, + fun=lambda x: 0, + jac=lambda x: g, + hess=lambda x: None, + hessp=lambda x, y: H.dot(y)) + p, hits_boundary = subprob.solve(trust_radius) + + # check kkt satisfaction + assert_almost_equal( + np.linalg.norm(H.dot(p) + subprob.lam * p + g), + 0.0) + + assert_array_almost_equal(p, [-0.68585435, 0.1222621, -0.22090999, + -0.67005053, 0.31586769]) + assert_array_almost_equal(hits_boundary, False) + + def test_for_very_close_to_zero(self): + + H = np.array([[0.88547534, 2.90692271, 0.98440885, -0.78911503, -0.28035809], + [2.90692271, -0.04618819, 0.32867263, -0.83737945, 0.17116396], + [0.98440885, 0.32867263, -0.87355957, -0.06521957, -1.43030957], + [-0.78911503, -0.83737945, -0.06521957, -1.645709, -0.33887298], + [-0.28035809, 0.17116396, -1.43030957, -0.33887298, -1.68586978]]) + g = np.array([0, 0, 0, 0, 1e-6]) + trust_radius = 1.1 + + # Solve Subproblem + subprob = KrylovQP(x=0, + fun=lambda x: 0, + jac=lambda x: g, + 
hess=lambda x: None, + hessp=lambda x, y: H.dot(y)) + p, hits_boundary = subprob.solve(trust_radius) + + # check kkt satisfaction + assert_almost_equal( + np.linalg.norm(H.dot(p) + subprob.lam * p + g), + 0.0) + # check trust region constraint + assert_almost_equal(np.linalg.norm(p), trust_radius) + + assert_array_almost_equal(p, [0.06910534, -0.01432721, + -0.65311947, -0.23815972, + -0.84954934]) + assert_array_almost_equal(hits_boundary, True) + + def test_disp(self, capsys): + H = -np.eye(5) + g = np.array([0, 0, 0, 0, 1e-6]) + trust_radius = 1.1 + + subprob = KrylovQP_disp(x=0, + fun=lambda x: 0, + jac=lambda x: g, + hess=lambda x: None, + hessp=lambda x, y: H.dot(y)) + p, hits_boundary = subprob.solve(trust_radius) + out, err = capsys.readouterr() + assert_(out.startswith(' TR Solving trust region problem'), repr(out)) + diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_zeros.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_zeros.py new file mode 100644 index 0000000000000000000000000000000000000000..86606d8c4318cb26825a8fa955b2ef7647f4009c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tests/test_zeros.py @@ -0,0 +1,939 @@ +import pytest + +from functools import lru_cache + +from numpy.testing import (assert_warns, assert_, + assert_allclose, + assert_equal, + assert_array_equal, + suppress_warnings) +import numpy as np +from numpy import finfo, power, nan, isclose, sqrt, exp, sin, cos + +from scipy import optimize +from scipy.optimize import (_zeros_py as zeros, newton, root_scalar, + OptimizeResult) + +from scipy._lib._util import getfullargspec_no_self as _getfullargspec + +# Import testing parameters +from scipy.optimize._tstutils import get_tests, functions as tstutils_functions + +TOL = 4*np.finfo(float).eps # tolerance + +_FLOAT_EPS = finfo(float).eps + +bracket_methods = [zeros.bisect, zeros.ridder, zeros.brentq, zeros.brenth, + zeros.toms748] +gradient_methods = [zeros.newton] +all_methods = bracket_methods + gradient_methods + +# A few test functions used frequently: +# A simple quadratic, (x-1)^2 - 2 +def f1(x): + return x ** 2 - 2 * x - 1 + + +def f1_1(x): + return 2 * x - 2 + + +def f1_2(x): + return 2.0 + 0 * x + + +def f1_and_p_and_pp(x): + return f1(x), f1_1(x), f1_2(x) + + +# Simple transcendental function +def f2(x): + return exp(x) - cos(x) + + +def f2_1(x): + return exp(x) + sin(x) + + +def f2_2(x): + return exp(x) + cos(x) + + +# An lru-cached function +@lru_cache +def f_lrucached(x): + return x + + +class TestScalarRootFinders: + # Basic tests for all scalar root finders + + xtol = 4 * np.finfo(float).eps + rtol = 4 * np.finfo(float).eps + + def _run_one_test(self, tc, method, sig_args_keys=None, + sig_kwargs_keys=None, **kwargs): + method_args = [] + for k in sig_args_keys or []: + if k not in tc: + # If a, b are not present, use x0, x1; similarly for f and func + k = {'a': 'x0', 'b': 'x1', 'func': 'f'}.get(k, k) + method_args.append(tc[k]) + + method_kwargs = dict(**kwargs) + method_kwargs.update({'full_output': True, 'disp': False}) + for k in sig_kwargs_keys or []: + method_kwargs[k] = tc[k] + + root = tc.get('root') + func_args = tc.get('args', ()) + + try: + r, rr = method(*method_args, args=func_args, **method_kwargs) + return root, rr, tc + except Exception: + return root, zeros.RootResults(nan, -1, -1, zeros._EVALUEERR, method), tc + + def run_tests(self, tests, method, name, known_fail=None, **kwargs): + r"""Run test-cases using the specified method and the supplied signature.
+ + Extract the arguments for the method call from the test case + dictionary using the supplied keys for the method's signature.""" + # The methods have one of two base signatures: + # (f, a, b, **kwargs) # bisect/brentq/... + # (func, x0, **kwargs) # newton + + # FullArgSpec with args, varargs, varkw, defaults, ... + sig = _getfullargspec(method) + assert_(not sig.kwonlyargs) + nDefaults = len(sig.defaults) + nRequired = len(sig.args) - nDefaults + sig_args_keys = sig.args[:nRequired] + sig_kwargs_keys = [] + if name in ['secant', 'newton', 'halley']: + if name in ['newton', 'halley']: + sig_kwargs_keys.append('fprime') + if name in ['halley']: + sig_kwargs_keys.append('fprime2') + kwargs['tol'] = self.xtol + else: + kwargs['xtol'] = self.xtol + kwargs['rtol'] = self.rtol + + results = [list(self._run_one_test( + tc, method, sig_args_keys=sig_args_keys, + sig_kwargs_keys=sig_kwargs_keys, **kwargs)) for tc in tests] + # results = [[true root, full output, tc], ...] + + known_fail = known_fail or [] + notcvgd = [elt for elt in results if not elt[1].converged] + notcvgd = [elt for elt in notcvgd if elt[-1]['ID'] not in known_fail] + notcvged_IDS = [elt[-1]['ID'] for elt in notcvgd] + assert_equal([len(notcvged_IDS), notcvged_IDS], [0, []]) + + # The usable xtol and rtol depend on the test + tols = {'xtol': self.xtol, 'rtol': self.rtol} + tols.update(**kwargs) + rtol = tols['rtol'] + atol = tols.get('tol', tols['xtol']) + + cvgd = [elt for elt in results if elt[1].converged] + approx = [elt[1].root for elt in cvgd] + correct = [elt[0] for elt in cvgd] + # See if the root matches the reference value + notclose = [[a] + elt for a, c, elt in zip(approx, correct, cvgd) if + not isclose(a, c, rtol=rtol, atol=atol) + and elt[-1]['ID'] not in known_fail] + # If not, evaluate the function and see if it is 0 at the purported root + fvs = [tc['f'](aroot, *tc.get('args', tuple())) + for aroot, c, fullout, tc in notclose] + notclose = [[fv] + elt for fv, elt in zip(fvs, notclose) if fv != 0] + assert_equal([notclose, len(notclose)], [[], 0]) + method_from_result = [result[1].method for result in results] + expected_method = [name for _ in results] + assert_equal(method_from_result, expected_method) + + def run_collection(self, collection, method, name, smoothness=None, + known_fail=None, **kwargs): + r"""Run a collection of tests using the specified method. + + The name is used to determine some optional arguments.""" + tests = get_tests(collection, smoothness=smoothness) + self.run_tests(tests, method, name, known_fail=known_fail, **kwargs) + + +class TestBracketMethods(TestScalarRootFinders): + @pytest.mark.parametrize('method', bracket_methods) + @pytest.mark.parametrize('function', tstutils_functions) + def test_basic_root_scalar(self, method, function): + # Tests bracketing root finders called via `root_scalar` on a small + # set of simple problems, each of which has a root at `x=1`. Checks for + # converged status and that the root was found. + a, b = .5, sqrt(3) + + r = root_scalar(function, method=method.__name__, bracket=[a, b], x0=a, + xtol=self.xtol, rtol=self.rtol) + assert r.converged + assert_allclose(r.root, 1.0, atol=self.xtol, rtol=self.rtol) + assert r.method == method.__name__ + + @pytest.mark.parametrize('method', bracket_methods) + @pytest.mark.parametrize('function', tstutils_functions) + def test_basic_individual(self, method, function): + # Tests individual bracketing root finders on a small set of simple + # problems, each of which has a root at `x=1`.
Checks for converged + # status and that the root was found. + a, b = .5, sqrt(3) + root, r = method(function, a, b, xtol=self.xtol, rtol=self.rtol, + full_output=True) + + assert r.converged + assert_allclose(root, 1.0, atol=self.xtol, rtol=self.rtol) + + @pytest.mark.parametrize('method', bracket_methods) + def test_aps_collection(self, method): + self.run_collection('aps', method, method.__name__, smoothness=1) + + @pytest.mark.parametrize('method', [zeros.bisect, zeros.ridder, + zeros.toms748]) + def test_chandrupatla_collection(self, method): + known_fail = {'fun7.4'} if method == zeros.ridder else {} + self.run_collection('chandrupatla', method, method.__name__, + known_fail=known_fail) + + @pytest.mark.parametrize('method', bracket_methods) + def test_lru_cached_individual(self, method): + # check that https://github.com/scipy/scipy/issues/10846 is fixed + # (`root_scalar` failed when passed a function that was `@lru_cache`d) + a, b = -1, 1 + root, r = method(f_lrucached, a, b, full_output=True) + assert r.converged + assert_allclose(root, 0) + + +class TestNewton(TestScalarRootFinders): + def test_newton_collections(self): + known_fail = ['aps.13.00'] + known_fail += ['aps.12.05', 'aps.12.17'] # fails under Windows Py27 + for collection in ['aps', 'complex']: + self.run_collection(collection, zeros.newton, 'newton', + smoothness=2, known_fail=known_fail) + + def test_halley_collections(self): + known_fail = ['aps.12.06', 'aps.12.07', 'aps.12.08', 'aps.12.09', + 'aps.12.10', 'aps.12.11', 'aps.12.12', 'aps.12.13', + 'aps.12.14', 'aps.12.15', 'aps.12.16', 'aps.12.17', + 'aps.12.18', 'aps.13.00'] + for collection in ['aps', 'complex']: + self.run_collection(collection, zeros.newton, 'halley', + smoothness=2, known_fail=known_fail) + + def test_newton(self): + for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]: + x = zeros.newton(f, 3, tol=1e-6) + assert_allclose(f(x), 0, atol=1e-6) + x = zeros.newton(f, 3, x1=5, tol=1e-6) # secant, x0 and x1 + assert_allclose(f(x), 0, atol=1e-6) + x = zeros.newton(f, 3, fprime=f_1, tol=1e-6) # newton + assert_allclose(f(x), 0, atol=1e-6) + x = zeros.newton(f, 3, fprime=f_1, fprime2=f_2, tol=1e-6) # halley + assert_allclose(f(x), 0, atol=1e-6) + + def test_newton_by_name(self): + r"""Invoke newton through root_scalar()""" + for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]: + r = root_scalar(f, method='newton', x0=3, fprime=f_1, xtol=1e-6) + assert_allclose(f(r.root), 0, atol=1e-6) + for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]: + r = root_scalar(f, method='newton', x0=3, xtol=1e-6) # without f' + assert_allclose(f(r.root), 0, atol=1e-6) + + def test_secant_by_name(self): + r"""Invoke secant through root_scalar()""" + for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]: + r = root_scalar(f, method='secant', x0=3, x1=2, xtol=1e-6) + assert_allclose(f(r.root), 0, atol=1e-6) + r = root_scalar(f, method='secant', x0=3, x1=5, xtol=1e-6) + assert_allclose(f(r.root), 0, atol=1e-6) + for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]: + r = root_scalar(f, method='secant', x0=3, xtol=1e-6) # without x1 + assert_allclose(f(r.root), 0, atol=1e-6) + + def test_halley_by_name(self): + r"""Invoke halley through root_scalar()""" + for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]: + r = root_scalar(f, method='halley', x0=3, + fprime=f_1, fprime2=f_2, xtol=1e-6) + assert_allclose(f(r.root), 0, atol=1e-6) + + def test_root_scalar_fail(self): + message = 'fprime2 must be specified for halley' + with pytest.raises(ValueError, 
match=message): + root_scalar(f1, method='halley', fprime=f1_1, x0=3, xtol=1e-6) # no fprime2 + message = 'fprime must be specified for halley' + with pytest.raises(ValueError, match=message): + root_scalar(f1, method='halley', fprime2=f1_2, x0=3, xtol=1e-6) # no fprime + + def test_array_newton(self): + """test newton with array""" + + def f1(x, *a): + b = a[0] + x * a[3] + return a[1] - a[2] * (np.exp(b / a[5]) - 1.0) - b / a[4] - x + + def f1_1(x, *a): + b = a[3] / a[5] + return -a[2] * np.exp(a[0] / a[5] + x * b) * b - a[3] / a[4] - 1 + + def f1_2(x, *a): + b = a[3] / a[5] + return -a[2] * np.exp(a[0] / a[5] + x * b) * b**2 + + a0 = np.array([ + 5.32725221, 5.48673747, 5.49539973, + 5.36387202, 4.80237316, 1.43764452, + 5.23063958, 5.46094772, 5.50512718, + 5.42046290 + ]) + a1 = (np.sin(range(10)) + 1.0) * 7.0 + args = (a0, a1, 1e-09, 0.004, 10, 0.27456) + x0 = [7.0] * 10 + x = zeros.newton(f1, x0, f1_1, args) + x_expected = ( + 6.17264965, 11.7702805, 12.2219954, + 7.11017681, 1.18151293, 0.143707955, + 4.31928228, 10.5419107, 12.7552490, + 8.91225749 + ) + assert_allclose(x, x_expected) + # test halley's + x = zeros.newton(f1, x0, f1_1, args, fprime2=f1_2) + assert_allclose(x, x_expected) + # test secant + x = zeros.newton(f1, x0, args=args) + assert_allclose(x, x_expected) + + def test_array_newton_complex(self): + def f(x): + return x + 1+1j + + def fprime(x): + return 1.0 + + t = np.full(4, 1j) + x = zeros.newton(f, t, fprime=fprime) + assert_allclose(f(x), 0.) + + # should work even if x0 is not complex + t = np.ones(4) + x = zeros.newton(f, t, fprime=fprime) + assert_allclose(f(x), 0.) + + x = zeros.newton(f, t) + assert_allclose(f(x), 0.) + + def test_array_secant_active_zero_der(self): + """test secant doesn't continue to iterate zero derivatives""" + x = zeros.newton(lambda x, *a: x*x - a[0], x0=[4.123, 5], + args=[np.array([17, 25])]) + assert_allclose(x, (4.123105625617661, 5.0)) + + def test_array_newton_integers(self): + # test secant with float + x = zeros.newton(lambda y, z: z - y ** 2, [4.0] * 2, + args=([15.0, 17.0],)) + assert_allclose(x, (3.872983346207417, 4.123105625617661)) + # test integer becomes float + x = zeros.newton(lambda y, z: z - y ** 2, [4] * 2, args=([15, 17],)) + assert_allclose(x, (3.872983346207417, 4.123105625617661)) + + def test_array_newton_zero_der_failures(self): + # test derivative zero warning + assert_warns(RuntimeWarning, zeros.newton, + lambda y: y**2 - 2, [0., 0.], lambda y: 2 * y) + # test failures and zero_der + with pytest.warns(RuntimeWarning): + results = zeros.newton(lambda y: y**2 - 2, [0., 0.], + lambda y: 2*y, full_output=True) + assert_allclose(results.root, 0) + assert results.zero_der.all() + assert not results.converged.any() + + def test_newton_combined(self): + def f1(x): + return x ** 2 - 2 * x - 1 + def f1_1(x): + return 2 * x - 2 + def f1_2(x): + return 2.0 + 0 * x + + def f1_and_p_and_pp(x): + return x**2 - 2*x-1, 2*x-2, 2.0 + + sol0 = root_scalar(f1, method='newton', x0=3, fprime=f1_1) + sol = root_scalar(f1_and_p_and_pp, method='newton', x0=3, fprime=True) + assert_allclose(sol0.root, sol.root, atol=1e-8) + assert_equal(2*sol.function_calls, sol0.function_calls) + + sol0 = root_scalar(f1, method='halley', x0=3, fprime=f1_1, fprime2=f1_2) + sol = root_scalar(f1_and_p_and_pp, method='halley', x0=3, fprime2=True) + assert_allclose(sol0.root, sol.root, atol=1e-8) + assert_equal(3*sol.function_calls, sol0.function_calls) + + def test_newton_full_output(self, capsys): + # Test the full_output capability, both when 
converging and not. + # Use simple polynomials to avoid hitting platform dependencies + # (e.g., exp & trig) in number of iterations + + x0 = 3 + expected_counts = [(6, 7), (5, 10), (3, 9)] + + for derivs in range(3): + kwargs = {'tol': 1e-6, 'full_output': True, } + for k, v in [['fprime', f1_1], ['fprime2', f1_2]][:derivs]: + kwargs[k] = v + + x, r = zeros.newton(f1, x0, disp=False, **kwargs) + assert_(r.converged) + assert_equal(x, r.root) + assert_equal((r.iterations, r.function_calls), expected_counts[derivs]) + if derivs == 0: + assert r.function_calls <= r.iterations + 1 + else: + assert_equal(r.function_calls, (derivs + 1) * r.iterations) + + # Now repeat, allowing one fewer iteration to force convergence failure + iters = r.iterations - 1 + x, r = zeros.newton(f1, x0, maxiter=iters, disp=False, **kwargs) + assert_(not r.converged) + assert_equal(x, r.root) + assert_equal(r.iterations, iters) + + if derivs == 1: + # Check that the correct Exception is raised and + # validate the start of the message. + msg = 'Failed to converge after %d iterations, value is .*' % (iters) + with pytest.raises(RuntimeError, match=msg): + x, r = zeros.newton(f1, x0, maxiter=iters, disp=True, **kwargs) + + def test_deriv_zero_warning(self): + def func(x): + return x ** 2 - 2.0 + def dfunc(x): + return 2 * x + assert_warns(RuntimeWarning, zeros.newton, func, 0.0, dfunc, disp=False) + with pytest.raises(RuntimeError, match='Derivative was zero'): + zeros.newton(func, 0.0, dfunc) + + def test_newton_does_not_modify_x0(self): + # https://github.com/scipy/scipy/issues/9964 + x0 = np.array([0.1, 3]) + x0_copy = x0.copy() # Copy to test for equality. + newton(np.sin, x0, np.cos) + assert_array_equal(x0, x0_copy) + + def test_gh17570_defaults(self): + # Previously, when fprime was not specified, root_scalar would default + # to secant. When x1 was not specified, secant failed. + # Check that without fprime, the default is secant if x1 is specified + # and newton otherwise. + res_newton_default = root_scalar(f1, method='newton', x0=3, xtol=1e-6) + res_secant_default = root_scalar(f1, method='secant', x0=3, x1=2, + xtol=1e-6) + # `newton` uses the secant method when `x0` and `x1` are specified + res_secant = newton(f1, x0=3, x1=2, tol=1e-6, full_output=True)[1] + + # all three found a root + assert_allclose(f1(res_newton_default.root), 0, atol=1e-6) + assert res_newton_default.root.shape == tuple() + assert_allclose(f1(res_secant_default.root), 0, atol=1e-6) + assert res_secant_default.root.shape == tuple() + assert_allclose(f1(res_secant.root), 0, atol=1e-6) + assert res_secant.root.shape == tuple() + + # Defaults are correct + assert (res_secant_default.root + == res_secant.root + != res_newton_default.root) + assert (res_secant_default.iterations + == res_secant_default.function_calls - 1 # true for secant + == res_secant.iterations + != res_newton_default.iterations + == res_newton_default.function_calls/2) # newton 2-point diff + + @pytest.mark.parametrize('kwargs', [dict(), {'method': 'newton'}]) + def test_args_gh19090(self, kwargs): + def f(x, a, b): + assert a == 3 + assert b == 1 + return (x ** a - b) + + res = optimize.root_scalar(f, x0=3, args=(3, 1), **kwargs) + assert res.converged + assert_allclose(res.root, 1) + + @pytest.mark.parametrize('method', ['secant', 'newton']) + def test_int_x0_gh19280(self, method): + # Originally, `newton` ensured that only floats were passed to the + # callable. This was inadvertently changed by gh-17669. Check that + # it has been changed back.
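+ # (For instance, NumPy integer scalars refuse negative integer powers: + # np.int64(2) ** -2 raises "ValueError: Integers to negative integer + # powers are not allowed.", while 2.0 ** -2 is simply 0.25, so the + # wrapper is expected to cast x0 to float before calling f.)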
+ def f(x): + # an integer raised to a negative integer power would fail + return x**-2 - 2 + + res = optimize.root_scalar(f, x0=1, method=method) + assert res.converged + assert_allclose(abs(res.root), 2**-0.5) + assert res.root.dtype == np.dtype(np.float64) + + +def test_gh_5555(): + root = 0.1 + + def f(x): + return x - root + + methods = [zeros.bisect, zeros.ridder] + xtol = rtol = TOL + for method in methods: + res = method(f, -1e8, 1e7, xtol=xtol, rtol=rtol) + assert_allclose(root, res, atol=xtol, rtol=rtol, + err_msg='method %s' % method.__name__) + + +def test_gh_5557(): + # Show that without the changes in 5557 brentq and brenth might + # only achieve a tolerance of 2*(xtol + rtol*|res|). + + # f linearly interpolates (0, -0.1), (0.5, -0.1), and (1, + # 0.4). The important parts are that |f(0)| < |f(1)| (so that + # brent takes 0 as the initial guess), |f(0)| < atol (so that + # brent accepts 0 as the root), and that the exact root of f lies + # more than atol away from 0 (so that brent doesn't achieve the + # desired tolerance). + def f(x): + if x < 0.5: + return -0.1 + else: + return x - 0.6 + + atol = 0.51 + rtol = 4 * _FLOAT_EPS + methods = [zeros.brentq, zeros.brenth] + for method in methods: + res = method(f, 0, 1, xtol=atol, rtol=rtol) + assert_allclose(0.6, res, atol=atol, rtol=rtol) + + +def test_brent_underflow_in_root_bracketing(): + # Testing if an interval [a,b] brackets a zero of a function + # by checking f(a)*f(b) < 0 is not reliable when the product + # underflows/overflows. (reported in issue #13737) + + underflow_scenario = (-450.0, -350.0, -400.0) + overflow_scenario = (350.0, 450.0, 400.0) + + for a, b, root in [underflow_scenario, overflow_scenario]: + c = np.exp(root) + for method in [zeros.brenth, zeros.brentq]: + res = method(lambda x: np.exp(x)-c, a, b) + assert_allclose(root, res) + + +class TestRootResults: + r = zeros.RootResults(root=1.0, iterations=44, function_calls=46, flag=0, + method="newton") + + def test_repr(self): + expected_repr = (" converged: True\n flag: converged" + "\n function_calls: 46\n iterations: 44\n" + " root: 1.0\n method: newton") + assert_equal(repr(self.r), expected_repr) + + def test_type(self): + assert isinstance(self.r, OptimizeResult) + + +def test_complex_halley(): + """Test that Halley's method works with complex roots""" + def f(x, *a): + return a[0] * x**2 + a[1] * x + a[2] + + def f_1(x, *a): + return 2 * a[0] * x + a[1] + + def f_2(x, *a): + retval = 2 * a[0] + try: + size = len(x) + except TypeError: + return retval + else: + return [retval] * size + + z = complex(1.0, 2.0) + coeffs = (2.0, 3.0, 4.0) + y = zeros.newton(f, z, args=coeffs, fprime=f_1, fprime2=f_2, tol=1e-6) + # (-0.75000000000000078+1.1989578808281789j) + assert_allclose(f(y, *coeffs), 0, atol=1e-6) + z = [z] * 10 + coeffs = (2.0, 3.0, 4.0) + y = zeros.newton(f, z, args=coeffs, fprime=f_1, fprime2=f_2, tol=1e-6) + assert_allclose(f(y, *coeffs), 0, atol=1e-6) + + +def test_zero_der_nz_dp(capsys): + """Test secant method with a non-zero dp, but an infinite newton step""" + # pick a symmetrical function and choose a point on the side that, with + # dx, makes a secant that is a flat line with zero slope, e.g. + # f = (x - 100)**2, which has a root at x = 100 and is symmetrical around + # the line x = 100; we have to pick a really big number so that this is + # consistently true. Now find a point on each side so that the secant has + # a zero slope. + dx = np.finfo(float).eps ** 0.33 + # 100 - p0 = p1 - 100 = p0 * (1 + dx) + dx - 100 + # -> 200 = p0 * (2 + dx) + dx + p0 = (200.0 - dx) / (2.0 + dx) + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "RMS of") + x = zeros.newton(lambda y: (y - 100.0)**2, x0=[p0] * 10) + assert_allclose(x, [100] * 10) + # test scalar cases too + p0 = (2.0 - 1e-4) / (2.0 + 1e-4) + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "Tolerance of") + x = zeros.newton(lambda y: (y - 1.0) ** 2, x0=p0, disp=False) + assert_allclose(x, 1) + with pytest.raises(RuntimeError, match='Tolerance of'): + x = zeros.newton(lambda y: (y - 1.0) ** 2, x0=p0, disp=True) + p0 = (-2.0 + 1e-4) / (2.0 + 1e-4) + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "Tolerance of") + x = zeros.newton(lambda y: (y + 1.0) ** 2, x0=p0, disp=False) + assert_allclose(x, -1) + with pytest.raises(RuntimeError, match='Tolerance of'): + x = zeros.newton(lambda y: (y + 1.0) ** 2, x0=p0, disp=True) + + +def test_array_newton_failures(): + """Test that array newton fails as expected""" + # p = 0.68 # [MPa] + # dp = -0.068 * 1e6 # [Pa] + # T = 323 # [K] + diameter = 0.10 # [m] + # L = 100 # [m] + roughness = 0.00015 # [m] + rho = 988.1 # [kg/m**3] + mu = 5.4790e-04 # [Pa*s] + u = 2.488 # [m/s] + reynolds_number = rho * u * diameter / mu # Reynolds number + + def colebrook_eqn(darcy_friction, re, dia): + return (1 / np.sqrt(darcy_friction) + + 2 * np.log10(roughness / 3.7 / dia + + 2.51 / re / np.sqrt(darcy_friction))) + + # only some failures + with pytest.warns(RuntimeWarning): + result = zeros.newton( + colebrook_eqn, x0=[0.01, 0.2, 0.02223, 0.3], maxiter=2, + args=[reynolds_number, diameter], full_output=True + ) + assert not result.converged.all() + # they all fail + with pytest.raises(RuntimeError): + result = zeros.newton( + colebrook_eqn, x0=[0.01] * 2, maxiter=2, + args=[reynolds_number, diameter], full_output=True + ) + + +# this test should **not** raise a RuntimeWarning +def test_gh8904_zeroder_at_root_fails(): + """Test that Newton or Halley don't warn if zero derivative at root""" + + # a function that has a zero derivative at its root + def f_zeroder_root(x): + return x**3 - x**2 + + # should work with secant + r = zeros.newton(f_zeroder_root, x0=0) + assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol) + # test again with array + r = zeros.newton(f_zeroder_root, x0=[0]*10) + assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol) + + # 1st derivative + def fder(x): + return 3 * x**2 - 2 * x + + # 2nd derivative + def fder2(x): + return 6*x - 2 + + # should work with newton and halley + r = zeros.newton(f_zeroder_root, x0=0, fprime=fder) + assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol) + r = zeros.newton(f_zeroder_root, x0=0, fprime=fder, + fprime2=fder2) + assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol) + # test again with array + r = zeros.newton(f_zeroder_root, x0=[0]*10, fprime=fder) + assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol) + r = zeros.newton(f_zeroder_root, x0=[0]*10, fprime=fder, + fprime2=fder2) + assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol) + + # also test that if a root is found we do not raise RuntimeWarning even if + # the derivative is zero, e.g. at x = 0.5, fval = -0.125 and + # fder = -0.25, so the next guess is 0.5 - (-0.125/-0.25) = 0, which is the + # root; but if the solver continued with that guess it would calculate + # a zero derivative, so it should return the root without a RuntimeWarning + r = zeros.newton(f_zeroder_root, x0=0.5, fprime=fder) + assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol) + # test again with array + r = 
zeros.newton(f_zeroder_root, x0=[0.5]*10, fprime=fder) + assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol) + # doesn't apply to halley + + +def test_gh_8881(): + r"""Test that Halley's method realizes that the 2nd order adjustment + is too big and drops off to the 1st order adjustment.""" + n = 9 + + def f(x): + return power(x, 1.0/n) - power(n, 1.0/n) + + def fp(x): + return power(x, (1.0-n)/n)/n + + def fpp(x): + return power(x, (1.0-2*n)/n) * (1.0/n) * (1.0-n)/n + + x0 = 0.1 + # The root is at x=9. + # The function has positive slope, x0 < root. + # Newton succeeds in 8 iterations + rt, r = newton(f, x0, fprime=fp, full_output=True) + assert r.converged + # Before the Issue 8881/PR 8882, halley would send x in the wrong direction. + # Check that it now succeeds. + rt, r = newton(f, x0, fprime=fp, fprime2=fpp, full_output=True) + assert r.converged + + +def test_gh_9608_preserve_array_shape(): + """ + Test that shape is preserved for array inputs even if fprime or fprime2 is + scalar + """ + def f(x): + return x**2 + + def fp(x): + return 2 * x + + def fpp(x): + return 2 + + x0 = np.array([-2], dtype=np.float32) + rt, r = newton(f, x0, fprime=fp, fprime2=fpp, full_output=True) + assert r.converged + + x0_array = np.array([-2, -3], dtype=np.float32) + # This next invocation should fail + with pytest.raises(IndexError): + result = zeros.newton( + f, x0_array, fprime=fp, fprime2=fpp, full_output=True + ) + + def fpp_array(x): + return np.full(np.shape(x), 2, dtype=np.float32) + + result = zeros.newton( + f, x0_array, fprime=fp, fprime2=fpp_array, full_output=True + ) + assert result.converged.all() + + +@pytest.mark.parametrize( + "maximum_iterations,flag_expected", + [(10, zeros.CONVERR), (100, zeros.CONVERGED)]) +def test_gh9254_flag_if_maxiter_exceeded(maximum_iterations, flag_expected): + """ + Test that if the maximum iterations is exceeded that the flag is not + converged. + """ + result = zeros.brentq( + lambda x: ((1.2*x - 2.3)*x + 3.4)*x - 4.5, + -30, 30, (), 1e-6, 1e-6, maximum_iterations, + full_output=True, disp=False) + assert result[1].flag == flag_expected + if flag_expected == zeros.CONVERR: + # didn't converge because exceeded maximum iterations + assert result[1].iterations == maximum_iterations + elif flag_expected == zeros.CONVERGED: + # converged before maximum iterations + assert result[1].iterations < maximum_iterations + + +def test_gh9551_raise_error_if_disp_true(): + """Test that if disp is true then zero derivative raises RuntimeError""" + + def f(x): + return x*x + 1 + + def f_p(x): + return 2*x + + assert_warns(RuntimeWarning, zeros.newton, f, 1.0, f_p, disp=False) + with pytest.raises( + RuntimeError, + match=r'^Derivative was zero\. Failed to converge after \d+ iterations, ' + r'value is [+-]?\d*\.\d+\.$'): + zeros.newton(f, 1.0, f_p) + root = zeros.newton(f, complex(10.0, 10.0), f_p) + assert_allclose(root, complex(0.0, 1.0)) + + +@pytest.mark.parametrize('solver_name', + ['brentq', 'brenth', 'bisect', 'ridder', 'toms748']) +def test_gh3089_8394(solver_name): + # gh-3089 and gh-8394 reported that bracketing solvers returned incorrect + # results when they encountered NaNs. Check that this is resolved. 
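+ # (A NaN function value defeats sign-based bracketing: comparisons such + # as f(a)*f(m) < 0 are always False when f(m) is NaN, so without an + # explicit check a solver can silently narrow the bracket onto an + # arbitrary point instead of reporting the problem.)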
+ def f(x): + return np.nan + + solver = getattr(zeros, solver_name) + with pytest.raises(ValueError, match="The function value at x..."): + solver(f, 0, 1) + + +@pytest.mark.parametrize('method', + ['brentq', 'brenth', 'bisect', 'ridder', 'toms748']) +def test_gh18171(method): + # gh-3089 and gh-8394 reported that bracketing solvers returned incorrect + # results when they encountered NaNs. Check that `root_scalar` returns + # normally but indicates that convergence was unsuccessful. See gh-18171. + def f(x): + f._count += 1 + return np.nan + f._count = 0 + + res = root_scalar(f, bracket=(0, 1), method=method) + assert res.converged is False + assert res.flag.startswith("The function value at x") + assert res.function_calls == f._count + assert str(res.root) in res.flag + + +@pytest.mark.parametrize('solver_name', + ['brentq', 'brenth', 'bisect', 'ridder', 'toms748']) +@pytest.mark.parametrize('rs_interface', [True, False]) +def test_function_calls(solver_name, rs_interface): + # There do not appear to be checks that the bracketing solvers report the + # correct number of function evaluations. Check that this is the case. + solver = ((lambda f, a, b, **kwargs: root_scalar(f, bracket=(a, b))) + if rs_interface else getattr(zeros, solver_name)) + + def f(x): + f.calls += 1 + return x**2 - 1 + f.calls = 0 + + res = solver(f, 0, 10, full_output=True) + + if rs_interface: + assert res.function_calls == f.calls + else: + assert res[1].function_calls == f.calls + + +def test_gh_14486_converged_false(): + """Test that zero slope with secant method results in a converged=False""" + def lhs(x): + return x * np.exp(-x*x) - 0.07 + + with pytest.warns(RuntimeWarning, match='Tolerance of'): + res = root_scalar(lhs, method='secant', x0=-0.15, x1=1.0) + assert not res.converged + assert res.flag == 'convergence error' + + with pytest.warns(RuntimeWarning, match='Tolerance of'): + res = newton(lhs, x0=-0.15, x1=1.0, disp=False, full_output=True)[1] + assert not res.converged + assert res.flag == 'convergence error' + + +@pytest.mark.parametrize('solver_name', + ['brentq', 'brenth', 'bisect', 'ridder', 'toms748']) +@pytest.mark.parametrize('rs_interface', [True, False]) +def test_gh5584(solver_name, rs_interface): + # gh-5584 reported that an underflow can cause sign checks in the algorithm + # to fail. Check that this is resolved. + solver = ((lambda f, a, b, **kwargs: root_scalar(f, bracket=(a, b))) + if rs_interface else getattr(zeros, solver_name)) + + def f(x): + return 1e-200*x + + # Report failure when signs are the same + with pytest.raises(ValueError, match='...must have different signs'): + solver(f, -0.5, -0.4, full_output=True) + + # Solve successfully when signs are different + res = solver(f, -0.5, 0.4, full_output=True) + res = res if rs_interface else res[1] + assert res.converged + assert_allclose(res.root, 0, atol=1e-8) + + # Solve successfully when one side is negative zero + res = solver(f, -0.5, float('-0.0'), full_output=True) + res = res if rs_interface else res[1] + assert res.converged + assert_allclose(res.root, 0, atol=1e-8) + + +def test_gh13407(): + # gh-13407 reported that the message produced by `scipy.optimize.toms748` + # when `rtol < eps` is incorrect, and also that toms748 is unusual in + # accepting `rtol` as low as eps while other solvers raise at 4*eps. Check + # that the error message has been corrected and that `rtol=eps` can produce + # a lower function value than `rtol=4*eps`. 
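+ # (f below is Wallis' classic cubic, x**3 - 2*x - 5, whose single real + # root is near x = 2.0945514815; a tighter rtol lets toms748 stop closer + # to the root, which is what the f1 < f4 comparison below verifies.)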
+ def f(x): + return x**3 - 2*x - 5 + + xtol = 1e-300 + eps = np.finfo(float).eps + x1 = zeros.toms748(f, 1e-10, 1e10, xtol=xtol, rtol=1*eps) + f1 = f(x1) + x4 = zeros.toms748(f, 1e-10, 1e10, xtol=xtol, rtol=4*eps) + f4 = f(x4) + assert f1 < f4 + + # using old-style syntax to get exactly the same message + message = fr"rtol too small \({eps/2:g} < {eps:g}\)" + with pytest.raises(ValueError, match=message): + zeros.toms748(f, 1e-10, 1e10, xtol=xtol, rtol=eps/2) + + +def test_newton_complex_gh10103(): + # gh-10103 reported a problem when `newton` is passed a Python complex x0, + # no `fprime` (secant method), and no `x1` (`x1` must be constructed). + # Check that this is resolved. + def f(z): + return z - 1 + res = newton(f, 1+1j) + assert_allclose(res, 1, atol=1e-12) + + res = root_scalar(f, x0=1+1j, x1=2+1.5j, method='secant') + assert_allclose(res.root, 1, atol=1e-12) + + +@pytest.mark.parametrize('method', all_methods) +def test_maxiter_int_check_gh10236(method): + # gh-10236 reported that the error message when `maxiter` is not an integer + # was difficult to interpret. Check that this was resolved (by gh-10907). + message = "'float' object cannot be interpreted as an integer" + with pytest.raises(TypeError, match=message): + method(f1, 0.0, 1.0, maxiter=72.45)