diff --git a/ckpts/universal/global_step40/zero/12.input_layernorm.weight/fp32.pt b/ckpts/universal/global_step40/zero/12.input_layernorm.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..d68392e10923762f310a305d2c54ac98e6200053
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/12.input_layernorm.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8523dc545e41fb0b6435f525322994f037664ec33efa489269a66de79422e5c4
+size 9293
diff --git a/ckpts/universal/global_step40/zero/20.attention.dense.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/20.attention.dense.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..07a1efaf99c7d8db2a3a75e8addb2849f4a2d608
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/20.attention.dense.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:614aa7f29fa06123715521ffaa2f43b6826e36acf19d40084407078c7829ac07
+size 16778396
diff --git a/ckpts/universal/global_step40/zero/20.attention.dense.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/20.attention.dense.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..0edc4f49572c3bb892eacc37bcad63105ac0dc87
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/20.attention.dense.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:658c7a26f3763b13aecbef97dd2036f89fd50678f5de2a2dbab2fae752216530
+size 16778411
diff --git a/ckpts/universal/global_step40/zero/20.attention.dense.weight/fp32.pt b/ckpts/universal/global_step40/zero/20.attention.dense.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..055ba39112037c4f577915147b556116ad629816
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/20.attention.dense.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5f4c31ad044e554f78ebbff7c1d7485ddab94dcd2f67e220d2c88116c0566e64
+size 16778317
diff --git a/ckpts/universal/global_step40/zero/5.mlp.dense_4h_to_h.weight/fp32.pt b/ckpts/universal/global_step40/zero/5.mlp.dense_4h_to_h.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..07e3a376d9db6477d3907fb8e43d5a3835ed3462
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/5.mlp.dense_4h_to_h.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:df575a3954914370f0a4b80e38c072cf179dc97ea665d8a62dbb38b15f5c82a5
+size 33555533
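The five checkpoint entries above are Git LFS pointer stubs rather than the tensors themselves: three text lines (`version`, `oid`, `size`) that LFS swaps for the real blob at checkout. A minimal sketch of reading one such stub (`parse_lfs_pointer` is a hypothetical helper, not part of this diff):

```python
# Sketch: reading one of the LFS pointer stubs added above.
# parse_lfs_pointer is a hypothetical helper, not part of this diff.
def parse_lfs_pointer(path):
    fields = {}
    with open(path) as fh:
        for line in fh:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    # e.g. {'version': 'https://git-lfs.github.com/spec/v1',
    #       'oid': 'sha256:<hash>', 'size': '<bytes>'}
    return fields
```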
diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..61bc729449f97438905911f6c380872bfd9dfad4
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_bracket.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_bracket.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1b74142f20f50bb4433305c33ad0ce1e6460a5c6
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_bracket.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_chandrupatla.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_chandrupatla.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f55b21c51684249b1c172f71e682af6a60c9cb0e
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_chandrupatla.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_constraints.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_constraints.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e466a77d1f4aebce2ecb84fb40593087d630ffc9
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_constraints.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_differentiable_functions.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_differentiable_functions.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c4de5809f1590e5e09ba989f9664670d9184f521
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_differentiable_functions.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_differentialevolution.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_differentialevolution.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8a6571ebf5188a3346c553feaa4d5a29774f0db9
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_differentialevolution.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_differentiate.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_differentiate.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..535c6d336c6a2570f5b83be3d98b35ae72d5b5a1
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_differentiate.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_direct_py.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_direct_py.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..16ea51f8bb21592cfc2935ed23920dab67764232
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_direct_py.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_dual_annealing.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_dual_annealing.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0c4196ddb825b529ecd0912d921f4978cc5f3483
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_dual_annealing.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_hessian_update_strategy.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_hessian_update_strategy.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..06f764656bed243a31b23dbeda65a45bc711a65b
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_hessian_update_strategy.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_isotonic.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_isotonic.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..61f76d801993103b058b5b3294135b325a6b5751
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_isotonic.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_linesearch.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_linesearch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..080e9410092155905399aa50622a85c29328ba3f
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_linesearch.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_linprog.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_linprog.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..42c9d6950d6231dbdc25d17afdc1e80d469dd4f7
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_linprog.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_linprog_highs.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_linprog_highs.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..64c661c75b87cbcd807545f85caf358f531b6664
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_linprog_highs.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_linprog_ip.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_linprog_ip.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..be8eadd172653e7a7c2f7bf295d16fe3aeda2b79
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_linprog_ip.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_minpack_py.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_minpack_py.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e0f94c4670d52dc44f32b3555a264ae7f8d1dc4e
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_minpack_py.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_nonlin.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_nonlin.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3afbbf66a876ccc0f12b7f21a17cda30a7d0104f
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_nonlin.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_qap.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_qap.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..868f7a234e92b9637e5cfd43822a3785456f06ed
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_qap.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_remove_redundancy.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_remove_redundancy.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7d72e694b4dc1f9b6e475122708f691c7033c64e
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_remove_redundancy.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_root_scalar.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_root_scalar.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..79947111108b86b2244f52422d9bfb37a2bd09b9
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_root_scalar.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_shgo.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_shgo.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..565eed82ad67966eac72f4d2508753f614344541
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_shgo.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_spectral.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_spectral.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c4f9ce23a4e4fcd066526c607fbcbb110b5f7cf5
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_spectral.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_trustregion.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_trustregion.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7e7a4fcec7461c9539aa3a929dc627060ceafef7
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_trustregion.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_trustregion_krylov.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_trustregion_krylov.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e71802db24dcf50a41de9c55eaad973f26ac156e
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_trustregion_krylov.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_trustregion_ncg.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_trustregion_ncg.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..835842bd02e003324e3f4dd4603887f11a691694
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_trustregion_ncg.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/cobyla.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/cobyla.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a7945ca39eaba07f181be3d2d312c1f4bad32f84
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/cobyla.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/linesearch.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/linesearch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b4dd248df883f372e81036a77b7c94156fbfb292
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/linesearch.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/minpack.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/minpack.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..33607db5cfba796a836859a52b2e555e20653d24
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/minpack.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/nonlin.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/nonlin.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7038fcfa4eb0ce15882d4b08d563d43aa6307955
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/nonlin.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/optimize.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/optimize.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8a14ccf5ddbe93fe05236330b8bce44c66bfa5c9
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/optimize.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/slsqp.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/slsqp.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a4bed71f4296002b9c70931679740c0425cb6a6f
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/slsqp.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/tnc.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/tnc.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1fc4eab08e8f4c025d8e495ceeefd6dadc14274c
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/tnc.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/tests/__init__.py b/venv/lib/python3.10/site-packages/scipy/optimize/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__numdiff.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__numdiff.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..04ad509bb916e187f9ae072dae8268a8d453e572
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__numdiff.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_chandrupatla.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_chandrupatla.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c7f47cae02699247f62f6c3730f113c4977423ec
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_chandrupatla.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_cython_optimize.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_cython_optimize.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bf6930b7fb28e796a45c2f7b88624ad2cd5591b1
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_cython_optimize.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_differentiable_functions.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_differentiable_functions.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..096871db54d9e960c774a2355300622caa497282
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_differentiable_functions.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_isotonic_regression.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_isotonic_regression.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f8ff0fdf2127a49d559c469b82ec05cfaeb4cb6e
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_isotonic_regression.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_linear_assignment.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_linear_assignment.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..582e8304d849c446d59830a7019aa17440969a48
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_linear_assignment.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_lsq_common.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_lsq_common.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..eb593757cda9aba8ac023715b867ddb27baf785c
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_lsq_common.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_minimize_constrained.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_minimize_constrained.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1fd788422b8d37ce984a1e716444b3e8a288ecac
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_minimize_constrained.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_optimize.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_optimize.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e4bf2effc0e03350af8fd374c8143f89b505bac1
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_optimize.cpython-310.pyc differ
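The remaining entries add three scipy test modules verbatim. The first, `test__dual_annealing.py`, exercises `scipy.optimize.dual_annealing`; for orientation, a minimal call through the public API against the same Rastrigin objective the tests use (this mirrors `test_from_docstring` below):

```python
# Minimal use of the optimizer under test: global minimization of a
# Rastrigin objective over box bounds (mirrors test_from_docstring below).
import numpy as np
from scipy.optimize import dual_annealing


def rastrigin(x):
    return np.sum(x * x - 10 * np.cos(2 * np.pi * x)) + 10 * np.size(x)


ret = dual_annealing(rastrigin, bounds=[(-5.12, 5.12)] * 2, seed=1234)
print(ret.x, ret.fun)  # x near the origin, fun close to 0
```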
diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/tests/test__dual_annealing.py b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test__dual_annealing.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a44333d0a8199195a2b35af3f4badf615712b2a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test__dual_annealing.py
@@ -0,0 +1,379 @@
+# Dual annealing unit tests implementation.
+# Copyright (c) 2018 Sylvain Gubian,
+# Yang Xiang
+# Author: Sylvain Gubian, PMP S.A.
+"""
+Unit tests for the dual annealing global optimizer
+"""
+from scipy.optimize import dual_annealing, Bounds
+
+from scipy.optimize._dual_annealing import EnergyState
+from scipy.optimize._dual_annealing import LocalSearchWrapper
+from scipy.optimize._dual_annealing import ObjectiveFunWrapper
+from scipy.optimize._dual_annealing import StrategyChain
+from scipy.optimize._dual_annealing import VisitingDistribution
+from scipy.optimize import rosen, rosen_der
+import pytest
+import numpy as np
+from numpy.testing import assert_equal, assert_allclose, assert_array_less
+from pytest import raises as assert_raises
+from scipy._lib._util import check_random_state
+
+
+class TestDualAnnealing:
+
+    def setup_method(self):
+        # A function that always returns infinity for initialization tests
+        self.weirdfunc = lambda x: np.inf
+        # 2-D bounds for testing function
+        self.ld_bounds = [(-5.12, 5.12)] * 2
+        # 8-D bounds for testing function (ld_bounds repeated 4 times)
+        self.hd_bounds = self.ld_bounds * 4
+        # Number of values to be generated for testing visit function
+        self.nbtestvalues = 5000
+        self.high_temperature = 5230
+        self.low_temperature = 0.1
+        self.qv = 2.62
+        self.seed = 1234
+        self.rs = check_random_state(self.seed)
+        self.nb_fun_call = 0
+        self.ngev = 0
+
+    def callback(self, x, f, context):
+        # For testing the callback mechanism. Should stop for f <= 1.0 as
+        # the callback function returns True
+        if f <= 1.0:
+            return True
+
+    def func(self, x, args=()):
+        # Using Rastrigin function for performing tests
+        if args:
+            shift = args
+        else:
+            shift = 0
+        y = np.sum((x - shift) ** 2 - 10 * np.cos(2 * np.pi * (
+            x - shift))) + 10 * np.size(x) + shift
+        self.nb_fun_call += 1
+        return y
+
+    def rosen_der_wrapper(self, x, args=()):
+        self.ngev += 1
+        return rosen_der(x, *args)
+
+    # FIXME: there are some discontinuities in behaviour as a function of `qv`,
+    # this needs investigating - see gh-12384
+    @pytest.mark.parametrize('qv', [1.1, 1.41, 2, 2.62, 2.9])
+    def test_visiting_stepping(self, qv):
+        lu = list(zip(*self.ld_bounds))
+        lower = np.array(lu[0])
+        upper = np.array(lu[1])
+        dim = lower.size
+        vd = VisitingDistribution(lower, upper, qv, self.rs)
+        values = np.zeros(dim)
+        x_step_low = vd.visiting(values, 0, self.high_temperature)
+        # Make sure that only the first component is changed
+        assert_equal(np.not_equal(x_step_low, 0), True)
+        values = np.zeros(dim)
+        x_step_high = vd.visiting(values, dim, self.high_temperature)
+        # Make sure that a component other than the one at dim has changed
+        assert_equal(np.not_equal(x_step_high[0], 0), True)
+
+    @pytest.mark.parametrize('qv', [2.25, 2.62, 2.9])
+    def test_visiting_dist_high_temperature(self, qv):
+        lu = list(zip(*self.ld_bounds))
+        lower = np.array(lu[0])
+        upper = np.array(lu[1])
+        vd = VisitingDistribution(lower, upper, qv, self.rs)
+        # values = np.zeros(self.nbtestvalues)
+        # for i in np.arange(self.nbtestvalues):
+        #     values[i] = vd.visit_fn(self.high_temperature)
+        values = vd.visit_fn(self.high_temperature, self.nbtestvalues)
+
+        # The visiting distribution is a distorted version of the
+        # Cauchy-Lorentz distribution, and has no 1st or higher moments
+        # (no mean defined, no variance defined).
+        # Check that big tail values are generated
+        assert_array_less(np.min(values), 1e-10)
+        assert_array_less(1e+10, np.max(values))
+
+    def test_reset(self):
+        owf = ObjectiveFunWrapper(self.weirdfunc)
+        lu = list(zip(*self.ld_bounds))
+        lower = np.array(lu[0])
+        upper = np.array(lu[1])
+        es = EnergyState(lower, upper)
+        assert_raises(ValueError, es.reset, owf, check_random_state(None))
+
+    def test_low_dim(self):
+        ret = dual_annealing(
+            self.func, self.ld_bounds, seed=self.seed)
+        assert_allclose(ret.fun, 0., atol=1e-12)
+        assert ret.success
+
+    def test_high_dim(self):
+        ret = dual_annealing(self.func, self.hd_bounds, seed=self.seed)
+        assert_allclose(ret.fun, 0., atol=1e-12)
+        assert ret.success
+
+    def test_low_dim_no_ls(self):
+        ret = dual_annealing(self.func, self.ld_bounds,
+                             no_local_search=True, seed=self.seed)
+        assert_allclose(ret.fun, 0., atol=1e-4)
+
+    def test_high_dim_no_ls(self):
+        ret = dual_annealing(self.func, self.hd_bounds,
+                             no_local_search=True, seed=self.seed)
+        assert_allclose(ret.fun, 0., atol=1e-4)
+
+    def test_nb_fun_call(self):
+        ret = dual_annealing(self.func, self.ld_bounds, seed=self.seed)
+        assert_equal(self.nb_fun_call, ret.nfev)
+
+    def test_nb_fun_call_no_ls(self):
+        ret = dual_annealing(self.func, self.ld_bounds,
+                             no_local_search=True, seed=self.seed)
+        assert_equal(self.nb_fun_call, ret.nfev)
+
+    def test_max_reinit(self):
+        assert_raises(ValueError, dual_annealing, self.weirdfunc,
+                      self.ld_bounds)
+
+    def test_reproduce(self):
+        res1 = dual_annealing(self.func, self.ld_bounds, seed=self.seed)
+        res2 = dual_annealing(self.func, self.ld_bounds, seed=self.seed)
+        res3 = dual_annealing(self.func, self.ld_bounds, seed=self.seed)
+        # If we have reproducible results, the x components found have to
+        # be exactly the same, which is not the case with no seeding
+        assert_equal(res1.x, res2.x)
+        assert_equal(res1.x, res3.x)
+
+    def test_rand_gen(self):
+        # check that np.random.Generator can be used (numpy >= 1.17)
+        # obtain a np.random.Generator object
+        rng = np.random.default_rng(1)
+
+        res1 = dual_annealing(self.func, self.ld_bounds, seed=rng)
+        # seed again
+        rng = np.random.default_rng(1)
+        res2 = dual_annealing(self.func, self.ld_bounds, seed=rng)
+        # If we have reproducible results, the x components found have to
+        # be exactly the same, which is not the case with no seeding
+        assert_equal(res1.x, res2.x)
+
+    def test_bounds_integrity(self):
+        wrong_bounds = [(-5.12, 5.12), (1, 0), (5.12, 5.12)]
+        assert_raises(ValueError, dual_annealing, self.func,
+                      wrong_bounds)
+
+    def test_bound_validity(self):
+        invalid_bounds = [(-5, 5), (-np.inf, 0), (-5, 5)]
+        assert_raises(ValueError, dual_annealing, self.func,
+                      invalid_bounds)
+        invalid_bounds = [(-5, 5), (0, np.inf), (-5, 5)]
+        assert_raises(ValueError, dual_annealing, self.func,
+                      invalid_bounds)
+        invalid_bounds = [(-5, 5), (0, np.nan), (-5, 5)]
+        assert_raises(ValueError, dual_annealing, self.func,
+                      invalid_bounds)
+
+    def test_deprecated_local_search_options_bounds(self):
+        def func(x):
+            return np.sum((x - 5) * (x - 1))
+        bounds = list(zip([-6, -5], [6, 5]))
+        # Test bounds can be passed (see gh-10831)
+
+        with pytest.warns(RuntimeWarning, match=r"Method CG cannot handle "):
+            dual_annealing(
+                func,
+                bounds=bounds,
+                minimizer_kwargs={"method": "CG", "bounds": bounds})
+
+    def test_minimizer_kwargs_bounds(self):
+        def func(x):
+            return np.sum((x - 5) * (x - 1))
+        bounds = list(zip([-6, -5], [6, 5]))
+        # Test bounds can be passed (see gh-10831)
+        dual_annealing(
+            func,
+            bounds=bounds,
+            minimizer_kwargs={"method": "SLSQP", "bounds": bounds})
+
+        with pytest.warns(RuntimeWarning, match=r"Method CG cannot handle "):
+            dual_annealing(
+                func,
+                bounds=bounds,
+                minimizer_kwargs={"method": "CG", "bounds": bounds})
+
+    def test_max_fun_ls(self):
+        ret = dual_annealing(self.func, self.ld_bounds, maxfun=100,
+                             seed=self.seed)
+
+        ls_max_iter = min(max(
+            len(self.ld_bounds) * LocalSearchWrapper.LS_MAXITER_RATIO,
+            LocalSearchWrapper.LS_MAXITER_MIN),
+            LocalSearchWrapper.LS_MAXITER_MAX)
+        assert ret.nfev <= 100 + ls_max_iter
+        assert not ret.success
+
+    def test_max_fun_no_ls(self):
+        ret = dual_annealing(self.func, self.ld_bounds,
+                             no_local_search=True, maxfun=500, seed=self.seed)
+        assert ret.nfev <= 500
+        assert not ret.success
+
+    def test_maxiter(self):
+        ret = dual_annealing(self.func, self.ld_bounds, maxiter=700,
+                             seed=self.seed)
+        assert ret.nit <= 700
+
+    # Testing that args are passed correctly for dual_annealing
+    def test_fun_args_ls(self):
+        ret = dual_annealing(self.func, self.ld_bounds,
+                             args=((3.14159,)), seed=self.seed)
+        assert_allclose(ret.fun, 3.14159, atol=1e-6)
+
+    # Testing that args are passed correctly for pure simulated annealing
+    def test_fun_args_no_ls(self):
+        ret = dual_annealing(self.func, self.ld_bounds,
+                             args=((3.14159, )), no_local_search=True,
+                             seed=self.seed)
+        assert_allclose(ret.fun, 3.14159, atol=1e-4)
+
+    def test_callback_stop(self):
+        # Testing that the callback makes the algorithm stop for
+        # fun value <= 1.0 (see callback method)
+        ret = dual_annealing(self.func, self.ld_bounds,
+                             callback=self.callback, seed=self.seed)
+        assert ret.fun <= 1.0
+        assert 'stop early' in ret.message[0]
+        assert not ret.success
+
+    @pytest.mark.parametrize('method, atol', [
+        ('Nelder-Mead', 2e-5),
+        ('COBYLA', 1e-5),
+        ('Powell', 1e-8),
+        ('CG', 1e-8),
+        ('BFGS', 1e-8),
+        ('TNC', 1e-8),
+        ('SLSQP', 2e-7),
+    ])
+    def test_multi_ls_minimizer(self, method, atol):
+        ret = dual_annealing(self.func, self.ld_bounds,
+                             minimizer_kwargs=dict(method=method),
+                             seed=self.seed)
+        assert_allclose(ret.fun, 0., atol=atol)
+
+    def test_wrong_restart_temp(self):
+        assert_raises(ValueError, dual_annealing, self.func,
+                      self.ld_bounds, restart_temp_ratio=1)
+        assert_raises(ValueError, dual_annealing, self.func,
+                      self.ld_bounds, restart_temp_ratio=0)
+
+    def test_gradient_gnev(self):
+        minimizer_opts = {
+            'jac': self.rosen_der_wrapper,
+        }
+        ret = dual_annealing(rosen, self.ld_bounds,
+                             minimizer_kwargs=minimizer_opts,
+                             seed=self.seed)
+        assert ret.njev == self.ngev
+
+    def test_from_docstring(self):
+        def func(x):
+            return np.sum(x * x - 10 * np.cos(2 * np.pi * x)) + 10 * np.size(x)
+        lw = [-5.12] * 10
+        up = [5.12] * 10
+        ret = dual_annealing(func, bounds=list(zip(lw, up)), seed=1234)
+        assert_allclose(ret.x,
+                        [-4.26437714e-09, -3.91699361e-09, -1.86149218e-09,
+                         -3.97165720e-09, -6.29151648e-09, -6.53145322e-09,
+                         -3.93616815e-09, -6.55623025e-09, -6.05775280e-09,
+                         -5.00668935e-09], atol=4e-8)
+        assert_allclose(ret.fun, 0.000000, atol=5e-13)
+
+    @pytest.mark.parametrize('new_e, temp_step, accepted, accept_rate', [
+        (0, 100, 1000, 1.0097587941791923),
+        (0, 2, 1000, 1.2599210498948732),
+        (10, 100, 878, 0.8786035869128718),
+        (10, 60, 695, 0.6812920690579612),
+        (2, 100, 990, 0.9897404249173424),
+    ])
+    def test_accept_reject_probabilistic(
+            self, new_e, temp_step, accepted, accept_rate):
+        # Test accepts unconditionally with e < current_energy and
+        # probabilistically with e > current_energy
+
+        rs = check_random_state(123)
+
+        count_accepted = 0
+        iterations = 1000
+
+        accept_param = -5
+        current_energy = 1
+        for _ in range(iterations):
+            energy_state = EnergyState(lower=None, upper=None)
+            # Set energy state with current_energy, any location.
+            energy_state.update_current(current_energy, [0])
+
+            chain = StrategyChain(
+                accept_param, None, None, None, rs, energy_state)
+            # Normally this is set in run()
+            chain.temperature_step = temp_step
+
+            # Check if the update is accepted.
+            chain.accept_reject(j=1, e=new_e, x_visit=[2])
+            if energy_state.current_energy == new_e:
+                count_accepted += 1
+
+        assert count_accepted == accepted
+
+        # Check the accept rate
+        pqv = 1 - (1 - accept_param) * (new_e - current_energy) / temp_step
+        rate = 0 if pqv <= 0 else np.exp(np.log(pqv) / (1 - accept_param))
+
+        assert_allclose(rate, accept_rate)
+
+    def test_bounds_class(self):
+        # test that the result does not depend on the bounds type
+        def func(x):
+            f = np.sum(x * x - 10 * np.cos(2 * np.pi * x)) + 10 * np.size(x)
+            return f
+        lw = [-5.12] * 5
+        up = [5.12] * 5
+
+        # Unbounded global minimum is all zeros. Most bounds below will force
+        # a decision variable away from the unbounded minimum and be active
+        # at the solution.
+        up[0] = -2.0
+        up[1] = -1.0
+        lw[3] = 1.0
+        lw[4] = 2.0
+
+        # run optimizations
+        bounds = Bounds(lw, up)
+        ret_bounds_class = dual_annealing(func, bounds=bounds, seed=1234)
+
+        bounds_old = list(zip(lw, up))
+        ret_bounds_list = dual_annealing(func, bounds=bounds_old, seed=1234)
+
+        # test that found minima, function evaluations and iterations match
+        assert_allclose(ret_bounds_class.x, ret_bounds_list.x, atol=1e-8)
+        assert_allclose(ret_bounds_class.x, np.arange(-2, 3), atol=1e-7)
+        assert_allclose(ret_bounds_list.fun, ret_bounds_class.fun, atol=1e-9)
+        assert ret_bounds_list.nfev == ret_bounds_class.nfev
+
+    def test_callable_jac_with_args_gh11052(self):
+        # dual_annealing used to fail when `jac` was callable and `args` were
+        # used; check that this is resolved. Example is from gh-11052.
+        rng = np.random.default_rng(94253637693657847462)
+        def f(x, power):
+            return np.sum(np.exp(x ** power))
+
+        def jac(x, power):
+            return np.exp(x ** power) * power * x ** (power - 1)
+
+        res1 = dual_annealing(f, args=(2, ), bounds=[[0, 1], [0, 1]], seed=rng,
+                              minimizer_kwargs=dict(method='L-BFGS-B'))
+        res2 = dual_annealing(f, args=(2, ), bounds=[[0, 1], [0, 1]], seed=rng,
+                              minimizer_kwargs=dict(method='L-BFGS-B',
+                                                    jac=jac))
+        assert_allclose(res1.fun, res2.fun, rtol=1e-6)
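Before the next file, one step in `test_accept_reject_probabilistic` above is worth spelling out: dual annealing accepts an uphill move of size Δe with the generalized Metropolis probability `pqv ** (1 / (1 - qa))`, where `pqv = 1 - (1 - qa) * Δe / temp_step` and moves with `pqv <= 0` are always rejected. The expected rates in the parametrization follow directly; a quick check of one row:

```python
# Reproduce the expected accept rate for the (10, 100, 878, 0.8786...) row:
# qa (accept_param) = -5 and current_energy = 1, so delta_e = 10 - 1 = 9.
import numpy as np

qa = -5
pqv = 1 - (1 - qa) * (10 - 1) / 100      # 1 - 6 * 9 / 100 = 0.46
rate = np.exp(np.log(pqv) / (1 - qa))    # 0.46 ** (1 / 6)
print(rate)  # 0.8786035869128718; empirically 878 of 1000 trials accepted
```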
+ """ + lp = _LPProblem( + c=1, + A_ub=[[1]], + b_ub=[1], + A_eq=[[1]], + b_eq=[1], + bounds=(-np.inf, np.inf) + ) + lp_copy = deepcopy(lp) + + _clean_inputs(lp) + + assert_(lp.c == lp_copy.c, "c modified by _clean_inputs") + assert_(lp.A_ub == lp_copy.A_ub, "A_ub modified by _clean_inputs") + assert_(lp.b_ub == lp_copy.b_ub, "b_ub modified by _clean_inputs") + assert_(lp.A_eq == lp_copy.A_eq, "A_eq modified by _clean_inputs") + assert_(lp.b_eq == lp_copy.b_eq, "b_eq modified by _clean_inputs") + assert_(lp.bounds == lp_copy.bounds, "bounds modified by _clean_inputs") + + +def test_aliasing2(): + """ + Similar purpose as `test_aliasing` above. + """ + lp = _LPProblem( + c=np.array([1, 1]), + A_ub=np.array([[1, 1], [2, 2]]), + b_ub=np.array([[1], [1]]), + A_eq=np.array([[1, 1]]), + b_eq=np.array([1]), + bounds=[(-np.inf, np.inf), (None, 1)] + ) + lp_copy = deepcopy(lp) + + _clean_inputs(lp) + + assert_allclose(lp.c, lp_copy.c, err_msg="c modified by _clean_inputs") + assert_allclose(lp.A_ub, lp_copy.A_ub, err_msg="A_ub modified by _clean_inputs") + assert_allclose(lp.b_ub, lp_copy.b_ub, err_msg="b_ub modified by _clean_inputs") + assert_allclose(lp.A_eq, lp_copy.A_eq, err_msg="A_eq modified by _clean_inputs") + assert_allclose(lp.b_eq, lp_copy.b_eq, err_msg="b_eq modified by _clean_inputs") + assert_(lp.bounds == lp_copy.bounds, "bounds modified by _clean_inputs") + + +def test_missing_inputs(): + c = [1, 2] + A_ub = np.array([[1, 1], [2, 2]]) + b_ub = np.array([1, 1]) + A_eq = np.array([[1, 1], [2, 2]]) + b_eq = np.array([1, 1]) + + assert_raises(TypeError, _clean_inputs) + assert_raises(TypeError, _clean_inputs, _LPProblem(c=None)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_ub=A_ub)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_ub=A_ub, b_ub=None)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, b_ub=b_ub)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_ub=None, b_ub=b_ub)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_eq=A_eq)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_eq=A_eq, b_eq=None)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, b_eq=b_eq)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_eq=None, b_eq=b_eq)) + + +def test_too_many_dimensions(): + cb = [1, 2, 3, 4] + A = np.random.rand(4, 4) + bad2D = [[1, 2], [3, 4]] + bad3D = np.random.rand(4, 4, 4) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=bad2D, A_ub=A, b_ub=cb)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=cb, A_ub=bad3D, b_ub=cb)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=cb, A_ub=A, b_ub=bad2D)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=cb, A_eq=bad3D, b_eq=cb)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=cb, A_eq=A, b_eq=bad2D)) + + +def test_too_few_dimensions(): + bad = np.random.rand(4, 4).ravel() + cb = np.random.rand(4) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=cb, A_ub=bad, b_ub=cb)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=cb, A_eq=bad, b_eq=cb)) + + +def test_inconsistent_dimensions(): + m = 2 + n = 4 + c = [1, 2, 3, 4] + + Agood = np.random.rand(m, n) + Abad = np.random.rand(m, n + 1) + bgood = np.random.rand(m) + bbad = np.random.rand(m + 1) + boundsbad = [(0, 1)] * (n + 1) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_ub=Abad, b_ub=bgood)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_ub=Agood, b_ub=bbad)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_eq=Abad, 
b_eq=bgood)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_eq=Agood, b_eq=bbad)) + assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, bounds=boundsbad)) + with np.testing.suppress_warnings() as sup: + sup.filter(VisibleDeprecationWarning, "Creating an ndarray from ragged") + assert_raises(ValueError, _clean_inputs, + _LPProblem(c=c, bounds=[[1, 2], [2, 3], [3, 4], [4, 5, 6]])) + + +def test_type_errors(): + lp = _LPProblem( + c=[1, 2], + A_ub=np.array([[1, 1], [2, 2]]), + b_ub=np.array([1, 1]), + A_eq=np.array([[1, 1], [2, 2]]), + b_eq=np.array([1, 1]), + bounds=[(0, 1)] + ) + bad = "hello" + + assert_raises(TypeError, _clean_inputs, lp._replace(c=bad)) + assert_raises(TypeError, _clean_inputs, lp._replace(A_ub=bad)) + assert_raises(TypeError, _clean_inputs, lp._replace(b_ub=bad)) + assert_raises(TypeError, _clean_inputs, lp._replace(A_eq=bad)) + assert_raises(TypeError, _clean_inputs, lp._replace(b_eq=bad)) + + assert_raises(ValueError, _clean_inputs, lp._replace(bounds=bad)) + assert_raises(ValueError, _clean_inputs, lp._replace(bounds="hi")) + assert_raises(ValueError, _clean_inputs, lp._replace(bounds=["hi"])) + assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[("hi")])) + assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[(1, "")])) + assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[(1, 2), (1, "")])) + assert_raises(TypeError, _clean_inputs, + lp._replace(bounds=[(1, date(2020, 2, 29))])) + assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[[[1, 2]]])) + + +def test_non_finite_errors(): + lp = _LPProblem( + c=[1, 2], + A_ub=np.array([[1, 1], [2, 2]]), + b_ub=np.array([1, 1]), + A_eq=np.array([[1, 1], [2, 2]]), + b_eq=np.array([1, 1]), + bounds=[(0, 1)] + ) + assert_raises(ValueError, _clean_inputs, lp._replace(c=[0, None])) + assert_raises(ValueError, _clean_inputs, lp._replace(c=[np.inf, 0])) + assert_raises(ValueError, _clean_inputs, lp._replace(c=[0, -np.inf])) + assert_raises(ValueError, _clean_inputs, lp._replace(c=[np.nan, 0])) + + assert_raises(ValueError, _clean_inputs, lp._replace(A_ub=[[1, 2], [None, 1]])) + assert_raises(ValueError, _clean_inputs, lp._replace(b_ub=[np.inf, 1])) + assert_raises(ValueError, _clean_inputs, lp._replace(A_eq=[[1, 2], [1, -np.inf]])) + assert_raises(ValueError, _clean_inputs, lp._replace(b_eq=[1, np.nan])) + + +def test__clean_inputs1(): + lp = _LPProblem( + c=[1, 2], + A_ub=[[1, 1], [2, 2]], + b_ub=[1, 1], + A_eq=[[1, 1], [2, 2]], + b_eq=[1, 1], + bounds=None + ) + + lp_cleaned = _clean_inputs(lp) + + assert_allclose(lp_cleaned.c, np.array(lp.c)) + assert_allclose(lp_cleaned.A_ub, np.array(lp.A_ub)) + assert_allclose(lp_cleaned.b_ub, np.array(lp.b_ub)) + assert_allclose(lp_cleaned.A_eq, np.array(lp.A_eq)) + assert_allclose(lp_cleaned.b_eq, np.array(lp.b_eq)) + assert_equal(lp_cleaned.bounds, [(0, np.inf)] * 2) + + assert_(lp_cleaned.c.shape == (2,), "") + assert_(lp_cleaned.A_ub.shape == (2, 2), "") + assert_(lp_cleaned.b_ub.shape == (2,), "") + assert_(lp_cleaned.A_eq.shape == (2, 2), "") + assert_(lp_cleaned.b_eq.shape == (2,), "") + + +def test__clean_inputs2(): + lp = _LPProblem( + c=1, + A_ub=[[1]], + b_ub=1, + A_eq=[[1]], + b_eq=1, + bounds=(0, 1) + ) + + lp_cleaned = _clean_inputs(lp) + + assert_allclose(lp_cleaned.c, np.array(lp.c)) + assert_allclose(lp_cleaned.A_ub, np.array(lp.A_ub)) + assert_allclose(lp_cleaned.b_ub, np.array(lp.b_ub)) + assert_allclose(lp_cleaned.A_eq, np.array(lp.A_eq)) + assert_allclose(lp_cleaned.b_eq, np.array(lp.b_eq)) + 
assert_equal(lp_cleaned.bounds, [(0, 1)]) + + assert_(lp_cleaned.c.shape == (1,), "") + assert_(lp_cleaned.A_ub.shape == (1, 1), "") + assert_(lp_cleaned.b_ub.shape == (1,), "") + assert_(lp_cleaned.A_eq.shape == (1, 1), "") + assert_(lp_cleaned.b_eq.shape == (1,), "") + + +def test__clean_inputs3(): + lp = _LPProblem( + c=[[1, 2]], + A_ub=np.random.rand(2, 2), + b_ub=[[1], [2]], + A_eq=np.random.rand(2, 2), + b_eq=[[1], [2]], + bounds=[(0, 1)] + ) + + lp_cleaned = _clean_inputs(lp) + + assert_allclose(lp_cleaned.c, np.array([1, 2])) + assert_allclose(lp_cleaned.b_ub, np.array([1, 2])) + assert_allclose(lp_cleaned.b_eq, np.array([1, 2])) + assert_equal(lp_cleaned.bounds, [(0, 1)] * 2) + + assert_(lp_cleaned.c.shape == (2,), "") + assert_(lp_cleaned.b_ub.shape == (2,), "") + assert_(lp_cleaned.b_eq.shape == (2,), "") + + +def test_bad_bounds(): + lp = _LPProblem(c=[1, 2]) + + assert_raises(ValueError, _clean_inputs, lp._replace(bounds=(1, 2, 2))) + assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[(1, 2, 2)])) + with np.testing.suppress_warnings() as sup: + sup.filter(VisibleDeprecationWarning, "Creating an ndarray from ragged") + assert_raises(ValueError, _clean_inputs, + lp._replace(bounds=[(1, 2), (1, 2, 2)])) + assert_raises(ValueError, _clean_inputs, + lp._replace(bounds=[(1, 2), (1, 2), (1, 2)])) + + lp = _LPProblem(c=[1, 2, 3, 4]) + + assert_raises(ValueError, _clean_inputs, + lp._replace(bounds=[(1, 2, 3, 4), (1, 2, 3, 4)])) + + +def test_good_bounds(): + lp = _LPProblem(c=[1, 2]) + + lp_cleaned = _clean_inputs(lp) # lp.bounds is None by default + assert_equal(lp_cleaned.bounds, [(0, np.inf)] * 2) + + lp_cleaned = _clean_inputs(lp._replace(bounds=[])) + assert_equal(lp_cleaned.bounds, [(0, np.inf)] * 2) + + lp_cleaned = _clean_inputs(lp._replace(bounds=[[]])) + assert_equal(lp_cleaned.bounds, [(0, np.inf)] * 2) + + lp_cleaned = _clean_inputs(lp._replace(bounds=(1, 2))) + assert_equal(lp_cleaned.bounds, [(1, 2)] * 2) + + lp_cleaned = _clean_inputs(lp._replace(bounds=[(1, 2)])) + assert_equal(lp_cleaned.bounds, [(1, 2)] * 2) + + lp_cleaned = _clean_inputs(lp._replace(bounds=[(1, None)])) + assert_equal(lp_cleaned.bounds, [(1, np.inf)] * 2) + + lp_cleaned = _clean_inputs(lp._replace(bounds=[(None, 1)])) + assert_equal(lp_cleaned.bounds, [(-np.inf, 1)] * 2) + + lp_cleaned = _clean_inputs(lp._replace(bounds=[(None, None), (-np.inf, None)])) + assert_equal(lp_cleaned.bounds, [(-np.inf, np.inf)] * 2) + + lp = _LPProblem(c=[1, 2, 3, 4]) + + lp_cleaned = _clean_inputs(lp) # lp.bounds is None by default + assert_equal(lp_cleaned.bounds, [(0, np.inf)] * 4) + + lp_cleaned = _clean_inputs(lp._replace(bounds=(1, 2))) + assert_equal(lp_cleaned.bounds, [(1, 2)] * 4) + + lp_cleaned = _clean_inputs(lp._replace(bounds=[(1, 2)])) + assert_equal(lp_cleaned.bounds, [(1, 2)] * 4) + + lp_cleaned = _clean_inputs(lp._replace(bounds=[(1, None)])) + assert_equal(lp_cleaned.bounds, [(1, np.inf)] * 4) + + lp_cleaned = _clean_inputs(lp._replace(bounds=[(None, 1)])) + assert_equal(lp_cleaned.bounds, [(-np.inf, 1)] * 4) + + lp_cleaned = _clean_inputs(lp._replace(bounds=[(None, None), + (-np.inf, None), + (None, np.inf), + (-np.inf, np.inf)])) + assert_equal(lp_cleaned.bounds, [(-np.inf, np.inf)] * 4) diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/tests/test__numdiff.py b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test__numdiff.py new file mode 100644 index 0000000000000000000000000000000000000000..7f695d94569438233afd1c3b3d2db2e390654f01 --- /dev/null +++ 
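As `test_good_bounds` documents, `_clean_inputs` normalizes every accepted bounds spelling to one `(lb, ub)` pair per variable: a single pair is broadcast across all variables and `None` becomes the appropriate infinity. A small sketch against the same private helpers the tests import (private API, so subject to change between scipy versions):

```python
# Sketch: bounds normalization by scipy's private _clean_inputs helper.
# Mirrors test_good_bounds above; private API, may change between releases.
from scipy.optimize._linprog_util import _clean_inputs, _LPProblem

lp = _LPProblem(c=[1, 2, 3])
cleaned = _clean_inputs(lp._replace(bounds=[(0, None)]))
print(cleaned.bounds)  # one (0, inf) pair per variable, broadcast to all 3
```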
diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/tests/test__numdiff.py b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test__numdiff.py
new file mode 100644
index 0000000000000000000000000000000000000000..7f695d94569438233afd1c3b3d2db2e390654f01
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test__numdiff.py
@@ -0,0 +1,815 @@
+import math
+from itertools import product
+
+import numpy as np
+from numpy.testing import assert_allclose, assert_equal, assert_
+from pytest import raises as assert_raises
+
+from scipy.sparse import csr_matrix, csc_matrix, lil_matrix
+
+from scipy.optimize._numdiff import (
+    _adjust_scheme_to_bounds, approx_derivative, check_derivative,
+    group_columns, _eps_for_method, _compute_absolute_step)
+
+
+def test_group_columns():
+    structure = [
+        [1, 1, 0, 0, 0, 0],
+        [1, 1, 1, 0, 0, 0],
+        [0, 1, 1, 1, 0, 0],
+        [0, 0, 1, 1, 1, 0],
+        [0, 0, 0, 1, 1, 1],
+        [0, 0, 0, 0, 1, 1],
+        [0, 0, 0, 0, 0, 0]
+    ]
+    for transform in [np.asarray, csr_matrix, csc_matrix, lil_matrix]:
+        A = transform(structure)
+        order = np.arange(6)
+        groups_true = np.array([0, 1, 2, 0, 1, 2])
+        groups = group_columns(A, order)
+        assert_equal(groups, groups_true)
+
+        order = [1, 2, 4, 3, 5, 0]
+        groups_true = np.array([2, 0, 1, 2, 0, 1])
+        groups = group_columns(A, order)
+        assert_equal(groups, groups_true)
+
+    # Test repeatability.
+    groups_1 = group_columns(A)
+    groups_2 = group_columns(A)
+    assert_equal(groups_1, groups_2)
+
+
+def test_correct_fp_eps():
+    # check that relative step size is correct for FP size
+    EPS = np.finfo(np.float64).eps
+    relative_step = {"2-point": EPS**0.5,
+                     "3-point": EPS**(1/3),
+                     "cs": EPS**0.5}
+    for method in ['2-point', '3-point', 'cs']:
+        assert_allclose(
+            _eps_for_method(np.float64, np.float64, method),
+            relative_step[method])
+        assert_allclose(
+            _eps_for_method(np.complex128, np.complex128, method),
+            relative_step[method]
+        )
+
+    # check another FP size
+    EPS = np.finfo(np.float32).eps
+    relative_step = {"2-point": EPS**0.5,
+                     "3-point": EPS**(1/3),
+                     "cs": EPS**0.5}
+
+    for method in ['2-point', '3-point', 'cs']:
+        assert_allclose(
+            _eps_for_method(np.float64, np.float32, method),
+            relative_step[method]
+        )
+        assert_allclose(
+            _eps_for_method(np.float32, np.float64, method),
+            relative_step[method]
+        )
+        assert_allclose(
+            _eps_for_method(np.float32, np.float32, method),
+            relative_step[method]
+        )
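+
+# A note on the steps asserted above: for a forward ('2-point') difference
+# the total error is roughly truncation ~ h*|f''|/2 plus roundoff
+# ~ eps*|f|/h; balancing the two gives an optimal relative step h ~ eps**0.5.
+# The central ('3-point') scheme has O(h**2) truncation error, so the same
+# balancing yields h ~ eps**(1/3). The complex-step ('cs') derivative has no
+# subtractive cancellation, so eps**0.5 is already a safe default.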
h_adjusted, one_sided = _adjust_scheme_to_bounds( + x0, h, 2, '2-sided', lb, ub) + assert_allclose(h_adjusted, np.array([1, -1, 1]) * 1e-1) + assert_equal(one_sided, np.array([False, True, True])) + + def test_tight_bounds(self): + lb = np.array([-0.03, -0.03]) + ub = np.array([0.05, 0.05]) + x0 = np.array([0.0, 0.03]) + h = np.array([-0.1, -0.1]) + + h_adjusted, _ = _adjust_scheme_to_bounds(x0, h, 1, '1-sided', lb, ub) + assert_allclose(h_adjusted, np.array([0.05, -0.06])) + + h_adjusted, _ = _adjust_scheme_to_bounds(x0, h, 2, '1-sided', lb, ub) + assert_allclose(h_adjusted, np.array([0.025, -0.03])) + + h_adjusted, one_sided = _adjust_scheme_to_bounds( + x0, h, 1, '2-sided', lb, ub) + assert_allclose(h_adjusted, np.array([0.03, -0.03])) + assert_equal(one_sided, np.array([False, True])) + + h_adjusted, one_sided = _adjust_scheme_to_bounds( + x0, h, 2, '2-sided', lb, ub) + assert_allclose(h_adjusted, np.array([0.015, -0.015])) + assert_equal(one_sided, np.array([False, True])) + + +class TestApproxDerivativesDense: + def fun_scalar_scalar(self, x): + return np.sinh(x) + + def jac_scalar_scalar(self, x): + return np.cosh(x) + + def fun_scalar_vector(self, x): + return np.array([x[0]**2, np.tan(x[0]), np.exp(x[0])]) + + def jac_scalar_vector(self, x): + return np.array( + [2 * x[0], np.cos(x[0]) ** -2, np.exp(x[0])]).reshape(-1, 1) + + def fun_vector_scalar(self, x): + return np.sin(x[0] * x[1]) * np.log(x[0]) + + def wrong_dimensions_fun(self, x): + return np.array([x**2, np.tan(x), np.exp(x)]) + + def jac_vector_scalar(self, x): + return np.array([ + x[1] * np.cos(x[0] * x[1]) * np.log(x[0]) + + np.sin(x[0] * x[1]) / x[0], + x[0] * np.cos(x[0] * x[1]) * np.log(x[0]) + ]) + + def fun_vector_vector(self, x): + return np.array([ + x[0] * np.sin(x[1]), + x[1] * np.cos(x[0]), + x[0] ** 3 * x[1] ** -0.5 + ]) + + def jac_vector_vector(self, x): + return np.array([ + [np.sin(x[1]), x[0] * np.cos(x[1])], + [-x[1] * np.sin(x[0]), np.cos(x[0])], + [3 * x[0] ** 2 * x[1] ** -0.5, -0.5 * x[0] ** 3 * x[1] ** -1.5] + ]) + + def fun_parametrized(self, x, c0, c1=1.0): + return np.array([np.exp(c0 * x[0]), np.exp(c1 * x[1])]) + + def jac_parametrized(self, x, c0, c1=0.1): + return np.array([ + [c0 * np.exp(c0 * x[0]), 0], + [0, c1 * np.exp(c1 * x[1])] + ]) + + def fun_with_nan(self, x): + return x if np.abs(x) <= 1e-8 else np.nan + + def jac_with_nan(self, x): + return 1.0 if np.abs(x) <= 1e-8 else np.nan + + def fun_zero_jacobian(self, x): + return np.array([x[0] * x[1], np.cos(x[0] * x[1])]) + + def jac_zero_jacobian(self, x): + return np.array([ + [x[1], x[0]], + [-x[1] * np.sin(x[0] * x[1]), -x[0] * np.sin(x[0] * x[1])] + ]) + + def jac_non_numpy(self, x): + # x can be a scalar or an array [val]. + # Cast to true scalar before handing over to math.exp + xp = np.asarray(x).item() + return math.exp(xp) + + def test_scalar_scalar(self): + x0 = 1.0 + jac_diff_2 = approx_derivative(self.fun_scalar_scalar, x0, + method='2-point') + jac_diff_3 = approx_derivative(self.fun_scalar_scalar, x0) + jac_diff_4 = approx_derivative(self.fun_scalar_scalar, x0, + method='cs') + jac_true = self.jac_scalar_scalar(x0) + assert_allclose(jac_diff_2, jac_true, rtol=1e-6) + assert_allclose(jac_diff_3, jac_true, rtol=1e-9) + assert_allclose(jac_diff_4, jac_true, rtol=1e-12) + + def test_scalar_scalar_abs_step(self): + # can approx_derivative use abs_step? 
+ x0 = 1.0 + jac_diff_2 = approx_derivative(self.fun_scalar_scalar, x0, + method='2-point', abs_step=1.49e-8) + jac_diff_3 = approx_derivative(self.fun_scalar_scalar, x0, + abs_step=1.49e-8) + jac_diff_4 = approx_derivative(self.fun_scalar_scalar, x0, + method='cs', abs_step=1.49e-8) + jac_true = self.jac_scalar_scalar(x0) + assert_allclose(jac_diff_2, jac_true, rtol=1e-6) + assert_allclose(jac_diff_3, jac_true, rtol=1e-9) + assert_allclose(jac_diff_4, jac_true, rtol=1e-12) + + def test_scalar_vector(self): + x0 = 0.5 + jac_diff_2 = approx_derivative(self.fun_scalar_vector, x0, + method='2-point') + jac_diff_3 = approx_derivative(self.fun_scalar_vector, x0) + jac_diff_4 = approx_derivative(self.fun_scalar_vector, x0, + method='cs') + jac_true = self.jac_scalar_vector(np.atleast_1d(x0)) + assert_allclose(jac_diff_2, jac_true, rtol=1e-6) + assert_allclose(jac_diff_3, jac_true, rtol=1e-9) + assert_allclose(jac_diff_4, jac_true, rtol=1e-12) + + def test_vector_scalar(self): + x0 = np.array([100.0, -0.5]) + jac_diff_2 = approx_derivative(self.fun_vector_scalar, x0, + method='2-point') + jac_diff_3 = approx_derivative(self.fun_vector_scalar, x0) + jac_diff_4 = approx_derivative(self.fun_vector_scalar, x0, + method='cs') + jac_true = self.jac_vector_scalar(x0) + assert_allclose(jac_diff_2, jac_true, rtol=1e-6) + assert_allclose(jac_diff_3, jac_true, rtol=1e-7) + assert_allclose(jac_diff_4, jac_true, rtol=1e-12) + + def test_vector_scalar_abs_step(self): + # can approx_derivative use abs_step? + x0 = np.array([100.0, -0.5]) + jac_diff_2 = approx_derivative(self.fun_vector_scalar, x0, + method='2-point', abs_step=1.49e-8) + jac_diff_3 = approx_derivative(self.fun_vector_scalar, x0, + abs_step=1.49e-8, rel_step=np.inf) + jac_diff_4 = approx_derivative(self.fun_vector_scalar, x0, + method='cs', abs_step=1.49e-8) + jac_true = self.jac_vector_scalar(x0) + assert_allclose(jac_diff_2, jac_true, rtol=1e-6) + assert_allclose(jac_diff_3, jac_true, rtol=3e-9) + assert_allclose(jac_diff_4, jac_true, rtol=1e-12) + + def test_vector_vector(self): + x0 = np.array([-100.0, 0.2]) + jac_diff_2 = approx_derivative(self.fun_vector_vector, x0, + method='2-point') + jac_diff_3 = approx_derivative(self.fun_vector_vector, x0) + jac_diff_4 = approx_derivative(self.fun_vector_vector, x0, + method='cs') + jac_true = self.jac_vector_vector(x0) + assert_allclose(jac_diff_2, jac_true, rtol=1e-5) + assert_allclose(jac_diff_3, jac_true, rtol=1e-6) + assert_allclose(jac_diff_4, jac_true, rtol=1e-12) + + def test_wrong_dimensions(self): + x0 = 1.0 + assert_raises(RuntimeError, approx_derivative, + self.wrong_dimensions_fun, x0) + f0 = self.wrong_dimensions_fun(np.atleast_1d(x0)) + assert_raises(ValueError, approx_derivative, + self.wrong_dimensions_fun, x0, f0=f0) + + def test_custom_rel_step(self): + x0 = np.array([-0.1, 0.1]) + jac_diff_2 = approx_derivative(self.fun_vector_vector, x0, + method='2-point', rel_step=1e-4) + jac_diff_3 = approx_derivative(self.fun_vector_vector, x0, + rel_step=1e-4) + jac_true = self.jac_vector_vector(x0) + assert_allclose(jac_diff_2, jac_true, rtol=1e-2) + assert_allclose(jac_diff_3, jac_true, rtol=1e-4) + + def test_options(self): + x0 = np.array([1.0, 1.0]) + c0 = -1.0 + c1 = 1.0 + lb = 0.0 + ub = 2.0 + f0 = self.fun_parametrized(x0, c0, c1=c1) + rel_step = np.array([-1e-6, 1e-7]) + jac_true = self.jac_parametrized(x0, c0, c1) + jac_diff_2 = approx_derivative( + self.fun_parametrized, x0, method='2-point', rel_step=rel_step, + f0=f0, args=(c0,), kwargs=dict(c1=c1), bounds=(lb, ub)) + jac_diff_3 
= approx_derivative( + self.fun_parametrized, x0, rel_step=rel_step, + f0=f0, args=(c0,), kwargs=dict(c1=c1), bounds=(lb, ub)) + assert_allclose(jac_diff_2, jac_true, rtol=1e-6) + assert_allclose(jac_diff_3, jac_true, rtol=1e-9) + + def test_with_bounds_2_point(self): + lb = -np.ones(2) + ub = np.ones(2) + + x0 = np.array([-2.0, 0.2]) + assert_raises(ValueError, approx_derivative, + self.fun_vector_vector, x0, bounds=(lb, ub)) + + x0 = np.array([-1.0, 1.0]) + jac_diff = approx_derivative(self.fun_vector_vector, x0, + method='2-point', bounds=(lb, ub)) + jac_true = self.jac_vector_vector(x0) + assert_allclose(jac_diff, jac_true, rtol=1e-6) + + def test_with_bounds_3_point(self): + lb = np.array([1.0, 1.0]) + ub = np.array([2.0, 2.0]) + + x0 = np.array([1.0, 2.0]) + jac_true = self.jac_vector_vector(x0) + + jac_diff = approx_derivative(self.fun_vector_vector, x0) + assert_allclose(jac_diff, jac_true, rtol=1e-9) + + jac_diff = approx_derivative(self.fun_vector_vector, x0, + bounds=(lb, np.inf)) + assert_allclose(jac_diff, jac_true, rtol=1e-9) + + jac_diff = approx_derivative(self.fun_vector_vector, x0, + bounds=(-np.inf, ub)) + assert_allclose(jac_diff, jac_true, rtol=1e-9) + + jac_diff = approx_derivative(self.fun_vector_vector, x0, + bounds=(lb, ub)) + assert_allclose(jac_diff, jac_true, rtol=1e-9) + + def test_tight_bounds(self): + x0 = np.array([10.0, 10.0]) + lb = x0 - 3e-9 + ub = x0 + 2e-9 + jac_true = self.jac_vector_vector(x0) + jac_diff = approx_derivative( + self.fun_vector_vector, x0, method='2-point', bounds=(lb, ub)) + assert_allclose(jac_diff, jac_true, rtol=1e-6) + jac_diff = approx_derivative( + self.fun_vector_vector, x0, method='2-point', + rel_step=1e-6, bounds=(lb, ub)) + assert_allclose(jac_diff, jac_true, rtol=1e-6) + + jac_diff = approx_derivative( + self.fun_vector_vector, x0, bounds=(lb, ub)) + assert_allclose(jac_diff, jac_true, rtol=1e-6) + jac_diff = approx_derivative( + self.fun_vector_vector, x0, rel_step=1e-6, bounds=(lb, ub)) + assert_allclose(jac_true, jac_diff, rtol=1e-6) + + def test_bound_switches(self): + lb = -1e-8 + ub = 1e-8 + x0 = 0.0 + jac_true = self.jac_with_nan(x0) + jac_diff_2 = approx_derivative( + self.fun_with_nan, x0, method='2-point', rel_step=1e-6, + bounds=(lb, ub)) + jac_diff_3 = approx_derivative( + self.fun_with_nan, x0, rel_step=1e-6, bounds=(lb, ub)) + assert_allclose(jac_diff_2, jac_true, rtol=1e-6) + assert_allclose(jac_diff_3, jac_true, rtol=1e-9) + + x0 = 1e-8 + jac_true = self.jac_with_nan(x0) + jac_diff_2 = approx_derivative( + self.fun_with_nan, x0, method='2-point', rel_step=1e-6, + bounds=(lb, ub)) + jac_diff_3 = approx_derivative( + self.fun_with_nan, x0, rel_step=1e-6, bounds=(lb, ub)) + assert_allclose(jac_diff_2, jac_true, rtol=1e-6) + assert_allclose(jac_diff_3, jac_true, rtol=1e-9) + + def test_non_numpy(self): + x0 = 1.0 + jac_true = self.jac_non_numpy(x0) + jac_diff_2 = approx_derivative(self.jac_non_numpy, x0, + method='2-point') + jac_diff_3 = approx_derivative(self.jac_non_numpy, x0) + assert_allclose(jac_diff_2, jac_true, rtol=1e-6) + assert_allclose(jac_diff_3, jac_true, rtol=1e-8) + + # math.exp cannot handle complex arguments, hence this raises + assert_raises(TypeError, approx_derivative, self.jac_non_numpy, x0, + **dict(method='cs')) + + def test_fp(self): + # checks that approx_derivative works for FP size other than 64. + # Example is derived from the minimal working example in gh12991. 
+ np.random.seed(1) + + def func(p, x): + return p[0] + p[1] * x + + def err(p, x, y): + return func(p, x) - y + + x = np.linspace(0, 1, 100, dtype=np.float64) + y = np.random.random(100).astype(np.float64) + p0 = np.array([-1.0, -1.0]) + + jac_fp64 = approx_derivative(err, p0, method='2-point', args=(x, y)) + + # parameter vector is float32, func output is float64 + jac_fp = approx_derivative(err, p0.astype(np.float32), + method='2-point', args=(x, y)) + assert err(p0, x, y).dtype == np.float64 + assert_allclose(jac_fp, jac_fp64, atol=1e-3) + + # parameter vector is float64, func output is float32 + def err_fp32(p): + assert p.dtype == np.float32 + return err(p, x, y).astype(np.float32) + + jac_fp = approx_derivative(err_fp32, p0.astype(np.float32), + method='2-point') + assert_allclose(jac_fp, jac_fp64, atol=1e-3) + + # check upper bound of error on the derivative for 2-point + def f(x): + return np.sin(x) + def g(x): + return np.cos(x) + def hess(x): + return -np.sin(x) + + def calc_atol(h, x0, f, hess, EPS): + # truncation error + t0 = h / 2 * max(np.abs(hess(x0)), np.abs(hess(x0 + h))) + # roundoff error. There may be a divisor (>1) missing from + # the following line, so this contribution is possibly + # overestimated + t1 = EPS / h * max(np.abs(f(x0)), np.abs(f(x0 + h))) + return t0 + t1 + + for dtype in [np.float16, np.float32, np.float64]: + EPS = np.finfo(dtype).eps + x0 = np.array(1.0).astype(dtype) + h = _compute_absolute_step(None, x0, f(x0), '2-point') + atol = calc_atol(h, x0, f, hess, EPS) + err = approx_derivative(f, x0, method='2-point', + abs_step=h) - g(x0) + assert abs(err) < atol + + def test_check_derivative(self): + x0 = np.array([-10.0, 10]) + accuracy = check_derivative(self.fun_vector_vector, + self.jac_vector_vector, x0) + assert_(accuracy < 1e-9) + accuracy = check_derivative(self.fun_vector_vector, + self.jac_vector_vector, x0) + assert_(accuracy < 1e-6) + + x0 = np.array([0.0, 0.0]) + accuracy = check_derivative(self.fun_zero_jacobian, + self.jac_zero_jacobian, x0) + assert_(accuracy == 0) + accuracy = check_derivative(self.fun_zero_jacobian, + self.jac_zero_jacobian, x0) + assert_(accuracy == 0) + + +class TestApproxDerivativeSparse: + # Example from Numerical Optimization 2nd edition, p. 198. 
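+    # Editor's note: each residual below couples only x[i-1], x[i] and
+    # x[i+1], so the true Jacobian is tridiagonal. `structure` records that
+    # sparsity pattern and `group_columns` packs mutually independent columns,
+    # so the full Jacobian is recovered from ~3 perturbed evaluations instead
+    # of n = 50.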
+ def setup_method(self): + np.random.seed(0) + self.n = 50 + self.lb = -0.1 * (1 + np.arange(self.n)) + self.ub = 0.1 * (1 + np.arange(self.n)) + self.x0 = np.empty(self.n) + self.x0[::2] = (1 - 1e-7) * self.lb[::2] + self.x0[1::2] = (1 - 1e-7) * self.ub[1::2] + + self.J_true = self.jac(self.x0) + + def fun(self, x): + e = x[1:]**3 - x[:-1]**2 + return np.hstack((0, 3 * e)) + np.hstack((2 * e, 0)) + + def jac(self, x): + n = x.size + J = np.zeros((n, n)) + J[0, 0] = -4 * x[0] + J[0, 1] = 6 * x[1]**2 + for i in range(1, n - 1): + J[i, i - 1] = -6 * x[i-1] + J[i, i] = 9 * x[i]**2 - 4 * x[i] + J[i, i + 1] = 6 * x[i+1]**2 + J[-1, -1] = 9 * x[-1]**2 + J[-1, -2] = -6 * x[-2] + + return J + + def structure(self, n): + A = np.zeros((n, n), dtype=int) + A[0, 0] = 1 + A[0, 1] = 1 + for i in range(1, n - 1): + A[i, i - 1: i + 2] = 1 + A[-1, -1] = 1 + A[-1, -2] = 1 + + return A + + def test_all(self): + A = self.structure(self.n) + order = np.arange(self.n) + groups_1 = group_columns(A, order) + np.random.shuffle(order) + groups_2 = group_columns(A, order) + + for method, groups, l, u in product( + ['2-point', '3-point', 'cs'], [groups_1, groups_2], + [-np.inf, self.lb], [np.inf, self.ub]): + J = approx_derivative(self.fun, self.x0, method=method, + bounds=(l, u), sparsity=(A, groups)) + assert_(isinstance(J, csr_matrix)) + assert_allclose(J.toarray(), self.J_true, rtol=1e-6) + + rel_step = np.full_like(self.x0, 1e-8) + rel_step[::2] *= -1 + J = approx_derivative(self.fun, self.x0, method=method, + rel_step=rel_step, sparsity=(A, groups)) + assert_allclose(J.toarray(), self.J_true, rtol=1e-5) + + def test_no_precomputed_groups(self): + A = self.structure(self.n) + J = approx_derivative(self.fun, self.x0, sparsity=A) + assert_allclose(J.toarray(), self.J_true, rtol=1e-6) + + def test_equivalence(self): + structure = np.ones((self.n, self.n), dtype=int) + groups = np.arange(self.n) + for method in ['2-point', '3-point', 'cs']: + J_dense = approx_derivative(self.fun, self.x0, method=method) + J_sparse = approx_derivative( + self.fun, self.x0, sparsity=(structure, groups), method=method) + assert_allclose(J_dense, J_sparse.toarray(), + rtol=5e-16, atol=7e-15) + + def test_check_derivative(self): + def jac(x): + return csr_matrix(self.jac(x)) + + accuracy = check_derivative(self.fun, jac, self.x0, + bounds=(self.lb, self.ub)) + assert_(accuracy < 1e-9) + + accuracy = check_derivative(self.fun, jac, self.x0, + bounds=(self.lb, self.ub)) + assert_(accuracy < 1e-9) + + +class TestApproxDerivativeLinearOperator: + + def fun_scalar_scalar(self, x): + return np.sinh(x) + + def jac_scalar_scalar(self, x): + return np.cosh(x) + + def fun_scalar_vector(self, x): + return np.array([x[0]**2, np.tan(x[0]), np.exp(x[0])]) + + def jac_scalar_vector(self, x): + return np.array( + [2 * x[0], np.cos(x[0]) ** -2, np.exp(x[0])]).reshape(-1, 1) + + def fun_vector_scalar(self, x): + return np.sin(x[0] * x[1]) * np.log(x[0]) + + def jac_vector_scalar(self, x): + return np.array([ + x[1] * np.cos(x[0] * x[1]) * np.log(x[0]) + + np.sin(x[0] * x[1]) / x[0], + x[0] * np.cos(x[0] * x[1]) * np.log(x[0]) + ]) + + def fun_vector_vector(self, x): + return np.array([ + x[0] * np.sin(x[1]), + x[1] * np.cos(x[0]), + x[0] ** 3 * x[1] ** -0.5 + ]) + + def jac_vector_vector(self, x): + return np.array([ + [np.sin(x[1]), x[0] * np.cos(x[1])], + [-x[1] * np.sin(x[0]), np.cos(x[0])], + [3 * x[0] ** 2 * x[1] ** -0.5, -0.5 * x[0] ** 3 * x[1] ** -1.5] + ]) + + def test_scalar_scalar(self): + x0 = 1.0 + jac_diff_2 = 
approx_derivative(self.fun_scalar_scalar, x0, + method='2-point', + as_linear_operator=True) + jac_diff_3 = approx_derivative(self.fun_scalar_scalar, x0, + as_linear_operator=True) + jac_diff_4 = approx_derivative(self.fun_scalar_scalar, x0, + method='cs', + as_linear_operator=True) + jac_true = self.jac_scalar_scalar(x0) + np.random.seed(1) + for i in range(10): + p = np.random.uniform(-10, 10, size=(1,)) + assert_allclose(jac_diff_2.dot(p), jac_true*p, + rtol=1e-5) + assert_allclose(jac_diff_3.dot(p), jac_true*p, + rtol=5e-6) + assert_allclose(jac_diff_4.dot(p), jac_true*p, + rtol=5e-6) + + def test_scalar_vector(self): + x0 = 0.5 + jac_diff_2 = approx_derivative(self.fun_scalar_vector, x0, + method='2-point', + as_linear_operator=True) + jac_diff_3 = approx_derivative(self.fun_scalar_vector, x0, + as_linear_operator=True) + jac_diff_4 = approx_derivative(self.fun_scalar_vector, x0, + method='cs', + as_linear_operator=True) + jac_true = self.jac_scalar_vector(np.atleast_1d(x0)) + np.random.seed(1) + for i in range(10): + p = np.random.uniform(-10, 10, size=(1,)) + assert_allclose(jac_diff_2.dot(p), jac_true.dot(p), + rtol=1e-5) + assert_allclose(jac_diff_3.dot(p), jac_true.dot(p), + rtol=5e-6) + assert_allclose(jac_diff_4.dot(p), jac_true.dot(p), + rtol=5e-6) + + def test_vector_scalar(self): + x0 = np.array([100.0, -0.5]) + jac_diff_2 = approx_derivative(self.fun_vector_scalar, x0, + method='2-point', + as_linear_operator=True) + jac_diff_3 = approx_derivative(self.fun_vector_scalar, x0, + as_linear_operator=True) + jac_diff_4 = approx_derivative(self.fun_vector_scalar, x0, + method='cs', + as_linear_operator=True) + jac_true = self.jac_vector_scalar(x0) + np.random.seed(1) + for i in range(10): + p = np.random.uniform(-10, 10, size=x0.shape) + assert_allclose(jac_diff_2.dot(p), np.atleast_1d(jac_true.dot(p)), + rtol=1e-5) + assert_allclose(jac_diff_3.dot(p), np.atleast_1d(jac_true.dot(p)), + rtol=5e-6) + assert_allclose(jac_diff_4.dot(p), np.atleast_1d(jac_true.dot(p)), + rtol=1e-7) + + def test_vector_vector(self): + x0 = np.array([-100.0, 0.2]) + jac_diff_2 = approx_derivative(self.fun_vector_vector, x0, + method='2-point', + as_linear_operator=True) + jac_diff_3 = approx_derivative(self.fun_vector_vector, x0, + as_linear_operator=True) + jac_diff_4 = approx_derivative(self.fun_vector_vector, x0, + method='cs', + as_linear_operator=True) + jac_true = self.jac_vector_vector(x0) + np.random.seed(1) + for i in range(10): + p = np.random.uniform(-10, 10, size=x0.shape) + assert_allclose(jac_diff_2.dot(p), jac_true.dot(p), rtol=1e-5) + assert_allclose(jac_diff_3.dot(p), jac_true.dot(p), rtol=1e-6) + assert_allclose(jac_diff_4.dot(p), jac_true.dot(p), rtol=1e-7) + + def test_exception(self): + x0 = np.array([-100.0, 0.2]) + assert_raises(ValueError, approx_derivative, + self.fun_vector_vector, x0, + method='2-point', bounds=(1, np.inf)) + + +def test_absolute_step_sign(): + # test for gh12487 + # if an absolute step is specified for 2-point differences make sure that + # the side corresponds to the step. i.e. if step is positive then forward + # differences should be used, if step is negative then backwards + # differences should be used. 
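+    # Editor's sketch (hypothetical values, not part of gh12487): for a smooth
+    # f the two one-sided estimates agree, so the sign of `abs_step` only
+    # selects which neighbour of x0 is sampled.
+    h = 1e-8
+    fwd = (np.exp(1.0 + h) - np.exp(1.0)) / h    # forward, samples x0 + h
+    bwd = (np.exp(1.0 - h) - np.exp(1.0)) / -h   # backward, samples x0 - h
+    assert_allclose(fwd, bwd, rtol=1e-6)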
+
+    # function has double discontinuity at x = [-1, -1]
+    # first component is \/, second component is /\
+    def f(x):
+        return -np.abs(x[0] + 1) + np.abs(x[1] + 1)
+
+    # check that the forward difference is used
+    grad = approx_derivative(f, [-1, -1], method='2-point', abs_step=1e-8)
+    assert_allclose(grad, [-1.0, 1.0])
+
+    # check that the backwards difference is used
+    grad = approx_derivative(f, [-1, -1], method='2-point', abs_step=-1e-8)
+    assert_allclose(grad, [1.0, -1.0])
+
+    # check that the forwards difference is used with a step for both
+    # parameters
+    grad = approx_derivative(
+        f, [-1, -1], method='2-point', abs_step=[1e-8, 1e-8]
+    )
+    assert_allclose(grad, [-1.0, 1.0])
+
+    # check that we can mix forward/backwards steps.
+    grad = approx_derivative(
+        f, [-1, -1], method='2-point', abs_step=[1e-8, -1e-8]
+    )
+    assert_allclose(grad, [-1.0, -1.0])
+    grad = approx_derivative(
+        f, [-1, -1], method='2-point', abs_step=[-1e-8, 1e-8]
+    )
+    assert_allclose(grad, [1.0, 1.0])
+
+    # the forward step should reverse to a backwards step if it runs into a
+    # bound. This is partially covered by TestAdjustSchemeToBounds, but only
+    # for a lower-level function.
+    grad = approx_derivative(
+        f, [-1, -1], method='2-point', abs_step=1e-8,
+        bounds=(-np.inf, -1)
+    )
+    assert_allclose(grad, [1.0, -1.0])
+
+    grad = approx_derivative(
+        f, [-1, -1], method='2-point', abs_step=-1e-8, bounds=(-1, np.inf)
+    )
+    assert_allclose(grad, [-1.0, 1.0])
+
+
+def test__compute_absolute_step():
+    # tests calculation of absolute step from rel_step
+    methods = ['2-point', '3-point', 'cs']
+
+    x0 = np.array([1e-5, 0, 1, 1e5])
+
+    EPS = np.finfo(np.float64).eps
+    relative_step = {
+        "2-point": EPS**0.5,
+        "3-point": EPS**(1/3),
+        "cs": EPS**0.5
+    }
+    f0 = np.array(1.0)
+
+    for method in methods:
+        rel_step = relative_step[method]
+        correct_step = np.array([rel_step,
+                                 rel_step * 1.,
+                                 rel_step * 1.,
+                                 rel_step * np.abs(x0[3])])
+
+        abs_step = _compute_absolute_step(None, x0, f0, method)
+        assert_allclose(abs_step, correct_step)
+
+        sign_x0 = (-x0 >= 0).astype(float) * 2 - 1
+        abs_step = _compute_absolute_step(None, -x0, f0, method)
+        assert_allclose(abs_step, sign_x0 * correct_step)
+
+    # if a relative step is provided it should be used
+    rel_step = np.array([0.1, 1, 10, 100])
+    correct_step = np.array([rel_step[0] * x0[0],
+                             relative_step['2-point'],
+                             rel_step[2] * 1.,
+                             rel_step[3] * np.abs(x0[3])])
+
+    abs_step = _compute_absolute_step(rel_step, x0, f0, '2-point')
+    assert_allclose(abs_step, correct_step)
+
+    sign_x0 = (-x0 >= 0).astype(float) * 2 - 1
+    abs_step = _compute_absolute_step(rel_step, -x0, f0, '2-point')
+    assert_allclose(abs_step, sign_x0 * correct_step)
diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/tests/test__remove_redundancy.py b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test__remove_redundancy.py
new file mode 100644
index 0000000000000000000000000000000000000000..817282011699dea333042a4173f65c999a2925fc
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test__remove_redundancy.py
@@ -0,0 +1,228 @@
+"""
+Unit tests for the redundancy-removal routines used by linear programming.
+""" + +# TODO: add tests for: +# https://github.com/scipy/scipy/issues/5400 +# https://github.com/scipy/scipy/issues/6690 + +import numpy as np +from numpy.testing import ( + assert_, + assert_allclose, + assert_equal) + +from .test_linprog import magic_square +from scipy.optimize._remove_redundancy import _remove_redundancy_svd +from scipy.optimize._remove_redundancy import _remove_redundancy_pivot_dense +from scipy.optimize._remove_redundancy import _remove_redundancy_pivot_sparse +from scipy.optimize._remove_redundancy import _remove_redundancy_id + +from scipy.sparse import csc_matrix + + +def setup_module(): + np.random.seed(2017) + + +def redundancy_removed(A, B): + """Checks whether a matrix contains only independent rows of another""" + for rowA in A: + # `rowA in B` is not a reliable check + for rowB in B: + if np.all(rowA == rowB): + break + else: + return False + return A.shape[0] == np.linalg.matrix_rank(A) == np.linalg.matrix_rank(B) + + +class RRCommonTests: + def test_no_redundancy(self): + m, n = 10, 10 + A0 = np.random.rand(m, n) + b0 = np.random.rand(m) + A1, b1, status, message = self.rr(A0, b0) + assert_allclose(A0, A1) + assert_allclose(b0, b1) + assert_equal(status, 0) + + def test_infeasible_zero_row(self): + A = np.eye(3) + A[1, :] = 0 + b = np.random.rand(3) + A1, b1, status, message = self.rr(A, b) + assert_equal(status, 2) + + def test_remove_zero_row(self): + A = np.eye(3) + A[1, :] = 0 + b = np.random.rand(3) + b[1] = 0 + A1, b1, status, message = self.rr(A, b) + assert_equal(status, 0) + assert_allclose(A1, A[[0, 2], :]) + assert_allclose(b1, b[[0, 2]]) + + def test_infeasible_m_gt_n(self): + m, n = 20, 10 + A0 = np.random.rand(m, n) + b0 = np.random.rand(m) + A1, b1, status, message = self.rr(A0, b0) + assert_equal(status, 2) + + def test_infeasible_m_eq_n(self): + m, n = 10, 10 + A0 = np.random.rand(m, n) + b0 = np.random.rand(m) + A0[-1, :] = 2 * A0[-2, :] + A1, b1, status, message = self.rr(A0, b0) + assert_equal(status, 2) + + def test_infeasible_m_lt_n(self): + m, n = 9, 10 + A0 = np.random.rand(m, n) + b0 = np.random.rand(m) + A0[-1, :] = np.arange(m - 1).dot(A0[:-1]) + A1, b1, status, message = self.rr(A0, b0) + assert_equal(status, 2) + + def test_m_gt_n(self): + np.random.seed(2032) + m, n = 20, 10 + A0 = np.random.rand(m, n) + b0 = np.random.rand(m) + x = np.linalg.solve(A0[:n, :], b0[:n]) + b0[n:] = A0[n:, :].dot(x) + A1, b1, status, message = self.rr(A0, b0) + assert_equal(status, 0) + assert_equal(A1.shape[0], n) + assert_equal(np.linalg.matrix_rank(A1), n) + + def test_m_gt_n_rank_deficient(self): + m, n = 20, 10 + A0 = np.zeros((m, n)) + A0[:, 0] = 1 + b0 = np.ones(m) + A1, b1, status, message = self.rr(A0, b0) + assert_equal(status, 0) + assert_allclose(A1, A0[0:1, :]) + assert_allclose(b1, b0[0]) + + def test_m_lt_n_rank_deficient(self): + m, n = 9, 10 + A0 = np.random.rand(m, n) + b0 = np.random.rand(m) + A0[-1, :] = np.arange(m - 1).dot(A0[:-1]) + b0[-1] = np.arange(m - 1).dot(b0[:-1]) + A1, b1, status, message = self.rr(A0, b0) + assert_equal(status, 0) + assert_equal(A1.shape[0], 8) + assert_equal(np.linalg.matrix_rank(A1), 8) + + def test_dense1(self): + A = np.ones((6, 6)) + A[0, :3] = 0 + A[1, 3:] = 0 + A[3:, ::2] = -1 + A[3, :2] = 0 + A[4, 2:] = 0 + b = np.zeros(A.shape[0]) + + A1, b1, status, message = self.rr(A, b) + assert_(redundancy_removed(A1, A)) + assert_equal(status, 0) + + def test_dense2(self): + A = np.eye(6) + A[-2, -1] = 1 + A[-1, :] = 1 + b = np.zeros(A.shape[0]) + A1, b1, status, message = self.rr(A, b) + 
assert_(redundancy_removed(A1, A)) + assert_equal(status, 0) + + def test_dense3(self): + A = np.eye(6) + A[-2, -1] = 1 + A[-1, :] = 1 + b = np.random.rand(A.shape[0]) + b[-1] = np.sum(b[:-1]) + A1, b1, status, message = self.rr(A, b) + assert_(redundancy_removed(A1, A)) + assert_equal(status, 0) + + def test_m_gt_n_sparse(self): + np.random.seed(2013) + m, n = 20, 5 + p = 0.1 + A = np.random.rand(m, n) + A[np.random.rand(m, n) > p] = 0 + rank = np.linalg.matrix_rank(A) + b = np.zeros(A.shape[0]) + A1, b1, status, message = self.rr(A, b) + assert_equal(status, 0) + assert_equal(A1.shape[0], rank) + assert_equal(np.linalg.matrix_rank(A1), rank) + + def test_m_lt_n_sparse(self): + np.random.seed(2017) + m, n = 20, 50 + p = 0.05 + A = np.random.rand(m, n) + A[np.random.rand(m, n) > p] = 0 + rank = np.linalg.matrix_rank(A) + b = np.zeros(A.shape[0]) + A1, b1, status, message = self.rr(A, b) + assert_equal(status, 0) + assert_equal(A1.shape[0], rank) + assert_equal(np.linalg.matrix_rank(A1), rank) + + def test_m_eq_n_sparse(self): + np.random.seed(2017) + m, n = 100, 100 + p = 0.01 + A = np.random.rand(m, n) + A[np.random.rand(m, n) > p] = 0 + rank = np.linalg.matrix_rank(A) + b = np.zeros(A.shape[0]) + A1, b1, status, message = self.rr(A, b) + assert_equal(status, 0) + assert_equal(A1.shape[0], rank) + assert_equal(np.linalg.matrix_rank(A1), rank) + + def test_magic_square(self): + A, b, c, numbers, _ = magic_square(3) + A1, b1, status, message = self.rr(A, b) + assert_equal(status, 0) + assert_equal(A1.shape[0], 23) + assert_equal(np.linalg.matrix_rank(A1), 23) + + def test_magic_square2(self): + A, b, c, numbers, _ = magic_square(4) + A1, b1, status, message = self.rr(A, b) + assert_equal(status, 0) + assert_equal(A1.shape[0], 39) + assert_equal(np.linalg.matrix_rank(A1), 39) + + +class TestRRSVD(RRCommonTests): + def rr(self, A, b): + return _remove_redundancy_svd(A, b) + + +class TestRRPivotDense(RRCommonTests): + def rr(self, A, b): + return _remove_redundancy_pivot_dense(A, b) + + +class TestRRID(RRCommonTests): + def rr(self, A, b): + return _remove_redundancy_id(A, b) + + +class TestRRPivotSparse(RRCommonTests): + def rr(self, A, b): + rr_res = _remove_redundancy_pivot_sparse(csc_matrix(A), b) + A1, b1, status, message = rr_res + return A1.toarray(), b1, status, message diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/tests/test__shgo.py b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test__shgo.py new file mode 100644 index 0000000000000000000000000000000000000000..b739bc3d69195c8f13e07407db960e0407a2f26c --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test__shgo.py @@ -0,0 +1,1159 @@ +import logging +import sys + +import numpy +import numpy as np +import time +from multiprocessing import Pool +from numpy.testing import assert_allclose, IS_PYPY +import pytest +from pytest import raises as assert_raises, warns +from scipy.optimize import (shgo, Bounds, minimize_scalar, minimize, rosen, + rosen_der, rosen_hess, NonlinearConstraint) +from scipy.optimize._constraints import new_constraint_to_old +from scipy.optimize._shgo import SHGO + + +class StructTestFunction: + def __init__(self, bounds, expected_x, expected_fun=None, + expected_xl=None, expected_funl=None): + self.bounds = bounds + self.expected_x = expected_x + self.expected_fun = expected_fun + self.expected_xl = expected_xl + self.expected_funl = expected_funl + + +def wrap_constraints(g): + cons = [] + if g is not None: + if not isinstance(g, (tuple, list)): + g = (g,) + else: + 
pass
+        for g in g:
+            cons.append({'type': 'ineq',
+                         'fun': g})
+        cons = tuple(cons)
+    else:
+        cons = None
+    return cons
+
+
+class StructTest1(StructTestFunction):
+    def f(self, x):
+        return x[0] ** 2 + x[1] ** 2
+
+    def g(x):
+        return -(numpy.sum(x, axis=0) - 6.0)
+
+    cons = wrap_constraints(g)
+
+
+test1_1 = StructTest1(bounds=[(-1, 6), (-1, 6)],
+                      expected_x=[0, 0])
+test1_2 = StructTest1(bounds=[(0, 1), (0, 1)],
+                      expected_x=[0, 0])
+test1_3 = StructTest1(bounds=[(None, None), (None, None)],
+                      expected_x=[0, 0])
+
+
+class StructTest2(StructTestFunction):
+    """
+    Scalar function with several minima to test all minimiser retrievals
+    """
+
+    def f(self, x):
+        return (x - 30) * numpy.sin(x)
+
+    def g(x):
+        return 58 - numpy.sum(x, axis=0)
+
+    cons = wrap_constraints(g)
+
+
+test2_1 = StructTest2(bounds=[(0, 60)],
+                      expected_x=[1.53567906],
+                      expected_fun=-28.44677132,
+                      # Important: test that funl return is in the correct
+                      # order
+                      expected_xl=numpy.array([[1.53567906],
+                                               [55.01782167],
+                                               [7.80894889],
+                                               [48.74797493],
+                                               [14.07445705],
+                                               [42.4913859],
+                                               [20.31743841],
+                                               [36.28607535],
+                                               [26.43039605],
+                                               [30.76371366]]),
+
+                      expected_funl=numpy.array([-28.44677132, -24.99785984,
+                                                 -22.16855376, -18.72136195,
+                                                 -15.89423937, -12.45154942,
+                                                 -9.63133158, -6.20801301,
+                                                 -3.43727232, -0.46353338])
+                      )
+
+test2_2 = StructTest2(bounds=[(0, 4.5)],
+                      expected_x=[1.53567906],
+                      expected_fun=[-28.44677132],
+                      expected_xl=numpy.array([[1.53567906]]),
+                      expected_funl=numpy.array([-28.44677132])
+                      )
+
+
+class StructTest3(StructTestFunction):
+    """
+    Hock and Schittkowski 18 problem (HS18). Hock and Schittkowski (1981)
+    http://www.ai7.uni-bayreuth.de/test_problem_coll.pdf
+    Minimize: f = 0.01 * (x_1)**2 + (x_2)**2
+
+    Subject to: x_1 * x_2 - 25.0 >= 0,
+                (x_1)**2 + (x_2)**2 - 25.0 >= 0,
+                2 <= x_1 <= 50,
+                0 <= x_2 <= 50.
+
+    Approx. Answer:
+        f([(250)**0.5 , (2.5)**0.5]) = 5.0
+
+
+    """
+
+    # amended to test vectorisation of constraints
+    def f(self, x):
+        return 0.01 * (x[0]) ** 2 + (x[1]) ** 2
+
+    def g1(x):
+        return x[0] * x[1] - 25.0
+
+    def g2(x):
+        return x[0] ** 2 + x[1] ** 2 - 25.0
+
+    # g = (g1, g2)
+    # cons = wrap_constraints(g)
+
+    def g(x):
+        return x[0] * x[1] - 25.0, x[0] ** 2 + x[1] ** 2 - 25.0
+
+    # this checks that shgo can be sent new-style constraints
+    __nlc = NonlinearConstraint(g, 0, np.inf)
+    cons = (__nlc,)
+
+test3_1 = StructTest3(bounds=[(2, 50), (0, 50)],
+                      expected_x=[250 ** 0.5, 2.5 ** 0.5],
+                      expected_fun=5.0
+                      )
+
+
+class StructTest4(StructTestFunction):
+    """
+    Hock and Schittkowski 11 problem (HS11). Hock and Schittkowski (1981)
+
+    NOTE: Did not find in original reference to HS collection, refer to
+          Henderson (2015) problem 7 instead.
02.03.2016 + """ + + def f(self, x): + return ((x[0] - 10) ** 2 + 5 * (x[1] - 12) ** 2 + x[2] ** 4 + + 3 * (x[3] - 11) ** 2 + 10 * x[4] ** 6 + 7 * x[5] ** 2 + x[ + 6] ** 4 + - 4 * x[5] * x[6] - 10 * x[5] - 8 * x[6] + ) + + def g1(x): + return -(2 * x[0] ** 2 + 3 * x[1] ** 4 + x[2] + 4 * x[3] ** 2 + + 5 * x[4] - 127) + + def g2(x): + return -(7 * x[0] + 3 * x[1] + 10 * x[2] ** 2 + x[3] - x[4] - 282.0) + + def g3(x): + return -(23 * x[0] + x[1] ** 2 + 6 * x[5] ** 2 - 8 * x[6] - 196) + + def g4(x): + return -(4 * x[0] ** 2 + x[1] ** 2 - 3 * x[0] * x[1] + 2 * x[2] ** 2 + + 5 * x[5] - 11 * x[6]) + + g = (g1, g2, g3, g4) + + cons = wrap_constraints(g) + + +test4_1 = StructTest4(bounds=[(-10, 10), ] * 7, + expected_x=[2.330499, 1.951372, -0.4775414, + 4.365726, -0.6244870, 1.038131, 1.594227], + expected_fun=680.6300573 + ) + + +class StructTest5(StructTestFunction): + def f(self, x): + return (-(x[1] + 47.0) + * numpy.sin(numpy.sqrt(abs(x[0] / 2.0 + (x[1] + 47.0)))) + - x[0] * numpy.sin(numpy.sqrt(abs(x[0] - (x[1] + 47.0)))) + ) + + g = None + cons = wrap_constraints(g) + + +test5_1 = StructTest5(bounds=[(-512, 512), (-512, 512)], + expected_fun=[-959.64066272085051], + expected_x=[512., 404.23180542]) + + +class StructTestLJ(StructTestFunction): + """ + LennardJones objective function. Used to test symmetry constraints + settings. + """ + + def f(self, x, *args): + print(f'x = {x}') + self.N = args[0] + k = int(self.N / 3) + s = 0.0 + + for i in range(k - 1): + for j in range(i + 1, k): + a = 3 * i + b = 3 * j + xd = x[a] - x[b] + yd = x[a + 1] - x[b + 1] + zd = x[a + 2] - x[b + 2] + ed = xd * xd + yd * yd + zd * zd + ud = ed * ed * ed + if ed > 0.0: + s += (1.0 / ud - 2.0) / ud + + return s + + g = None + cons = wrap_constraints(g) + + +N = 6 +boundsLJ = list(zip([-4.0] * 6, [4.0] * 6)) + +testLJ = StructTestLJ(bounds=boundsLJ, + expected_fun=[-1.0], + expected_x=None, + # expected_x=[-2.71247337e-08, + # -2.71247337e-08, + # -2.50000222e+00, + # -2.71247337e-08, + # -2.71247337e-08, + # -1.50000222e+00] + ) + + +class StructTestS(StructTestFunction): + def f(self, x): + return ((x[0] - 0.5) ** 2 + (x[1] - 0.5) ** 2 + + (x[2] - 0.5) ** 2 + (x[3] - 0.5) ** 2) + + g = None + cons = wrap_constraints(g) + + +test_s = StructTestS(bounds=[(0, 2.0), ] * 4, + expected_fun=0.0, + expected_x=numpy.ones(4) - 0.5 + ) + + +class StructTestTable(StructTestFunction): + def f(self, x): + if x[0] == 3.0 and x[1] == 3.0: + return 50 + else: + return 100 + + g = None + cons = wrap_constraints(g) + + +test_table = StructTestTable(bounds=[(-10, 10), (-10, 10)], + expected_fun=[50], + expected_x=[3.0, 3.0]) + + +class StructTestInfeasible(StructTestFunction): + """ + Test function with no feasible domain. 
+ """ + + def f(self, x, *args): + return x[0] ** 2 + x[1] ** 2 + + def g1(x): + return x[0] + x[1] - 1 + + def g2(x): + return -(x[0] + x[1] - 1) + + def g3(x): + return -x[0] + x[1] - 1 + + def g4(x): + return -(-x[0] + x[1] - 1) + + g = (g1, g2, g3, g4) + cons = wrap_constraints(g) + + +test_infeasible = StructTestInfeasible(bounds=[(2, 50), (-1, 1)], + expected_fun=None, + expected_x=None + ) + + +@pytest.mark.skip("Not a test") +def run_test(test, args=(), test_atol=1e-5, n=100, iters=None, + callback=None, minimizer_kwargs=None, options=None, + sampling_method='sobol', workers=1): + res = shgo(test.f, test.bounds, args=args, constraints=test.cons, + n=n, iters=iters, callback=callback, + minimizer_kwargs=minimizer_kwargs, options=options, + sampling_method=sampling_method, workers=workers) + + print(f'res = {res}') + logging.info(f'res = {res}') + if test.expected_x is not None: + numpy.testing.assert_allclose(res.x, test.expected_x, + rtol=test_atol, + atol=test_atol) + + # (Optional tests) + if test.expected_fun is not None: + numpy.testing.assert_allclose(res.fun, + test.expected_fun, + atol=test_atol) + + if test.expected_xl is not None: + numpy.testing.assert_allclose(res.xl, + test.expected_xl, + atol=test_atol) + + if test.expected_funl is not None: + numpy.testing.assert_allclose(res.funl, + test.expected_funl, + atol=test_atol) + return + + +# Base test functions: +class TestShgoSobolTestFunctions: + """ + Global optimisation tests with Sobol sampling: + """ + + # Sobol algorithm + def test_f1_1_sobol(self): + """Multivariate test function 1: + x[0]**2 + x[1]**2 with bounds=[(-1, 6), (-1, 6)]""" + run_test(test1_1) + + def test_f1_2_sobol(self): + """Multivariate test function 1: + x[0]**2 + x[1]**2 with bounds=[(0, 1), (0, 1)]""" + run_test(test1_2) + + def test_f1_3_sobol(self): + """Multivariate test function 1: + x[0]**2 + x[1]**2 with bounds=[(None, None),(None, None)]""" + options = {'disp': True} + run_test(test1_3, options=options) + + def test_f2_1_sobol(self): + """Univariate test function on + f(x) = (x - 30) * sin(x) with bounds=[(0, 60)]""" + run_test(test2_1) + + def test_f2_2_sobol(self): + """Univariate test function on + f(x) = (x - 30) * sin(x) bounds=[(0, 4.5)]""" + run_test(test2_2) + + def test_f3_sobol(self): + """NLP: Hock and Schittkowski problem 18""" + run_test(test3_1) + + @pytest.mark.slow + def test_f4_sobol(self): + """NLP: (High dimensional) Hock and Schittkowski 11 problem (HS11)""" + options = {'infty_constraints': False} + # run_test(test4_1, n=990, options=options) + run_test(test4_1, n=990 * 2, options=options) + + def test_f5_1_sobol(self): + """NLP: Eggholder, multimodal""" + # run_test(test5_1, n=30) + run_test(test5_1, n=60) + + def test_f5_2_sobol(self): + """NLP: Eggholder, multimodal""" + # run_test(test5_1, n=60, iters=5) + run_test(test5_1, n=60, iters=5) + + # def test_t911(self): + # """1D tabletop function""" + # run_test(test11_1) + + +class TestShgoSimplicialTestFunctions: + """ + Global optimisation tests with Simplicial sampling: + """ + + def test_f1_1_simplicial(self): + """Multivariate test function 1: + x[0]**2 + x[1]**2 with bounds=[(-1, 6), (-1, 6)]""" + run_test(test1_1, n=1, sampling_method='simplicial') + + def test_f1_2_simplicial(self): + """Multivariate test function 1: + x[0]**2 + x[1]**2 with bounds=[(0, 1), (0, 1)]""" + run_test(test1_2, n=1, sampling_method='simplicial') + + def test_f1_3_simplicial(self): + """Multivariate test function 1: x[0]**2 + x[1]**2 + with bounds=[(None, None),(None, None)]""" + 
run_test(test1_3, n=5, sampling_method='simplicial')
+
+    def test_f2_1_simplicial(self):
+        """Univariate test function on
+        f(x) = (x - 30) * sin(x) with bounds=[(0, 60)]"""
+        options = {'minimize_every_iter': False}
+        run_test(test2_1, n=200, iters=7, options=options,
+                 sampling_method='simplicial')
+
+    def test_f2_2_simplicial(self):
+        """Univariate test function on
+        f(x) = (x - 30) * sin(x) bounds=[(0, 4.5)]"""
+        run_test(test2_2, n=1, sampling_method='simplicial')
+
+    def test_f3_simplicial(self):
+        """NLP: Hock and Schittkowski problem 18"""
+        run_test(test3_1, n=1, sampling_method='simplicial')
+
+    @pytest.mark.slow
+    def test_f4_simplicial(self):
+        """NLP: (High dimensional) Hock and Schittkowski 11 problem (HS11)"""
+        run_test(test4_1, n=1, sampling_method='simplicial')
+
+    def test_lj_symmetry_old(self):
+        """LJ: Symmetry-constrained test function"""
+        options = {'symmetry': True,
+                   'disp': True}
+        args = (6,)  # Number of atoms
+        run_test(testLJ, args=args, n=300,
+                 options=options, iters=1,
+                 sampling_method='simplicial')
+
+    def test_f5_1_lj_symmetry(self):
+        """LJ: Symmetry constrained test function"""
+        options = {'symmetry': [0, ] * 6,
+                   'disp': True}
+        args = (6,)  # No. of atoms
+
+        run_test(testLJ, args=args, n=300,
+                 options=options, iters=1,
+                 sampling_method='simplicial')
+
+    def test_f5_2_cons_symmetry(self):
+        """Symmetry constrained test function"""
+        options = {'symmetry': [0, 0],
+                   'disp': True}
+
+        run_test(test1_1, n=200,
+                 options=options, iters=1,
+                 sampling_method='simplicial')
+
+    def test_f5_3_cons_symmetry(self):
+        """Asymmetrically constrained test function"""
+        options = {'symmetry': [0, 0, 0, 3],
+                   'disp': True}
+
+        run_test(test_s, n=10000,
+                 options=options,
+                 iters=1,
+                 sampling_method='simplicial')
+
+    @pytest.mark.skip("Not a test")
+    def test_f0_min_variance(self):
+        """Return a minimum on a perfectly symmetric problem, based on
+        gh10429"""
+        avg = 0.5  # Given average value of x
+        cons = {'type': 'eq', 'fun': lambda x: numpy.mean(x) - avg}
+
+        # Minimize the variance of x under the given constraint
+        res = shgo(numpy.var, bounds=6 * [(0, 1)], constraints=cons)
+        assert res.success
+        assert_allclose(res.fun, 0, atol=1e-15)
+        assert_allclose(res.x, 0.5)
+
+    @pytest.mark.skip("Not a test")
+    def test_f0_min_variance_1D(self):
+        """Return a minimum on a perfectly symmetric 1D problem, based on
+        gh10538"""
+
+        def fun(x):
+            return x * (x - 1.0) * (x - 0.5)
+
+        bounds = [(0, 1)]
+        res = shgo(fun, bounds=bounds)
+        ref = minimize_scalar(fun, bounds=bounds[0])
+        assert res.success
+        assert_allclose(res.fun, ref.fun)
+        assert_allclose(res.x, ref.x, rtol=1e-6)
+
+
+# Argument test functions
+class TestShgoArguments:
+    def test_1_1_simpl_iter(self):
+        """Iterative simplicial sampling on TestFunction 1 (multivariate)"""
+        run_test(test1_2, n=None, iters=2, sampling_method='simplicial')
+
+    def test_1_2_simpl_iter(self):
+        """Iterative simplicial on TestFunction 2 (univariate)"""
+        options = {'minimize_every_iter': False}
+        run_test(test2_1, n=None, iters=9, options=options,
+                 sampling_method='simplicial')
+
+    def test_2_1_sobol_iter(self):
+        """Iterative Sobol sampling on TestFunction 1 (multivariate)"""
+        run_test(test1_2, n=None, iters=1, sampling_method='sobol')
+
+    def test_2_2_sobol_iter(self):
+        """Iterative Sobol sampling on TestFunction 2 (univariate)"""
+        res = shgo(test2_1.f, test2_1.bounds, constraints=test2_1.cons,
+                   n=None, iters=1, sampling_method='sobol')
+
+        numpy.testing.assert_allclose(res.x, test2_1.expected_x, rtol=1e-5,
+                                      atol=1e-5)
+
numpy.testing.assert_allclose(res.fun, test2_1.expected_fun, atol=1e-5) + + def test_3_1_disp_simplicial(self): + """Iterative sampling on TestFunction 1 and 2 (multi and univariate) + """ + + def callback_func(x): + print("Local minimization callback test") + + for test in [test1_1, test2_1]: + shgo(test.f, test.bounds, iters=1, + sampling_method='simplicial', + callback=callback_func, options={'disp': True}) + shgo(test.f, test.bounds, n=1, sampling_method='simplicial', + callback=callback_func, options={'disp': True}) + + def test_3_2_disp_sobol(self): + """Iterative sampling on TestFunction 1 and 2 (multi and univariate)""" + + def callback_func(x): + print("Local minimization callback test") + + for test in [test1_1, test2_1]: + shgo(test.f, test.bounds, iters=1, sampling_method='sobol', + callback=callback_func, options={'disp': True}) + + shgo(test.f, test.bounds, n=1, sampling_method='simplicial', + callback=callback_func, options={'disp': True}) + + def test_args_gh14589(self): + """Using `args` used to cause `shgo` to fail; see #14589, #15986, + #16506""" + res = shgo(func=lambda x, y, z: x * z + y, bounds=[(0, 3)], args=(1, 2) + ) + ref = shgo(func=lambda x: 2 * x + 1, bounds=[(0, 3)]) + assert_allclose(res.fun, ref.fun) + assert_allclose(res.x, ref.x) + + @pytest.mark.slow + def test_4_1_known_f_min(self): + """Test known function minima stopping criteria""" + # Specify known function value + options = {'f_min': test4_1.expected_fun, + 'f_tol': 1e-6, + 'minimize_every_iter': True} + # TODO: Make default n higher for faster tests + run_test(test4_1, n=None, test_atol=1e-5, options=options, + sampling_method='simplicial') + + @pytest.mark.slow + def test_4_2_known_f_min(self): + """Test Global mode limiting local evaluations""" + options = { # Specify known function value + 'f_min': test4_1.expected_fun, + 'f_tol': 1e-6, + # Specify number of local iterations to perform + 'minimize_every_iter': True, + 'local_iter': 1} + + run_test(test4_1, n=None, test_atol=1e-5, options=options, + sampling_method='simplicial') + + def test_4_4_known_f_min(self): + """Test Global mode limiting local evaluations for 1D funcs""" + options = { # Specify known function value + 'f_min': test2_1.expected_fun, + 'f_tol': 1e-6, + # Specify number of local iterations to perform+ + 'minimize_every_iter': True, + 'local_iter': 1, + 'infty_constraints': False} + + res = shgo(test2_1.f, test2_1.bounds, constraints=test2_1.cons, + n=None, iters=None, options=options, + sampling_method='sobol') + numpy.testing.assert_allclose(res.x, test2_1.expected_x, rtol=1e-5, + atol=1e-5) + + def test_5_1_simplicial_argless(self): + """Test Default simplicial sampling settings on TestFunction 1""" + res = shgo(test1_1.f, test1_1.bounds, constraints=test1_1.cons) + numpy.testing.assert_allclose(res.x, test1_1.expected_x, rtol=1e-5, + atol=1e-5) + + def test_5_2_sobol_argless(self): + """Test Default sobol sampling settings on TestFunction 1""" + res = shgo(test1_1.f, test1_1.bounds, constraints=test1_1.cons, + sampling_method='sobol') + numpy.testing.assert_allclose(res.x, test1_1.expected_x, rtol=1e-5, + atol=1e-5) + + def test_6_1_simplicial_max_iter(self): + """Test that maximum iteration option works on TestFunction 3""" + options = {'max_iter': 2} + res = shgo(test3_1.f, test3_1.bounds, constraints=test3_1.cons, + options=options, sampling_method='simplicial') + numpy.testing.assert_allclose(res.x, test3_1.expected_x, rtol=1e-5, + atol=1e-5) + numpy.testing.assert_allclose(res.fun, test3_1.expected_fun, atol=1e-5) + + 
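# Editor's sketch (hypothetical helper, not collected by pytest): `min_iter`
+    # and `max_iter` are both plain `options` entries, so, under that
+    # assumption, they can be combined in a single call:
+    def _example_iter_options(self):
+        options = {'min_iter': 1, 'max_iter': 3}
+        return shgo(test3_1.f, test3_1.bounds, constraints=test3_1.cons,
+                    options=options, sampling_method='simplicial')
+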
+    def test_6_2_simplicial_min_iter(self):
+        """Test that minimum iteration option works on TestFunction 3"""
+        options = {'min_iter': 2}
+        res = shgo(test3_1.f, test3_1.bounds, constraints=test3_1.cons,
+                   options=options, sampling_method='simplicial')
+        numpy.testing.assert_allclose(res.x, test3_1.expected_x, rtol=1e-5,
+                                      atol=1e-5)
+        numpy.testing.assert_allclose(res.fun, test3_1.expected_fun, atol=1e-5)
+
+    def test_7_1_minkwargs(self):
+        """Test the minimizer_kwargs arguments for solvers with constraints"""
+        # Test solvers
+        for solver in ['COBYLA', 'SLSQP']:
+            # Note that passing global constraints to SLSQP is tested in other
+            # unittests which run test4_1 normally
+            minimizer_kwargs = {'method': solver,
+                                'constraints': test3_1.cons}
+            run_test(test3_1, n=100, test_atol=1e-3,
+                     minimizer_kwargs=minimizer_kwargs,
+                     sampling_method='sobol')
+
+    def test_7_2_minkwargs(self):
+        """Test the minimizer_kwargs default inits"""
+        minimizer_kwargs = {'ftol': 1e-5}
+        options = {'disp': True}  # For coverage purposes
+        SHGO(test3_1.f, test3_1.bounds, constraints=test3_1.cons[0],
+             minimizer_kwargs=minimizer_kwargs, options=options)
+
+    def test_7_3_minkwargs(self):
+        """Test minimizer_kwargs arguments for solvers without constraints"""
+        for solver in ['Nelder-Mead', 'Powell', 'CG', 'BFGS', 'Newton-CG',
+                       'L-BFGS-B', 'TNC', 'dogleg', 'trust-ncg', 'trust-exact',
+                       'trust-krylov']:
+            def jac(x):
+                return numpy.array([2 * x[0], 2 * x[1]]).T
+
+            def hess(x):
+                return numpy.array([[2, 0], [0, 2]])
+
+            minimizer_kwargs = {'method': solver,
+                                'jac': jac,
+                                'hess': hess}
+            logging.info(f"Solver = {solver}")
+            logging.info("=" * 100)
+            run_test(test1_1, n=100, test_atol=1e-3,
+                     minimizer_kwargs=minimizer_kwargs,
+                     sampling_method='sobol')
+
+    def test_8_homology_group_diff(self):
+        options = {'minhgrd': 1,
+                   'minimize_every_iter': True}
+
+        run_test(test1_1, n=None, iters=None, options=options,
+                 sampling_method='simplicial')
+
+    def test_9_cons_g(self):
+        """Test single function constraint passing"""
+        SHGO(test3_1.f, test3_1.bounds, constraints=test3_1.cons[0])
+
+    @pytest.mark.xfail(IS_PYPY and sys.platform == 'win32',
+                       reason="Failing and fix in PyPy not planned (see gh-18632)")
+    def test_10_finite_time(self):
+        """Test that the maxtime option stops the routine early"""
+        options = {'maxtime': 1e-15}
+
+        def f(x):
+            time.sleep(1e-14)
+            return 0.0
+
+        res = shgo(f, test1_1.bounds, iters=5, options=options)
+        # Assert that only 1 rather than 5 requested iterations ran:
+        assert res.nit == 1
+
+    def test_11_f_min_0(self):
+        """Test to cover the case where f_lowest == 0"""
+        options = {'f_min': 0.0,
+                   'disp': True}
+        res = shgo(test1_2.f, test1_2.bounds, n=10, iters=None,
+                   options=options, sampling_method='sobol')
+        numpy.testing.assert_equal(0, res.x[0])
+        numpy.testing.assert_equal(0, res.x[1])
+
+    # @nottest
+    @pytest.mark.skip(reason="no way of currently testing this")
+    def test_12_sobol_inf_cons(self):
+        """Test to cover the case where f_lowest == 0"""
+        # TODO: This test doesn't cover anything new, it is unknown what the
+        # original test was intended for as it was never complete. Delete or
+        # replace in the future.
+ options = {'maxtime': 1e-15, + 'f_min': 0.0} + res = shgo(test1_2.f, test1_2.bounds, n=1, iters=None, + options=options, sampling_method='sobol') + numpy.testing.assert_equal(0.0, res.fun) + + def test_13_high_sobol(self): + """Test init of high-dimensional sobol sequences""" + + def f(x): + return 0 + + bounds = [(None, None), ] * 41 + SHGOc = SHGO(f, bounds, sampling_method='sobol') + # SHGOc.sobol_points(2, 50) + SHGOc.sampling_function(2, 50) + + def test_14_local_iter(self): + """Test limited local iterations for a pseudo-global mode""" + options = {'local_iter': 4} + run_test(test5_1, n=60, options=options) + + def test_15_min_every_iter(self): + """Test minimize every iter options and cover function cache""" + options = {'minimize_every_iter': True} + run_test(test1_1, n=1, iters=7, options=options, + sampling_method='sobol') + + def test_16_disp_bounds_minimizer(self, capsys): + """Test disp=True with minimizers that do not support bounds """ + options = {'disp': True} + minimizer_kwargs = {'method': 'nelder-mead'} + run_test(test1_2, sampling_method='simplicial', + options=options, minimizer_kwargs=minimizer_kwargs) + + def test_17_custom_sampling(self): + """Test the functionality to add custom sampling methods to shgo""" + + def sample(n, d): + return numpy.random.uniform(size=(n, d)) + + run_test(test1_1, n=30, sampling_method=sample) + + def test_18_bounds_class(self): + # test that new and old bounds yield same result + def f(x): + return numpy.square(x).sum() + + lb = [-6., 1., -5.] + ub = [-1., 3., 5.] + bounds_old = list(zip(lb, ub)) + bounds_new = Bounds(lb, ub) + + res_old_bounds = shgo(f, bounds_old) + res_new_bounds = shgo(f, bounds_new) + + assert res_new_bounds.nfev == res_old_bounds.nfev + assert res_new_bounds.message == res_old_bounds.message + assert res_new_bounds.success == res_old_bounds.success + x_opt = numpy.array([-1., 1., 0.]) + numpy.testing.assert_allclose(res_new_bounds.x, x_opt) + numpy.testing.assert_allclose(res_new_bounds.x, + res_old_bounds.x) + + def test_19_parallelization(self): + """Test the functionality to add custom sampling methods to shgo""" + + with Pool(2) as p: + run_test(test1_1, n=30, workers=p.map) # Constrained + run_test(test1_1, n=30, workers=map) # Constrained + with Pool(2) as p: + run_test(test_s, n=30, workers=p.map) # Unconstrained + run_test(test_s, n=30, workers=map) # Unconstrained + + def test_20_constrained_args(self): + """Test that constraints can be passed to arguments""" + + def eggholder(x): + return (-(x[1] + 47.0) + * numpy.sin(numpy.sqrt(abs(x[0] / 2.0 + (x[1] + 47.0)))) + - x[0] * numpy.sin(numpy.sqrt(abs(x[0] - (x[1] + 47.0)))) + ) + + def f(x): # (cattle-feed) + return 24.55 * x[0] + 26.75 * x[1] + 39 * x[2] + 40.50 * x[3] + + bounds = [(0, 1.0), ] * 4 + + def g1_modified(x, i): + return i * 2.3 * x[0] + i * 5.6 * x[1] + 11.1 * x[2] + 1.3 * x[ + 3] - 5 # >=0 + + def g2(x): + return (12 * x[0] + 11.9 * x[1] + 41.8 * x[2] + 52.1 * x[3] - 21 + - 1.645 * numpy.sqrt(0.28 * x[0] ** 2 + 0.19 * x[1] ** 2 + + 20.5 * x[2] ** 2 + 0.62 * x[3] ** 2) + ) # >=0 + + def h1(x): + return x[0] + x[1] + x[2] + x[3] - 1 # == 0 + + cons = ({'type': 'ineq', 'fun': g1_modified, "args": (0,)}, + {'type': 'ineq', 'fun': g2}, + {'type': 'eq', 'fun': h1}) + + shgo(f, bounds, n=300, iters=1, constraints=cons) + # using constrain with arguments AND sampling method sobol + shgo(f, bounds, n=300, iters=1, constraints=cons, + sampling_method='sobol') + + def test_21_1_jac_true(self): + """Test that shgo can handle objective functions that 
return the + gradient alongside the objective value. Fixes gh-13547""" + # previous + def func(x): + return numpy.sum(numpy.power(x, 2)), 2 * x + + shgo( + func, + bounds=[[-1, 1], [1, 2]], + n=100, iters=5, + sampling_method="sobol", + minimizer_kwargs={'method': 'SLSQP', 'jac': True} + ) + + # new + def func(x): + return numpy.sum(x ** 2), 2 * x + + bounds = [[-1, 1], [1, 2], [-1, 1], [1, 2], [0, 3]] + + res = shgo(func, bounds=bounds, sampling_method="sobol", + minimizer_kwargs={'method': 'SLSQP', 'jac': True}) + ref = minimize(func, x0=[1, 1, 1, 1, 1], bounds=bounds, + jac=True) + assert res.success + assert_allclose(res.fun, ref.fun) + assert_allclose(res.x, ref.x, atol=1e-15) + + @pytest.mark.parametrize('derivative', ['jac', 'hess', 'hessp']) + def test_21_2_derivative_options(self, derivative): + """shgo used to raise an error when passing `options` with 'jac' + # see gh-12963. check that this is resolved + """ + + def objective(x): + return 3 * x[0] * x[0] + 2 * x[0] + 5 + + def gradient(x): + return 6 * x[0] + 2 + + def hess(x): + return 6 + + def hessp(x, p): + return 6 * p + + derivative_funcs = {'jac': gradient, 'hess': hess, 'hessp': hessp} + options = {derivative: derivative_funcs[derivative]} + minimizer_kwargs = {'method': 'trust-constr'} + + bounds = [(-100, 100)] + res = shgo(objective, bounds, minimizer_kwargs=minimizer_kwargs, + options=options) + ref = minimize(objective, x0=[0], bounds=bounds, **minimizer_kwargs, + **options) + + assert res.success + numpy.testing.assert_allclose(res.fun, ref.fun) + numpy.testing.assert_allclose(res.x, ref.x) + + def test_21_3_hess_options_rosen(self): + """Ensure the Hessian gets passed correctly to the local minimizer + routine. Previous report gh-14533. + """ + bounds = [(0, 1.6), (0, 1.6), (0, 1.4), (0, 1.4), (0, 1.4)] + options = {'jac': rosen_der, 'hess': rosen_hess} + minimizer_kwargs = {'method': 'Newton-CG'} + res = shgo(rosen, bounds, minimizer_kwargs=minimizer_kwargs, + options=options) + ref = minimize(rosen, numpy.zeros(5), method='Newton-CG', + **options) + assert res.success + assert_allclose(res.fun, ref.fun) + assert_allclose(res.x, ref.x, atol=1e-15) + + def test_21_arg_tuple_sobol(self): + """shgo used to raise an error when passing `args` with Sobol sampling + # see gh-12114. 
check that this is resolved""" + + def fun(x, k): + return x[0] ** k + + constraints = ({'type': 'ineq', 'fun': lambda x: x[0] - 1}) + + bounds = [(0, 10)] + res = shgo(fun, bounds, args=(1,), constraints=constraints, + sampling_method='sobol') + ref = minimize(fun, numpy.zeros(1), bounds=bounds, args=(1,), + constraints=constraints) + assert res.success + assert_allclose(res.fun, ref.fun) + assert_allclose(res.x, ref.x) + + +# Failure test functions +class TestShgoFailures: + def test_1_maxiter(self): + """Test failure on insufficient iterations""" + options = {'maxiter': 2} + res = shgo(test4_1.f, test4_1.bounds, n=2, iters=None, + options=options, sampling_method='sobol') + + numpy.testing.assert_equal(False, res.success) + # numpy.testing.assert_equal(4, res.nfev) + numpy.testing.assert_equal(4, res.tnev) + + def test_2_sampling(self): + """Rejection of unknown sampling method""" + assert_raises(ValueError, shgo, test1_1.f, test1_1.bounds, + sampling_method='not_Sobol') + + def test_3_1_no_min_pool_sobol(self): + """Check that the routine stops when no minimiser is found + after maximum specified function evaluations""" + options = {'maxfev': 10, + # 'maxev': 10, + 'disp': True} + res = shgo(test_table.f, test_table.bounds, n=3, options=options, + sampling_method='sobol') + numpy.testing.assert_equal(False, res.success) + # numpy.testing.assert_equal(9, res.nfev) + numpy.testing.assert_equal(12, res.nfev) + + def test_3_2_no_min_pool_simplicial(self): + """Check that the routine stops when no minimiser is found + after maximum specified sampling evaluations""" + options = {'maxev': 10, + 'disp': True} + res = shgo(test_table.f, test_table.bounds, n=3, options=options, + sampling_method='simplicial') + numpy.testing.assert_equal(False, res.success) + + def test_4_1_bound_err(self): + """Specified bounds ub > lb""" + bounds = [(6, 3), (3, 5)] + assert_raises(ValueError, shgo, test1_1.f, bounds) + + def test_4_2_bound_err(self): + """Specified bounds are of the form (lb, ub)""" + bounds = [(3, 5, 5), (3, 5)] + assert_raises(ValueError, shgo, test1_1.f, bounds) + + def test_5_1_1_infeasible_sobol(self): + """Ensures the algorithm terminates on infeasible problems + after maxev is exceeded. Use infty constraints option""" + options = {'maxev': 100, + 'disp': True} + + res = shgo(test_infeasible.f, test_infeasible.bounds, + constraints=test_infeasible.cons, n=100, options=options, + sampling_method='sobol') + + numpy.testing.assert_equal(False, res.success) + + def test_5_1_2_infeasible_sobol(self): + """Ensures the algorithm terminates on infeasible problems + after maxev is exceeded. 
Do not use infty constraints option"""
+        options = {'maxev': 100,
+                   'disp': True,
+                   'infty_constraints': False}
+
+        res = shgo(test_infeasible.f, test_infeasible.bounds,
+                   constraints=test_infeasible.cons, n=100, options=options,
+                   sampling_method='sobol')
+
+        numpy.testing.assert_equal(False, res.success)
+
+    def test_5_2_infeasible_simplicial(self):
+        """Ensures the algorithm terminates on infeasible problems
+        after maxev is exceeded."""
+        options = {'maxev': 1000,
+                   'disp': False}
+
+        res = shgo(test_infeasible.f, test_infeasible.bounds,
+                   constraints=test_infeasible.cons, n=100, options=options,
+                   sampling_method='simplicial')
+
+        numpy.testing.assert_equal(False, res.success)
+
+    def test_6_1_lower_known_f_min(self):
+        """Test Global mode limiting local evaluations with f* too high"""
+        options = {  # Specify known function value
+            'f_min': test2_1.expected_fun + 2.0,
+            'f_tol': 1e-6,
+            # Specify number of local iterations to perform
+            'minimize_every_iter': True,
+            'local_iter': 1,
+            'infty_constraints': False}
+        args = (test2_1.f, test2_1.bounds)
+        kwargs = {'constraints': test2_1.cons,
+                  'n': None,
+                  'iters': None,
+                  'options': options,
+                  'sampling_method': 'sobol'
+                  }
+        warns(UserWarning, shgo, *args, **kwargs)
+
+    def test(self):
+        from scipy.optimize import rosen, shgo
+        bounds = [(0, 2), (0, 2), (0, 2), (0, 2), (0, 2)]
+
+        def fun(x):
+            fun.nfev += 1
+            return rosen(x)
+
+        fun.nfev = 0
+
+        result = shgo(fun, bounds)
+        print(result.x, result.fun, fun.nfev)  # 50
+
+
+# Returns
+class TestShgoReturns:
+    def test_1_nfev_simplicial(self):
+        bounds = [(0, 2), (0, 2), (0, 2), (0, 2), (0, 2)]
+
+        def fun(x):
+            fun.nfev += 1
+            return rosen(x)
+
+        fun.nfev = 0
+
+        result = shgo(fun, bounds)
+        numpy.testing.assert_equal(fun.nfev, result.nfev)
+
+    def test_1_nfev_sobol(self):
+        bounds = [(0, 2), (0, 2), (0, 2), (0, 2), (0, 2)]
+
+        def fun(x):
+            fun.nfev += 1
+            return rosen(x)
+
+        fun.nfev = 0
+
+        result = shgo(fun, bounds, sampling_method='sobol')
+        numpy.testing.assert_equal(fun.nfev, result.nfev)
+
+
+def test_vector_constraint():
+    # gh15514
+    def quad(x):
+        x = np.asarray(x)
+        return [np.sum(x ** 2)]
+
+    nlc = NonlinearConstraint(quad, [2.2], [3])
+    oldc = new_constraint_to_old(nlc, np.array([1.0, 1.0]))
+
+    res = shgo(rosen, [(0, 10), (0, 10)], constraints=oldc, sampling_method='sobol')
+    assert np.all(np.sum((res.x)**2) >= 2.2)
+    assert np.all(np.sum((res.x) ** 2) <= 3.0)
+    assert res.success
+
+
+@pytest.mark.filterwarnings("ignore:delta_grad")
+def test_trust_constr():
+    def quad(x):
+        x = np.asarray(x)
+        return [np.sum(x ** 2)]
+
+    nlc = NonlinearConstraint(quad, [2.6], [3])
+    minimizer_kwargs = {'method': 'trust-constr'}
+    # note that we don't supply the constraints in minimizer_kwargs,
+    # so if the final result obeys the constraints we know that shgo
+    # passed them on to 'trust-constr'
+    res = shgo(
+        rosen,
+        [(0, 10), (0, 10)],
+        constraints=nlc,
+        sampling_method='sobol',
+        minimizer_kwargs=minimizer_kwargs
+    )
+    assert np.all(np.sum((res.x)**2) >= 2.6)
+    assert np.all(np.sum((res.x) ** 2) <= 3.0)
+    assert res.success
+
+
+def test_equality_constraints():
+    # gh16260
+    bounds = [(0.9, 4.0)] * 2  # box bounds on the two variables
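+    # Editor's sketch: in the new-style API an equality constraint is simply a
+    # NonlinearConstraint whose lower and upper limits coincide, as with the
+    # 3.9 == 3.9 pair used below.
+    example_eq = NonlinearConstraint(lambda x: x[0] + x[1], 3.9, 3.9)
+    assert example_eq.lb == example_eq.ub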
+ + def faulty(x): + return x[0] + x[1] + + nlc = NonlinearConstraint(faulty, 3.9, 3.9) + res = shgo(rosen, bounds=bounds, constraints=nlc) + assert_allclose(np.sum(res.x), 3.9) + + def faulty(x): + return x[0] + x[1] - 3.9 + + constraints = {'type': 'eq', 'fun': faulty} + res = shgo(rosen, bounds=bounds, constraints=constraints) + assert_allclose(np.sum(res.x), 3.9) + + bounds = [(0, 1.0)] * 4 + # sum of variable should equal 1. + def faulty(x): + return x[0] + x[1] + x[2] + x[3] - 1 + + # options = {'minimize_every_iter': True, 'local_iter':10} + constraints = {'type': 'eq', 'fun': faulty} + res = shgo( + lambda x: - np.prod(x), + bounds=bounds, + constraints=constraints, + sampling_method='sobol' + ) + assert_allclose(np.sum(res.x), 1.0) + +def test_gh16971(): + def cons(x): + return np.sum(x**2) - 0 + + c = {'fun': cons, 'type': 'ineq'} + minimizer_kwargs = { + 'method': 'COBYLA', + 'options': {'rhobeg': 5, 'tol': 5e-1, 'catol': 0.05} + } + + s = SHGO( + rosen, [(0, 10)]*2, constraints=c, minimizer_kwargs=minimizer_kwargs + ) + + assert s.minimizer_kwargs['method'].lower() == 'cobyla' + assert s.minimizer_kwargs['options']['catol'] == 0.05 diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/tests/test__spectral.py b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test__spectral.py new file mode 100644 index 0000000000000000000000000000000000000000..7b4dc52cc20caf0206fe53933d4dfc6d0fbb2c34 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test__spectral.py @@ -0,0 +1,226 @@ +import itertools + +import numpy as np +from numpy import exp +from numpy.testing import assert_, assert_equal + +from scipy.optimize import root + + +def test_performance(): + # Compare performance results to those listed in + # [Cheng & Li, IMA J. Num. An. 29, 814 (2008)] + # and + # [W. La Cruz, J.M. Martinez, M. Raydan, Math. Comp. 75, 1429 (2006)]. + # and those produced by dfsane.f from M. Raydan's website. + # + # Where the results disagree, the largest limits are taken. + + e_a = 1e-5 + e_r = 1e-4 + + table_1 = [ + dict(F=F_1, x0=x0_1, n=1000, nit=5, nfev=5), + dict(F=F_1, x0=x0_1, n=10000, nit=2, nfev=2), + dict(F=F_2, x0=x0_2, n=500, nit=11, nfev=11), + dict(F=F_2, x0=x0_2, n=2000, nit=11, nfev=11), + # dict(F=F_4, x0=x0_4, n=999, nit=243, nfev=1188) removed: + # too sensitive to rounding errors + # Results from dfsane.f; papers list nit=3, nfev=3 + dict(F=F_6, x0=x0_6, n=100, nit=6, nfev=6), + # Must have n%3==0, typo in papers? + dict(F=F_7, x0=x0_7, n=99, nit=23, nfev=29), + # Must have n%3==0, typo in papers? + dict(F=F_7, x0=x0_7, n=999, nit=23, nfev=29), + # Results from dfsane.f; papers list nit=nfev=6? 
+ dict(F=F_9, x0=x0_9, n=100, nit=12, nfev=18), + dict(F=F_9, x0=x0_9, n=1000, nit=12, nfev=18), + # Results from dfsane.f; papers list nit=2, nfev=12 + dict(F=F_10, x0=x0_10, n=1000, nit=5, nfev=5), + ] + + # Check also scaling invariance + for xscale, yscale, line_search in itertools.product( + [1.0, 1e-10, 1e10], [1.0, 1e-10, 1e10], ['cruz', 'cheng'] + ): + for problem in table_1: + n = problem['n'] + def func(x, n): + return yscale * problem['F'](x / xscale, n) + args = (n,) + x0 = problem['x0'](n) * xscale + + fatol = np.sqrt(n) * e_a * yscale + e_r * np.linalg.norm(func(x0, n)) + + sigma_eps = 1e-10 * min(yscale/xscale, xscale/yscale) + sigma_0 = xscale/yscale + + with np.errstate(over='ignore'): + sol = root(func, x0, args=args, + options=dict(ftol=0, fatol=fatol, maxfev=problem['nfev'] + 1, + sigma_0=sigma_0, sigma_eps=sigma_eps, + line_search=line_search), + method='DF-SANE') + + err_msg = repr( + [xscale, yscale, line_search, problem, np.linalg.norm(func(sol.x, n)), + fatol, sol.success, sol.nit, sol.nfev] + ) + assert sol.success, err_msg + # nfev+1: dfsane.f doesn't count first eval + assert sol.nfev <= problem['nfev'] + 1, err_msg + assert sol.nit <= problem['nit'], err_msg + assert np.linalg.norm(func(sol.x, n)) <= fatol, err_msg + + +def test_complex(): + def func(z): + return z**2 - 1 + 2j + x0 = 2.0j + + ftol = 1e-4 + sol = root(func, x0, tol=ftol, method='DF-SANE') + + assert_(sol.success) + + f0 = np.linalg.norm(func(x0)) + fx = np.linalg.norm(func(sol.x)) + assert_(fx <= ftol*f0) + + +def test_linear_definite(): + # The DF-SANE paper proves convergence for "strongly isolated" + # solutions. + # + # For linear systems F(x) = A x - b = 0, with A positive or + # negative definite, the solution is strongly isolated. + + def check_solvability(A, b, line_search='cruz'): + def func(x): + return A.dot(x) - b + xp = np.linalg.solve(A, b) + eps = np.linalg.norm(func(xp)) * 1e3 + sol = root( + func, b, + options=dict(fatol=eps, ftol=0, maxfev=17523, line_search=line_search), + method='DF-SANE', + ) + assert_(sol.success) + assert_(np.linalg.norm(func(sol.x)) <= eps) + + n = 90 + + # Test linear pos.def. system + np.random.seed(1234) + A = np.arange(n*n).reshape(n, n) + A = A + n*n * np.diag(1 + np.arange(n)) + assert_(np.linalg.eigvals(A).min() > 0) + b = np.arange(n) * 1.0 + check_solvability(A, b, 'cruz') + check_solvability(A, b, 'cheng') + + # Test linear neg.def. system + check_solvability(-A, b, 'cruz') + check_solvability(-A, b, 'cheng') + + +def test_shape(): + def f(x, arg): + return x - arg + + for dt in [float, complex]: + x = np.zeros([2,2]) + arg = np.ones([2,2], dtype=dt) + + sol = root(f, x, args=(arg,), method='DF-SANE') + assert_(sol.success) + assert_equal(sol.x.shape, x.shape) + + +# Some of the test functions and initial guesses listed in +# [W. La Cruz, M. Raydan. Optimization Methods and Software, 18, 583 (2003)] + +def F_1(x, n): + g = np.zeros([n]) + i = np.arange(2, n+1) + g[0] = exp(x[0] - 1) - 1 + g[1:] = i*(exp(x[1:] - 1) - x[1:]) + return g + +def x0_1(n): + x0 = np.empty([n]) + x0.fill(n/(n-1)) + return x0 + +def F_2(x, n): + g = np.zeros([n]) + i = np.arange(2, n+1) + g[0] = exp(x[0]) - 1 + g[1:] = 0.1*i*(exp(x[1:]) + x[:-1] - 1) + return g + +def x0_2(n): + x0 = np.empty([n]) + x0.fill(1/n**2) + return x0 + + +def F_4(x, n): # skip name check + assert_equal(n % 3, 0) + g = np.zeros([n]) + # Note: the first line is typoed in some of the references; + # correct in original [Gasparo, Optimization Meth. 
13, 79 (2000)] + g[::3] = 0.6 * x[::3] + 1.6 * x[1::3]**3 - 7.2 * x[1::3]**2 + 9.6 * x[1::3] - 4.8 + g[1::3] = (0.48 * x[::3] - 0.72 * x[1::3]**3 + 3.24 * x[1::3]**2 - 4.32 * x[1::3] + - x[2::3] + 0.2 * x[2::3]**3 + 2.16) + g[2::3] = 1.25 * x[2::3] - 0.25*x[2::3]**3 + return g + + +def x0_4(n): # skip name check + assert_equal(n % 3, 0) + x0 = np.array([-1, 1/2, -1] * (n//3)) + return x0 + +def F_6(x, n): + c = 0.9 + mu = (np.arange(1, n+1) - 0.5)/n + return x - 1/(1 - c/(2*n) * (mu[:,None]*x / (mu[:,None] + mu)).sum(axis=1)) + +def x0_6(n): + return np.ones([n]) + +def F_7(x, n): + assert_equal(n % 3, 0) + + def phi(t): + v = 0.5*t - 2 + v[t > -1] = ((-592*t**3 + 888*t**2 + 4551*t - 1924)/1998)[t > -1] + v[t >= 2] = (0.5*t + 2)[t >= 2] + return v + g = np.zeros([n]) + g[::3] = 1e4 * x[1::3]**2 - 1 + g[1::3] = exp(-x[::3]) + exp(-x[1::3]) - 1.0001 + g[2::3] = phi(x[2::3]) + return g + +def x0_7(n): + assert_equal(n % 3, 0) + return np.array([1e-3, 18, 1] * (n//3)) + +def F_9(x, n): + g = np.zeros([n]) + i = np.arange(2, n) + g[0] = x[0]**3/3 + x[1]**2/2 + g[1:-1] = -x[1:-1]**2/2 + i*x[1:-1]**3/3 + x[2:]**2/2 + g[-1] = -x[-1]**2/2 + n*x[-1]**3/3 + return g + +def x0_9(n): + return np.ones([n]) + +def F_10(x, n): + return np.log(1 + x) - x/n + +def x0_10(n): + return np.ones([n]) diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_bracket.py b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_bracket.py new file mode 100644 index 0000000000000000000000000000000000000000..0a55a8b13d6b82108bd65b8ff2d08076a290af8c --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_bracket.py @@ -0,0 +1,780 @@ +import pytest + +import numpy as np +from numpy.testing import assert_array_less, assert_allclose, assert_equal + +from scipy.optimize._bracket import _bracket_root, _bracket_minimum, _ELIMITS +import scipy._lib._elementwise_iterative_method as eim +from scipy import stats + +class TestBracketRoot: + @pytest.mark.parametrize("seed", (615655101, 3141866013, 238075752)) + @pytest.mark.parametrize("use_xmin", (False, True)) + @pytest.mark.parametrize("other_side", (False, True)) + @pytest.mark.parametrize("fix_one_side", (False, True)) + def test_nfev_expected(self, seed, use_xmin, other_side, fix_one_side): + # Property-based test to confirm that _bracket_root is behaving as + # expected. The basic case is when root < a < b. + # The number of times bracket expands (per side) can be found by + # setting the expression for the left endpoint of the bracket to the + # root of f (x=0), solving for i, and rounding up. The corresponding + # lower and upper ends of the bracket are found by plugging this back + # into the expression for the ends of the bracket. 
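+        # (Editor's worked step, making that algebra explicit: the left end
+        # after i expansions is xl_i = xr0 - d*factor**i, so xl_i <= 0 exactly
+        # when factor**i >= xr0/d, i.e. i >= log(xr0/d)/log(factor); rounding
+        # up gives the `n = np.ceil(...)` computed below.)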
+ # `other_side=True` is the case that a < b < root + # Special cases like a < root < b are tested separately + + rng = np.random.default_rng(seed) + xl0, d, factor = rng.random(size=3) * [1e5, 10, 5] + factor = 1 + factor # factor must be greater than 1 + xr0 = xl0 + d # xr0 must be greater than a in basic case + + def f(x): + f.count += 1 + return x # root is 0 + + if use_xmin: + xmin = -rng.random() + n = np.ceil(np.log(-(xl0 - xmin) / xmin) / np.log(factor)) + l, u = xmin + (xl0 - xmin)*factor**-n, xmin + (xl0 - xmin)*factor**-(n - 1) + kwargs = dict(xl0=xl0, xr0=xr0, factor=factor, xmin=xmin) + else: + n = np.ceil(np.log(xr0/d) / np.log(factor)) + l, u = xr0 - d*factor**n, xr0 - d*factor**(n-1) + kwargs = dict(xl0=xl0, xr0=xr0, factor=factor) + + if other_side: + kwargs['xl0'], kwargs['xr0'] = -kwargs['xr0'], -kwargs['xl0'] + l, u = -u, -l + if 'xmin' in kwargs: + kwargs['xmax'] = -kwargs.pop('xmin') + + if fix_one_side: + if other_side: + kwargs['xmin'] = -xr0 + else: + kwargs['xmax'] = xr0 + + f.count = 0 + res = _bracket_root(f, **kwargs) + + # Compare reported number of function evaluations `nfev` against + # reported `nit`, actual function call count `f.count`, and theoretical + # number of expansions `n`. + # When both sides are free, these get multiplied by 2 because function + # is evaluated on the left and the right each iteration. + # When one side is fixed, however, we add one: on the right side, the + # function gets evaluated once at b. + # Add 1 to `n` and `res.nit` because function evaluations occur at + # iterations *0*, 1, ..., `n`. Subtract 1 from `f.count` because + # function is called separately for left and right in iteration 0. + if not fix_one_side: + assert res.nfev == 2*(res.nit+1) == 2*(f.count-1) == 2*(n + 1) + else: + assert res.nfev == (res.nit+1)+1 == (f.count-1)+1 == (n+1)+1 + + # Compare reported bracket to theoretical bracket and reported function + # values to function evaluated at bracket. + bracket = np.asarray([res.xl, res.xr]) + assert_allclose(bracket, (l, u)) + f_bracket = np.asarray([res.fl, res.fr]) + assert_allclose(f_bracket, f(bracket)) + + # Check that bracket is valid and that status and success are correct + assert res.xr > res.xl + signs = np.sign(f_bracket) + assert signs[0] == -signs[1] + assert res.status == 0 + assert res.success + + def f(self, q, p): + return stats.norm.cdf(q) - p + + @pytest.mark.parametrize('p', [0.6, np.linspace(0.05, 0.95, 10)]) + @pytest.mark.parametrize('xmin', [-5, None]) + @pytest.mark.parametrize('xmax', [5, None]) + @pytest.mark.parametrize('factor', [1.2, 2]) + def test_basic(self, p, xmin, xmax, factor): + # Test basic functionality to bracket root (distribution PPF) + res = _bracket_root(self.f, -0.01, 0.01, xmin=xmin, xmax=xmax, + factor=factor, args=(p,)) + assert_equal(-np.sign(res.fl), np.sign(res.fr)) + + @pytest.mark.parametrize('shape', [tuple(), (12,), (3, 4), (3, 2, 2)]) + def test_vectorization(self, shape): + # Test for correct functionality, output shapes, and dtypes for various + # input shapes. 
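+ # Strategy: `np.vectorize` turns the scalar call into an elementwise + # reference; the single vectorized call below must reproduce every output + # attribute of those references, each with the broadcast shape.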
+ p = np.linspace(-0.05, 1.05, 12).reshape(shape) if shape else 0.6 + args = (p,) + maxiter = 10 + + @np.vectorize + def bracket_root_single(xl0, xr0, xmin, xmax, factor, p): + return _bracket_root(self.f, xl0, xr0, xmin=xmin, xmax=xmax, + factor=factor, args=(p,), + maxiter=maxiter) + + def f(*args, **kwargs): + f.f_evals += 1 + return self.f(*args, **kwargs) + f.f_evals = 0 + + rng = np.random.default_rng(2348234) + xl0 = -rng.random(size=shape) + xr0 = rng.random(size=shape) + xmin, xmax = 1e3*xl0, 1e3*xr0 + if shape: # make some elements unbounded + i = rng.random(size=shape) > 0.5 + xmin[i], xmax[i] = -np.inf, np.inf + factor = rng.random(size=shape) + 1.5 + res = _bracket_root(f, xl0, xr0, xmin=xmin, xmax=xmax, factor=factor, + args=args, maxiter=maxiter) + refs = bracket_root_single(xl0, xr0, xmin, xmax, factor, p).ravel() + + attrs = ['xl', 'xr', 'fl', 'fr', 'success', 'nfev', 'nit'] + for attr in attrs: + ref_attr = [getattr(ref, attr) for ref in refs] + res_attr = getattr(res, attr) + assert_allclose(res_attr.ravel(), ref_attr) + assert_equal(res_attr.shape, shape) + + assert np.issubdtype(res.success.dtype, np.bool_) + if shape: + assert np.all(res.success[1:-1]) + assert np.issubdtype(res.status.dtype, np.integer) + assert np.issubdtype(res.nfev.dtype, np.integer) + assert np.issubdtype(res.nit.dtype, np.integer) + assert_equal(np.max(res.nit), f.f_evals - 2) + assert_array_less(res.xl, res.xr) + assert_allclose(res.fl, self.f(res.xl, *args)) + assert_allclose(res.fr, self.f(res.xr, *args)) + + def test_flags(self): + # Test cases that should produce different status flags; show that all + # can be produced simultaneously. + def f(xs, js): + funcs = [lambda x: x - 1.5, + lambda x: x - 1000, + lambda x: x - 1000, + lambda x: np.nan] + + return [funcs[j](x) for x, j in zip(xs, js)] + + args = (np.arange(4, dtype=np.int64),) + res = _bracket_root(f, xl0=[-1, -1, -1, -1], xr0=[1, 1, 1, 1], + xmin=[-np.inf, -1, -np.inf, -np.inf], + xmax=[np.inf, 1, np.inf, np.inf], + args=args, maxiter=3) + + ref_flags = np.array([eim._ECONVERGED, + _ELIMITS, + eim._ECONVERR, + eim._EVALUEERR]) + assert_equal(res.status, ref_flags) + + @pytest.mark.parametrize("root", (0.622, [0.622, 0.623])) + @pytest.mark.parametrize('xmin', [-5, None]) + @pytest.mark.parametrize('xmax', [5, None]) + @pytest.mark.parametrize("dtype", (np.float16, np.float32, np.float64)) + def test_dtype(self, root, xmin, xmax, dtype): + # Test that dtypes are preserved + + xmin = xmin if xmin is None else dtype(xmin) + xmax = xmax if xmax is None else dtype(xmax) + root = dtype(root) + def f(x, root): + return ((x - root) ** 3).astype(dtype) + + bracket = np.asarray([-0.01, 0.01], dtype=dtype) + res = _bracket_root(f, *bracket, xmin=xmin, xmax=xmax, args=(root,)) + assert np.all(res.success) + assert res.xl.dtype == res.xr.dtype == dtype + assert res.fl.dtype == res.fr.dtype == dtype + + def test_input_validation(self): + # Test input validation for appropriate error messages + + message = '`func` must be callable.' + with pytest.raises(ValueError, match=message): + _bracket_root(None, -4, 4) + + message = '...must be numeric and real.'
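+ # Note: pytest applies `match` with `re.search`, so the pattern need only + # match part of the message; the leading dots are regex wildcards covering + # the varying start of the message.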
+ with pytest.raises(ValueError, match=message): + _bracket_root(lambda x: x, -4+1j, 4) + with pytest.raises(ValueError, match=message): + _bracket_root(lambda x: x, -4, 'hello') + with pytest.raises(ValueError, match=message): + _bracket_root(lambda x: x, -4, 4, xmin=np) + with pytest.raises(ValueError, match=message): + _bracket_root(lambda x: x, -4, 4, xmax=object()) + with pytest.raises(ValueError, match=message): + _bracket_root(lambda x: x, -4, 4, factor=sum) + + message = "All elements of `factor` must be greater than 1." + with pytest.raises(ValueError, match=message): + _bracket_root(lambda x: x, -4, 4, factor=0.5) + + message = '`xmin <= xl0 < xr0 <= xmax` must be True' + with pytest.raises(ValueError, match=message): + _bracket_root(lambda x: x, 4, -4) + with pytest.raises(ValueError, match=message): + _bracket_root(lambda x: x, -4, 4, xmax=np.nan) + with pytest.raises(ValueError, match=message): + _bracket_root(lambda x: x, -4, 4, xmin=10) + + message = "shape mismatch: objects cannot be broadcast" + # raised by `np.broadcast`, but the traceback is readable IMO + with pytest.raises(ValueError, match=message): + _bracket_root(lambda x: x, [-2, -3], [3, 4, 5]) + # Consider making this give a more readable error message + # with pytest.raises(ValueError, match=message): + # _bracket_root(lambda x: [x[0], x[1], x[1]], [-3, -3], [5, 5]) + + message = '`maxiter` must be a non-negative integer.' + with pytest.raises(ValueError, match=message): + _bracket_root(lambda x: x, -4, 4, maxiter=1.5) + with pytest.raises(ValueError, match=message): + _bracket_root(lambda x: x, -4, 4, maxiter=-1) + + def test_special_cases(self): + # Test edge cases and other special cases + + # Test that integers are not passed to `f` + # (otherwise this would overflow) + def f(x): + assert np.issubdtype(x.dtype, np.floating) + return x ** 99 - 1 + + res = _bracket_root(f, -7, 5) + assert res.success + + # Test maxiter = 0. Should do nothing to bracket. + def f(x): + return x - 10 + + bracket = (-3, 5) + res = _bracket_root(f, *bracket, maxiter=0) + assert (res.xl, res.xr) == bracket + assert res.nit == 0 + assert res.nfev == 2 + assert res.status == -2 + + # Test scalar `args` (not in tuple) + def f(x, c): + return c*x - 1 + + res = _bracket_root(f, -1, 1, args=3) + assert res.success + assert_allclose(res.fl, f(res.xl, 3)) + + # Test other edge cases + + def f(x): + f.count += 1 + return x + + # 1. root lies within guess of bracket + f.count = 0 + _bracket_root(f, -10, 20) + assert_equal(f.count, 2) + + # 2. bracket endpoint hits root exactly + f.count = 0 + res = _bracket_root(f, 5, 10, factor=2) + bracket = (res.xl, res.xr) + assert_equal(res.nfev, 4) + assert_allclose(bracket, (0, 5), atol=1e-15) + + # 3. bracket limit hits root exactly + with np.errstate(over='ignore'): + res = _bracket_root(f, 5, 10, xmin=0) + bracket = (res.xl, res.xr) + assert_allclose(bracket[0], 0, atol=1e-15) + with np.errstate(over='ignore'): + res = _bracket_root(f, -10, -5, xmax=0) + bracket = (res.xl, res.xr) + assert_allclose(bracket[1], 0, atol=1e-15) + + # 4.
bracket not within min, max + with np.errstate(over='ignore'): + res = _bracket_root(f, 5, 10, xmin=1) + assert not res.success + + +class TestBracketMinimum: + def init_f(self): + def f(x, a, b): + f.count += 1 + return (x - a)**2 + b + f.count = 0 + return f + + def assert_valid_bracket(self, result): + assert np.all( + (result.xl < result.xm) & (result.xm < result.xr) + ) + assert np.all( + (result.fl >= result.fm) & (result.fr > result.fm) + | (result.fl > result.fm) & (result.fr > result.fm) + ) + + def get_kwargs( + self, *, xl0=None, xr0=None, factor=None, xmin=None, xmax=None, args=() + ): + names = ("xl0", "xr0", "xmin", "xmax", "factor", "args") + return { + name: val for name, val in zip(names, (xl0, xr0, xmin, xmax, factor, args)) + if isinstance(val, np.ndarray) or np.isscalar(val) + or val not in [None, ()] + } + + @pytest.mark.parametrize( + "seed", + ( + 307448016549685229886351382450158984917, + 11650702770735516532954347931959000479, + 113767103358505514764278732330028568336, + ) + ) + @pytest.mark.parametrize("use_xmin", (False, True)) + @pytest.mark.parametrize("other_side", (False, True)) + def test_nfev_expected(self, seed, use_xmin, other_side): + rng = np.random.default_rng(seed) + args = (0, 0) # f(x) = x^2 with minimum at 0 + # xl0, xm0, xr0 are chosen such that the initial bracket is to + # the right of the minimum, and the bracket will expand + # downhill towards zero. + xl0, d1, d2, factor = rng.random(size=4) * [1e5, 10, 10, 5] + xm0 = xl0 + d1 + xr0 = xm0 + d2 + # Factor should be greater than one. + factor += 1 + + if use_xmin: + xmin = -rng.random() * 5 + n = int(np.ceil(np.log(-(xl0 - xmin) / xmin) / np.log(factor))) + lower = xmin + (xl0 - xmin)*factor**-n + middle = xmin + (xl0 - xmin)*factor**-(n-1) + upper = xmin + (xl0 - xmin)*factor**-(n-2) if n > 1 else xm0 + # It may be the case the lower is below the minimum, but we still + # don't have a valid bracket. + if middle**2 > lower**2: + n += 1 + lower, middle, upper = ( + xmin + (xl0 - xmin)*factor**-n, lower, middle + ) + else: + xmin = None + n = int(np.ceil(np.log(xl0 / d1) / np.log(factor))) + lower = xl0 - d1*factor**n + middle = xl0 - d1*factor**(n-1) if n > 1 else xl0 + upper = xl0 - d1*factor**(n-2) if n > 1 else xm0 + # It may be the case the lower is below the minimum, but we still + # don't have a valid bracket. + if middle**2 > lower**2: + n += 1 + lower, middle, upper = ( + xl0 - d1*factor**n, lower, middle + ) + f = self.init_f() + + xmax = None + if other_side: + xl0, xm0, xr0 = -xr0, -xm0, -xl0 + xmin, xmax = None, -xmin if xmin is not None else None + lower, middle, upper = -upper, -middle, -lower + + kwargs = self.get_kwargs( + xl0=xl0, xr0=xr0, xmin=xmin, xmax=xmax, factor=factor, args=args + ) + result = _bracket_minimum(f, xm0, **kwargs) + + # Check that `nfev` and `nit` have the correct relationship + assert result.nfev == result.nit + 3 + # Check that `nfev` reports the correct number of function evaluations. + assert result.nfev == f.count + # Check that the number of iterations matches the theoretical value. + assert result.nit == n + + # Compare reported bracket to theoretical bracket and reported function + # values to function evaluated at bracket. 
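+ # `assert_allclose` (rather than exact equality) is appropriate here: the + # theoretical endpoints involve repeated multiplication by `factor`, which + # accumulates floating-point rounding.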
+ bracket = np.asarray([result.xl, result.xm, result.xr]) + assert_allclose(bracket, (lower, middle, upper)) + f_bracket = np.asarray([result.fl, result.fm, result.fr]) + assert_allclose(f_bracket, f(bracket, *args)) + + self.assert_valid_bracket(result) + assert result.status == 0 + assert result.success + + def test_flags(self): + # Test cases that should produce different status flags; show that all + # can be produced simultaneously + def f(xs, js): + funcs = [lambda x: (x - 1.5)**2, + lambda x: x, + lambda x: x, + lambda x: np.nan] + return [funcs[j](x) for x, j in zip(xs, js)] + + args = (np.arange(4, dtype=np.int64),) + xl0, xm0, xr0 = np.full(4, -1.0), np.full(4, 0.0), np.full(4, 1.0) + result = _bracket_minimum(f, xm0, xl0=xl0, xr0=xr0, + xmin=[-np.inf, -1.0, -np.inf, -np.inf], + args=args, maxiter=3) + + reference_flags = np.array([eim._ECONVERGED, _ELIMITS, + eim._ECONVERR, eim._EVALUEERR]) + assert_equal(result.status, reference_flags) + + @pytest.mark.parametrize("minimum", (0.622, [0.622, 0.623])) + @pytest.mark.parametrize("dtype", (np.float16, np.float32, np.float64)) + @pytest.mark.parametrize("xmin", [-5, None]) + @pytest.mark.parametrize("xmax", [5, None]) + def test_dtypes(self, minimum, xmin, xmax, dtype): + xmin = xmin if xmin is None else dtype(xmin) + xmax = xmax if xmax is None else dtype(xmax) + minimum = dtype(minimum) + + def f(x, minimum): + return ((x - minimum)**2).astype(dtype) + + xl0, xm0, xr0 = np.array([-0.01, 0.0, 0.01], dtype=dtype) + result = _bracket_minimum( + f, xm0, xl0=xl0, xr0=xr0, xmin=xmin, xmax=xmax, args=(minimum, ) + ) + assert np.all(result.success) + assert result.xl.dtype == result.xm.dtype == result.xr.dtype == dtype + assert result.fl.dtype == result.fm.dtype == result.fr.dtype == dtype + + def test_input_validation(self): + # Test input validation for appropriate error messages + + message = '`func` must be callable.' + with pytest.raises(ValueError, match=message): + _bracket_minimum(None, -4, xl0=4) + + message = '...must be numeric and real.' + with pytest.raises(ValueError, match=message): + _bracket_minimum(lambda x: x**2, 4+1j) + with pytest.raises(ValueError, match=message): + _bracket_minimum(lambda x: x**2, -4, xl0='hello') + with pytest.raises(ValueError, match=message): + _bracket_minimum(lambda x: x**2, -4, xmin=np) + with pytest.raises(ValueError, match=message): + _bracket_minimum(lambda x: x**2, -4, xmax=object()) + with pytest.raises(ValueError, match=message): + _bracket_minimum(lambda x: x**2, -4, factor=sum) + + message = "All elements of `factor` must be greater than 1." 
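+ # A factor of 1 or less would never grow the step, so the bracket could + # not expand; such values are rejected up front.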
+ with pytest.raises(ValueError, match=message): + _bracket_minimum(lambda x: x, -4, factor=0.5) + + message = '`xmin <= xl0 < xm0 < xr0 <= xmax` must be True' + with pytest.raises(ValueError, match=message): + _bracket_minimum(lambda x: x**2, 4, xl0=6) + with pytest.raises(ValueError, match=message): + _bracket_minimum(lambda x: x**2, -4, xr0=-6) + with pytest.raises(ValueError, match=message): + _bracket_minimum(lambda x: x**2, -4, xl0=-3, xr0=-2) + with pytest.raises(ValueError, match=message): + _bracket_minimum(lambda x: x**2, -4, xl0=-6, xr0=-5) + with pytest.raises(ValueError, match=message): + _bracket_minimum(lambda x: x**2, -4, xl0=-np.nan) + with pytest.raises(ValueError, match=message): + _bracket_minimum(lambda x: x**2, -4, xr0=np.nan) + + message = "shape mismatch: objects cannot be broadcast" + # raised by `np.broadcast`, but the traceback is readable IMO + with pytest.raises(ValueError, match=message): + _bracket_minimum(lambda x: x**2, [-2, -3], xl0=[-3, -4, -5]) + + message = '`maxiter` must be a non-negative integer.' + with pytest.raises(ValueError, match=message): + _bracket_minimum(lambda x: x**2, -4, xr0=4, maxiter=1.5) + with pytest.raises(ValueError, match=message): + _bracket_minimum(lambda x: x**2, -4, xr0=4, maxiter=-1) + + @pytest.mark.parametrize("xl0", [0.0, None]) + @pytest.mark.parametrize("xm0", (0.05, 0.1, 0.15)) + @pytest.mark.parametrize("xr0", (0.2, 0.4, 0.6, None)) + # Minimum is ``a`` for each tuple ``(a, b)`` below. Tests cases where the + # minimum is within, or at varying distances to the left or right of, the + # initial bracket. + @pytest.mark.parametrize( + "args", + ( + (1.2, 0), (-0.5, 0), (0.1, 0), (0.2, 0), (3.6, 0), (21.4, 0), + (121.6, 0), (5764.1, 0), (-6.4, 0), (-12.9, 0), (-146.2, 0) + ) + ) + def test_scalar_no_limits(self, xl0, xm0, xr0, args): + f = self.init_f() + kwargs = self.get_kwargs(xl0=xl0, xr0=xr0, args=args) + result = _bracket_minimum(f, xm0, **kwargs) + self.assert_valid_bracket(result) + assert result.status == 0 + assert result.success + assert result.nfev == f.count + + @pytest.mark.parametrize( + # xmin is set at 0.0 in all cases. + "xl0,xm0,xr0,xmin", + ( + # Initial bracket at varying distances from xmin. + (0.5, 0.75, 1.0, 0.0), + (1.0, 2.5, 4.0, 0.0), + (2.0, 4.0, 6.0, 0.0), + (12.0, 16.0, 20.0, 0.0), + # Test default initial left endpoint selection. It should not + # be below xmin. + (None, 0.75, 1.0, 0.0), + (None, 2.5, 4.0, 0.0), + (None, 4.0, 6.0, 0.0), + (None, 16.0, 20.0, 0.0), + ) + ) + @pytest.mark.parametrize( + "args", ( + (0.0, 0.0), # Minimum is directly at xmin. + (1e-300, 0.0), # Minimum is extremely close to xmin. + (1e-20, 0.0), # Minimum is very close to xmin. + # Minimum at varying distances from xmin. + (0.1, 0.0), + (0.2, 0.0), + (0.4, 0.0) + ) + ) + def test_scalar_with_limit_left(self, xl0, xm0, xr0, xmin, args): + f = self.init_f() + kwargs = self.get_kwargs(xl0=xl0, xr0=xr0, xmin=xmin, args=args) + result = _bracket_minimum(f, xm0, **kwargs) + self.assert_valid_bracket(result) + assert result.status == 0 + assert result.success + assert result.nfev == f.count + + @pytest.mark.parametrize( + # xmax is set to 1.0 in all cases. + "xl0,xm0,xr0,xmax", + ( + # Bracket at varying distances from xmax. + (0.2, 0.3, 0.4, 1.0), + (0.05, 0.075, 0.1, 1.0), + (-0.2, -0.1, 0.0, 1.0), + (-21.2, -17.7, -14.2, 1.0), + # Test default right endpoint selection. It should not exceed xmax.
+ (0.2, 0.3, None, 1.0), + (0.05, 0.075, None, 1.0), + (-0.2, -0.1, None, 1.0), + (-21.2, -17.7, None, 1.0), + ) + ) + @pytest.mark.parametrize( + "args", ( + (0.9999999999999999, 0.0), # Minimum very close to xmax. + # Minimum at varying distances from xmax. + (0.9, 0.0), + (0.7, 0.0), + (0.5, 0.0) + ) + ) + def test_scalar_with_limit_right(self, xl0, xm0, xr0, xmax, args): + f = self.init_f() + kwargs = self.get_kwargs(xl0=xl0, xr0=xr0, xmax=xmax, args=args) + result = _bracket_minimum(f, xm0, **kwargs) + self.assert_valid_bracket(result) + assert result.status == 0 + assert result.success + assert result.nfev == f.count + + @pytest.mark.parametrize( + "xl0,xm0,xr0,xmin,xmax,args", + ( + ( # Case 1: + # Initial bracket. + 0.2, + 0.3, + 0.4, + # Function slopes down to the right from the bracket to a minimum + # at 1.0. xmax is also at 1.0 + None, + 1.0, + (1.0, 0.0) + ), + ( # Case 2: + # Initial bracket. + 1.4, + 1.95, + 2.5, + # Function slopes down to the left from the bracket to a minimum at + # 0.3 with xmin set to 0.3. + 0.3, + None, + (0.3, 0.0) + ), + ( + # Case 3: + # Initial bracket. + 2.6, + 3.25, + 3.9, + # Function slopes down and to the right to a minimum at 99.4 with xmax + # at 99.4. Tests case where minimum is at xmax relatively further from + # the bracket. + None, + 99.4, + (99.4, 0) + ), + ( + # Case 4: + # Initial bracket. + 4, + 4.5, + 5, + # Function slopes down and to the left away from the bracket with a + # minimum at -26.3 with xmin set to -26.3. Tests case where minimum is + # at xmin relatively far from the bracket. + -26.3, + None, + (-26.3, 0) + ), + ( + # Case 5: + # Similar to Case 1 above, but tests default values of xl0 and xr0. + None, + 0.3, + None, + None, + 1.0, + (1.0, 0.0) + ), + ( # Case 6: + # Similar to Case 2 above, but tests default values of xl0 and xr0. + None, + 1.95, + None, + 0.3, + None, + (0.3, 0.0) + ), + ( + # Case 7: + # Similar to Case 3 above, but tests default values of xl0 and xr0. + None, + 3.25, + None, + None, + 99.4, + (99.4, 0) + ), + ( + # Case 8: + # Similar to Case 4 above, but tests default values of xl0 and xr0. + None, + 4.5, + None, + -26.3, + None, + (-26.3, 0) + ), + ) + ) + def test_minimum_at_boundary_point(self, xl0, xm0, xr0, xmin, xmax, args): + f = self.init_f() + kwargs = self.get_kwargs(xr0=xr0, xmin=xmin, xmax=xmax, args=args) + result = _bracket_minimum(f, xm0, **kwargs) + assert result.status == -1 + assert args[0] in (result.xl, result.xr) + assert result.nfev == f.count + + @pytest.mark.parametrize('shape', [tuple(), (12, ), (3, 4), (3, 2, 2)]) + def test_vectorization(self, shape): + # Test for correct functionality, output shapes, and dtypes for + # various input shapes. 
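+ # For this bracketer the three initial points are evaluated before any + # iteration, so nfev == nit + 3 (asserted in test_nfev_expected above); + # the aggregate check on f.count below relies on the same relation.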
+ a = np.linspace(-0.05, 1.05, 12).reshape(shape) if shape else 0.6 + args = (a, 0.0) + maxiter = 10 + + @np.vectorize + def bracket_minimum_single(xm0, xl0, xr0, xmin, xmax, factor, a): + return _bracket_minimum(self.init_f(), xm0, xl0=xl0, xr0=xr0, xmin=xmin, + xmax=xmax, factor=factor, maxiter=maxiter, + args=(a, 0.0)) + + f = self.init_f() + + rng = np.random.default_rng(2348234) + xl0 = -rng.random(size=shape) + xr0 = rng.random(size=shape) + xm0 = xl0 + rng.random(size=shape) * (xr0 - xl0) + xmin, xmax = 1e3*xl0, 1e3*xr0 + if shape: # make some elements unbounded + i = rng.random(size=shape) > 0.5 + xmin[i], xmax[i] = -np.inf, np.inf + factor = rng.random(size=shape) + 1.5 + res = _bracket_minimum(f, xm0, xl0=xl0, xr0=xr0, xmin=xmin, xmax=xmax, + factor=factor, args=args, maxiter=maxiter) + refs = bracket_minimum_single(xm0, xl0, xr0, xmin, xmax, factor, a).ravel() + + attrs = ['xl', 'xm', 'xr', 'fl', 'fm', 'fr', 'success', 'nfev', 'nit'] + for attr in attrs: + ref_attr = [getattr(ref, attr) for ref in refs] + res_attr = getattr(res, attr) + assert_allclose(res_attr.ravel(), ref_attr) + assert_equal(res_attr.shape, shape) + + assert np.issubdtype(res.success.dtype, np.bool_) + if shape: + assert np.all(res.success[1:-1]) + assert np.issubdtype(res.status.dtype, np.integer) + assert np.issubdtype(res.nfev.dtype, np.integer) + assert np.issubdtype(res.nit.dtype, np.integer) + assert_equal(np.max(res.nit), f.count - 3) + self.assert_valid_bracket(res) + assert_allclose(res.fl, f(res.xl, *args)) + assert_allclose(res.fm, f(res.xm, *args)) + assert_allclose(res.fr, f(res.xr, *args)) + + def test_special_cases(self): + # Test edge cases and other special cases. + + # Test that integers are not passed to `f` + # (otherwise this would overflow) + def f(x): + assert np.issubdtype(x.dtype, np.floating) + return x ** 98 - 1 + + result = _bracket_minimum(f, -7, xr0=5) + assert result.success + + # Test maxiter = 0. Should do nothing to bracket. + def f(x): + return x**2 - 10 + + xl0, xm0, xr0 = -3, -1, 2 + result = _bracket_minimum(f, xm0, xl0=xl0, xr0=xr0, maxiter=0) + assert_equal([result.xl, result.xm, result.xr], [xl0, xm0, xr0]) + + # Test scalar `args` (not in tuple) + def f(x, c): + return c*x**2 - 1 + + result = _bracket_minimum(f, -1, args=3) + assert result.success + assert_allclose(result.fl, f(result.xl, 3)) + + # Initial bracket is valid.
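+ # With a valid bracket supplied up front, no expansion iterations should + # run: only the three startup evaluations occur and the points come back + # unchanged, as the assertions below verify.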
+ f = self.init_f() + xl0, xm0, xr0 = [-1.0, -0.2, 1.0] + args = (0, 0) + result = _bracket_minimum(f, xm0, xl0=xl0, xr0=xr0, args=args) + assert f.count == 3 + + assert_equal( + [result.xl, result.xm, result.xr], + [xl0, xm0, xr0], + ) + assert_equal( + [result.fl, result.fm, result.fr], + [f(xl0, *args), f(xm0, *args), f(xr0, *args)], + ) diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_chandrupatla.py b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_chandrupatla.py new file mode 100644 index 0000000000000000000000000000000000000000..5c88e23b9f756edb861157d58986bb13e5fa0810 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_chandrupatla.py @@ -0,0 +1,827 @@ +import pytest +import numpy as np +from numpy.testing import assert_allclose, assert_equal, assert_array_less + +from scipy import stats +import scipy._lib._elementwise_iterative_method as eim + +from scipy.optimize._chandrupatla import (_chandrupatla_minimize, + _chandrupatla as _chandrupatla_root) +from scipy.optimize._tstutils import _CHANDRUPATLA_TESTS + +from itertools import permutations +from .test_zeros import TestScalarRootFinders + +def f1(x): + return 100*(1 - x**3.)**2 + (1-x**2.) + 2*(1-x)**2. + + +def f2(x): + return 5 + (x - 2.)**6 + + +def f3(x): + return np.exp(x) - 5*x + + +def f4(x): + return x**5. - 5*x**3. - 20.*x + 5. + + +def f5(x): + return 8*x**3 - 2*x**2 - 7*x + 3 + + +def _bracket_minimum(func, x1, x2): + phi = 1.61803398875 + maxiter = 100 + f1 = func(x1) + f2 = func(x2) + step = x2 - x1 + x1, x2, f1, f2, step = ((x2, x1, f2, f1, -step) if f2 > f1 + else (x1, x2, f1, f2, step)) + + for i in range(maxiter): + step *= phi + x3 = x2 + step + f3 = func(x3) + if f3 < f2: + x1, x2, f1, f2 = x2, x3, f2, f3 + else: + break + return x1, x2, x3, f1, f2, f3 + + +cases = [ + (f1, -1, 11), + (f1, -2, 13), + (f1, -4, 13), + (f1, -8, 15), + (f1, -16, 16), + (f1, -32, 19), + (f1, -64, 20), + (f1, -128, 21), + (f1, -256, 21), + (f1, -512, 19), + (f1, -1024, 24), + (f2, -1, 8), + (f2, -2, 6), + (f2, -4, 6), + (f2, -8, 7), + (f2, -16, 8), + (f2, -32, 8), + (f2, -64, 9), + (f2, -128, 11), + (f2, -256, 13), + (f2, -512, 12), + (f2, -1024, 13), + (f3, -1, 11), + (f3, -2, 11), + (f3, -4, 11), + (f3, -8, 10), + (f3, -16, 14), + (f3, -32, 12), + (f3, -64, 15), + (f3, -128, 18), + (f3, -256, 18), + (f3, -512, 19), + (f3, -1024, 19), + (f4, -0.05, 9), + (f4, -0.10, 11), + (f4, -0.15, 11), + (f4, -0.20, 11), + (f4, -0.25, 11), + (f4, -0.30, 9), + (f4, -0.35, 9), + (f4, -0.40, 9), + (f4, -0.45, 10), + (f4, -0.50, 10), + (f4, -0.55, 10), + (f5, -0.05, 6), + (f5, -0.10, 7), + (f5, -0.15, 8), + (f5, -0.20, 10), + (f5, -0.25, 9), + (f5, -0.30, 8), + (f5, -0.35, 7), + (f5, -0.40, 7), + (f5, -0.45, 9), + (f5, -0.50, 9), + (f5, -0.55, 8) +] + + +class TestChandrupatlaMinimize: + + def f(self, x, loc): + dist = stats.norm() + return -dist.pdf(x - loc) + + @pytest.mark.parametrize('loc', [0.6, np.linspace(-1.05, 1.05, 10)]) + def test_basic(self, loc): + # Find mode of normal distribution. Compare mode against location + # parameter and value of pdf at mode against expected pdf. 
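+ # self.f is the negated pdf of a unit normal shifted by `loc`, so its + # minimizer is the distribution's mode, x = loc, and the minimum value + # is -pdf(0).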
+ res = _chandrupatla_minimize(self.f, -5, 0, 5, args=(loc,)) + ref = loc + np.testing.assert_allclose(res.x, ref, rtol=1e-6) + np.testing.assert_allclose(res.fun, -stats.norm.pdf(0), atol=0, rtol=0) + assert res.x.shape == np.shape(ref) + + @pytest.mark.parametrize('shape', [tuple(), (12,), (3, 4), (3, 2, 2)]) + def test_vectorization(self, shape): + # Test for correct functionality, output shapes, and dtypes for various + # input shapes. + loc = np.linspace(-0.05, 1.05, 12).reshape(shape) if shape else 0.6 + args = (loc,) + + @np.vectorize + def chandrupatla_single(loc_single): + return _chandrupatla_minimize(self.f, -5, 0, 5, args=(loc_single,)) + + def f(*args, **kwargs): + f.f_evals += 1 + return self.f(*args, **kwargs) + f.f_evals = 0 + + res = _chandrupatla_minimize(f, -5, 0, 5, args=args) + refs = chandrupatla_single(loc).ravel() + + ref_x = [ref.x for ref in refs] + assert_allclose(res.x.ravel(), ref_x) + assert_equal(res.x.shape, shape) + + ref_fun = [ref.fun for ref in refs] + assert_allclose(res.fun.ravel(), ref_fun) + assert_equal(res.fun.shape, shape) + assert_equal(res.fun, self.f(res.x, *args)) + + ref_success = [ref.success for ref in refs] + assert_equal(res.success.ravel(), ref_success) + assert_equal(res.success.shape, shape) + assert np.issubdtype(res.success.dtype, np.bool_) + + ref_flag = [ref.status for ref in refs] + assert_equal(res.status.ravel(), ref_flag) + assert_equal(res.status.shape, shape) + assert np.issubdtype(res.status.dtype, np.integer) + + ref_nfev = [ref.nfev for ref in refs] + assert_equal(res.nfev.ravel(), ref_nfev) + assert_equal(np.max(res.nfev), f.f_evals) + assert_equal(res.nfev.shape, res.fun.shape) + assert np.issubdtype(res.nfev.dtype, np.integer) + + ref_nit = [ref.nit for ref in refs] + assert_equal(res.nit.ravel(), ref_nit) + assert_equal(np.max(res.nit), f.f_evals-3) + assert_equal(res.nit.shape, res.fun.shape) + assert np.issubdtype(res.nit.dtype, np.integer) + + ref_xl = [ref.xl for ref in refs] + assert_allclose(res.xl.ravel(), ref_xl) + assert_equal(res.xl.shape, shape) + + ref_xm = [ref.xm for ref in refs] + assert_allclose(res.xm.ravel(), ref_xm) + assert_equal(res.xm.shape, shape) + + ref_xr = [ref.xr for ref in refs] + assert_allclose(res.xr.ravel(), ref_xr) + assert_equal(res.xr.shape, shape) + + ref_fl = [ref.fl for ref in refs] + assert_allclose(res.fl.ravel(), ref_fl) + assert_equal(res.fl.shape, shape) + assert_allclose(res.fl, self.f(res.xl, *args)) + + ref_fm = [ref.fm for ref in refs] + assert_allclose(res.fm.ravel(), ref_fm) + assert_equal(res.fm.shape, shape) + assert_allclose(res.fm, self.f(res.xm, *args)) + + ref_fr = [ref.fr for ref in refs] + assert_allclose(res.fr.ravel(), ref_fr) + assert_equal(res.fr.shape, shape) + assert_allclose(res.fr, self.f(res.xr, *args)) + + def test_flags(self): + # Test cases that should produce different status flags; show that all + # can be produced simultaneously. 
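+ # The four elements are engineered to finish differently: element 0 has + # its minimum inside the bracket (converges); element 1 is linear, so no + # minimum can be bracketed (sign error); element 2 is too flat near its + # minimum to meet the tolerances within maxiter=10 (convergence error); + # element 3 returns nan (value error).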
+ def f(xs, js): + funcs = [lambda x: (x - 2.5) ** 2, + lambda x: x - 10, + lambda x: (x - 2.5) ** 4, + lambda x: np.nan] + + return [funcs[j](x) for x, j in zip(xs, js)] + + args = (np.arange(4, dtype=np.int64),) + + res = _chandrupatla_minimize(f, [0]*4, [2]*4, [np.pi]*4, args=args, + maxiter=10) + + ref_flags = np.array([eim._ECONVERGED, + eim._ESIGNERR, + eim._ECONVERR, + eim._EVALUEERR]) + assert_equal(res.status, ref_flags) + + def test_convergence(self): + # Test that the convergence tolerances behave as expected + rng = np.random.default_rng(2585255913088665241) + p = rng.random(size=3) + bracket = (-5, 0, 5) + args = (p,) + kwargs0 = dict(args=args, xatol=0, xrtol=0, fatol=0, frtol=0) + + kwargs = kwargs0.copy() + kwargs['xatol'] = 1e-3 + res1 = _chandrupatla_minimize(self.f, *bracket, **kwargs) + j1 = abs(res1.xr - res1.xl) + assert_array_less(j1, 4*kwargs['xatol']) + kwargs['xatol'] = 1e-6 + res2 = _chandrupatla_minimize(self.f, *bracket, **kwargs) + j2 = abs(res2.xr - res2.xl) + assert_array_less(j2, 4*kwargs['xatol']) + assert_array_less(j2, j1) + + kwargs = kwargs0.copy() + kwargs['xrtol'] = 1e-3 + res1 = _chandrupatla_minimize(self.f, *bracket, **kwargs) + j1 = abs(res1.xr - res1.xl) + assert_array_less(j1, 4*kwargs['xrtol']*abs(res1.x)) + kwargs['xrtol'] = 1e-6 + res2 = _chandrupatla_minimize(self.f, *bracket, **kwargs) + j2 = abs(res2.xr - res2.xl) + assert_array_less(j2, 4*kwargs['xrtol']*abs(res2.x)) + assert_array_less(j2, j1) + + kwargs = kwargs0.copy() + kwargs['fatol'] = 1e-3 + res1 = _chandrupatla_minimize(self.f, *bracket, **kwargs) + h1 = abs(res1.fl - 2 * res1.fm + res1.fr) + assert_array_less(h1, 2*kwargs['fatol']) + kwargs['fatol'] = 1e-6 + res2 = _chandrupatla_minimize(self.f, *bracket, **kwargs) + h2 = abs(res2.fl - 2 * res2.fm + res2.fr) + assert_array_less(h2, 2*kwargs['fatol']) + assert_array_less(h2, h1) + + kwargs = kwargs0.copy() + kwargs['frtol'] = 1e-3 + res1 = _chandrupatla_minimize(self.f, *bracket, **kwargs) + h1 = abs(res1.fl - 2 * res1.fm + res1.fr) + assert_array_less(h1, 2*kwargs['frtol']*abs(res1.fun)) + kwargs['frtol'] = 1e-6 + res2 = _chandrupatla_minimize(self.f, *bracket, **kwargs) + h2 = abs(res2.fl - 2 * res2.fm + res2.fr) + assert_array_less(h2, 2*kwargs['frtol']*abs(res2.fun)) + assert_array_less(h2, h1) + + def test_maxiter_callback(self): + # Test behavior of `maxiter` parameter and `callback` interface + loc = 0.612814 + bracket = (-5, 0, 5) + maxiter = 5 + + res = _chandrupatla_minimize(self.f, *bracket, args=(loc,), + maxiter=maxiter) + assert not np.any(res.success) + assert np.all(res.nfev == maxiter+3) + assert np.all(res.nit == maxiter) + + def callback(res): + callback.iter += 1 + callback.res = res + assert hasattr(res, 'x') + if callback.iter == 0: + # callback is called once with initial bracket + assert (res.xl, res.xm, res.xr) == bracket + else: + changed_xr = (res.xl == callback.xl) & (res.xr != callback.xr) + changed_xl = (res.xl != callback.xl) & (res.xr == callback.xr) + assert np.all(changed_xr | changed_xl) + + callback.xl = res.xl + callback.xr = res.xr + assert res.status == eim._EINPROGRESS + assert_equal(self.f(res.xl, loc), res.fl) + assert_equal(self.f(res.xm, loc), res.fm) + assert_equal(self.f(res.xr, loc), res.fr) + assert_equal(self.f(res.x, loc), res.fun) + if callback.iter == maxiter: + raise StopIteration + + callback.xl = np.nan + callback.xr = np.nan + callback.iter = -1 # callback called once before first iteration + callback.res = None + + res2 = _chandrupatla_minimize(self.f, *bracket, 
args=(loc,), + callback=callback) + + # terminating with callback is identical to terminating due to maxiter + # (except for `status`) + for key in res.keys(): + if key == 'status': + assert res[key] == eim._ECONVERR + assert callback.res[key] == eim._EINPROGRESS + assert res2[key] == eim._ECALLBACK + else: + assert res2[key] == callback.res[key] == res[key] + + @pytest.mark.parametrize('case', cases) + def test_nit_expected(self, case): + # Test that `_chandrupatla` implements Chandrupatla's algorithm: + # in all 55 test cases, the number of iterations performed + # matches the number reported in the original paper. + func, x1, nit = case + + # Find bracket using the algorithm in the paper + step = 0.2 + x2 = x1 + step + x1, x2, x3, f1, f2, f3 = _bracket_minimum(func, x1, x2) + + # Use tolerances from original paper + xatol = 0.0001 + fatol = 0.000001 + xrtol = 1e-16 + frtol = 1e-16 + + res = _chandrupatla_minimize(func, x1, x2, x3, xatol=xatol, + fatol=fatol, xrtol=xrtol, frtol=frtol) + assert_equal(res.nit, nit) + + @pytest.mark.parametrize("loc", (0.65, [0.65, 0.7])) + @pytest.mark.parametrize("dtype", (np.float16, np.float32, np.float64)) + def test_dtype(self, loc, dtype): + # Test that dtypes are preserved + + loc = dtype(loc) + + def f(x, loc): + assert x.dtype == dtype + return ((x - loc) ** 2).astype(dtype) + + res = _chandrupatla_minimize(f, dtype(-3), dtype(1), dtype(5), + args=(loc,)) + assert res.x.dtype == dtype + assert_allclose(res.x, loc, rtol=np.sqrt(np.finfo(dtype).eps)) + + def test_input_validation(self): + # Test input validation for appropriate error messages + + message = '`func` must be callable.' + with pytest.raises(ValueError, match=message): + _chandrupatla_minimize(None, -4, 0, 4) + + message = 'Abscissae and function output must be real numbers.' + with pytest.raises(ValueError, match=message): + _chandrupatla_minimize(lambda x: x, -4+1j, 0, 4) + + message = "shape mismatch: objects cannot be broadcast" + # raised by `np.broadcast, but the traceback is readable IMO + with pytest.raises(ValueError, match=message): + _chandrupatla_minimize(lambda x: x, [-2, -3], [0, 0], [3, 4, 5]) + + message = "The shape of the array returned by `func` must be the same" + with pytest.raises(ValueError, match=message): + _chandrupatla_minimize(lambda x: [x[0], x[1], x[1]], [-3, -3], + [0, 0], [5, 5]) + + message = 'Tolerances must be non-negative scalars.' + with pytest.raises(ValueError, match=message): + _chandrupatla_minimize(lambda x: x, -4, 0, 4, xatol=-1) + with pytest.raises(ValueError, match=message): + _chandrupatla_minimize(lambda x: x, -4, 0, 4, xrtol=np.nan) + with pytest.raises(ValueError, match=message): + _chandrupatla_minimize(lambda x: x, -4, 0, 4, fatol='ekki') + with pytest.raises(ValueError, match=message): + _chandrupatla_minimize(lambda x: x, -4, 0, 4, frtol=np.nan) + + message = '`maxiter` must be a non-negative integer.' + with pytest.raises(ValueError, match=message): + _chandrupatla_minimize(lambda x: x, -4, 0, 4, maxiter=1.5) + with pytest.raises(ValueError, match=message): + _chandrupatla_minimize(lambda x: x, -4, 0, 4, maxiter=-1) + + message = '`callback` must be callable.' 
+ with pytest.raises(ValueError, match=message): + _chandrupatla_minimize(lambda x: x, -4, 0, 4, callback='shrubbery') + + def test_bracket_order(self): + # Confirm that order of points in bracket doesn't matter + loc = np.linspace(-1, 1, 6)[:, np.newaxis] + brackets = np.array(list(permutations([-5, 0, 5]))).T + res = _chandrupatla_minimize(self.f, *brackets, args=(loc,)) + assert np.all(np.isclose(res.x, loc) | (res.fun == self.f(loc, loc))) + ref = res.x[:, 0] # all columns should be the same + assert_allclose(*np.broadcast_arrays(res.x.T, ref), rtol=1e-15) + + def test_special_cases(self): + # Test edge cases and other special cases + + # Test that integers are not passed to `f` + # (otherwise this would overflow) + def f(x): + assert np.issubdtype(x.dtype, np.floating) + return (x-1) ** 100 + + with np.errstate(invalid='ignore'): + res = _chandrupatla_minimize(f, -7, 0, 8, fatol=0, frtol=0) + assert res.success + assert_allclose(res.x, 1, rtol=1e-3) + assert_equal(res.fun, 0) + + # Test that if all elements of bracket equal minimizer, algorithm + # reports convergence + def f(x): + return (x-1)**2 + + res = _chandrupatla_minimize(f, 1, 1, 1) + assert res.success + assert_equal(res.x, 1) + + # Test maxiter = 0. Should do nothing to bracket. + def f(x): + return (x-1)**2 + + bracket = (-3, 1.1, 5) + res = _chandrupatla_minimize(f, *bracket, maxiter=0) + assert (res.xl, res.xm, res.xr) == bracket + assert res.nit == 0 + assert res.nfev == 3 + assert res.status == -2 + assert res.x == 1.1 # best so far + + # Test scalar `args` (not in tuple) + def f(x, c): + return (x-c)**2 - 1 + + res = _chandrupatla_minimize(f, -1, 0, 1, args=1/3) + assert_allclose(res.x, 1/3) + + # Test zero tolerances + # TODO: fatol/frtol = 0? + def f(x): + return -np.sin(x) + + res = _chandrupatla_minimize(f, 0, 1, np.pi, xatol=0, xrtol=0, + fatol=0, frtol=0) + assert res.success + # found a minimum exactly (according to floating point arithmetic) + assert res.xl < res.xm < res.xr + assert f(res.xl) == f(res.xm) == f(res.xr) + + +class TestChandrupatla(TestScalarRootFinders): + + def f(self, q, p): + return stats.norm.cdf(q) - p + + @pytest.mark.parametrize('p', [0.6, np.linspace(-0.05, 1.05, 10)]) + def test_basic(self, p): + # Invert distribution CDF and compare against distribution `ppf` + res = _chandrupatla_root(self.f, -5, 5, args=(p,)) + ref = stats.norm().ppf(p) + np.testing.assert_allclose(res.x, ref) + assert res.x.shape == ref.shape + + @pytest.mark.parametrize('shape', [tuple(), (12,), (3, 4), (3, 2, 2)]) + def test_vectorization(self, shape): + # Test for correct functionality, output shapes, and dtypes for various + # input shapes.
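+ # Besides matching the elementwise scalar references, the aggregated + # result must be internally consistent: each x coincides with one end of + # its final bracket and |fun| is the smaller of |fl| and |fr| (checked + # below).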
+ p = np.linspace(-0.05, 1.05, 12).reshape(shape) if shape else 0.6 + args = (p,) + + @np.vectorize + def chandrupatla_single(p): + return _chandrupatla_root(self.f, -5, 5, args=(p,)) + + def f(*args, **kwargs): + f.f_evals += 1 + return self.f(*args, **kwargs) + f.f_evals = 0 + + res = _chandrupatla_root(f, -5, 5, args=args) + refs = chandrupatla_single(p).ravel() + + ref_x = [ref.x for ref in refs] + assert_allclose(res.x.ravel(), ref_x) + assert_equal(res.x.shape, shape) + + ref_fun = [ref.fun for ref in refs] + assert_allclose(res.fun.ravel(), ref_fun) + assert_equal(res.fun.shape, shape) + assert_equal(res.fun, self.f(res.x, *args)) + + ref_success = [ref.success for ref in refs] + assert_equal(res.success.ravel(), ref_success) + assert_equal(res.success.shape, shape) + assert np.issubdtype(res.success.dtype, np.bool_) + + ref_flag = [ref.status for ref in refs] + assert_equal(res.status.ravel(), ref_flag) + assert_equal(res.status.shape, shape) + assert np.issubdtype(res.status.dtype, np.integer) + + ref_nfev = [ref.nfev for ref in refs] + assert_equal(res.nfev.ravel(), ref_nfev) + assert_equal(np.max(res.nfev), f.f_evals) + assert_equal(res.nfev.shape, res.fun.shape) + assert np.issubdtype(res.nfev.dtype, np.integer) + + ref_nit = [ref.nit for ref in refs] + assert_equal(res.nit.ravel(), ref_nit) + assert_equal(np.max(res.nit), f.f_evals-2) + assert_equal(res.nit.shape, res.fun.shape) + assert np.issubdtype(res.nit.dtype, np.integer) + + ref_xl = [ref.xl for ref in refs] + assert_allclose(res.xl.ravel(), ref_xl) + assert_equal(res.xl.shape, shape) + + ref_xr = [ref.xr for ref in refs] + assert_allclose(res.xr.ravel(), ref_xr) + assert_equal(res.xr.shape, shape) + + assert_array_less(res.xl, res.xr) + finite = np.isfinite(res.x) + assert np.all((res.x[finite] == res.xl[finite]) + | (res.x[finite] == res.xr[finite])) + + ref_fl = [ref.fl for ref in refs] + assert_allclose(res.fl.ravel(), ref_fl) + assert_equal(res.fl.shape, shape) + assert_allclose(res.fl, self.f(res.xl, *args)) + + ref_fr = [ref.fr for ref in refs] + assert_allclose(res.fr.ravel(), ref_fr) + assert_equal(res.fr.shape, shape) + assert_allclose(res.fr, self.f(res.xr, *args)) + + assert np.all(np.abs(res.fun[finite]) == + np.minimum(np.abs(res.fl[finite]), + np.abs(res.fr[finite]))) + + def test_flags(self): + # Test cases that should produce different status flags; show that all + # can be produced simultaneously. 
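+ # Engineered outcomes: element 0 has a sign change in [0, pi] (converges); + # element 1 has none (sign error); element 2 has a root at 0.1 but the + # flat cubic cannot converge within maxiter=2 (convergence error); + # element 3 returns nan (value error).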
+ def f(xs, js): + funcs = [lambda x: x - 2.5, + lambda x: x - 10, + lambda x: (x - 0.1)**3, + lambda x: np.nan] + return [funcs[j](x) for x, j in zip(xs, js)] + + args = (np.arange(4, dtype=np.int64),) + res = _chandrupatla_root(f, [0]*4, [np.pi]*4, args=args, maxiter=2) + + ref_flags = np.array([eim._ECONVERGED, + eim._ESIGNERR, + eim._ECONVERR, + eim._EVALUEERR]) + assert_equal(res.status, ref_flags) + + def test_convergence(self): + # Test that the convergence tolerances behave as expected + rng = np.random.default_rng(2585255913088665241) + p = rng.random(size=3) + bracket = (-5, 5) + args = (p,) + kwargs0 = dict(args=args, xatol=0, xrtol=0, fatol=0, frtol=0) + + kwargs = kwargs0.copy() + kwargs['xatol'] = 1e-3 + res1 = _chandrupatla_root(self.f, *bracket, **kwargs) + assert_array_less(res1.xr - res1.xl, 1e-3) + kwargs['xatol'] = 1e-6 + res2 = _chandrupatla_root(self.f, *bracket, **kwargs) + assert_array_less(res2.xr - res2.xl, 1e-6) + assert_array_less(res2.xr - res2.xl, res1.xr - res1.xl) + + kwargs = kwargs0.copy() + kwargs['xrtol'] = 1e-3 + res1 = _chandrupatla_root(self.f, *bracket, **kwargs) + assert_array_less(res1.xr - res1.xl, 1e-3 * np.abs(res1.x)) + kwargs['xrtol'] = 1e-6 + res2 = _chandrupatla_root(self.f, *bracket, **kwargs) + assert_array_less(res2.xr - res2.xl, 1e-6 * np.abs(res2.x)) + assert_array_less(res2.xr - res2.xl, res1.xr - res1.xl) + + kwargs = kwargs0.copy() + kwargs['fatol'] = 1e-3 + res1 = _chandrupatla_root(self.f, *bracket, **kwargs) + assert_array_less(np.abs(res1.fun), 1e-3) + kwargs['fatol'] = 1e-6 + res2 = _chandrupatla_root(self.f, *bracket, **kwargs) + assert_array_less(np.abs(res2.fun), 1e-6) + assert_array_less(np.abs(res2.fun), np.abs(res1.fun)) + + kwargs = kwargs0.copy() + kwargs['frtol'] = 1e-3 + x1, x2 = bracket + f0 = np.minimum(abs(self.f(x1, *args)), abs(self.f(x2, *args))) + res1 = _chandrupatla_root(self.f, *bracket, **kwargs) + assert_array_less(np.abs(res1.fun), 1e-3*f0) + kwargs['frtol'] = 1e-6 + res2 = _chandrupatla_root(self.f, *bracket, **kwargs) + assert_array_less(np.abs(res2.fun), 1e-6*f0) + assert_array_less(np.abs(res2.fun), np.abs(res1.fun)) + + def test_maxiter_callback(self): + # Test behavior of `maxiter` parameter and `callback` interface + p = 0.612814 + bracket = (-5, 5) + maxiter = 5 + + def f(q, p): + res = stats.norm().cdf(q) - p + f.x = q + f.fun = res + return res + f.x = None + f.fun = None + + res = _chandrupatla_root(f, *bracket, args=(p,), + maxiter=maxiter) + assert not np.any(res.success) + assert np.all(res.nfev == maxiter+2) + assert np.all(res.nit == maxiter) + + def callback(res): + callback.iter += 1 + callback.res = res + assert hasattr(res, 'x') + if callback.iter == 0: + # callback is called once with initial bracket + assert (res.xl, res.xr) == bracket + else: + changed = (((res.xl == callback.xl) & (res.xr != callback.xr)) + | ((res.xl != callback.xl) & (res.xr == callback.xr))) + assert np.all(changed) + + callback.xl = res.xl + callback.xr = res.xr + assert res.status == eim._EINPROGRESS + assert_equal(self.f(res.xl, p), res.fl) + assert_equal(self.f(res.xr, p), res.fr) + assert_equal(self.f(res.x, p), res.fun) + if callback.iter == maxiter: + raise StopIteration + callback.iter = -1 # callback called once before first iteration + callback.res = None + callback.xl = None + callback.xr = None + + res2 = _chandrupatla_root(f, *bracket, args=(p,), + callback=callback) + + # terminating with callback is identical to terminating due to maxiter + # (except for `status`) + for key in res.keys(): + if key 
== 'status': + assert res[key] == eim._ECONVERR + assert callback.res[key] == eim._EINPROGRESS + assert res2[key] == eim._ECALLBACK + else: + assert res2[key] == callback.res[key] == res[key] + + @pytest.mark.parametrize('case', _CHANDRUPATLA_TESTS) + def test_nit_expected(self, case): + # Test that `_chandrupatla` implements Chandrupatla's algorithm: + # in all 40 test cases, the number of iterations performed + # matches the number reported in the original paper. + f, bracket, root, nfeval, id = case + # Chandrupatla's criterion is equivalent to + # abs(x2-x1) < 4*abs(xmin)*xrtol + xatol, but we use the more standard + # abs(x2-x1) < abs(xmin)*xrtol + xatol. Therefore, set xrtol to 4x + # that used by Chandrupatla in tests. + res = _chandrupatla_root(f, *bracket, xrtol=4e-10, xatol=1e-5) + assert_allclose(res.fun, f(root), rtol=1e-8, atol=2e-3) + assert_equal(res.nfev, nfeval) + + @pytest.mark.parametrize("root", (0.622, [0.622, 0.623])) + @pytest.mark.parametrize("dtype", (np.float16, np.float32, np.float64)) + def test_dtype(self, root, dtype): + # Test that dtypes are preserved + + root = dtype(root) + def f(x, root): + return ((x - root) ** 3).astype(dtype) + + res = _chandrupatla_root(f, dtype(-3), dtype(5), + args=(root,), xatol=1e-3) + assert res.x.dtype == dtype + assert np.allclose(res.x, root, atol=1e-3) or np.all(res.fun == 0) + + def test_input_validation(self): + # Test input validation for appropriate error messages + + message = '`func` must be callable.' + with pytest.raises(ValueError, match=message): + _chandrupatla_root(None, -4, 4) + + message = 'Abscissae and function output must be real numbers.' + with pytest.raises(ValueError, match=message): + _chandrupatla_root(lambda x: x, -4+1j, 4) + + message = "shape mismatch: objects cannot be broadcast" + # raised by `np.broadcast, but the traceback is readable IMO + with pytest.raises(ValueError, match=message): + _chandrupatla_root(lambda x: x, [-2, -3], [3, 4, 5]) + + message = "The shape of the array returned by `func`..." + with pytest.raises(ValueError, match=message): + _chandrupatla_root(lambda x: [x[0], x[1], x[1]], [-3, -3], [5, 5]) + + message = 'Tolerances must be non-negative scalars.' + with pytest.raises(ValueError, match=message): + _chandrupatla_root(lambda x: x, -4, 4, xatol=-1) + with pytest.raises(ValueError, match=message): + _chandrupatla_root(lambda x: x, -4, 4, xrtol=np.nan) + with pytest.raises(ValueError, match=message): + _chandrupatla_root(lambda x: x, -4, 4, fatol='ekki') + with pytest.raises(ValueError, match=message): + _chandrupatla_root(lambda x: x, -4, 4, frtol=np.nan) + + message = '`maxiter` must be a non-negative integer.' + with pytest.raises(ValueError, match=message): + _chandrupatla_root(lambda x: x, -4, 4, maxiter=1.5) + with pytest.raises(ValueError, match=message): + _chandrupatla_root(lambda x: x, -4, 4, maxiter=-1) + + message = '`callback` must be callable.' 
+ with pytest.raises(ValueError, match=message): + _chandrupatla_root(lambda x: x, -4, 4, callback='shrubbery') + + def test_special_cases(self): + # Test edge cases and other special cases + + # Test that integers are not passed to `f` + # (otherwise this would overflow) + def f(x): + assert np.issubdtype(x.dtype, np.floating) + return x ** 99 - 1 + + res = _chandrupatla_root(f, -7, 5) + assert res.success + assert_allclose(res.x, 1) + + # Test that if both ends of bracket equal root, algorithm reports + # convergence + def f(x): + return x**2 - 1 + + res = _chandrupatla_root(f, 1, 1) + assert res.success + assert_equal(res.x, 1) + + def f(x): + return 1/x + + with np.errstate(invalid='ignore'): + res = _chandrupatla_root(f, np.inf, np.inf) + assert res.success + assert_equal(res.x, np.inf) + + # Test maxiter = 0. Should do nothing to bracket. + def f(x): + return x**3 - 1 + + bracket = (-3, 5) + res = _chandrupatla_root(f, *bracket, maxiter=0) + assert (res.xl, res.xr) == bracket + assert res.nit == 0 + assert res.nfev == 2 + assert res.status == -2 + assert res.x == -3 # best so far + + # Test maxiter = 1 + res = _chandrupatla_root(f, *bracket, maxiter=1) + assert res.success + assert res.status == 0 + assert res.nit == 1 + assert res.nfev == 3 + assert_allclose(res.x, 1) + + # Test scalar `args` (not in tuple) + def f(x, c): + return c*x - 1 + + res = _chandrupatla_root(f, -1, 1, args=3) + assert_allclose(res.x, 1/3) + + # # TODO: Test zero tolerance + # # ~~What's going on here - why are iterations repeated?~~ + # # tl goes to zero when xatol=xrtol=0. When function is nearly linear, + # # this causes convergence issues. + # def f(x): + # return np.cos(x) + # + # res = _chandrupatla_root(f, 0, np.pi, xatol=0, xrtol=0) + # assert res.nit < 100 + # xp = np.nextafter(res.x, np.inf) + # xm = np.nextafter(res.x, -np.inf) + # assert np.abs(res.fun) < np.abs(f(xp)) + # assert np.abs(res.fun) < np.abs(f(xm)) diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_cobyla.py b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_cobyla.py new file mode 100644 index 0000000000000000000000000000000000000000..11663ce778beb7e1046143b93fe2508f469727c1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_cobyla.py @@ -0,0 +1,166 @@ +import math + +import numpy as np +from numpy.testing import assert_allclose, assert_, assert_array_equal +import pytest + +from scipy.optimize import fmin_cobyla, minimize, Bounds + + +class TestCobyla: + def setup_method(self): + self.x0 = [4.95, 0.66] + self.solution = [math.sqrt(25 - (2.0/3)**2), 2.0/3] + self.opts = {'disp': False, 'rhobeg': 1, 'tol': 1e-5, + 'maxiter': 100} + + def fun(self, x): + return x[0]**2 + abs(x[1])**3 + + def con1(self, x): + return x[0]**2 + x[1]**2 - 25 + + def con2(self, x): + return -self.con1(x) + + @pytest.mark.xslow(True, reason='not slow, but noisy so only run rarely') + def test_simple(self, capfd): + # use disp=True as smoke test for gh-8118 + x = fmin_cobyla(self.fun, self.x0, [self.con1, self.con2], rhobeg=1, + rhoend=1e-5, maxfun=100, disp=True) + assert_allclose(x, self.solution, atol=1e-4) + + def test_minimize_simple(self): + class Callback: + def __init__(self): + self.n_calls = 0 + self.last_x = None + + def __call__(self, x): + self.n_calls += 1 + self.last_x = x + + callback = Callback() + + # Minimize with method='COBYLA' + cons = ({'type': 'ineq', 'fun': self.con1}, + {'type': 'ineq', 'fun': self.con2}) + sol = minimize(self.fun, self.x0, method='cobyla',
constraints=cons, + callback=callback, options=self.opts) + assert_allclose(sol.x, self.solution, atol=1e-4) + assert_(sol.success, sol.message) + assert_(sol.maxcv < 1e-5, sol) + assert_(sol.nfev < 70, sol) + assert_(sol.fun < self.fun(self.solution) + 1e-3, sol) + assert_(sol.nfev == callback.n_calls, + "Callback is not called exactly once for every function eval.") + assert_array_equal( + sol.x, + callback.last_x, + "Last design vector sent to the callback is not equal to returned value.", + ) + + def test_minimize_constraint_violation(self): + np.random.seed(1234) + pb = np.random.rand(10, 10) + spread = np.random.rand(10) + + def p(w): + return pb.dot(w) + + def f(w): + return -(w * spread).sum() + + def c1(w): + return 500 - abs(p(w)).sum() + + def c2(w): + return 5 - abs(p(w).sum()) + + def c3(w): + return 5 - abs(p(w)).max() + + cons = ({'type': 'ineq', 'fun': c1}, + {'type': 'ineq', 'fun': c2}, + {'type': 'ineq', 'fun': c3}) + w0 = np.zeros((10,)) + sol = minimize(f, w0, method='cobyla', constraints=cons, + options={'catol': 1e-6}) + assert_(sol.maxcv > 1e-6) + assert_(not sol.success) + + +def test_vector_constraints(): + # test that fmin_cobyla and minimize can take a combination + # of constraints, some returning a number and others an array + def fun(x): + return (x[0] - 1)**2 + (x[1] - 2.5)**2 + + def fmin(x): + return fun(x) - 1 + + def cons1(x): + a = np.array([[1, -2, 2], [-1, -2, 6], [-1, 2, 2]]) + return np.array([a[i, 0] * x[0] + a[i, 1] * x[1] + + a[i, 2] for i in range(len(a))]) + + def cons2(x): + return x # identity, acts as bounds x > 0 + + x0 = np.array([2, 0]) + cons_list = [fun, cons1, cons2] + + xsol = [1.4, 1.7] + fsol = 0.8 + + # testing fmin_cobyla + sol = fmin_cobyla(fun, x0, cons_list, rhoend=1e-5) + assert_allclose(sol, xsol, atol=1e-4) + + sol = fmin_cobyla(fun, x0, fmin, rhoend=1e-5) + assert_allclose(fun(sol), 1, atol=1e-4) + + # testing minimize + constraints = [{'type': 'ineq', 'fun': cons} for cons in cons_list] + sol = minimize(fun, x0, constraints=constraints, tol=1e-5) + assert_allclose(sol.x, xsol, atol=1e-4) + assert_(sol.success, sol.message) + assert_allclose(sol.fun, fsol, atol=1e-4) + + constraints = {'type': 'ineq', 'fun': fmin} + sol = minimize(fun, x0, constraints=constraints, tol=1e-5) + assert_allclose(sol.fun, 1, atol=1e-4) + + +class TestBounds: + # Test cobyla support for bounds (only when used via `minimize`) + # Invalid bounds is tested in + # test_optimize.TestOptimizeSimple.test_minimize_invalid_bounds + + def test_basic(self): + def f(x): + return np.sum(x**2) + + lb = [-1, None, 1, None, -0.5] + ub = [-0.5, -0.5, None, None, -0.5] + bounds = [(a, b) for a, b in zip(lb, ub)] + # these are converted to Bounds internally + + res = minimize(f, x0=[1, 2, 3, 4, 5], method='cobyla', bounds=bounds) + ref = [-0.5, -0.5, 1, 0, -0.5] + assert res.success + assert_allclose(res.x, ref, atol=1e-3) + + def test_unbounded(self): + def f(x): + return np.sum(x**2) + + bounds = Bounds([-np.inf, -np.inf], [np.inf, np.inf]) + res = minimize(f, x0=[1, 2], method='cobyla', bounds=bounds) + assert res.success + assert_allclose(res.x, 0, atol=1e-3) + + bounds = Bounds([1, -np.inf], [np.inf, np.inf]) + res = minimize(f, x0=[1, 2], method='cobyla', bounds=bounds) + assert res.success + assert_allclose(res.x, [1, 0], atol=1e-3) diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_constraint_conversion.py b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_constraint_conversion.py new file mode 100644 index 
0000000000000000000000000000000000000000..f52c65cdd1d5075b978ac965d96dedbbeeb82e57 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_constraint_conversion.py @@ -0,0 +1,274 @@ +""" +Unit test for constraint conversion +""" + +import numpy as np +from numpy.testing import (assert_array_almost_equal, + assert_allclose, assert_warns, suppress_warnings) +import pytest +from scipy.optimize import (NonlinearConstraint, LinearConstraint, + OptimizeWarning, minimize, BFGS) +from .test_minimize_constrained import (Maratos, HyperbolicIneq, Rosenbrock, + IneqRosenbrock, EqIneqRosenbrock, + BoundedRosenbrock, Elec) + + +class TestOldToNew: + x0 = (2, 0) + bnds = ((0, None), (0, None)) + method = "trust-constr" + + def test_constraint_dictionary_1(self): + def fun(x): + return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2 + cons = ({'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2}, + {'type': 'ineq', 'fun': lambda x: -x[0] - 2 * x[1] + 6}, + {'type': 'ineq', 'fun': lambda x: -x[0] + 2 * x[1] + 2}) + + with suppress_warnings() as sup: + sup.filter(UserWarning, "delta_grad == 0.0") + res = minimize(fun, self.x0, method=self.method, + bounds=self.bnds, constraints=cons) + assert_allclose(res.x, [1.4, 1.7], rtol=1e-4) + assert_allclose(res.fun, 0.8, rtol=1e-4) + + def test_constraint_dictionary_2(self): + def fun(x): + return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2 + cons = {'type': 'eq', + 'fun': lambda x, p1, p2: p1*x[0] - p2*x[1], + 'args': (1, 1.1), + 'jac': lambda x, p1, p2: np.array([[p1, -p2]])} + with suppress_warnings() as sup: + sup.filter(UserWarning, "delta_grad == 0.0") + res = minimize(fun, self.x0, method=self.method, + bounds=self.bnds, constraints=cons) + assert_allclose(res.x, [1.7918552, 1.62895927]) + assert_allclose(res.fun, 1.3857466063348418) + + def test_constraint_dictionary_3(self): + def fun(x): + return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2 + cons = [{'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2}, + NonlinearConstraint(lambda x: x[0] - x[1], 0, 0)] + + with suppress_warnings() as sup: + sup.filter(UserWarning, "delta_grad == 0.0") + res = minimize(fun, self.x0, method=self.method, + bounds=self.bnds, constraints=cons) + assert_allclose(res.x, [1.75, 1.75], rtol=1e-4) + assert_allclose(res.fun, 1.125, rtol=1e-4) + + +class TestNewToOld: + + def test_multiple_constraint_objects(self): + def fun(x): + return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2 + (x[2] - 0.75) ** 2 + x0 = [2, 0, 1] + coni = [] # only inequality constraints (can use cobyla) + methods = ["slsqp", "cobyla", "trust-constr"] + + # mixed old and new + coni.append([{'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2}, + NonlinearConstraint(lambda x: x[0] - x[1], -1, 1)]) + + coni.append([LinearConstraint([1, -2, 0], -2, np.inf), + NonlinearConstraint(lambda x: x[0] - x[1], -1, 1)]) + + coni.append([NonlinearConstraint(lambda x: x[0] - 2 * x[1] + 2, 0, np.inf), + NonlinearConstraint(lambda x: x[0] - x[1], -1, 1)]) + + for con in coni: + funs = {} + for method in methods: + with suppress_warnings() as sup: + sup.filter(UserWarning) + result = minimize(fun, x0, method=method, constraints=con) + funs[method] = result.fun + assert_allclose(funs['slsqp'], funs['trust-constr'], rtol=1e-4) + assert_allclose(funs['cobyla'], funs['trust-constr'], rtol=1e-4) + + def test_individual_constraint_objects(self): + def fun(x): + return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2 + (x[2] - 0.75) ** 2 + x0 = [2, 0, 1] + + cone = [] # with equality constraints (can't use cobyla) + coni = [] # only inequality constraints 
(can use cobyla) + methods = ["slsqp", "cobyla", "trust-constr"] + + # nonstandard data types for constraint equality bounds + cone.append(NonlinearConstraint(lambda x: x[0] - x[1], 1, 1)) + cone.append(NonlinearConstraint(lambda x: x[0] - x[1], [1.21], [1.21])) + cone.append(NonlinearConstraint(lambda x: x[0] - x[1], + 1.21, np.array([1.21]))) + + # multiple equalities + cone.append(NonlinearConstraint( + lambda x: [x[0] - x[1], x[1] - x[2]], + 1.21, 1.21)) # two same equalities + cone.append(NonlinearConstraint( + lambda x: [x[0] - x[1], x[1] - x[2]], + [1.21, 1.4], [1.21, 1.4])) # two different equalities + cone.append(NonlinearConstraint( + lambda x: [x[0] - x[1], x[1] - x[2]], + [1.21, 1.21], 1.21)) # equality specified two ways + cone.append(NonlinearConstraint( + lambda x: [x[0] - x[1], x[1] - x[2]], + [1.21, -np.inf], [1.21, np.inf])) # equality + unbounded + + # nonstandard data types for constraint inequality bounds + coni.append(NonlinearConstraint(lambda x: x[0] - x[1], 1.21, np.inf)) + coni.append(NonlinearConstraint(lambda x: x[0] - x[1], [1.21], np.inf)) + coni.append(NonlinearConstraint(lambda x: x[0] - x[1], + 1.21, np.array([np.inf]))) + coni.append(NonlinearConstraint(lambda x: x[0] - x[1], -np.inf, -3)) + coni.append(NonlinearConstraint(lambda x: x[0] - x[1], + np.array(-np.inf), -3)) + + # multiple inequalities/equalities + coni.append(NonlinearConstraint( + lambda x: [x[0] - x[1], x[1] - x[2]], + 1.21, np.inf)) # two same inequalities + cone.append(NonlinearConstraint( + lambda x: [x[0] - x[1], x[1] - x[2]], + [1.21, -np.inf], [1.21, 1.4])) # mixed equality/inequality + coni.append(NonlinearConstraint( + lambda x: [x[0] - x[1], x[1] - x[2]], + [1.1, .8], [1.2, 1.4])) # bounded above and below + coni.append(NonlinearConstraint( + lambda x: [x[0] - x[1], x[1] - x[2]], + [-1.2, -1.4], [-1.1, -.8])) # - bounded above and below + + # quick check of LinearConstraint class (very little new code to test) + cone.append(LinearConstraint([1, -1, 0], 1.21, 1.21)) + cone.append(LinearConstraint([[1, -1, 0], [0, 1, -1]], 1.21, 1.21)) + cone.append(LinearConstraint([[1, -1, 0], [0, 1, -1]], + [1.21, -np.inf], [1.21, 1.4])) + + for con in coni: + funs = {} + for method in methods: + with suppress_warnings() as sup: + sup.filter(UserWarning) + result = minimize(fun, x0, method=method, constraints=con) + funs[method] = result.fun + assert_allclose(funs['slsqp'], funs['trust-constr'], rtol=1e-3) + assert_allclose(funs['cobyla'], funs['trust-constr'], rtol=1e-3) + + for con in cone: + funs = {} + for method in methods[::2]: # skip cobyla + with suppress_warnings() as sup: + sup.filter(UserWarning) + result = minimize(fun, x0, method=method, constraints=con) + funs[method] = result.fun + assert_allclose(funs['slsqp'], funs['trust-constr'], rtol=1e-3) + + +class TestNewToOldSLSQP: + method = 'slsqp' + elec = Elec(n_electrons=2) + elec.x_opt = np.array([-0.58438468, 0.58438466, 0.73597047, + -0.73597044, 0.34180668, -0.34180667]) + brock = BoundedRosenbrock() + brock.x_opt = [0, 0] + list_of_problems = [Maratos(), + HyperbolicIneq(), + Rosenbrock(), + IneqRosenbrock(), + EqIneqRosenbrock(), + elec, + brock + ] + + def test_list_of_problems(self): + + for prob in self.list_of_problems: + + with suppress_warnings() as sup: + sup.filter(UserWarning) + result = minimize(prob.fun, prob.x0, + method=self.method, + bounds=prob.bounds, + constraints=prob.constr) + + assert_array_almost_equal(result.x, prob.x_opt, decimal=3) + + def test_warn_mixed_constraints(self): + # warns about inefficiency of 
mixed equality/inequality constraints + def fun(x): + return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2 + (x[2] - 0.75) ** 2 + cons = NonlinearConstraint(lambda x: [x[0]**2 - x[1], x[1] - x[2]], + [1.1, .8], [1.1, 1.4]) + bnds = ((0, None), (0, None), (0, None)) + with suppress_warnings() as sup: + sup.filter(UserWarning, "delta_grad == 0.0") + assert_warns(OptimizeWarning, minimize, fun, (2, 0, 1), + method=self.method, bounds=bnds, constraints=cons) + + def test_warn_ignored_options(self): + # warns about constraint options being ignored + def fun(x): + return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2 + (x[2] - 0.75) ** 2 + x0 = (2, 0, 1) + + if self.method == "slsqp": + bnds = ((0, None), (0, None), (0, None)) + else: + bnds = None + + cons = NonlinearConstraint(lambda x: x[0], 2, np.inf) + res = minimize(fun, x0, method=self.method, + bounds=bnds, constraints=cons) + # no warnings without constraint options + assert_allclose(res.fun, 1) + + cons = LinearConstraint([1, 0, 0], 2, np.inf) + res = minimize(fun, x0, method=self.method, + bounds=bnds, constraints=cons) + # no warnings without constraint options + assert_allclose(res.fun, 1) + + cons = [] + cons.append(NonlinearConstraint(lambda x: x[0]**2, 2, np.inf, + keep_feasible=True)) + cons.append(NonlinearConstraint(lambda x: x[0]**2, 2, np.inf, + hess=BFGS())) + cons.append(NonlinearConstraint(lambda x: x[0]**2, 2, np.inf, + finite_diff_jac_sparsity=42)) + cons.append(NonlinearConstraint(lambda x: x[0]**2, 2, np.inf, + finite_diff_rel_step=42)) + cons.append(LinearConstraint([1, 0, 0], 2, np.inf, + keep_feasible=True)) + for con in cons: + # each constraint with ignored options must warn individually + assert_warns(OptimizeWarning, minimize, fun, x0, + method=self.method, bounds=bnds, constraints=con) + + + class TestNewToOldCobyla: + method = 'cobyla' + + list_of_problems = [ + Elec(n_electrons=2), + Elec(n_electrons=4), + ] + + @pytest.mark.slow + def test_list_of_problems(self): + + for prob in self.list_of_problems: + + with suppress_warnings() as sup: + sup.filter(UserWarning) + truth = minimize(prob.fun, prob.x0, + method='trust-constr', + bounds=prob.bounds, + constraints=prob.constr) + result = minimize(prob.fun, prob.x0, + method=self.method, + bounds=prob.bounds, + constraints=prob.constr) + + assert_allclose(result.fun, truth.fun, rtol=1e-3) diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_constraints.py b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_constraints.py new file mode 100644 index 0000000000000000000000000000000000000000..4c4186ba7b6dd6f56b89e2f39add9eb16e6beccb --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_constraints.py @@ -0,0 +1,255 @@ +import pytest +import numpy as np +from numpy.testing import TestCase, assert_array_equal +import scipy.sparse as sps +from scipy.optimize._constraints import ( + Bounds, LinearConstraint, NonlinearConstraint, PreparedConstraint, + new_bounds_to_old, old_bound_to_new, strict_bounds) + + +class TestStrictBounds(TestCase): + def test_scalarvalue_unique_enforce_feasibility(self): + m = 3 + lb = 2 + ub = 4 + enforce_feasibility = False + strict_lb, strict_ub = strict_bounds(lb, ub, + enforce_feasibility, + m) + assert_array_equal(strict_lb, [-np.inf, -np.inf, -np.inf]) + assert_array_equal(strict_ub, [np.inf, np.inf, np.inf]) + + enforce_feasibility = True + strict_lb, strict_ub = strict_bounds(lb, ub, + enforce_feasibility, + m) + assert_array_equal(strict_lb, [2, 2, 2]) + assert_array_equal(strict_ub, [4, 4, 4]) + + def test_vectorvalue_unique_enforce_feasibility(self): + m = 3 +
lb = [1, 2, 3] + ub = [4, 5, 6] + enforce_feasibility = False + strict_lb, strict_ub = strict_bounds(lb, ub, + enforce_feasibility, + m) + assert_array_equal(strict_lb, [-np.inf, -np.inf, -np.inf]) + assert_array_equal(strict_ub, [np.inf, np.inf, np.inf]) + + enforce_feasibility = True + strict_lb, strict_ub = strict_bounds(lb, ub, + enforce_feasibility, + m) + assert_array_equal(strict_lb, [1, 2, 3]) + assert_array_equal(strict_ub, [4, 5, 6]) + + def test_scalarvalue_vector_enforce_feasibility(self): + m = 3 + lb = 2 + ub = 4 + enforce_feasibility = [False, True, False] + strict_lb, strict_ub = strict_bounds(lb, ub, + enforce_feasibility, + m) + assert_array_equal(strict_lb, [-np.inf, 2, -np.inf]) + assert_array_equal(strict_ub, [np.inf, 4, np.inf]) + + def test_vectorvalue_vector_enforce_feasibility(self): + m = 3 + lb = [1, 2, 3] + ub = [4, 6, np.inf] + enforce_feasibility = [True, False, True] + strict_lb, strict_ub = strict_bounds(lb, ub, + enforce_feasibility, + m) + assert_array_equal(strict_lb, [1, -np.inf, 3]) + assert_array_equal(strict_ub, [4, np.inf, np.inf]) + + +def test_prepare_constraint_infeasible_x0(): + lb = np.array([0, 20, 30]) + ub = np.array([0.5, np.inf, 70]) + x0 = np.array([1, 2, 3]) + enforce_feasibility = np.array([False, True, True], dtype=bool) + bounds = Bounds(lb, ub, enforce_feasibility) + pytest.raises(ValueError, PreparedConstraint, bounds, x0) + + pc = PreparedConstraint(Bounds(lb, ub), [1, 2, 3]) + assert (pc.violation([1, 2, 3]) > 0).any() + assert (pc.violation([0.25, 21, 31]) == 0).all() + + x0 = np.array([1, 2, 3, 4]) + A = np.array([[1, 2, 3, 4], [5, 0, 0, 6], [7, 0, 8, 0]]) + enforce_feasibility = np.array([True, True, True], dtype=bool) + linear = LinearConstraint(A, -np.inf, 0, enforce_feasibility) + pytest.raises(ValueError, PreparedConstraint, linear, x0) + + pc = PreparedConstraint(LinearConstraint(A, -np.inf, 0), + [1, 2, 3, 4]) + assert (pc.violation([1, 2, 3, 4]) > 0).any() + assert (pc.violation([-10, 2, -10, 4]) == 0).all() + + def fun(x): + return A.dot(x) + + def jac(x): + return A + + def hess(x, v): + return sps.csr_matrix((4, 4)) + + nonlinear = NonlinearConstraint(fun, -np.inf, 0, jac, hess, + enforce_feasibility) + pytest.raises(ValueError, PreparedConstraint, nonlinear, x0) + + pc = PreparedConstraint(nonlinear, [-10, 2, -10, 4]) + assert (pc.violation([1, 2, 3, 4]) > 0).any() + assert (pc.violation([-10, 2, -10, 4]) == 0).all() + + +def test_violation(): + def cons_f(x): + return np.array([x[0] ** 2 + x[1], x[0] ** 2 - x[1]]) + + nlc = NonlinearConstraint(cons_f, [-1, -0.8500], [2, 2]) + pc = PreparedConstraint(nlc, [0.5, 1]) + + assert_array_equal(pc.violation([0.5, 1]), [0., 0.]) + + np.testing.assert_almost_equal(pc.violation([0.5, 1.2]), [0., 0.1]) + + np.testing.assert_almost_equal(pc.violation([1.2, 1.2]), [0.64, 0]) + + np.testing.assert_almost_equal(pc.violation([0.1, -1.2]), [0.19, 0]) + + np.testing.assert_almost_equal(pc.violation([0.1, 2]), [0.01, 1.14]) + + +def test_new_bounds_to_old(): + lb = np.array([-np.inf, 2, 3]) + ub = np.array([3, np.inf, 10]) + + bounds = [(None, 3), (2, None), (3, 10)] + assert_array_equal(new_bounds_to_old(lb, ub, 3), bounds) + + bounds_single_lb = [(-1, 3), (-1, None), (-1, 10)] + assert_array_equal(new_bounds_to_old(-1, ub, 3), bounds_single_lb) + + bounds_no_lb = [(None, 3), (None, None), (None, 10)] + assert_array_equal(new_bounds_to_old(-np.inf, ub, 3), bounds_no_lb) + + bounds_single_ub = [(None, 20), (2, 20), (3, 20)] + assert_array_equal(new_bounds_to_old(lb, 20, 3), 
bounds_single_ub) + + bounds_no_ub = [(None, None), (2, None), (3, None)] + assert_array_equal(new_bounds_to_old(lb, np.inf, 3), bounds_no_ub) + + bounds_single_both = [(1, 2), (1, 2), (1, 2)] + assert_array_equal(new_bounds_to_old(1, 2, 3), bounds_single_both) + + bounds_no_both = [(None, None), (None, None), (None, None)] + assert_array_equal(new_bounds_to_old(-np.inf, np.inf, 3), bounds_no_both) + + +def test_old_bounds_to_new(): + bounds = ([1, 2], (None, 3), (-1, None)) + lb_true = np.array([1, -np.inf, -1]) + ub_true = np.array([2, 3, np.inf]) + + lb, ub = old_bound_to_new(bounds) + assert_array_equal(lb, lb_true) + assert_array_equal(ub, ub_true) + + bounds = [(-np.inf, np.inf), (np.array([1]), np.array([1]))] + lb, ub = old_bound_to_new(bounds) + + assert_array_equal(lb, [-np.inf, 1]) + assert_array_equal(ub, [np.inf, 1]) + + +class TestBounds: + def test_repr(self): + # so that eval works + from numpy import array, inf # noqa: F401 + for args in ( + (-1.0, 5.0), + (-1.0, np.inf, True), + (np.array([1.0, -np.inf]), np.array([2.0, np.inf])), + (np.array([1.0, -np.inf]), np.array([2.0, np.inf]), + np.array([True, False])), + ): + bounds = Bounds(*args) + bounds2 = eval(repr(Bounds(*args))) + assert_array_equal(bounds.lb, bounds2.lb) + assert_array_equal(bounds.ub, bounds2.ub) + assert_array_equal(bounds.keep_feasible, bounds2.keep_feasible) + + def test_array(self): + # gh13501 + b = Bounds(lb=[0.0, 0.0], ub=[1.0, 1.0]) + assert isinstance(b.lb, np.ndarray) + assert isinstance(b.ub, np.ndarray) + + def test_defaults(self): + b1 = Bounds() + b2 = Bounds(np.asarray(-np.inf), np.asarray(np.inf)) + assert b1.lb == b2.lb + assert b1.ub == b2.ub + + def test_input_validation(self): + message = "Lower and upper bounds must be dense arrays." + with pytest.raises(ValueError, match=message): + Bounds(sps.coo_array([1, 2]), [1, 2]) + with pytest.raises(ValueError, match=message): + Bounds([1, 2], sps.coo_array([1, 2])) + + message = "`keep_feasible` must be a dense array." + with pytest.raises(ValueError, match=message): + Bounds([1, 2], [1, 2], keep_feasible=sps.coo_array([True, True])) + + message = "`lb`, `ub`, and `keep_feasible` must be broadcastable." + with pytest.raises(ValueError, match=message): + Bounds([1, 2], [1, 2, 3]) + + def test_residual(self): + bounds = Bounds(-2, 4) + x0 = [-1, 2] + np.testing.assert_allclose(bounds.residual(x0), ([1, 4], [5, 2])) + + +class TestLinearConstraint: + def test_defaults(self): + A = np.eye(4) + lc = LinearConstraint(A) + lc2 = LinearConstraint(A, -np.inf, np.inf) + assert_array_equal(lc.lb, lc2.lb) + assert_array_equal(lc.ub, lc2.ub) + + def test_input_validation(self): + A = np.eye(4) + message = "`lb`, `ub`, and `keep_feasible` must be broadcastable" + with pytest.raises(ValueError, match=message): + LinearConstraint(A, [1, 2], [1, 2, 3]) + + message = "Constraint limits must be dense arrays" + with pytest.raises(ValueError, match=message): + LinearConstraint(A, sps.coo_array([1, 2]), [2, 3]) + with pytest.raises(ValueError, match=message): + LinearConstraint(A, [1, 2], sps.coo_array([2, 3])) + + message = "`keep_feasible` must be a dense array" + with pytest.raises(ValueError, match=message): + keep_feasible = sps.coo_array([True, True]) + LinearConstraint(A, [1, 2], [2, 3], keep_feasible=keep_feasible) + + A = np.empty((4, 3, 5)) + message = "`A` must have exactly two dimensions." 
+ with pytest.raises(ValueError, match=message): + LinearConstraint(A) + + def test_residual(self): + A = np.eye(2) + lc = LinearConstraint(A, -2, 4) + x0 = [-1, 2] + np.testing.assert_allclose(lc.residual(x0), ([1, 4], [5, 2])) diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_cython_optimize.py b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_cython_optimize.py new file mode 100644 index 0000000000000000000000000000000000000000..2f859c1143eb6b63c439fe278bfdd4fdaa15410f --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_cython_optimize.py @@ -0,0 +1,92 @@ +""" +Test Cython optimize zeros API functions: ``bisect``, ``ridder``, ``brenth``, +and ``brentq`` in `scipy.optimize.cython_optimize`, by finding the roots of a +3rd order polynomial given a sequence of constant terms, ``a0``, and fixed 1st, +2nd, and 3rd order terms in ``args``. + +.. math:: + + f(x, a0, args) = ((args[2]*x + args[1])*x + args[0])*x + a0 + +The 3rd order polynomial function is written in Cython and called in a Python +wrapper named after the zero function. See the private ``_zeros`` Cython module +in `scipy.optimize.cython_optimize` for more information. +""" + +import numpy.testing as npt +from scipy.optimize.cython_optimize import _zeros + +# CONSTANTS +# Solve x**3 - A0 = 0 for A0 = [2.0, 2.1, ..., 2.9]. +# The ARGS have 3 elements just to show how this could be done for any cubic +# polynomial. +A0 = tuple(-2.0 - x/10.0 for x in range(10)) # constant term +ARGS = (0.0, 0.0, 1.0) # 1st, 2nd, and 3rd order terms +XLO, XHI = 0.0, 2.0 # first and second bounds of zeros functions +# absolute and relative tolerances and max iterations for zeros functions +XTOL, RTOL, MITR = 0.001, 0.001, 10 +EXPECTED = [(-a0) ** (1.0/3.0) for a0 in A0] +# = [1.2599210498948732, +# 1.2805791649874942, +# 1.300591446851387, +# 1.3200061217959123, +# 1.338865900164339, +# 1.3572088082974532, +# 1.375068867074141, +# 1.3924766500838337, +# 1.4094597464129783, +# 1.4260431471424087] + + +# test bisect +def test_bisect(): + npt.assert_allclose( + EXPECTED, + list( + _zeros.loop_example('bisect', A0, ARGS, XLO, XHI, XTOL, RTOL, MITR) + ), + rtol=RTOL, atol=XTOL + ) + + +# test ridder +def test_ridder(): + npt.assert_allclose( + EXPECTED, + list( + _zeros.loop_example('ridder', A0, ARGS, XLO, XHI, XTOL, RTOL, MITR) + ), + rtol=RTOL, atol=XTOL + ) + + +# test brenth +def test_brenth(): + npt.assert_allclose( + EXPECTED, + list( + _zeros.loop_example('brenth', A0, ARGS, XLO, XHI, XTOL, RTOL, MITR) + ), + rtol=RTOL, atol=XTOL + ) + + +# test brentq +def test_brentq(): + npt.assert_allclose( + EXPECTED, + list( + _zeros.loop_example('brentq', A0, ARGS, XLO, XHI, XTOL, RTOL, MITR) + ), + rtol=RTOL, atol=XTOL + ) + + +# test brentq with full output +def test_brentq_full_output(): + output = _zeros.full_output_example( + (A0[0],) + ARGS, XLO, XHI, XTOL, RTOL, MITR) + npt.assert_allclose(EXPECTED[0], output['root'], rtol=RTOL, atol=XTOL) + npt.assert_equal(6, output['iterations']) + npt.assert_equal(7, output['funcalls']) + npt.assert_equal(0, output['error_num']) diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_differentiable_functions.py b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_differentiable_functions.py new file mode 100644 index 0000000000000000000000000000000000000000..80e1606a8710e67bcd500303e5ae5d54aa3a5676 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_differentiable_functions.py @@ -0,0 +1,758 @@
+import pytest +import numpy as np +from numpy.testing import (TestCase, assert_array_almost_equal, + assert_array_equal, assert_, assert_allclose, + assert_equal) +from scipy.sparse import csr_matrix +from scipy.sparse.linalg import LinearOperator +from scipy.optimize._differentiable_functions import (ScalarFunction, + VectorFunction, + LinearVectorFunction, + IdentityVectorFunction) +from scipy.optimize import rosen, rosen_der, rosen_hess +from scipy.optimize._hessian_update_strategy import BFGS + + +class ExScalarFunction: + + def __init__(self): + self.nfev = 0 + self.ngev = 0 + self.nhev = 0 + + def fun(self, x): + self.nfev += 1 + return 2*(x[0]**2 + x[1]**2 - 1) - x[0] + + def grad(self, x): + self.ngev += 1 + return np.array([4*x[0]-1, 4*x[1]]) + + def hess(self, x): + self.nhev += 1 + return 4*np.eye(2) + + +class TestScalarFunction(TestCase): + + def test_finite_difference_grad(self): + ex = ExScalarFunction() + nfev = 0 + ngev = 0 + + x0 = [1.0, 0.0] + analit = ScalarFunction(ex.fun, x0, (), ex.grad, + ex.hess, None, (-np.inf, np.inf)) + nfev += 1 + ngev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev, nfev) + approx = ScalarFunction(ex.fun, x0, (), '2-point', + ex.hess, None, (-np.inf, np.inf)) + nfev += 3 + ngev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_equal(analit.f, approx.f) + assert_array_almost_equal(analit.g, approx.g) + + x = [10, 0.3] + f_analit = analit.fun(x) + g_analit = analit.grad(x) + nfev += 1 + ngev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + f_approx = approx.fun(x) + g_approx = approx.grad(x) + nfev += 3 + ngev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_almost_equal(f_analit, f_approx) + assert_array_almost_equal(g_analit, g_approx) + + x = [2.0, 1.0] + g_analit = analit.grad(x) + ngev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + + g_approx = approx.grad(x) + nfev += 3 + ngev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_almost_equal(g_analit, g_approx) + + x = [2.5, 0.3] + f_analit = analit.fun(x) + g_analit = analit.grad(x) + nfev += 1 + ngev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + f_approx = approx.fun(x) + g_approx = approx.grad(x) + nfev += 3 + ngev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_almost_equal(f_analit, f_approx) + assert_array_almost_equal(g_analit, g_approx) + + x = [2, 0.3] + f_analit = analit.fun(x) + g_analit = analit.grad(x) + nfev += 1 + ngev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + f_approx = approx.fun(x) + g_approx = approx.grad(x) + nfev += 3 + ngev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + 
assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_almost_equal(f_analit, f_approx) + assert_array_almost_equal(g_analit, g_approx) + + def test_fun_and_grad(self): + ex = ExScalarFunction() + + def fg_allclose(x, y): + assert_allclose(x[0], y[0]) + assert_allclose(x[1], y[1]) + + # with analytic gradient + x0 = [2.0, 0.3] + analit = ScalarFunction(ex.fun, x0, (), ex.grad, + ex.hess, None, (-np.inf, np.inf)) + + fg = ex.fun(x0), ex.grad(x0) + fg_allclose(analit.fun_and_grad(x0), fg) + assert analit.ngev == 1 + + x0[1] = 1. + fg = ex.fun(x0), ex.grad(x0) + fg_allclose(analit.fun_and_grad(x0), fg) + + # with finite difference gradient + x0 = [2.0, 0.3] + sf = ScalarFunction(ex.fun, x0, (), '3-point', + ex.hess, None, (-np.inf, np.inf)) + assert sf.ngev == 1 + fg = ex.fun(x0), ex.grad(x0) + fg_allclose(sf.fun_and_grad(x0), fg) + assert sf.ngev == 1 + + x0[1] = 1. + fg = ex.fun(x0), ex.grad(x0) + fg_allclose(sf.fun_and_grad(x0), fg) + + def test_finite_difference_hess_linear_operator(self): + ex = ExScalarFunction() + nfev = 0 + ngev = 0 + nhev = 0 + + x0 = [1.0, 0.0] + analit = ScalarFunction(ex.fun, x0, (), ex.grad, + ex.hess, None, (-np.inf, np.inf)) + nfev += 1 + ngev += 1 + nhev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev, ngev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev, nhev) + approx = ScalarFunction(ex.fun, x0, (), ex.grad, + '2-point', None, (-np.inf, np.inf)) + assert_(isinstance(approx.H, LinearOperator)) + for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]): + assert_array_equal(analit.f, approx.f) + assert_array_almost_equal(analit.g, approx.g) + assert_array_almost_equal(analit.H.dot(v), approx.H.dot(v)) + nfev += 1 + ngev += 4 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + + x = [2.0, 1.0] + H_analit = analit.hess(x) + nhev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + H_approx = approx.hess(x) + assert_(isinstance(H_approx, LinearOperator)) + for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]): + assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v)) + ngev += 4 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + + x = [2.1, 1.2] + H_analit = analit.hess(x) + nhev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + H_approx = approx.hess(x) + assert_(isinstance(H_approx, LinearOperator)) + for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]): + assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v)) + ngev += 4 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev+approx.ngev, ngev) 
+ assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + + x = [2.5, 0.3] + _ = analit.grad(x) + H_analit = analit.hess(x) + ngev += 1 + nhev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + _ = approx.grad(x) + H_approx = approx.hess(x) + assert_(isinstance(H_approx, LinearOperator)) + for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]): + assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v)) + ngev += 4 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + + x = [5.2, 2.3] + _ = analit.grad(x) + H_analit = analit.hess(x) + ngev += 1 + nhev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + _ = approx.grad(x) + H_approx = approx.hess(x) + assert_(isinstance(H_approx, LinearOperator)) + for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]): + assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v)) + ngev += 4 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + + def test_x_storage_overlap(self): + # Scalar_Function should not store references to arrays, it should + # store copies - this checks that updating an array in-place causes + # Scalar_Function.x to be updated. + + def f(x): + return np.sum(np.asarray(x) ** 2) + + x = np.array([1., 2., 3.]) + sf = ScalarFunction(f, x, (), '3-point', lambda x: x, None, (-np.inf, np.inf)) + + assert x is not sf.x + assert_equal(sf.fun(x), 14.0) + assert x is not sf.x + + x[0] = 0. + f1 = sf.fun(x) + assert_equal(f1, 13.0) + + x[0] = 1 + f2 = sf.fun(x) + assert_equal(f2, 14.0) + assert x is not sf.x + + # now test with a HessianUpdate strategy specified + hess = BFGS() + x = np.array([1., 2., 3.]) + sf = ScalarFunction(f, x, (), '3-point', hess, None, (-np.inf, np.inf)) + + assert x is not sf.x + assert_equal(sf.fun(x), 14.0) + assert x is not sf.x + + x[0] = 0. + f1 = sf.fun(x) + assert_equal(f1, 13.0) + + x[0] = 1 + f2 = sf.fun(x) + assert_equal(f2, 14.0) + assert x is not sf.x + + # gh13740 x is changed in user function + def ff(x): + x *= x # overwrite x + return np.sum(x) + + x = np.array([1., 2., 3.]) + sf = ScalarFunction( + ff, x, (), '3-point', lambda x: x, None, (-np.inf, np.inf) + ) + assert x is not sf.x + assert_equal(sf.fun(x), 14.0) + assert_equal(sf.x, np.array([1., 2., 3.])) + assert x is not sf.x + + def test_lowest_x(self): + # ScalarFunction should remember the lowest func(x) visited. 
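+ # (The Rosenbrock function has its minimum f = 0 at [1, 1, 1], so of the + # points evaluated below, [1, 1, 1] is the best; the later, slightly worse + # evaluations must not overwrite the remembered lowest pair.)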
+ x0 = np.array([2, 3, 4]) + sf = ScalarFunction(rosen, x0, (), rosen_der, rosen_hess, + None, None) + sf.fun([1, 1, 1]) + sf.fun(x0) + sf.fun([1.01, 1, 1.0]) + sf.grad([1.01, 1, 1.0]) + assert_equal(sf._lowest_f, 0.0) + assert_equal(sf._lowest_x, [1.0, 1.0, 1.0]) + + sf = ScalarFunction(rosen, x0, (), '2-point', rosen_hess, + None, (-np.inf, np.inf)) + sf.fun([1, 1, 1]) + sf.fun(x0) + sf.fun([1.01, 1, 1.0]) + sf.grad([1.01, 1, 1.0]) + assert_equal(sf._lowest_f, 0.0) + assert_equal(sf._lowest_x, [1.0, 1.0, 1.0]) + + def test_float_size(self): + x0 = np.array([2, 3, 4]).astype(np.float32) + + # check that ScalarFunction/approx_derivative always send the correct + # float width + def rosen_(x): + assert x.dtype == np.float32 + return rosen(x) + + sf = ScalarFunction(rosen_, x0, (), '2-point', rosen_hess, + None, (-np.inf, np.inf)) + res = sf.fun(x0) + assert res.dtype == np.float32 + + +class ExVectorialFunction: + + def __init__(self): + self.nfev = 0 + self.njev = 0 + self.nhev = 0 + + def fun(self, x): + self.nfev += 1 + return np.array([2*(x[0]**2 + x[1]**2 - 1) - x[0], + 4*(x[0]**3 + x[1]**2 - 4) - 3*x[0]], dtype=x.dtype) + + def jac(self, x): + self.njev += 1 + return np.array([[4*x[0]-1, 4*x[1]], + [12*x[0]**2-3, 8*x[1]]], dtype=x.dtype) + + def hess(self, x, v): + self.nhev += 1 + return v[0]*4*np.eye(2) + v[1]*np.array([[24*x[0], 0], + [0, 8]]) + + +class TestVectorialFunction(TestCase): + + def test_finite_difference_jac(self): + ex = ExVectorialFunction() + nfev = 0 + njev = 0 + + x0 = [1.0, 0.0] + analit = VectorFunction(ex.fun, x0, ex.jac, ex.hess, None, None, + (-np.inf, np.inf), None) + nfev += 1 + njev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev, njev) + approx = VectorFunction(ex.fun, x0, '2-point', ex.hess, None, None, + (-np.inf, np.inf), None) + nfev += 3 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_equal(analit.f, approx.f) + assert_array_almost_equal(analit.J, approx.J) + + x = [10, 0.3] + f_analit = analit.fun(x) + J_analit = analit.jac(x) + nfev += 1 + njev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + f_approx = approx.fun(x) + J_approx = approx.jac(x) + nfev += 3 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_almost_equal(f_analit, f_approx) + assert_array_almost_equal(J_analit, J_approx, decimal=4) + + x = [2.0, 1.0] + J_analit = analit.jac(x) + njev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + J_approx = approx.jac(x) + nfev += 3 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_almost_equal(J_analit, J_approx) + + x = [2.5, 0.3] + f_analit = analit.fun(x) + J_analit = analit.jac(x) + nfev += 1 + njev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + 
assert_array_equal(analit.njev+approx.njev, njev) + f_approx = approx.fun(x) + J_approx = approx.jac(x) + nfev += 3 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_almost_equal(f_analit, f_approx) + assert_array_almost_equal(J_analit, J_approx) + + x = [2, 0.3] + f_analit = analit.fun(x) + J_analit = analit.jac(x) + nfev += 1 + njev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + f_approx = approx.fun(x) + J_approx = approx.jac(x) + nfev += 3 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_almost_equal(f_analit, f_approx) + assert_array_almost_equal(J_analit, J_approx) + + def test_finite_difference_hess_linear_operator(self): + ex = ExVectorialFunction() + nfev = 0 + njev = 0 + nhev = 0 + + x0 = [1.0, 0.0] + v0 = [1.0, 2.0] + analit = VectorFunction(ex.fun, x0, ex.jac, ex.hess, None, None, + (-np.inf, np.inf), None) + nfev += 1 + njev += 1 + nhev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev, njev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev, nhev) + approx = VectorFunction(ex.fun, x0, ex.jac, '2-point', None, None, + (-np.inf, np.inf), None) + assert_(isinstance(approx.H, LinearOperator)) + for p in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]): + assert_array_equal(analit.f, approx.f) + assert_array_almost_equal(analit.J, approx.J) + assert_array_almost_equal(analit.H.dot(p), approx.H.dot(p)) + nfev += 1 + njev += 4 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + + x = [2.0, 1.0] + H_analit = analit.hess(x, v0) + nhev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + H_approx = approx.hess(x, v0) + assert_(isinstance(H_approx, LinearOperator)) + for p in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]): + assert_array_almost_equal(H_analit.dot(p), H_approx.dot(p), + decimal=5) + njev += 4 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + + x = [2.1, 1.2] + v = [1.0, 1.0] + H_analit = analit.hess(x, v) + nhev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + H_approx = approx.hess(x, v) + assert_(isinstance(H_approx, LinearOperator)) + for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]): + assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v)) + njev += 4 + assert_array_equal(ex.nfev, nfev) + 
assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + + x = [2.5, 0.3] + _ = analit.jac(x) + H_analit = analit.hess(x, v0) + njev += 1 + nhev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + _ = approx.jac(x) + H_approx = approx.hess(x, v0) + assert_(isinstance(H_approx, LinearOperator)) + for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]): + assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v), decimal=4) + njev += 4 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + + x = [5.2, 2.3] + v = [2.3, 5.2] + _ = analit.jac(x) + H_analit = analit.hess(x, v) + njev += 1 + nhev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + _ = approx.jac(x) + H_approx = approx.hess(x, v) + assert_(isinstance(H_approx, LinearOperator)) + for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]): + assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v), decimal=4) + njev += 4 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + + def test_x_storage_overlap(self): + # VectorFunction should not store references to arrays, it should + # store copies - this checks that updating an array in-place causes + # VectorFunction.x to be updated. + ex = ExVectorialFunction() + x0 = np.array([1.0, 0.0]) + + vf = VectorFunction(ex.fun, x0, '3-point', ex.hess, None, None, + (-np.inf, np.inf), None) + + assert x0 is not vf.x + assert_equal(vf.fun(x0), ex.fun(x0)) + assert x0 is not vf.x + + x0[0] = 2. + assert_equal(vf.fun(x0), ex.fun(x0)) + assert x0 is not vf.x + + x0[0] = 1. + assert_equal(vf.fun(x0), ex.fun(x0)) + assert x0 is not vf.x + + # now test with a HessianUpdate strategy specified + hess = BFGS() + x0 = np.array([1.0, 0.0]) + vf = VectorFunction(ex.fun, x0, '3-point', hess, None, None, + (-np.inf, np.inf), None) + + with pytest.warns(UserWarning): + # filter UserWarning because ExVectorialFunction is linear and + # a quasi-Newton approximation is used for the Hessian. + assert x0 is not vf.x + assert_equal(vf.fun(x0), ex.fun(x0)) + assert x0 is not vf.x + + x0[0] = 2. + assert_equal(vf.fun(x0), ex.fun(x0)) + assert x0 is not vf.x + + x0[0] = 1.
+ assert_equal(vf.fun(x0), ex.fun(x0)) + assert x0 is not vf.x + + def test_float_size(self): + ex = ExVectorialFunction() + x0 = np.array([1.0, 0.0]).astype(np.float32) + + vf = VectorFunction(ex.fun, x0, ex.jac, ex.hess, None, None, + (-np.inf, np.inf), None) + + res = vf.fun(x0) + assert res.dtype == np.float32 + + res = vf.jac(x0) + assert res.dtype == np.float32 + + +def test_LinearVectorFunction(): + A_dense = np.array([ + [-1, 2, 0], + [0, 4, 2] + ]) + x0 = np.zeros(3) + A_sparse = csr_matrix(A_dense) + x = np.array([1, -1, 0]) + v = np.array([-1, 1]) + Ax = np.array([-3, -4]) + + f1 = LinearVectorFunction(A_dense, x0, None) + assert_(not f1.sparse_jacobian) + + f2 = LinearVectorFunction(A_dense, x0, True) + assert_(f2.sparse_jacobian) + + f3 = LinearVectorFunction(A_dense, x0, False) + assert_(not f3.sparse_jacobian) + + f4 = LinearVectorFunction(A_sparse, x0, None) + assert_(f4.sparse_jacobian) + + f5 = LinearVectorFunction(A_sparse, x0, True) + assert_(f5.sparse_jacobian) + + f6 = LinearVectorFunction(A_sparse, x0, False) + assert_(not f6.sparse_jacobian) + + assert_array_equal(f1.fun(x), Ax) + assert_array_equal(f2.fun(x), Ax) + assert_array_equal(f1.jac(x), A_dense) + assert_array_equal(f2.jac(x).toarray(), A_sparse.toarray()) + assert_array_equal(f1.hess(x, v).toarray(), np.zeros((3, 3))) + + +def test_LinearVectorFunction_memoization(): + A = np.array([[-1, 2, 0], [0, 4, 2]]) + x0 = np.array([1, 2, -1]) + fun = LinearVectorFunction(A, x0, False) + + assert_array_equal(x0, fun.x) + assert_array_equal(A.dot(x0), fun.f) + + x1 = np.array([-1, 3, 10]) + assert_array_equal(A, fun.jac(x1)) + assert_array_equal(x1, fun.x) + assert_array_equal(A.dot(x0), fun.f) + assert_array_equal(A.dot(x1), fun.fun(x1)) + assert_array_equal(A.dot(x1), fun.f) + + +def test_IdentityVectorFunction(): + x0 = np.zeros(3) + + f1 = IdentityVectorFunction(x0, None) + f2 = IdentityVectorFunction(x0, False) + f3 = IdentityVectorFunction(x0, True) + + assert_(f1.sparse_jacobian) + assert_(not f2.sparse_jacobian) + assert_(f3.sparse_jacobian) + + x = np.array([-1, 2, 1]) + v = np.array([-2, 3, 0]) + + assert_array_equal(f1.fun(x), x) + assert_array_equal(f2.fun(x), x) + + assert_array_equal(f1.jac(x).toarray(), np.eye(3)) + assert_array_equal(f2.jac(x), np.eye(3)) + + assert_array_equal(f1.hess(x, v).toarray(), np.zeros((3, 3))) diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_differentiate.py b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_differentiate.py new file mode 100644 index 0000000000000000000000000000000000000000..5e010dabf4d060d9a4638be3a5f6491b00d79a2f --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_differentiate.py @@ -0,0 +1,396 @@ +import pytest + +import numpy as np +from numpy.testing import assert_array_less, assert_allclose, assert_equal + +import scipy._lib._elementwise_iterative_method as eim +from scipy import stats +from scipy.optimize._differentiate import (_differentiate as differentiate, + _EERRORINCREASE) + +class TestDifferentiate: + + def f(self, x): + return stats.norm().cdf(x) + + @pytest.mark.parametrize('x', [0.6, np.linspace(-0.05, 1.05, 10)]) + def test_basic(self, x): + # Differentiate the distribution CDF and compare against the + # distribution `pdf` + res = differentiate(self.f, x) + ref = stats.norm().pdf(x) + np.testing.assert_allclose(res.df, ref) + # This would be nice, but doesn't always work out. `error` is an + # estimate, not a bound.
+ assert_array_less(abs(res.df - ref), res.error) + assert res.x.shape == ref.shape + + @pytest.mark.parametrize('case', stats._distr_params.distcont) + def test_accuracy(self, case): + distname, params = case + dist = getattr(stats, distname)(*params) + x = dist.median() + 0.1 + res = differentiate(dist.cdf, x) + ref = dist.pdf(x) + assert_allclose(res.df, ref, atol=1e-10) + + @pytest.mark.parametrize('order', [1, 6]) + @pytest.mark.parametrize('shape', [tuple(), (12,), (3, 4), (3, 2, 2)]) + def test_vectorization(self, order, shape): + # Test for correct functionality, output shapes, and dtypes for various + # input shapes. + x = np.linspace(-0.05, 1.05, 12).reshape(shape) if shape else 0.6 + n = np.size(x) + + @np.vectorize + def _differentiate_single(x): + return differentiate(self.f, x, order=order) + + def f(x, *args, **kwargs): + f.nit += 1 + f.feval += 1 if (x.size == n or x.ndim <=1) else x.shape[-1] + return self.f(x, *args, **kwargs) + f.nit = -1 + f.feval = 0 + + res = differentiate(f, x, order=order) + refs = _differentiate_single(x).ravel() + + ref_x = [ref.x for ref in refs] + assert_allclose(res.x.ravel(), ref_x) + assert_equal(res.x.shape, shape) + + ref_df = [ref.df for ref in refs] + assert_allclose(res.df.ravel(), ref_df) + assert_equal(res.df.shape, shape) + + ref_error = [ref.error for ref in refs] + assert_allclose(res.error.ravel(), ref_error, atol=5e-15) + assert_equal(res.error.shape, shape) + + ref_success = [ref.success for ref in refs] + assert_equal(res.success.ravel(), ref_success) + assert_equal(res.success.shape, shape) + assert np.issubdtype(res.success.dtype, np.bool_) + + ref_flag = [ref.status for ref in refs] + assert_equal(res.status.ravel(), ref_flag) + assert_equal(res.status.shape, shape) + assert np.issubdtype(res.status.dtype, np.integer) + + ref_nfev = [ref.nfev for ref in refs] + assert_equal(res.nfev.ravel(), ref_nfev) + assert_equal(np.max(res.nfev), f.feval) + assert_equal(res.nfev.shape, res.x.shape) + assert np.issubdtype(res.nfev.dtype, np.integer) + + ref_nit = [ref.nit for ref in refs] + assert_equal(res.nit.ravel(), ref_nit) + assert_equal(np.max(res.nit), f.nit) + assert_equal(res.nit.shape, res.x.shape) + assert np.issubdtype(res.nit.dtype, np.integer) + + def test_flags(self): + # Test cases that should produce different status flags; show that all + # can be produced simultaneously. + rng = np.random.default_rng(5651219684984213) + def f(xs, js): + f.nit += 1 + funcs = [lambda x: x - 2.5, # converges + lambda x: np.exp(x)*rng.random(), # error increases + lambda x: np.exp(x), # reaches maxiter due to order=2 + lambda x: np.full_like(x, np.nan)[()]] # stops due to NaN + res = [funcs[j](x) for x, j in zip(xs, js.ravel())] + return res + f.nit = 0 + + args = (np.arange(4, dtype=np.int64),) + res = differentiate(f, [1]*4, rtol=1e-14, order=2, args=args) + + ref_flags = np.array([eim._ECONVERGED, + _EERRORINCREASE, + eim._ECONVERR, + eim._EVALUEERR]) + assert_equal(res.status, ref_flags) + + def test_flags_preserve_shape(self): + # Same test as above but using `preserve_shape` option to simplify. 
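+ # (With `preserve_shape=True` the callable may return several outputs for + # a single abscissa -- four here -- and each element is differentiated and + # given a status flag independently, so the `args`-based dispatch used in + # `test_flags` above is unnecessary.)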
+ rng = np.random.default_rng(5651219684984213) + def f(x): + return [x - 2.5, # converges + np.exp(x)*rng.random(), # error increases + np.exp(x), # reaches maxiter due to order=2 + np.full_like(x, np.nan)[()]] # stops due to NaN + + res = differentiate(f, 1, rtol=1e-14, order=2, preserve_shape=True) + + ref_flags = np.array([eim._ECONVERGED, + _EERRORINCREASE, + eim._ECONVERR, + eim._EVALUEERR]) + assert_equal(res.status, ref_flags) + + def test_preserve_shape(self): + # Test `preserve_shape` option + def f(x): + return [x, np.sin(3*x), x+np.sin(10*x), np.sin(20*x)*(x-1)**2] + + x = 0 + ref = [1, 3*np.cos(3*x), 1+10*np.cos(10*x), + 20*np.cos(20*x)*(x-1)**2 + 2*np.sin(20*x)*(x-1)] + res = differentiate(f, x, preserve_shape=True) + assert_allclose(res.df, ref) + + def test_convergence(self): + # Test that the convergence tolerances behave as expected + dist = stats.norm() + x = 1 + f = dist.cdf + ref = dist.pdf(x) + kwargs0 = dict(atol=0, rtol=0, order=4) + + kwargs = kwargs0.copy() + kwargs['atol'] = 1e-3 + res1 = differentiate(f, x, **kwargs) + assert_array_less(abs(res1.df - ref), 1e-3) + kwargs['atol'] = 1e-6 + res2 = differentiate(f, x, **kwargs) + assert_array_less(abs(res2.df - ref), 1e-6) + assert_array_less(abs(res2.df - ref), abs(res1.df - ref)) + + kwargs = kwargs0.copy() + kwargs['rtol'] = 1e-3 + res1 = differentiate(f, x, **kwargs) + assert_array_less(abs(res1.df - ref), 1e-3 * np.abs(ref)) + kwargs['rtol'] = 1e-6 + res2 = differentiate(f, x, **kwargs) + assert_array_less(abs(res2.df - ref), 1e-6 * np.abs(ref)) + assert_array_less(abs(res2.df - ref), abs(res1.df - ref)) + + def test_step_parameters(self): + # Test that step factors have the expected effect on accuracy + dist = stats.norm() + x = 1 + f = dist.cdf + ref = dist.pdf(x) + + res1 = differentiate(f, x, initial_step=0.5, maxiter=1) + res2 = differentiate(f, x, initial_step=0.05, maxiter=1) + assert abs(res2.df - ref) < abs(res1.df - ref) + + res1 = differentiate(f, x, step_factor=2, maxiter=1) + res2 = differentiate(f, x, step_factor=20, maxiter=1) + assert abs(res2.df - ref) < abs(res1.df - ref) + + # `step_factor` can be less than 1: `initial_step` is the minimum step + kwargs = dict(order=4, maxiter=1, step_direction=0) + res = differentiate(f, x, initial_step=0.5, step_factor=0.5, **kwargs) + ref = differentiate(f, x, initial_step=1, step_factor=2, **kwargs) + assert_allclose(res.df, ref.df, rtol=5e-15) + + # This is a similar test for one-sided difference + kwargs = dict(order=2, maxiter=1, step_direction=1) + res = differentiate(f, x, initial_step=1, step_factor=2, **kwargs) + ref = differentiate(f, x, initial_step=1/np.sqrt(2), step_factor=0.5, + **kwargs) + assert_allclose(res.df, ref.df, rtol=5e-15) + + kwargs['step_direction'] = -1 + res = differentiate(f, x, initial_step=1, step_factor=2, **kwargs) + ref = differentiate(f, x, initial_step=1/np.sqrt(2), step_factor=0.5, + **kwargs) + assert_allclose(res.df, ref.df, rtol=5e-15) + + def test_step_direction(self): + # test that `step_direction` works as expected + def f(x): + y = np.exp(x) + y[(x < 0) + (x > 2)] = np.nan + return y + + x = np.linspace(0, 2, 10) + step_direction = np.zeros_like(x) + step_direction[x < 0.6], step_direction[x > 1.4] = 1, -1 + res = differentiate(f, x, step_direction=step_direction) + assert_allclose(res.df, np.exp(x)) + assert np.all(res.success) + + def test_vectorized_step_direction_args(self): + # test that `step_direction` and `args` are vectorized properly + def f(x, p): + return x ** p + + def df(x, p): + return p * x ** (p 
- 1) + + x = np.array([1, 2, 3, 4]).reshape(-1, 1, 1) + hdir = np.array([-1, 0, 1]).reshape(1, -1, 1) + p = np.array([2, 3]).reshape(1, 1, -1) + res = differentiate(f, x, step_direction=hdir, args=(p,)) + ref = np.broadcast_to(df(x, p), res.df.shape) + assert_allclose(res.df, ref) + + def test_maxiter_callback(self): + # Test behavior of `maxiter` parameter and `callback` interface + x = 0.612814 + dist = stats.norm() + maxiter = 3 + + def f(x): + res = dist.cdf(x) + return res + + default_order = 8 + res = differentiate(f, x, maxiter=maxiter, rtol=1e-15) + assert not np.any(res.success) + assert np.all(res.nfev == default_order + 1 + (maxiter - 1)*2) + assert np.all(res.nit == maxiter) + + def callback(res): + callback.iter += 1 + callback.res = res + assert hasattr(res, 'x') + assert res.df not in callback.dfs + callback.dfs.add(res.df) + assert res.status == eim._EINPROGRESS + if callback.iter == maxiter: + raise StopIteration + callback.iter = -1 # callback called once before first iteration + callback.res = None + callback.dfs = set() + + res2 = differentiate(f, x, callback=callback, rtol=1e-15) + # terminating with callback is identical to terminating due to maxiter + # (except for `status`) + for key in res.keys(): + if key == 'status': + assert res[key] == eim._ECONVERR + assert callback.res[key] == eim._EINPROGRESS + assert res2[key] == eim._ECALLBACK + else: + assert res2[key] == callback.res[key] == res[key] + + @pytest.mark.parametrize("hdir", (-1, 0, 1)) + @pytest.mark.parametrize("x", (0.65, [0.65, 0.7])) + @pytest.mark.parametrize("dtype", (np.float16, np.float32, np.float64)) + def test_dtype(self, hdir, x, dtype): + # Test that dtypes are preserved + x = np.asarray(x, dtype=dtype)[()] + + def f(x): + assert x.dtype == dtype + return np.exp(x) + + def callback(res): + assert res.x.dtype == dtype + assert res.df.dtype == dtype + assert res.error.dtype == dtype + + res = differentiate(f, x, order=4, step_direction=hdir, + callback=callback) + assert res.x.dtype == dtype + assert res.df.dtype == dtype + assert res.error.dtype == dtype + eps = np.finfo(dtype).eps + assert_allclose(res.df, np.exp(res.x), rtol=np.sqrt(eps)) + + def test_input_validation(self): + # Test input validation for appropriate error messages + + message = '`func` must be callable.' + with pytest.raises(ValueError, match=message): + differentiate(None, 1) + + message = 'Abscissae and function output must be real numbers.' + with pytest.raises(ValueError, match=message): + differentiate(lambda x: x, -4+1j) + + message = "When `preserve_shape=False`, the shape of the array..." + with pytest.raises(ValueError, match=message): + differentiate(lambda x: [1, 2, 3], [-2, -3]) + + message = 'Tolerances and step parameters must be non-negative...' + with pytest.raises(ValueError, match=message): + differentiate(lambda x: x, 1, atol=-1) + with pytest.raises(ValueError, match=message): + differentiate(lambda x: x, 1, rtol='ekki') + with pytest.raises(ValueError, match=message): + differentiate(lambda x: x, 1, initial_step=None) + with pytest.raises(ValueError, match=message): + differentiate(lambda x: x, 1, step_factor=object()) + + message = '`maxiter` must be a positive integer.' 
+ with pytest.raises(ValueError, match=message): + differentiate(lambda x: x, 1, maxiter=1.5) + with pytest.raises(ValueError, match=message): + differentiate(lambda x: x, 1, maxiter=0) + + message = '`order` must be a positive integer' + with pytest.raises(ValueError, match=message): + differentiate(lambda x: x, 1, order=1.5) + with pytest.raises(ValueError, match=message): + differentiate(lambda x: x, 1, order=0) + + message = '`preserve_shape` must be True or False.' + with pytest.raises(ValueError, match=message): + differentiate(lambda x: x, 1, preserve_shape='herring') + + message = '`callback` must be callable.' + with pytest.raises(ValueError, match=message): + differentiate(lambda x: x, 1, callback='shrubbery') + + def test_special_cases(self): + # Test edge cases and other special cases + + # Test that integers are not passed to `f` + # (otherwise this would overflow) + def f(x): + assert np.issubdtype(x.dtype, np.floating) + return x ** 99 - 1 + + res = differentiate(f, 7, rtol=1e-10) + assert res.success + assert_allclose(res.df, 99*7.**98) + + # Test that if success is achieved in the correct number + # of iterations if function is a polynomial. Ideally, all polynomials + # of order 0-2 would get exact result with 0 refinement iterations, + # all polynomials of order 3-4 would be differentiated exactly after + # 1 iteration, etc. However, it seems that _differentiate needs an + # extra iteration to detect convergence based on the error estimate. + + for n in range(6): + x = 1.5 + def f(x): + return 2*x**n + + ref = 2*n*x**(n-1) + + res = differentiate(f, x, maxiter=1, order=max(1, n)) + assert_allclose(res.df, ref, rtol=1e-15) + assert_equal(res.error, np.nan) + + res = differentiate(f, x, order=max(1, n)) + assert res.success + assert res.nit == 2 + assert_allclose(res.df, ref, rtol=1e-15) + + # Test scalar `args` (not in tuple) + def f(x, c): + return c*x - 1 + + res = differentiate(f, 2, args=3) + assert_allclose(res.df, 3) + + @pytest.mark.xfail + @pytest.mark.parametrize("case", ( # function, evaluation point + (lambda x: (x - 1) ** 3, 1), + (lambda x: np.where(x > 1, (x - 1) ** 5, (x - 1) ** 3), 1) + )) + def test_saddle_gh18811(self, case): + # With default settings, _differentiate will not always converge when + # the true derivative is exactly zero. This tests that specifying a + # (tight) `atol` alleviates the problem. See discussion in gh-18811. + atol = 1e-16 + res = differentiate(*case, step_direction=[-1, 0, 1], atol=atol) + assert np.all(res.success) + assert_allclose(res.df, 0, atol=atol) diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_direct.py b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_direct.py new file mode 100644 index 0000000000000000000000000000000000000000..f131527deac44edc095be9d4d96d57fa49dadd1b --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_direct.py @@ -0,0 +1,318 @@ +""" +Unit test for DIRECT optimization algorithm. 
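+ +DIRECT (DIviding RECTangles) is a deterministic global optimizer: it +iteratively partitions the search space into hyperrectangles and samples +their centers. Because it is deterministic, the callback test below can +require results identical to an unmodified run.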
+""" +from numpy.testing import (assert_allclose, + assert_array_less) +import pytest +import numpy as np +from scipy.optimize import direct, Bounds + + +class TestDIRECT: + + def setup_method(self): + self.fun_calls = 0 + self.bounds_sphere = 4*[(-2, 3)] + self.optimum_sphere_pos = np.zeros((4, )) + self.optimum_sphere = 0.0 + self.bounds_stylinski_tang = Bounds([-4., -4.], [4., 4.]) + self.maxiter = 1000 + + # test functions + def sphere(self, x): + self.fun_calls += 1 + return np.square(x).sum() + + def inv(self, x): + if np.sum(x) == 0: + raise ZeroDivisionError() + return 1/np.sum(x) + + def nan_fun(self, x): + return np.nan + + def inf_fun(self, x): + return np.inf + + def styblinski_tang(self, pos): + x, y = pos + return 0.5 * (x**4 - 16 * x**2 + 5 * x + y**4 - 16 * y**2 + 5 * y) + + @pytest.mark.parametrize("locally_biased", [True, False]) + def test_direct(self, locally_biased): + res = direct(self.sphere, self.bounds_sphere, + locally_biased=locally_biased) + + # test accuracy + assert_allclose(res.x, self.optimum_sphere_pos, + rtol=1e-3, atol=1e-3) + assert_allclose(res.fun, self.optimum_sphere, atol=1e-5, rtol=1e-5) + + # test that result lies within bounds + _bounds = np.asarray(self.bounds_sphere) + assert_array_less(_bounds[:, 0], res.x) + assert_array_less(res.x, _bounds[:, 1]) + + # test number of function evaluations. Original DIRECT overshoots by + # up to 500 evaluations in last iteration + assert res.nfev <= 1000 * (len(self.bounds_sphere) + 1) + # test that number of function evaluations is correct + assert res.nfev == self.fun_calls + + # test that number of iterations is below supplied maximum + assert res.nit <= self.maxiter + + @pytest.mark.parametrize("locally_biased", [True, False]) + def test_direct_callback(self, locally_biased): + # test that callback does not change the result + res = direct(self.sphere, self.bounds_sphere, + locally_biased=locally_biased) + + def callback(x): + x = 2*x + dummy = np.square(x) + print("DIRECT minimization algorithm callback test") + return dummy + + res_callback = direct(self.sphere, self.bounds_sphere, + locally_biased=locally_biased, + callback=callback) + + assert_allclose(res.x, res_callback.x) + + assert res.nit == res_callback.nit + assert res.nfev == res_callback.nfev + assert res.status == res_callback.status + assert res.success == res_callback.success + assert res.fun == res_callback.fun + assert_allclose(res.x, res_callback.x) + assert res.message == res_callback.message + + # test accuracy + assert_allclose(res_callback.x, self.optimum_sphere_pos, + rtol=1e-3, atol=1e-3) + assert_allclose(res_callback.fun, self.optimum_sphere, + atol=1e-5, rtol=1e-5) + + @pytest.mark.parametrize("locally_biased", [True, False]) + def test_exception(self, locally_biased): + bounds = 4*[(-10, 10)] + with pytest.raises(ZeroDivisionError): + direct(self.inv, bounds=bounds, + locally_biased=locally_biased) + + @pytest.mark.parametrize("locally_biased", [True, False]) + def test_nan(self, locally_biased): + bounds = 4*[(-10, 10)] + direct(self.nan_fun, bounds=bounds, + locally_biased=locally_biased) + + @pytest.mark.parametrize("len_tol", [1e-3, 1e-4]) + @pytest.mark.parametrize("locally_biased", [True, False]) + def test_len_tol(self, len_tol, locally_biased): + bounds = 4*[(-10., 10.)] + res = direct(self.sphere, bounds=bounds, len_tol=len_tol, + vol_tol=1e-30, locally_biased=locally_biased) + assert res.status == 5 + assert res.success + assert_allclose(res.x, np.zeros((4, ))) + message = ("The side length measure of the 
hyperrectangle containing " + "the lowest function value found is below " + f"len_tol={len_tol}") + assert res.message == message + + @pytest.mark.parametrize("vol_tol", [1e-6, 1e-8]) + @pytest.mark.parametrize("locally_biased", [True, False]) + def test_vol_tol(self, vol_tol, locally_biased): + bounds = 4*[(-10., 10.)] + res = direct(self.sphere, bounds=bounds, vol_tol=vol_tol, + len_tol=0., locally_biased=locally_biased) + assert res.status == 4 + assert res.success + assert_allclose(res.x, np.zeros((4, ))) + message = ("The volume of the hyperrectangle containing the lowest " + f"function value found is below vol_tol={vol_tol}") + assert res.message == message + + @pytest.mark.parametrize("f_min_rtol", [1e-3, 1e-5, 1e-7]) + @pytest.mark.parametrize("locally_biased", [True, False]) + def test_f_min(self, f_min_rtol, locally_biased): + # test that desired function value is reached within + # relative tolerance of f_min_rtol + f_min = 1. + bounds = 4*[(-2., 10.)] + res = direct(self.sphere, bounds=bounds, f_min=f_min, + f_min_rtol=f_min_rtol, + locally_biased=locally_biased) + assert res.status == 3 + assert res.success + assert res.fun < f_min * (1. + f_min_rtol) + message = ("The best function value found is within a relative " + f"error={f_min_rtol} of the (known) global optimum f_min") + assert res.message == message + + def circle_with_args(self, x, a, b): + return np.square(x[0] - a) + np.square(x[1] - b).sum() + + @pytest.mark.parametrize("locally_biased", [True, False]) + def test_f_circle_with_args(self, locally_biased): + bounds = 2*[(-2.0, 2.0)] + + res = direct(self.circle_with_args, bounds, args=(1, 1), maxfun=1250, + locally_biased=locally_biased) + assert_allclose(res.x, np.array([1., 1.]), rtol=1e-5) + + @pytest.mark.parametrize("locally_biased", [True, False]) + def test_failure_maxfun(self, locally_biased): + # test that if optimization runs for the maximal number of + # evaluations, success = False is returned + + maxfun = 100 + result = direct(self.styblinski_tang, self.bounds_stylinski_tang, + maxfun=maxfun, locally_biased=locally_biased) + assert result.success is False + assert result.status == 1 + assert result.nfev >= maxfun + message = ("Number of function evaluations done is " + f"larger than maxfun={maxfun}") + assert result.message == message + + @pytest.mark.parametrize("locally_biased", [True, False]) + def test_failure_maxiter(self, locally_biased): + # test that if optimization runs for the maximal number of + # iterations, success = False is returned + + maxiter = 10 + result = direct(self.styblinski_tang, self.bounds_stylinski_tang, + maxiter=maxiter, locally_biased=locally_biased) + assert result.success is False + assert result.status == 2 + assert result.nit >= maxiter + message = f"Number of iterations is larger than maxiter={maxiter}" + assert result.message == message + + @pytest.mark.parametrize("locally_biased", [True, False]) + def test_bounds_variants(self, locally_biased): + # test that new and old bounds yield same result + + lb = [-6., 1., -5.] + ub = [-1., 3., 5.] 
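+ # The same box constraints spelled both ways accepted by `direct`
+ # (an illustrative note, nothing beyond what this test builds):
+ #     list(zip(lb, ub))  ->  [(-6., -1.), (1., 3.), (-5., 5.)]  (legacy pairs)
+ #     Bounds(lb, ub)     ->  new-style Bounds instance
+ # Both forms are constructed below and must yield identical results.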
+ x_opt = np.array([-1., 1., 0.]) + bounds_old = list(zip(lb, ub)) + bounds_new = Bounds(lb, ub) + + res_old_bounds = direct(self.sphere, bounds_old, + locally_biased=locally_biased) + res_new_bounds = direct(self.sphere, bounds_new, + locally_biased=locally_biased) + + assert res_new_bounds.nfev == res_old_bounds.nfev + assert res_new_bounds.message == res_old_bounds.message + assert res_new_bounds.success == res_old_bounds.success + assert res_new_bounds.nit == res_old_bounds.nit + assert_allclose(res_new_bounds.x, res_old_bounds.x) + assert_allclose(res_new_bounds.x, x_opt, rtol=1e-2) + + @pytest.mark.parametrize("locally_biased", [True, False]) + @pytest.mark.parametrize("eps", [1e-5, 1e-4, 1e-3]) + def test_epsilon(self, eps, locally_biased): + result = direct(self.styblinski_tang, self.bounds_stylinski_tang, + eps=eps, vol_tol=1e-6, + locally_biased=locally_biased) + assert result.status == 4 + assert result.success + + @pytest.mark.xslow + @pytest.mark.parametrize("locally_biased", [True, False]) + def test_no_segmentation_fault(self, locally_biased): + # test that an excessive number of function evaluations + # does not result in segmentation fault + bounds = [(-5., 20.)] * 100 + result = direct(self.sphere, bounds, maxfun=10000000, + maxiter=1000000, locally_biased=locally_biased) + assert result is not None + + @pytest.mark.parametrize("locally_biased", [True, False]) + def test_inf_fun(self, locally_biased): + # test that an objective value of infinity does not crash DIRECT + bounds = [(-5., 5.)] * 2 + result = direct(self.inf_fun, bounds, + locally_biased=locally_biased) + assert result is not None + + @pytest.mark.parametrize("len_tol", [-1, 2]) + def test_len_tol_validation(self, len_tol): + error_msg = "len_tol must be between 0 and 1." + with pytest.raises(ValueError, match=error_msg): + direct(self.styblinski_tang, self.bounds_stylinski_tang, + len_tol=len_tol) + + @pytest.mark.parametrize("vol_tol", [-1, 2]) + def test_vol_tol_validation(self, vol_tol): + error_msg = "vol_tol must be between 0 and 1." + with pytest.raises(ValueError, match=error_msg): + direct(self.styblinski_tang, self.bounds_stylinski_tang, + vol_tol=vol_tol) + + @pytest.mark.parametrize("f_min_rtol", [-1, 2]) + def test_fmin_rtol_validation(self, f_min_rtol): + error_msg = "f_min_rtol must be between 0 and 1." + with pytest.raises(ValueError, match=error_msg): + direct(self.styblinski_tang, self.bounds_stylinski_tang, + f_min_rtol=f_min_rtol, f_min=0.) + + @pytest.mark.parametrize("maxfun", [1.5, "string", (1, 2)]) + def test_maxfun_wrong_type(self, maxfun): + error_msg = "maxfun must be of type int." + with pytest.raises(ValueError, match=error_msg): + direct(self.styblinski_tang, self.bounds_stylinski_tang, + maxfun=maxfun) + + @pytest.mark.parametrize("maxiter", [1.5, "string", (1, 2)]) + def test_maxiter_wrong_type(self, maxiter): + error_msg = "maxiter must be of type int." + with pytest.raises(ValueError, match=error_msg): + direct(self.styblinski_tang, self.bounds_stylinski_tang, + maxiter=maxiter) + + def test_negative_maxiter(self): + error_msg = "maxiter must be > 0." + with pytest.raises(ValueError, match=error_msg): + direct(self.styblinski_tang, self.bounds_stylinski_tang, + maxiter=-1) + + def test_negative_maxfun(self): + error_msg = "maxfun must be > 0." 
+ with pytest.raises(ValueError, match=error_msg): + direct(self.styblinski_tang, self.bounds_stylinski_tang, + maxfun=-1) + + @pytest.mark.parametrize("bounds", ["bounds", 2., 0]) + def test_invalid_bounds_type(self, bounds): + error_msg = ("bounds must be a sequence or " + "instance of Bounds class") + with pytest.raises(ValueError, match=error_msg): + direct(self.styblinski_tang, bounds) + + @pytest.mark.parametrize("bounds", + [Bounds([-1., -1], [-2, 1]), + Bounds([-np.nan, -1], [-2, np.nan]), + ] + ) + def test_incorrect_bounds(self, bounds): + error_msg = 'Bounds are not consistent min < max' + with pytest.raises(ValueError, match=error_msg): + direct(self.styblinski_tang, bounds) + + def test_inf_bounds(self): + error_msg = 'Bounds must not be inf.' + bounds = Bounds([-np.inf, -1], [-2, np.inf]) + with pytest.raises(ValueError, match=error_msg): + direct(self.styblinski_tang, bounds) + + @pytest.mark.parametrize("locally_biased", ["bias", [0, 0], 2.]) + def test_locally_biased_validation(self, locally_biased): + error_msg = 'locally_biased must be True or False.' + with pytest.raises(ValueError, match=error_msg): + direct(self.styblinski_tang, self.bounds_stylinski_tang, + locally_biased=locally_biased) diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_hessian_update_strategy.py b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_hessian_update_strategy.py new file mode 100644 index 0000000000000000000000000000000000000000..e6872997881c78b43a04d8eb4beed6cf7e758cb1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_hessian_update_strategy.py @@ -0,0 +1,208 @@ +import numpy as np +from copy import deepcopy +from numpy.linalg import norm +from numpy.testing import (TestCase, assert_array_almost_equal, + assert_array_equal, assert_array_less) +from scipy.optimize import (BFGS, SR1) + + +class Rosenbrock: + """Rosenbrock function. + + The following optimization problem: + minimize sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0) + """ + + def __init__(self, n=2, random_state=0): + rng = np.random.RandomState(random_state) + self.x0 = rng.uniform(-1, 1, n) + self.x_opt = np.ones(n) + + def fun(self, x): + x = np.asarray(x) + r = np.sum(100.0 * (x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0, + axis=0) + return r + + def grad(self, x): + x = np.asarray(x) + xm = x[1:-1] + xm_m1 = x[:-2] + xm_p1 = x[2:] + der = np.zeros_like(x) + der[1:-1] = (200 * (xm - xm_m1**2) - + 400 * (xm_p1 - xm**2) * xm - 2 * (1 - xm)) + der[0] = -400 * x[0] * (x[1] - x[0]**2) - 2 * (1 - x[0]) + der[-1] = 200 * (x[-1] - x[-2]**2) + return der + + def hess(self, x): + x = np.atleast_1d(x) + H = np.diag(-400 * x[:-1], 1) - np.diag(400 * x[:-1], -1) + diagonal = np.zeros(len(x), dtype=x.dtype) + diagonal[0] = 1200 * x[0]**2 - 400 * x[1] + 2 + diagonal[-1] = 200 + diagonal[1:-1] = 202 + 1200 * x[1:-1]**2 - 400 * x[2:] + H = H + np.diag(diagonal) + return H + + +class TestHessianUpdateStrategy(TestCase): + + def test_hessian_initialization(self): + quasi_newton = (BFGS(), SR1()) + + for qn in quasi_newton: + qn.initialize(5, 'hess') + B = qn.get_matrix() + + assert_array_equal(B, np.eye(5)) + + # For this list of points, it is known + # that no exceptions occur during the + # Hessian update. Hence no update is + # skipped or damped.
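+ # Sketch of the update interface exercised below (only the calls that
+ # actually appear in this file are assumed):
+ #
+ #     hess = BFGS(init_scale=1)
+ #     hess.initialize(n, 'hess')   # start from init_scale * identity
+ #     hess.update(s, y)            # s = x_{k+1} - x_k, y = g_{k+1} - g_k
+ #
+ # BFGS keeps the approximation positive definite only while the
+ # curvature condition s @ y > 0 holds, which is why the iterates below
+ # are checked for it before any update is applied.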
+ def test_rosenbrock_with_no_exception(self): + # Define auxiliary problem + prob = Rosenbrock(n=5) + # Define iteration points + x_list = [[0.0976270, 0.4303787, 0.2055267, 0.0897663, -0.15269040], + [0.1847239, 0.0505757, 0.2123832, 0.0255081, 0.00083286], + [0.2142498, -0.0188480, 0.0503822, 0.0347033, 0.03323606], + [0.2071680, -0.0185071, 0.0341337, -0.0139298, 0.02881750], + [0.1533055, -0.0322935, 0.0280418, -0.0083592, 0.01503699], + [0.1382378, -0.0276671, 0.0266161, -0.0074060, 0.02801610], + [0.1651957, -0.0049124, 0.0269665, -0.0040025, 0.02138184], + [0.2354930, 0.0443711, 0.0173959, 0.0041872, 0.00794563], + [0.4168118, 0.1433867, 0.0111714, 0.0126265, -0.00658537], + [0.4681972, 0.2153273, 0.0225249, 0.0152704, -0.00463809], + [0.6023068, 0.3346815, 0.0731108, 0.0186618, -0.00371541], + [0.6415743, 0.3985468, 0.1324422, 0.0214160, -0.00062401], + [0.7503690, 0.5447616, 0.2804541, 0.0539851, 0.00242230], + [0.7452626, 0.5644594, 0.3324679, 0.0865153, 0.00454960], + [0.8059782, 0.6586838, 0.4229577, 0.1452990, 0.00976702], + [0.8549542, 0.7226562, 0.4991309, 0.2420093, 0.02772661], + [0.8571332, 0.7285741, 0.5279076, 0.2824549, 0.06030276], + [0.8835633, 0.7727077, 0.5957984, 0.3411303, 0.09652185], + [0.9071558, 0.8299587, 0.6771400, 0.4402896, 0.17469338], + [0.9190793, 0.8486480, 0.7163332, 0.5083780, 0.26107691], + [0.9371223, 0.8762177, 0.7653702, 0.5773109, 0.32181041], + [0.9554613, 0.9119893, 0.8282687, 0.6776178, 0.43162744], + [0.9545744, 0.9099264, 0.8270244, 0.6822220, 0.45237623], + [0.9688112, 0.9351710, 0.8730961, 0.7546601, 0.56622448], + [0.9743227, 0.9491953, 0.9005150, 0.8086497, 0.64505437], + [0.9807345, 0.9638853, 0.9283012, 0.8631675, 0.73812581], + [0.9886746, 0.9777760, 0.9558950, 0.9123417, 0.82726553], + [0.9899096, 0.9803828, 0.9615592, 0.9255600, 0.85822149], + [0.9969510, 0.9935441, 0.9864657, 0.9726775, 0.94358663], + [0.9979533, 0.9960274, 0.9921724, 0.9837415, 0.96626288], + [0.9995981, 0.9989171, 0.9974178, 0.9949954, 0.99023356], + [1.0002640, 1.0005088, 1.0010594, 1.0021161, 1.00386912], + [0.9998903, 0.9998459, 0.9997795, 0.9995484, 0.99916305], + [1.0000008, 0.9999905, 0.9999481, 0.9998903, 0.99978047], + [1.0000004, 0.9999983, 1.0000001, 1.0000031, 1.00000297], + [0.9999995, 1.0000003, 1.0000005, 1.0000001, 1.00000032], + [0.9999999, 0.9999997, 0.9999994, 0.9999989, 0.99999786], + [0.9999999, 0.9999999, 0.9999999, 0.9999999, 0.99999991]] + # Get iteration points + grad_list = [prob.grad(x) for x in x_list] + delta_x = [np.array(x_list[i+1])-np.array(x_list[i]) + for i in range(len(x_list)-1)] + delta_grad = [grad_list[i+1]-grad_list[i] + for i in range(len(grad_list)-1)] + # Check curvature condition + for s, y in zip(delta_x, delta_grad): + if np.dot(s, y) <= 0: + raise ArithmeticError() + # Define QuasiNewton update + for quasi_newton in (BFGS(init_scale=1, min_curvature=1e-4), + SR1(init_scale=1)): + hess = deepcopy(quasi_newton) + inv_hess = deepcopy(quasi_newton) + hess.initialize(len(x_list[0]), 'hess') + inv_hess.initialize(len(x_list[0]), 'inv_hess') + # Compare the hessian and its inverse + for s, y in zip(delta_x, delta_grad): + hess.update(s, y) + inv_hess.update(s, y) + B = hess.get_matrix() + H = inv_hess.get_matrix() + assert_array_almost_equal(np.linalg.inv(B), H, decimal=10) + B_true = prob.hess(x_list[len(delta_x)]) + assert_array_less(norm(B - B_true)/norm(B_true), 0.1) + + def test_SR1_skip_update(self): + # Define auxiliary problem + prob = Rosenbrock(n=5) + # Define iteration points + x_list = [[0.0976270, 0.4303787, 
0.2055267, 0.0897663, -0.15269040], + [0.1847239, 0.0505757, 0.2123832, 0.0255081, 0.00083286], + [0.2142498, -0.0188480, 0.0503822, 0.0347033, 0.03323606], + [0.2071680, -0.0185071, 0.0341337, -0.0139298, 0.02881750], + [0.1533055, -0.0322935, 0.0280418, -0.0083592, 0.01503699], + [0.1382378, -0.0276671, 0.0266161, -0.0074060, 0.02801610], + [0.1651957, -0.0049124, 0.0269665, -0.0040025, 0.02138184], + [0.2354930, 0.0443711, 0.0173959, 0.0041872, 0.00794563], + [0.4168118, 0.1433867, 0.0111714, 0.0126265, -0.00658537], + [0.4681972, 0.2153273, 0.0225249, 0.0152704, -0.00463809], + [0.6023068, 0.3346815, 0.0731108, 0.0186618, -0.00371541], + [0.6415743, 0.3985468, 0.1324422, 0.0214160, -0.00062401], + [0.7503690, 0.5447616, 0.2804541, 0.0539851, 0.00242230], + [0.7452626, 0.5644594, 0.3324679, 0.0865153, 0.00454960], + [0.8059782, 0.6586838, 0.4229577, 0.1452990, 0.00976702], + [0.8549542, 0.7226562, 0.4991309, 0.2420093, 0.02772661], + [0.8571332, 0.7285741, 0.5279076, 0.2824549, 0.06030276], + [0.8835633, 0.7727077, 0.5957984, 0.3411303, 0.09652185], + [0.9071558, 0.8299587, 0.6771400, 0.4402896, 0.17469338]] + # Get iteration points + grad_list = [prob.grad(x) for x in x_list] + delta_x = [np.array(x_list[i+1])-np.array(x_list[i]) + for i in range(len(x_list)-1)] + delta_grad = [grad_list[i+1]-grad_list[i] + for i in range(len(grad_list)-1)] + hess = SR1(init_scale=1, min_denominator=1e-2) + hess.initialize(len(x_list[0]), 'hess') + # Compare the Hessian and its inverse + for i in range(len(delta_x)-1): + s = delta_x[i] + y = delta_grad[i] + hess.update(s, y) + # Test skip update + B = np.copy(hess.get_matrix()) + s = delta_x[17] + y = delta_grad[17] + hess.update(s, y) + B_updated = np.copy(hess.get_matrix()) + assert_array_equal(B, B_updated) + + def test_BFGS_skip_update(self): + # Define auxiliary problem + prob = Rosenbrock(n=5) + # Define iteration points + x_list = [[0.0976270, 0.4303787, 0.2055267, 0.0897663, -0.15269040], + [0.1847239, 0.0505757, 0.2123832, 0.0255081, 0.00083286], + [0.2142498, -0.0188480, 0.0503822, 0.0347033, 0.03323606], + [0.2071680, -0.0185071, 0.0341337, -0.0139298, 0.02881750], + [0.1533055, -0.0322935, 0.0280418, -0.0083592, 0.01503699], + [0.1382378, -0.0276671, 0.0266161, -0.0074060, 0.02801610], + [0.1651957, -0.0049124, 0.0269665, -0.0040025, 0.02138184]] + # Get iteration points + grad_list = [prob.grad(x) for x in x_list] + delta_x = [np.array(x_list[i+1])-np.array(x_list[i]) + for i in range(len(x_list)-1)] + delta_grad = [grad_list[i+1]-grad_list[i] + for i in range(len(grad_list)-1)] + hess = BFGS(init_scale=1, min_curvature=10) + hess.initialize(len(x_list[0]), 'hess') + # Compare the Hessian and its inverse + for i in range(len(delta_x)-1): + s = delta_x[i] + y = delta_grad[i] + hess.update(s, y) + # Test skip update + B = np.copy(hess.get_matrix()) + s = delta_x[5] + y = delta_grad[5] + hess.update(s, y) + B_updated = np.copy(hess.get_matrix()) + assert_array_equal(B, B_updated) diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_lbfgsb_hessinv.py b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_lbfgsb_hessinv.py new file mode 100644 index 0000000000000000000000000000000000000000..8e4452cd61c5400c13f4f239055352bae754ad7e --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_lbfgsb_hessinv.py @@ -0,0 +1,43 @@ +import numpy as np +from numpy.testing import assert_allclose +import scipy.linalg +from scipy.optimize import minimize + + +def test_1(): + def f(x): + return x**4, 4*x**3 + + 
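+ # For method='L-BFGS-B', result.hess_inv is a LbfgsInvHessProduct (a
+ # scipy LinearOperator), not a dense array, so the implied inverse
+ # Hessian can be read out in two ways (sketch of the calls used below):
+ #     Hv = result.hess_inv(v)         # apply the operator to a vector v
+ #     H = result.hess_inv.todense()   # materialize the full matrix
+ # The loops below check that both views agree for this 1-D problem.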
for gtol in [1e-8, 1e-12, 1e-20]: + for maxcor in range(20, 35): + result = minimize(fun=f, jac=True, method='L-BFGS-B', x0=20, + options={'gtol': gtol, 'maxcor': maxcor}) + + H1 = result.hess_inv(np.array([1])).reshape(1,1) + H2 = result.hess_inv.todense() + + assert_allclose(H1, H2) + + +def test_2(): + H0 = [[3, 0], [1, 2]] + + def f(x): + return np.dot(x, np.dot(scipy.linalg.inv(H0), x)) + + result1 = minimize(fun=f, method='L-BFGS-B', x0=[10, 20]) + result2 = minimize(fun=f, method='BFGS', x0=[10, 20]) + + H1 = result1.hess_inv.todense() + + H2 = np.vstack(( + result1.hess_inv(np.array([1, 0])), + result1.hess_inv(np.array([0, 1])))) + + assert_allclose( + result1.hess_inv(np.array([1, 0]).reshape(2,1)).reshape(-1), + result1.hess_inv(np.array([1, 0]))) + assert_allclose(H1, H2) + assert_allclose(H1, result2.hess_inv, rtol=1e-2, atol=0.03) + + diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_lbfgsb_setulb.py b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_lbfgsb_setulb.py new file mode 100644 index 0000000000000000000000000000000000000000..5b2a75684a27f31c07d5b8b20bd757385554edef --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_lbfgsb_setulb.py @@ -0,0 +1,128 @@ +import numpy as np +from scipy.optimize import _lbfgsb, minimize + + +def objfun(x): + """simplified objective func to test lbfgsb bound violation""" + x0 = [0.8750000000000278, + 0.7500000000000153, + 0.9499999999999722, + 0.8214285714285992, + 0.6363636363636085] + x1 = [1.0, 0.0, 1.0, 0.0, 0.0] + x2 = [1.0, + 0.0, + 0.9889733043149325, + 0.0, + 0.026353554421041155] + x3 = [1.0, + 0.0, + 0.9889917442915558, + 0.0, + 0.020341986743231205] + + f0 = 5163.647901211178 + f1 = 5149.8181642072905 + f2 = 5149.379332309634 + f3 = 5149.374490771297 + + g0 = np.array([-0.5934820547965749, + 1.6251549718258351, + -71.99168459202559, + 5.346636965797545, + 37.10732723092604]) + g1 = np.array([-0.43295349282641515, + 1.008607936794592, + 18.223666726602975, + 31.927010036981997, + -19.667512518739386]) + g2 = np.array([-0.4699874455100256, + 0.9466285353668347, + -0.016874360242016825, + 48.44999161133457, + 5.819631620590712]) + g3 = np.array([-0.46970678696829116, + 0.9612719312174818, + 0.006129809488833699, + 48.43557729419473, + 6.005481418498221]) + + if np.allclose(x, x0): + f = f0 + g = g0 + elif np.allclose(x, x1): + f = f1 + g = g1 + elif np.allclose(x, x2): + f = f2 + g = g2 + elif np.allclose(x, x3): + f = f3 + g = g3 + else: + raise ValueError( + 'Simplified objective function not defined ' + 'at requested point') + return (np.copy(f), np.copy(g)) + + +def test_setulb_floatround(): + """test if setulb() violates bounds + + checks for violation due to floating point rounding error + """ + + n = 5 + m = 10 + factr = 1e7 + pgtol = 1e-5 + maxls = 20 + iprint = -1 + nbd = np.full((n,), 2) + low_bnd = np.zeros(n, np.float64) + upper_bnd = np.ones(n, np.float64) + + x0 = np.array( + [0.8750000000000278, + 0.7500000000000153, + 0.9499999999999722, + 0.8214285714285992, + 0.6363636363636085]) + x = np.copy(x0) + + f = np.array(0.0, np.float64) + g = np.zeros(n, np.float64) + + fortran_int = _lbfgsb.types.intvar.dtype + + wa = np.zeros(2*m*n + 5*n + 11*m*m + 8*m, np.float64) + iwa = np.zeros(3*n, fortran_int) + task = np.zeros(1, 'S60') + csave = np.zeros(1, 'S60') + lsave = np.zeros(4, fortran_int) + isave = np.zeros(44, fortran_int) + dsave = np.zeros(29, np.float64) + + task[:] = b'START' + + for n_iter in range(7): # 7 steps required to reproduce error + f, g = 
objfun(x) + + _lbfgsb.setulb(m, x, low_bnd, upper_bnd, nbd, f, g, factr, + pgtol, wa, iwa, task, iprint, csave, lsave, + isave, dsave, maxls) + + assert (x <= upper_bnd).all() and (x >= low_bnd).all(), ( + "_lbfgsb.setulb() stepped to a point outside of the bounds") + + +def test_gh_issue18730(): + # issue 18730 reported that l-bfgs-b did not work with objectives + # returning single precision gradient arrays + def fun_single_precision(x): + x = x.astype(np.float32) + return np.sum(x**2), (2*x) + + res = minimize(fun_single_precision, x0=np.array([1., 1.]), jac=True, + method="l-bfgs-b") + np.testing.assert_allclose(res.fun, 0., atol=1e-15) diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_least_squares.py b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_least_squares.py new file mode 100644 index 0000000000000000000000000000000000000000..114e95d9a29f88768968a599be62f435325f81a7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_least_squares.py @@ -0,0 +1,871 @@ +from itertools import product + +import numpy as np +from numpy.linalg import norm +from numpy.testing import (assert_, assert_allclose, + assert_equal, suppress_warnings) +from pytest import raises as assert_raises +from scipy.sparse import issparse, lil_matrix +from scipy.sparse.linalg import aslinearoperator + +from scipy.optimize import least_squares, Bounds +from scipy.optimize._lsq.least_squares import IMPLEMENTED_LOSSES +from scipy.optimize._lsq.common import EPS, make_strictly_feasible, CL_scaling_vector + + +def fun_trivial(x, a=0): + return (x - a)**2 + 5.0 + + +def jac_trivial(x, a=0.0): + return 2 * (x - a) + + +def fun_2d_trivial(x): + return np.array([x[0], x[1]]) + + +def jac_2d_trivial(x): + return np.identity(2) + + +def fun_rosenbrock(x): + return np.array([10 * (x[1] - x[0]**2), (1 - x[0])]) + + +def jac_rosenbrock(x): + return np.array([ + [-20 * x[0], 10], + [-1, 0] + ]) + + +def jac_rosenbrock_bad_dim(x): + return np.array([ + [-20 * x[0], 10], + [-1, 0], + [0.0, 0.0] + ]) + + +def fun_rosenbrock_cropped(x): + return fun_rosenbrock(x)[0] + + +def jac_rosenbrock_cropped(x): + return jac_rosenbrock(x)[0] + + +# When x is 1-D array, return is 2-D array. 
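+# (That shape is deliberately invalid: least_squares expects fun(x) to
+# return residuals as a 1-D array of shape (m,) and jac(x) a 2-D array
+# of shape (m, n), as fun_rosenbrock/jac_rosenbrock above do. The two
+# helpers below violate this so the tests can check that ValueError is
+# raised.)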
+def fun_wrong_dimensions(x): + return np.array([x, x**2, x**3]) + + +def jac_wrong_dimensions(x, a=0.0): + return np.atleast_3d(jac_trivial(x, a=a)) + + +def fun_bvp(x): + n = int(np.sqrt(x.shape[0])) + u = np.zeros((n + 2, n + 2)) + x = x.reshape((n, n)) + u[1:-1, 1:-1] = x + y = u[:-2, 1:-1] + u[2:, 1:-1] + u[1:-1, :-2] + u[1:-1, 2:] - 4 * x + x**3 + return y.ravel() + + +class BroydenTridiagonal: + def __init__(self, n=100, mode='sparse'): + np.random.seed(0) + + self.n = n + + self.x0 = -np.ones(n) + self.lb = np.linspace(-2, -1.5, n) + self.ub = np.linspace(-0.8, 0.0, n) + + self.lb += 0.1 * np.random.randn(n) + self.ub += 0.1 * np.random.randn(n) + + self.x0 += 0.1 * np.random.randn(n) + self.x0 = make_strictly_feasible(self.x0, self.lb, self.ub) + + if mode == 'sparse': + self.sparsity = lil_matrix((n, n), dtype=int) + i = np.arange(n) + self.sparsity[i, i] = 1 + i = np.arange(1, n) + self.sparsity[i, i - 1] = 1 + i = np.arange(n - 1) + self.sparsity[i, i + 1] = 1 + + self.jac = self._jac + elif mode == 'operator': + self.jac = lambda x: aslinearoperator(self._jac(x)) + elif mode == 'dense': + self.sparsity = None + self.jac = lambda x: self._jac(x).toarray() + else: + assert_(False) + + def fun(self, x): + f = (3 - x) * x + 1 + f[1:] -= x[:-1] + f[:-1] -= 2 * x[1:] + return f + + def _jac(self, x): + J = lil_matrix((self.n, self.n)) + i = np.arange(self.n) + J[i, i] = 3 - 2 * x + i = np.arange(1, self.n) + J[i, i - 1] = -1 + i = np.arange(self.n - 1) + J[i, i + 1] = -2 + return J + + +class ExponentialFittingProblem: + """Provide data and function for exponential fitting in the form + y = a + exp(b * x) + noise.""" + + def __init__(self, a, b, noise, n_outliers=1, x_range=(-1, 1), + n_points=11, random_seed=None): + np.random.seed(random_seed) + self.m = n_points + self.n = 2 + + self.p0 = np.zeros(2) + self.x = np.linspace(x_range[0], x_range[1], n_points) + + self.y = a + np.exp(b * self.x) + self.y += noise * np.random.randn(self.m) + + outliers = np.random.randint(0, self.m, n_outliers) + self.y[outliers] += 50 * noise * np.random.rand(n_outliers) + + self.p_opt = np.array([a, b]) + + def fun(self, p): + return p[0] + np.exp(p[1] * self.x) - self.y + + def jac(self, p): + J = np.empty((self.m, self.n)) + J[:, 0] = 1 + J[:, 1] = self.x * np.exp(p[1] * self.x) + return J + + +def cubic_soft_l1(z): + rho = np.empty((3, z.size)) + + t = 1 + z + rho[0] = 3 * (t**(1/3) - 1) + rho[1] = t ** (-2/3) + rho[2] = -2/3 * t**(-5/3) + + return rho + + +LOSSES = list(IMPLEMENTED_LOSSES.keys()) + [cubic_soft_l1] + + +class BaseMixin: + def test_basic(self): + # Test that the basic calling sequence works. + res = least_squares(fun_trivial, 2., method=self.method) + assert_allclose(res.x, 0, atol=1e-4) + assert_allclose(res.fun, fun_trivial(res.x)) + + def test_args_kwargs(self): + # Test that args and kwargs are passed correctly to the functions. 
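+ # The two equivalent spellings compared here (sketch): fixing a = 3.0
+ # either positionally or by name,
+ #     least_squares(fun_trivial, 2.0, args=(a,))
+ #     least_squares(fun_trivial, 2.0, kwargs={'a': a})
+ # and both runs must converge to x = a, the minimizer of (x - a)**2 + 5.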
+ a = 3.0 + for jac in ['2-point', '3-point', 'cs', jac_trivial]: + with suppress_warnings() as sup: + sup.filter( + UserWarning, + "jac='(3-point|cs)' works equivalently to '2-point' for method='lm'" + ) + res = least_squares(fun_trivial, 2.0, jac, args=(a,), + method=self.method) + res1 = least_squares(fun_trivial, 2.0, jac, kwargs={'a': a}, + method=self.method) + + assert_allclose(res.x, a, rtol=1e-4) + assert_allclose(res1.x, a, rtol=1e-4) + + assert_raises(TypeError, least_squares, fun_trivial, 2.0, + args=(3, 4,), method=self.method) + assert_raises(TypeError, least_squares, fun_trivial, 2.0, + kwargs={'kaboom': 3}, method=self.method) + + def test_jac_options(self): + for jac in ['2-point', '3-point', 'cs', jac_trivial]: + with suppress_warnings() as sup: + sup.filter( + UserWarning, + "jac='(3-point|cs)' works equivalently to '2-point' for method='lm'" + ) + res = least_squares(fun_trivial, 2.0, jac, method=self.method) + assert_allclose(res.x, 0, atol=1e-4) + + assert_raises(ValueError, least_squares, fun_trivial, 2.0, jac='oops', + method=self.method) + + def test_nfev_options(self): + for max_nfev in [None, 20]: + res = least_squares(fun_trivial, 2.0, max_nfev=max_nfev, + method=self.method) + assert_allclose(res.x, 0, atol=1e-4) + + def test_x_scale_options(self): + for x_scale in [1.0, np.array([0.5]), 'jac']: + res = least_squares(fun_trivial, 2.0, x_scale=x_scale) + assert_allclose(res.x, 0) + assert_raises(ValueError, least_squares, fun_trivial, + 2.0, x_scale='auto', method=self.method) + assert_raises(ValueError, least_squares, fun_trivial, + 2.0, x_scale=-1.0, method=self.method) + assert_raises(ValueError, least_squares, fun_trivial, + 2.0, x_scale=None, method=self.method) + assert_raises(ValueError, least_squares, fun_trivial, + 2.0, x_scale=1.0+2.0j, method=self.method) + + def test_diff_step(self): + # res1 and res2 should be equivalent. + # res2 and res3 should be different. + res1 = least_squares(fun_trivial, 2.0, diff_step=1e-1, + method=self.method) + res2 = least_squares(fun_trivial, 2.0, diff_step=-1e-1, + method=self.method) + res3 = least_squares(fun_trivial, 2.0, + diff_step=None, method=self.method) + assert_allclose(res1.x, 0, atol=1e-4) + assert_allclose(res2.x, 0, atol=1e-4) + assert_allclose(res3.x, 0, atol=1e-4) + assert_equal(res1.x, res2.x) + assert_equal(res1.nfev, res2.nfev) + + def test_incorrect_options_usage(self): + assert_raises(TypeError, least_squares, fun_trivial, 2.0, + method=self.method, options={'no_such_option': 100}) + assert_raises(TypeError, least_squares, fun_trivial, 2.0, + method=self.method, options={'max_nfev': 100}) + + def test_full_result(self): + # MINPACK doesn't work very well with factor=100 on this problem, + # thus using low 'atol'. + res = least_squares(fun_trivial, 2.0, method=self.method) + assert_allclose(res.x, 0, atol=1e-4) + assert_allclose(res.cost, 12.5) + assert_allclose(res.fun, 5) + assert_allclose(res.jac, 0, atol=1e-4) + assert_allclose(res.grad, 0, atol=1e-2) + assert_allclose(res.optimality, 0, atol=1e-2) + assert_equal(res.active_mask, 0) + if self.method == 'lm': + assert_(res.nfev < 30) + assert_(res.njev is None) + else: + assert_(res.nfev < 10) + assert_(res.njev < 10) + assert_(res.status > 0) + assert_(res.success) + + def test_full_result_single_fev(self): + # MINPACK checks the number of nfev after the iteration, + # so it's hard to tell what it is going to compute.
+ if self.method == 'lm': + return + + res = least_squares(fun_trivial, 2.0, method=self.method, + max_nfev=1) + assert_equal(res.x, np.array([2])) + assert_equal(res.cost, 40.5) + assert_equal(res.fun, np.array([9])) + assert_equal(res.jac, np.array([[4]])) + assert_equal(res.grad, np.array([36])) + assert_equal(res.optimality, 36) + assert_equal(res.active_mask, np.array([0])) + assert_equal(res.nfev, 1) + assert_equal(res.njev, 1) + assert_equal(res.status, 0) + assert_equal(res.success, 0) + + def test_rosenbrock(self): + x0 = [-2, 1] + x_opt = [1, 1] + for jac, x_scale, tr_solver in product( + ['2-point', '3-point', 'cs', jac_rosenbrock], + [1.0, np.array([1.0, 0.2]), 'jac'], + ['exact', 'lsmr']): + with suppress_warnings() as sup: + sup.filter( + UserWarning, + "jac='(3-point|cs)' works equivalently to '2-point' for method='lm'" + ) + res = least_squares(fun_rosenbrock, x0, jac, x_scale=x_scale, + tr_solver=tr_solver, method=self.method) + assert_allclose(res.x, x_opt) + + def test_rosenbrock_cropped(self): + x0 = [-2, 1] + if self.method == 'lm': + assert_raises(ValueError, least_squares, fun_rosenbrock_cropped, + x0, method='lm') + else: + for jac, x_scale, tr_solver in product( + ['2-point', '3-point', 'cs', jac_rosenbrock_cropped], + [1.0, np.array([1.0, 0.2]), 'jac'], + ['exact', 'lsmr']): + res = least_squares( + fun_rosenbrock_cropped, x0, jac, x_scale=x_scale, + tr_solver=tr_solver, method=self.method) + assert_allclose(res.cost, 0, atol=1e-14) + + def test_fun_wrong_dimensions(self): + assert_raises(ValueError, least_squares, fun_wrong_dimensions, + 2.0, method=self.method) + + def test_jac_wrong_dimensions(self): + assert_raises(ValueError, least_squares, fun_trivial, + 2.0, jac_wrong_dimensions, method=self.method) + + def test_fun_and_jac_inconsistent_dimensions(self): + x0 = [1, 2] + assert_raises(ValueError, least_squares, fun_rosenbrock, x0, + jac_rosenbrock_bad_dim, method=self.method) + + def test_x0_multidimensional(self): + x0 = np.ones(4).reshape(2, 2) + assert_raises(ValueError, least_squares, fun_trivial, x0, + method=self.method) + + def test_x0_complex_scalar(self): + x0 = 2.0 + 0.0*1j + assert_raises(ValueError, least_squares, fun_trivial, x0, + method=self.method) + + def test_x0_complex_array(self): + x0 = [1.0, 2.0 + 0.0*1j] + assert_raises(ValueError, least_squares, fun_trivial, x0, + method=self.method) + + def test_bvp(self): + # This test was introduced with fix #5556. It turned out that + # dogbox solver had a bug with trust-region radius update, which + # could block its progress and create an infinite loop. And this + # discrete boundary value problem is the one which triggers it. + n = 10 + x0 = np.ones(n**2) + if self.method == 'lm': + max_nfev = 5000 # To account for Jacobian estimation. + else: + max_nfev = 100 + res = least_squares(fun_bvp, x0, ftol=1e-2, method=self.method, + max_nfev=max_nfev) + + assert_(res.nfev < max_nfev) + assert_(res.cost < 0.5) + + def test_error_raised_when_all_tolerances_below_eps(self): + # Test that all 0 tolerances are not allowed. 
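+ # ftol, xtol and gtol may each be disabled by passing None, but at
+ # least one must stay active or the solver would have no convergence
+ # criterion at all; the all-None call below, i.e.
+ #     least_squares(fun_trivial, 2.0, ftol=None, xtol=None, gtol=None)
+ # must therefore raise ValueError.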
+ assert_raises(ValueError, least_squares, fun_trivial, 2.0, + method=self.method, ftol=None, xtol=None, gtol=None) + + def test_convergence_with_only_one_tolerance_enabled(self): + if self.method == 'lm': + return # should not do test + x0 = [-2, 1] + x_opt = [1, 1] + for ftol, xtol, gtol in [(1e-8, None, None), + (None, 1e-8, None), + (None, None, 1e-8)]: + res = least_squares(fun_rosenbrock, x0, jac=jac_rosenbrock, + ftol=ftol, gtol=gtol, xtol=xtol, + method=self.method) + assert_allclose(res.x, x_opt) + + +class BoundsMixin: + def test_inconsistent(self): + assert_raises(ValueError, least_squares, fun_trivial, 2.0, + bounds=(10.0, 0.0), method=self.method) + + def test_infeasible(self): + assert_raises(ValueError, least_squares, fun_trivial, 2.0, + bounds=(3., 4), method=self.method) + + def test_wrong_number(self): + assert_raises(ValueError, least_squares, fun_trivial, 2., + bounds=(1., 2, 3), method=self.method) + + def test_inconsistent_shape(self): + assert_raises(ValueError, least_squares, fun_trivial, 2.0, + bounds=(1.0, [2.0, 3.0]), method=self.method) + # 1-D array won't be broadcast + assert_raises(ValueError, least_squares, fun_rosenbrock, [1.0, 2.0], + bounds=([0.0], [3.0, 4.0]), method=self.method) + + def test_in_bounds(self): + for jac in ['2-point', '3-point', 'cs', jac_trivial]: + res = least_squares(fun_trivial, 2.0, jac=jac, + bounds=(-1.0, 3.0), method=self.method) + assert_allclose(res.x, 0.0, atol=1e-4) + assert_equal(res.active_mask, [0]) + assert_(-1 <= res.x <= 3) + res = least_squares(fun_trivial, 2.0, jac=jac, + bounds=(0.5, 3.0), method=self.method) + assert_allclose(res.x, 0.5, atol=1e-4) + assert_equal(res.active_mask, [-1]) + assert_(0.5 <= res.x <= 3) + + def test_bounds_shape(self): + def get_bounds_direct(lb, ub): + return lb, ub + + def get_bounds_instances(lb, ub): + return Bounds(lb, ub) + + for jac in ['2-point', '3-point', 'cs', jac_2d_trivial]: + for bounds_func in [get_bounds_direct, get_bounds_instances]: + x0 = [1.0, 1.0] + res = least_squares(fun_2d_trivial, x0, jac=jac) + assert_allclose(res.x, [0.0, 0.0]) + res = least_squares(fun_2d_trivial, x0, jac=jac, + bounds=bounds_func(0.5, [2.0, 2.0]), + method=self.method) + assert_allclose(res.x, [0.5, 0.5]) + res = least_squares(fun_2d_trivial, x0, jac=jac, + bounds=bounds_func([0.3, 0.2], 3.0), + method=self.method) + assert_allclose(res.x, [0.3, 0.2]) + res = least_squares( + fun_2d_trivial, x0, jac=jac, + bounds=bounds_func([-1, 0.5], [1.0, 3.0]), + method=self.method) + assert_allclose(res.x, [0.0, 0.5], atol=1e-5) + + def test_bounds_instances(self): + res = least_squares(fun_trivial, 0.5, bounds=Bounds()) + assert_allclose(res.x, 0.0, atol=1e-4) + + res = least_squares(fun_trivial, 3.0, bounds=Bounds(lb=1.0)) + assert_allclose(res.x, 1.0, atol=1e-4) + + res = least_squares(fun_trivial, 0.5, bounds=Bounds(lb=-1.0, ub=1.0)) + assert_allclose(res.x, 0.0, atol=1e-4) + + res = least_squares(fun_trivial, -3.0, bounds=Bounds(ub=-1.0)) + assert_allclose(res.x, -1.0, atol=1e-4) + + res = least_squares(fun_2d_trivial, [0.5, 0.5], + bounds=Bounds(lb=[-1.0, -1.0], ub=1.0)) + assert_allclose(res.x, [0.0, 0.0], atol=1e-5) + + res = least_squares(fun_2d_trivial, [0.5, 0.5], + bounds=Bounds(lb=[0.1, 0.1])) + assert_allclose(res.x, [0.1, 0.1], atol=1e-5) + + def test_rosenbrock_bounds(self): + x0_1 = np.array([-2.0, 1.0]) + x0_2 = np.array([2.0, 2.0]) + x0_3 = np.array([-2.0, 2.0]) + x0_4 = np.array([0.0, 2.0]) + x0_5 = np.array([-1.2, 1.0]) + problems = [ + (x0_1, ([-np.inf, -1.5], np.inf)), + (x0_2,
([-np.inf, 1.5], np.inf)), + (x0_3, ([-np.inf, 1.5], np.inf)), + (x0_4, ([-np.inf, 1.5], [1.0, np.inf])), + (x0_2, ([1.0, 1.5], [3.0, 3.0])), + (x0_5, ([-50.0, 0.0], [0.5, 100])) + ] + for x0, bounds in problems: + for jac, x_scale, tr_solver in product( + ['2-point', '3-point', 'cs', jac_rosenbrock], + [1.0, [1.0, 0.5], 'jac'], + ['exact', 'lsmr']): + res = least_squares(fun_rosenbrock, x0, jac, bounds, + x_scale=x_scale, tr_solver=tr_solver, + method=self.method) + assert_allclose(res.optimality, 0.0, atol=1e-5) + + +class SparseMixin: + def test_exact_tr_solver(self): + p = BroydenTridiagonal() + assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac, + tr_solver='exact', method=self.method) + assert_raises(ValueError, least_squares, p.fun, p.x0, + tr_solver='exact', jac_sparsity=p.sparsity, + method=self.method) + + def test_equivalence(self): + sparse = BroydenTridiagonal(mode='sparse') + dense = BroydenTridiagonal(mode='dense') + res_sparse = least_squares( + sparse.fun, sparse.x0, jac=sparse.jac, + method=self.method) + res_dense = least_squares( + dense.fun, dense.x0, jac=sparse.jac, + method=self.method) + assert_equal(res_sparse.nfev, res_dense.nfev) + assert_allclose(res_sparse.x, res_dense.x, atol=1e-20) + assert_allclose(res_sparse.cost, 0, atol=1e-20) + assert_allclose(res_dense.cost, 0, atol=1e-20) + + def test_tr_options(self): + p = BroydenTridiagonal() + res = least_squares(p.fun, p.x0, p.jac, method=self.method, + tr_options={'btol': 1e-10}) + assert_allclose(res.cost, 0, atol=1e-20) + + def test_wrong_parameters(self): + p = BroydenTridiagonal() + assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac, + tr_solver='best', method=self.method) + assert_raises(TypeError, least_squares, p.fun, p.x0, p.jac, + tr_solver='lsmr', tr_options={'tol': 1e-10}) + + def test_solver_selection(self): + sparse = BroydenTridiagonal(mode='sparse') + dense = BroydenTridiagonal(mode='dense') + res_sparse = least_squares(sparse.fun, sparse.x0, jac=sparse.jac, + method=self.method) + res_dense = least_squares(dense.fun, dense.x0, jac=dense.jac, + method=self.method) + assert_allclose(res_sparse.cost, 0, atol=1e-20) + assert_allclose(res_dense.cost, 0, atol=1e-20) + assert_(issparse(res_sparse.jac)) + assert_(isinstance(res_dense.jac, np.ndarray)) + + def test_numerical_jac(self): + p = BroydenTridiagonal() + for jac in ['2-point', '3-point', 'cs']: + res_dense = least_squares(p.fun, p.x0, jac, method=self.method) + res_sparse = least_squares( + p.fun, p.x0, jac,method=self.method, + jac_sparsity=p.sparsity) + assert_equal(res_dense.nfev, res_sparse.nfev) + assert_allclose(res_dense.x, res_sparse.x, atol=1e-20) + assert_allclose(res_dense.cost, 0, atol=1e-20) + assert_allclose(res_sparse.cost, 0, atol=1e-20) + + def test_with_bounds(self): + p = BroydenTridiagonal() + for jac, jac_sparsity in product( + [p.jac, '2-point', '3-point', 'cs'], [None, p.sparsity]): + res_1 = least_squares( + p.fun, p.x0, jac, bounds=(p.lb, np.inf), + method=self.method,jac_sparsity=jac_sparsity) + res_2 = least_squares( + p.fun, p.x0, jac, bounds=(-np.inf, p.ub), + method=self.method, jac_sparsity=jac_sparsity) + res_3 = least_squares( + p.fun, p.x0, jac, bounds=(p.lb, p.ub), + method=self.method, jac_sparsity=jac_sparsity) + assert_allclose(res_1.optimality, 0, atol=1e-10) + assert_allclose(res_2.optimality, 0, atol=1e-10) + assert_allclose(res_3.optimality, 0, atol=1e-10) + + def test_wrong_jac_sparsity(self): + p = BroydenTridiagonal() + sparsity = p.sparsity[:-1] + assert_raises(ValueError, 
least_squares, p.fun, p.x0, + jac_sparsity=sparsity, method=self.method) + + def test_linear_operator(self): + p = BroydenTridiagonal(mode='operator') + res = least_squares(p.fun, p.x0, p.jac, method=self.method) + assert_allclose(res.cost, 0.0, atol=1e-20) + assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac, + method=self.method, tr_solver='exact') + + def test_x_scale_jac_scale(self): + p = BroydenTridiagonal() + res = least_squares(p.fun, p.x0, p.jac, method=self.method, + x_scale='jac') + assert_allclose(res.cost, 0.0, atol=1e-20) + + p = BroydenTridiagonal(mode='operator') + assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac, + method=self.method, x_scale='jac') + + +class LossFunctionMixin: + def test_options(self): + for loss in LOSSES: + res = least_squares(fun_trivial, 2.0, loss=loss, + method=self.method) + assert_allclose(res.x, 0, atol=1e-15) + + assert_raises(ValueError, least_squares, fun_trivial, 2.0, + loss='hinge', method=self.method) + + def test_fun(self): + # Test that res.fun is actual residuals, and not modified by loss + # function stuff. + for loss in LOSSES: + res = least_squares(fun_trivial, 2.0, loss=loss, + method=self.method) + assert_equal(res.fun, fun_trivial(res.x)) + + def test_grad(self): + # Test that res.grad is true gradient of loss function at the + # solution. Use max_nfev = 1, to avoid reaching minimum. + x = np.array([2.0]) # res.x will be this. + + res = least_squares(fun_trivial, x, jac_trivial, loss='linear', + max_nfev=1, method=self.method) + assert_equal(res.grad, 2 * x * (x**2 + 5)) + + res = least_squares(fun_trivial, x, jac_trivial, loss='huber', + max_nfev=1, method=self.method) + assert_equal(res.grad, 2 * x) + + res = least_squares(fun_trivial, x, jac_trivial, loss='soft_l1', + max_nfev=1, method=self.method) + assert_allclose(res.grad, + 2 * x * (x**2 + 5) / (1 + (x**2 + 5)**2)**0.5) + + res = least_squares(fun_trivial, x, jac_trivial, loss='cauchy', + max_nfev=1, method=self.method) + assert_allclose(res.grad, 2 * x * (x**2 + 5) / (1 + (x**2 + 5)**2)) + + res = least_squares(fun_trivial, x, jac_trivial, loss='arctan', + max_nfev=1, method=self.method) + assert_allclose(res.grad, 2 * x * (x**2 + 5) / (1 + (x**2 + 5)**4)) + + res = least_squares(fun_trivial, x, jac_trivial, loss=cubic_soft_l1, + max_nfev=1, method=self.method) + assert_allclose(res.grad, + 2 * x * (x**2 + 5) / (1 + (x**2 + 5)**2)**(2/3)) + + def test_jac(self): + # Test that res.jac.T.dot(res.jac) gives Gauss-Newton approximation + # of Hessian. This approximation is computed by doubly differentiating + # the cost function and dropping the part containing second derivative + # of f. For a scalar function it is computed as + # H = (rho' + 2 * rho'' * f**2) * f'**2, if the expression inside the + # brackets is less than EPS it is replaced by EPS. Here, we check + # against the root of H. + + x = 2.0 # res.x will be this. + f = x**2 + 5 # res.fun will be this. + + res = least_squares(fun_trivial, x, jac_trivial, loss='linear', + max_nfev=1, method=self.method) + assert_equal(res.jac, 2 * x) + + # For `huber` loss the Jacobian correction is identically zero + # in outlier region, in such cases it is modified to be equal to EPS**0.5. + res = least_squares(fun_trivial, x, jac_trivial, loss='huber', + max_nfev=1, method=self.method) + assert_equal(res.jac, 2 * x * EPS**0.5) + + # Now, let's apply `f_scale` to turn the residual into an inlier. + # The loss function becomes linear.
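+ # Worked numbers for the huber call below, using the H formula quoted
+ # above: at x = 2 the residual is f = x**2 + 5 = 9, and with f_scale = 10
+ # the scaled quantity (f / f_scale)**2 = 0.81 < 1 falls in huber's inlier
+ # region, where rho' = 1 and rho'' = 0; hence H = f'**2 and the reported
+ # jac is its root, f' = 2 * x.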
+ res = least_squares(fun_trivial, x, jac_trivial, loss='huber', + f_scale=10, max_nfev=1) + assert_equal(res.jac, 2 * x) + + # 'soft_l1' always gives a positive scaling. + res = least_squares(fun_trivial, x, jac_trivial, loss='soft_l1', + max_nfev=1, method=self.method) + assert_allclose(res.jac, 2 * x * (1 + f**2)**-0.75) + + # For 'cauchy' the correction term turns out to be negative, and it + # is replaced by EPS**0.5. + res = least_squares(fun_trivial, x, jac_trivial, loss='cauchy', + max_nfev=1, method=self.method) + assert_allclose(res.jac, 2 * x * EPS**0.5) + + # Now use scaling to turn the residual into an inlier. + res = least_squares(fun_trivial, x, jac_trivial, loss='cauchy', + f_scale=10, max_nfev=1, method=self.method) + fs = f / 10 + assert_allclose(res.jac, 2 * x * (1 - fs**2)**0.5 / (1 + fs**2)) + + # 'arctan' gives an outlier. + res = least_squares(fun_trivial, x, jac_trivial, loss='arctan', + max_nfev=1, method=self.method) + assert_allclose(res.jac, 2 * x * EPS**0.5) + + # Turn it into an inlier. + res = least_squares(fun_trivial, x, jac_trivial, loss='arctan', + f_scale=20.0, max_nfev=1, method=self.method) + fs = f / 20 + assert_allclose(res.jac, 2 * x * (1 - 3 * fs**4)**0.5 / (1 + fs**4)) + + # cubic_soft_l1 will give an outlier. + res = least_squares(fun_trivial, x, jac_trivial, loss=cubic_soft_l1, + max_nfev=1) + assert_allclose(res.jac, 2 * x * EPS**0.5) + + # Turn it into an inlier. + res = least_squares(fun_trivial, x, jac_trivial, + loss=cubic_soft_l1, f_scale=6, max_nfev=1) + fs = f / 6 + assert_allclose(res.jac, + 2 * x * (1 - fs**2 / 3)**0.5 * (1 + fs**2)**(-5/6)) + + def test_robustness(self): + for noise in [0.1, 1.0]: + p = ExponentialFittingProblem(1, 0.1, noise, random_seed=0) + + for jac in ['2-point', '3-point', 'cs', p.jac]: + res_lsq = least_squares(p.fun, p.p0, jac=jac, + method=self.method) + assert_allclose(res_lsq.optimality, 0, atol=1e-2) + for loss in LOSSES: + if loss == 'linear': + continue + res_robust = least_squares( + p.fun, p.p0, jac=jac, loss=loss, f_scale=noise, + method=self.method) + assert_allclose(res_robust.optimality, 0, atol=1e-2) + assert_(norm(res_robust.x - p.p_opt) < + norm(res_lsq.x - p.p_opt)) + + +class TestDogbox(BaseMixin, BoundsMixin, SparseMixin, LossFunctionMixin): + method = 'dogbox' + + +class TestTRF(BaseMixin, BoundsMixin, SparseMixin, LossFunctionMixin): + method = 'trf' + + def test_lsmr_regularization(self): + p = BroydenTridiagonal() + for regularize in [True, False]: + res = least_squares(p.fun, p.x0, p.jac, method='trf', + tr_options={'regularize': regularize}) + assert_allclose(res.cost, 0, atol=1e-20) + + +class TestLM(BaseMixin): + method = 'lm' + + def test_bounds_not_supported(self): + assert_raises(ValueError, least_squares, fun_trivial, + 2.0, bounds=(-3.0, 3.0), method='lm') + + def test_m_less_n_not_supported(self): + x0 = [-2, 1] + assert_raises(ValueError, least_squares, fun_rosenbrock_cropped, x0, + method='lm') + + def test_sparse_not_supported(self): + p = BroydenTridiagonal() + assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac, + method='lm') + + def test_jac_sparsity_not_supported(self): + assert_raises(ValueError, least_squares, fun_trivial, 2.0, + jac_sparsity=[1], method='lm') + + def test_LinearOperator_not_supported(self): + p = BroydenTridiagonal(mode="operator") + assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac, + method='lm') + + def test_loss(self): + res = least_squares(fun_trivial, 2.0, loss='linear', method='lm') + assert_allclose(res.x, 0.0, atol=1e-4) +
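+ # 'lm' delegates to MINPACK, which has no robust-loss machinery, so
+ # every loss other than the default 'linear' must be rejected
+ # (checked with 'huber' below).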
+ assert_raises(ValueError, least_squares, fun_trivial, 2.0, + method='lm', loss='huber') + + +def test_basic(): + # test that 'method' arg is really optional + res = least_squares(fun_trivial, 2.0) + assert_allclose(res.x, 0, atol=1e-10) + + +def test_small_tolerances_for_lm(): + for ftol, xtol, gtol in [(None, 1e-13, 1e-13), + (1e-13, None, 1e-13), + (1e-13, 1e-13, None)]: + assert_raises(ValueError, least_squares, fun_trivial, 2.0, xtol=xtol, + ftol=ftol, gtol=gtol, method='lm') + + +def test_fp32_gh12991(): + # checks that smaller FP sizes can be used in least_squares + # this is the minimum working example reported for gh12991 + np.random.seed(1) + + x = np.linspace(0, 1, 100).astype("float32") + y = np.random.random(100).astype("float32") + + def func(p, x): + return p[0] + p[1] * x + + def err(p, x, y): + return func(p, x) - y + + res = least_squares(err, [-1.0, -1.0], args=(x, y)) + # previously the initial jacobian calculated for this would be all 0 + # and the minimize would terminate immediately, with nfev=1, would + # report a successful minimization (it shouldn't have done), but be + # unchanged from the initial solution. + # It was terminating early because the underlying approx_derivative + # used a step size for FP64 when the working space was FP32. + assert res.nfev > 2 + assert_allclose(res.x, np.array([0.4082241, 0.15530563]), atol=5e-5) + + +def test_gh_18793_and_19351(): + answer = 1e-12 + initial_guess = 1.1e-12 + + def chi2(x): + return (x-answer)**2 + + gtol = 1e-15 + res = least_squares(chi2, x0=initial_guess, gtol=1e-15, bounds=(0, np.inf)) + # Original motivation: gh-18793 + # if we choose an initial condition that is close to the solution + # we shouldn't return an answer that is further away from the solution + + # Update: gh-19351 + # However this requirement does not go well with 'trf' algorithm logic. + # Some regressions were reported after the presumed fix. + # The returned solution is good as long as it satisfies the convergence + # conditions. + # Specifically in this case the scaled gradient will be sufficiently low. + + scaling, _ = CL_scaling_vector(res.x, res.grad, + np.atleast_1d(0), np.atleast_1d(np.inf)) + assert res.status == 1 # Converged by gradient + assert np.linalg.norm(res.grad * scaling, ord=np.inf) < gtol + + +def test_gh_19103(): + # Checks that least_squares trf method selects a strictly feasible point, + # and thus succeeds instead of failing, + # when the initial guess is reported exactly at a boundary point. + # This is a reduced example from gh-19103 + + ydata = np.array([0.] * 66 + [ + 1., 0., 0., 0., 0., 0., 1., 1., 0., 0., 1., + 1., 1., 1., 0., 0., 0., 1., 0., 0., 2., 1., + 0., 3., 1., 6., 5., 0., 0., 2., 8., 4., 4., + 6., 9., 7., 2., 7., 8., 2., 13., 9., 8., 11., + 10., 13., 14., 19., 11., 15., 18., 26., 19., 32., 29., + 28., 36., 32., 35., 36., 43., 52., 32., 58., 56., 52., + 67., 53., 72., 88., 77., 95., 94., 84., 86., 101., 107., + 108., 118., 96., 115., 138., 137., + ]) + xdata = np.arange(0, ydata.size) * 0.1 + + def exponential_wrapped(params): + A, B, x0 = params + return A * np.exp(B * (xdata - x0)) - ydata + + x0 = [0.01, 1., 5.]
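+ # x0[0] sits exactly on the lower bound defined next; 'trf' is expected
+ # to nudge the start into the strict interior of the box (in the spirit
+ # of make_strictly_feasible imported above) instead of failing at the
+ # boundary.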
+ bounds = ((0.01, 0, 0), (np.inf, 10, 20.9)) + res = least_squares(exponential_wrapped, x0, method='trf', bounds=bounds) + assert res.success diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_linear_assignment.py b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_linear_assignment.py new file mode 100644 index 0000000000000000000000000000000000000000..d59792da9eef38e313eaa0bca70f873627f8d3cf --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_linear_assignment.py @@ -0,0 +1,116 @@ +# Author: Brian M. Clapper, G. Varoquaux, Lars Buitinck +# License: BSD + +from numpy.testing import assert_array_equal +import pytest + +import numpy as np + +from scipy.optimize import linear_sum_assignment +from scipy.sparse import random +from scipy.sparse._sputils import matrix +from scipy.sparse.csgraph import min_weight_full_bipartite_matching +from scipy.sparse.csgraph.tests.test_matching import ( + linear_sum_assignment_assertions, linear_sum_assignment_test_cases +) + + +def test_linear_sum_assignment_input_shape(): + with pytest.raises(ValueError, match="expected a matrix"): + linear_sum_assignment([1, 2, 3]) + + +def test_linear_sum_assignment_input_object(): + C = [[1, 2, 3], [4, 5, 6]] + assert_array_equal(linear_sum_assignment(C), + linear_sum_assignment(np.asarray(C))) + assert_array_equal(linear_sum_assignment(C), + linear_sum_assignment(matrix(C))) + + +def test_linear_sum_assignment_input_bool(): + I = np.identity(3) + assert_array_equal(linear_sum_assignment(I.astype(np.bool_)), + linear_sum_assignment(I)) + + +def test_linear_sum_assignment_input_string(): + I = np.identity(3) + with pytest.raises(TypeError, match="Cannot cast array data"): + linear_sum_assignment(I.astype(str)) + + +def test_linear_sum_assignment_input_nan(): + I = np.diag([np.nan, 1, 1]) + with pytest.raises(ValueError, match="contains invalid numeric entries"): + linear_sum_assignment(I) + + +def test_linear_sum_assignment_input_neginf(): + I = np.diag([1, -np.inf, 1]) + with pytest.raises(ValueError, match="contains invalid numeric entries"): + linear_sum_assignment(I) + + +def test_linear_sum_assignment_input_inf(): + I = np.identity(3) + I[:, 0] = np.inf + with pytest.raises(ValueError, match="cost matrix is infeasible"): + linear_sum_assignment(I) + + +def test_constant_cost_matrix(): + # Fixes #11602 + n = 8 + C = np.ones((n, n)) + row_ind, col_ind = linear_sum_assignment(C) + assert_array_equal(row_ind, np.arange(n)) + assert_array_equal(col_ind, np.arange(n)) + + +@pytest.mark.parametrize('num_rows,num_cols', [(0, 0), (2, 0), (0, 3)]) +def test_linear_sum_assignment_trivial_cost(num_rows, num_cols): + C = np.empty(shape=(num_cols, num_rows)) + row_ind, col_ind = linear_sum_assignment(C) + assert len(row_ind) == 0 + assert len(col_ind) == 0 + + +@pytest.mark.parametrize('sign,test_case', linear_sum_assignment_test_cases) +def test_linear_sum_assignment_small_inputs(sign, test_case): + linear_sum_assignment_assertions( + linear_sum_assignment, np.array, sign, test_case) + + +# Tests that combine scipy.optimize.linear_sum_assignment and +# scipy.sparse.csgraph.min_weight_full_bipartite_matching +def test_two_methods_give_same_result_on_many_sparse_inputs(): + # As opposed to the test above, here we do not spell out the expected + # output; only assert that the two methods give the same result. + # Concretely, the below tests 100 cases of size 100x100, out of which + # 36 are infeasible. 
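+ # (Infeasible here means that no full matching exists -- at density
+ # 0.06 a 100x100 matrix often has a row or column with no finite entry,
+ # and more generally Hall's condition can fail -- so both solvers are
+ # expected to raise ValueError rather than return a partial assignment.)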
+ np.random.seed(1234) + for _ in range(100): + lsa_raises = False + mwfbm_raises = False + sparse = random(100, 100, density=0.06, + data_rvs=lambda size: np.random.randint(1, 100, size)) + # In csgraph, zeros correspond to missing edges, so we explicitly + # replace those with infinities + dense = np.full(sparse.shape, np.inf) + dense[sparse.row, sparse.col] = sparse.data + sparse = sparse.tocsr() + try: + row_ind, col_ind = linear_sum_assignment(dense) + lsa_cost = dense[row_ind, col_ind].sum() + except ValueError: + lsa_raises = True + try: + row_ind, col_ind = min_weight_full_bipartite_matching(sparse) + mwfbm_cost = sparse[row_ind, col_ind].sum() + except ValueError: + mwfbm_raises = True + # Ensure that if one method raises, so does the other one. + assert lsa_raises == mwfbm_raises + if not lsa_raises: + assert lsa_cost == mwfbm_cost diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_linprog.py b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_linprog.py new file mode 100644 index 0000000000000000000000000000000000000000..49a0f8de5a20b82b3b036d2e835951d927c266ca --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_linprog.py @@ -0,0 +1,2473 @@ +""" +Unit test for Linear Programming +""" +import sys +import platform + +import numpy as np +from numpy.testing import (assert_, assert_allclose, assert_equal, + assert_array_less, assert_warns, suppress_warnings) +from pytest import raises as assert_raises +from scipy.optimize import linprog, OptimizeWarning +from scipy.optimize._numdiff import approx_derivative +from scipy.sparse.linalg import MatrixRankWarning +from scipy.linalg import LinAlgWarning +from scipy._lib._util import VisibleDeprecationWarning +import scipy.sparse +import pytest + +has_umfpack = True +try: + from scikits.umfpack import UmfpackWarning +except ImportError: + has_umfpack = False + +has_cholmod = True +try: + import sksparse # noqa: F401 + from sksparse.cholmod import cholesky as cholmod # noqa: F401 +except ImportError: + has_cholmod = False + + +def _assert_iteration_limit_reached(res, maxiter): + assert_(not res.success, "Incorrectly reported success") + assert_(res.nit <= maxiter, "Incorrectly reported number of iterations") + assert_equal(res.status, 1, "Failed to report iteration limit reached") + + +def _assert_infeasible(res): + # res: linprog result object + assert_(not res.success, "incorrectly reported success") + assert_equal(res.status, 2, "failed to report infeasible status") + + +def _assert_unbounded(res): + # res: linprog result object + assert_(not res.success, "incorrectly reported success") + assert_equal(res.status, 3, "failed to report unbounded status") + + +def _assert_unable_to_find_basic_feasible_sol(res): + # res: linprog result object + + # The status may be either 2 or 4 depending on why the feasible solution + # could not be found. If the underlying problem is expected to not have a + # feasible solution, _assert_infeasible should be used.
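+ # (For reference, the linprog status codes asserted by these helpers:
+ # 0 = optimal, 1 = iteration limit reached, 2 = infeasible,
+ # 3 = unbounded, 4 = numerical difficulties encountered.)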
+ assert_(not res.success, "incorrectly reported success") + assert_(res.status in (2, 4), "failed to report optimization failure") + + +def _assert_success(res, desired_fun=None, desired_x=None, + rtol=1e-8, atol=1e-8): + # res: linprog result object + # desired_fun: desired objective function value or None + # desired_x: desired solution or None + if not res.success: + msg = f"linprog status {res.status}, message: {res.message}" + raise AssertionError(msg) + + assert_equal(res.status, 0) + if desired_fun is not None: + assert_allclose(res.fun, desired_fun, + err_msg="converged to an unexpected objective value", + rtol=rtol, atol=atol) + if desired_x is not None: + assert_allclose(res.x, desired_x, + err_msg="converged to an unexpected solution", + rtol=rtol, atol=atol) + + +def magic_square(n): + """ + Generates a linear program for which integer solutions represent an + n x n magic square; binary decision variables represent the presence + (or absence) of an integer 1 to n^2 in each position of the square. + """ + + np.random.seed(0) + M = n * (n**2 + 1) / 2 + + numbers = np.arange(n**4) // n**2 + 1 + + numbers = numbers.reshape(n**2, n, n) + + zeros = np.zeros((n**2, n, n)) + + A_list = [] + b_list = [] + + # Rule 1: use every number exactly once + for i in range(n**2): + A_row = zeros.copy() + A_row[i, :, :] = 1 + A_list.append(A_row.flatten()) + b_list.append(1) + + # Rule 2: Only one number per square + for i in range(n): + for j in range(n): + A_row = zeros.copy() + A_row[:, i, j] = 1 + A_list.append(A_row.flatten()) + b_list.append(1) + + # Rule 3: sum of rows is M + for i in range(n): + A_row = zeros.copy() + A_row[:, i, :] = numbers[:, i, :] + A_list.append(A_row.flatten()) + b_list.append(M) + + # Rule 4: sum of columns is M + for i in range(n): + A_row = zeros.copy() + A_row[:, :, i] = numbers[:, :, i] + A_list.append(A_row.flatten()) + b_list.append(M) + + # Rule 5: sum of diagonals is M + A_row = zeros.copy() + A_row[:, range(n), range(n)] = numbers[:, range(n), range(n)] + A_list.append(A_row.flatten()) + b_list.append(M) + A_row = zeros.copy() + A_row[:, range(n), range(-1, -n - 1, -1)] = \ + numbers[:, range(n), range(-1, -n - 1, -1)] + A_list.append(A_row.flatten()) + b_list.append(M) + + A = np.array(np.vstack(A_list), dtype=float) + b = np.array(b_list, dtype=float) + c = np.random.rand(A.shape[1]) + + return A, b, c, numbers, M + + +def lpgen_2d(m, n): + """ -> A b c LP test: m*n vars, m+n constraints + row sums == n/m, col sums == 1 + https://gist.github.com/denis-bz/8647461 + """ + np.random.seed(0) + c = - np.random.exponential(size=(m, n)) + Arow = np.zeros((m, m * n)) + brow = np.zeros(m) + for j in range(m): + j1 = j + 1 + Arow[j, j * n:j1 * n] = 1 + brow[j] = n / m + + Acol = np.zeros((n, m * n)) + bcol = np.zeros(n) + for j in range(n): + j1 = j + 1 + Acol[j, j::n] = 1 + bcol[j] = 1 + + A = np.vstack((Arow, Acol)) + b = np.hstack((brow, bcol)) + + return A, b, c.ravel() + + +def very_random_gen(seed=0): + np.random.seed(seed) + m_eq, m_ub, n = 10, 20, 50 + c = np.random.rand(n)-0.5 + A_ub = np.random.rand(m_ub, n)-0.5 + b_ub = np.random.rand(m_ub)-0.5 + A_eq = np.random.rand(m_eq, n)-0.5 + b_eq = np.random.rand(m_eq)-0.5 + lb = -np.random.rand(n) + ub = np.random.rand(n) + lb[lb < -np.random.rand()] = -np.inf + ub[ub > np.random.rand()] = np.inf + bounds = np.vstack((lb, ub)).T + return c, A_ub, b_ub, A_eq, b_eq, bounds + + +def nontrivial_problem(): + c = [-1, 8, 4, -6] + A_ub = [[-7, -7, 6, 9], + [1, -1, -3, 0], + [10, -10, -7, 7], + [6, -1, 3, 4]] + b_ub = 
[-3, 6, -6, 6] + A_eq = [[-10, 1, 1, -8]] + b_eq = [-4] + x_star = [101 / 1391, 1462 / 1391, 0, 752 / 1391] + f_star = 7083 / 1391 + return c, A_ub, b_ub, A_eq, b_eq, x_star, f_star + + +def l1_regression_prob(seed=0, m=8, d=9, n=100): + ''' + Training data is {(x0, y0), (x1, y2), ..., (xn-1, yn-1)} + x in R^d + y in R + n: number of training samples + d: dimension of x, i.e. x in R^d + phi: feature map R^d -> R^m + m: dimension of feature space + ''' + np.random.seed(seed) + phi = np.random.normal(0, 1, size=(m, d)) # random feature mapping + w_true = np.random.randn(m) + x = np.random.normal(0, 1, size=(d, n)) # features + y = w_true @ (phi @ x) + np.random.normal(0, 1e-5, size=n) # measurements + + # construct the problem + c = np.ones(m+n) + c[:m] = 0 + A_ub = scipy.sparse.lil_matrix((2*n, n+m)) + idx = 0 + for ii in range(n): + A_ub[idx, :m] = phi @ x[:, ii] + A_ub[idx, m+ii] = -1 + A_ub[idx+1, :m] = -1*phi @ x[:, ii] + A_ub[idx+1, m+ii] = -1 + idx += 2 + A_ub = A_ub.tocsc() + b_ub = np.zeros(2*n) + b_ub[0::2] = y + b_ub[1::2] = -y + bnds = [(None, None)]*m + [(0, None)]*n + return c, A_ub, b_ub, bnds + + +def generic_callback_test(self): + # Check that callback is as advertised + last_cb = {} + + def cb(res): + message = res.pop('message') + complete = res.pop('complete') + + assert_(res.pop('phase') in (1, 2)) + assert_(res.pop('status') in range(4)) + assert_(isinstance(res.pop('nit'), int)) + assert_(isinstance(complete, bool)) + assert_(isinstance(message, str)) + + last_cb['x'] = res['x'] + last_cb['fun'] = res['fun'] + last_cb['slack'] = res['slack'] + last_cb['con'] = res['con'] + + c = np.array([-3, -2]) + A_ub = [[2, 1], [1, 1], [1, 0]] + b_ub = [10, 8, 4] + res = linprog(c, A_ub=A_ub, b_ub=b_ub, callback=cb, method=self.method) + + _assert_success(res, desired_fun=-18.0, desired_x=[2, 6]) + assert_allclose(last_cb['fun'], res['fun']) + assert_allclose(last_cb['x'], res['x']) + assert_allclose(last_cb['con'], res['con']) + assert_allclose(last_cb['slack'], res['slack']) + + +def test_unknown_solvers_and_options(): + c = np.array([-3, -2]) + A_ub = [[2, 1], [1, 1], [1, 0]] + b_ub = [10, 8, 4] + + assert_raises(ValueError, linprog, + c, A_ub=A_ub, b_ub=b_ub, method='ekki-ekki-ekki') + assert_raises(ValueError, linprog, + c, A_ub=A_ub, b_ub=b_ub, method='highs-ekki') + message = "Unrecognized options detected: {'rr_method': 'ekki-ekki-ekki'}" + with pytest.warns(OptimizeWarning, match=message): + linprog(c, A_ub=A_ub, b_ub=b_ub, + options={"rr_method": 'ekki-ekki-ekki'}) + + +def test_choose_solver(): + # 'highs' chooses 'dual' + c = np.array([-3, -2]) + A_ub = [[2, 1], [1, 1], [1, 0]] + b_ub = [10, 8, 4] + + res = linprog(c, A_ub, b_ub, method='highs') + _assert_success(res, desired_fun=-18.0, desired_x=[2, 6]) + + +def test_deprecation(): + with pytest.warns(DeprecationWarning): + linprog(1, method='interior-point') + with pytest.warns(DeprecationWarning): + linprog(1, method='revised simplex') + with pytest.warns(DeprecationWarning): + linprog(1, method='simplex') + + +def test_highs_status_message(): + res = linprog(1, method='highs') + msg = "Optimization terminated successfully. (HiGHS Status 7:" + assert res.status == 0 + assert res.message.startswith(msg) + + A, b, c, numbers, M = magic_square(6) + bounds = [(0, 1)] * len(c) + integrality = [1] * len(c) + options = {"time_limit": 0.1} + res = linprog(c=c, A_eq=A, b_eq=b, bounds=bounds, method='highs', + options=options, integrality=integrality) + msg = "Time limit reached. 
(HiGHS Status 13:" + assert res.status == 1 + assert res.message.startswith(msg) + + options = {"maxiter": 10} + res = linprog(c=c, A_eq=A, b_eq=b, bounds=bounds, method='highs-ds', + options=options) + msg = "Iteration limit reached. (HiGHS Status 14:" + assert res.status == 1 + assert res.message.startswith(msg) + + res = linprog(1, bounds=(1, -1), method='highs') + msg = "The problem is infeasible. (HiGHS Status 8:" + assert res.status == 2 + assert res.message.startswith(msg) + + res = linprog(-1, method='highs') + msg = "The problem is unbounded. (HiGHS Status 10:" + assert res.status == 3 + assert res.message.startswith(msg) + + from scipy.optimize._linprog_highs import _highs_to_scipy_status_message + status, message = _highs_to_scipy_status_message(58, "Hello!") + msg = "The HiGHS status code was not recognized. (HiGHS Status 58:" + assert status == 4 + assert message.startswith(msg) + + status, message = _highs_to_scipy_status_message(None, None) + msg = "HiGHS did not provide a status code. (HiGHS Status None: None)" + assert status == 4 + assert message.startswith(msg) + + +def test_bug_17380(): + linprog([1, 1], A_ub=[[-1, 0]], b_ub=[-2.5], integrality=[1, 1]) + + +A_ub = None +b_ub = None +A_eq = None +b_eq = None +bounds = None + +################ +# Common Tests # +################ + + +class LinprogCommonTests: + """ + Base class for `linprog` tests. Generally, each test will be performed + once for every derived class of LinprogCommonTests, each of which will + typically change self.options and/or self.method. Effectively, these tests + are run for many combination of method (simplex, revised simplex, and + interior point) and options (such as pivoting rule or sparse treatment). + """ + + ################## + # Targeted Tests # + ################## + + def test_callback(self): + generic_callback_test(self) + + def test_disp(self): + # test that display option does not break anything. + A, b, c = lpgen_2d(20, 20) + res = linprog(c, A_ub=A, b_ub=b, method=self.method, + options={"disp": True}) + _assert_success(res, desired_fun=-64.049494229) + + def test_docstring_example(self): + # Example from linprog docstring. 
+ c = [-1, 4] + A = [[-3, 1], [1, 2]] + b = [6, 4] + x0_bounds = (None, None) + x1_bounds = (-3, None) + res = linprog(c, A_ub=A, b_ub=b, bounds=(x0_bounds, x1_bounds), + options=self.options, method=self.method) + _assert_success(res, desired_fun=-22) + + def test_type_error(self): + # (presumably) checks that linprog recognizes type errors + # This is tested more carefully in test__linprog_clean_inputs.py + c = [1] + A_eq = [[1]] + b_eq = "hello" + assert_raises(TypeError, linprog, + c, A_eq=A_eq, b_eq=b_eq, + method=self.method, options=self.options) + + def test_aliasing_b_ub(self): + # (presumably) checks that linprog does not modify b_ub + # This is tested more carefully in test__linprog_clean_inputs.py + c = np.array([1.0]) + A_ub = np.array([[1.0]]) + b_ub_orig = np.array([3.0]) + b_ub = b_ub_orig.copy() + bounds = (-4.0, np.inf) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=-4, desired_x=[-4]) + assert_allclose(b_ub_orig, b_ub) + + def test_aliasing_b_eq(self): + # (presumably) checks that linprog does not modify b_eq + # This is tested more carefully in test__linprog_clean_inputs.py + c = np.array([1.0]) + A_eq = np.array([[1.0]]) + b_eq_orig = np.array([3.0]) + b_eq = b_eq_orig.copy() + bounds = (-4.0, np.inf) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=3, desired_x=[3]) + assert_allclose(b_eq_orig, b_eq) + + def test_non_ndarray_args(self): + # (presumably) checks that linprog accepts list in place of arrays + # This is tested more carefully in test__linprog_clean_inputs.py + c = [1.0] + A_ub = [[1.0]] + b_ub = [3.0] + A_eq = [[1.0]] + b_eq = [2.0] + bounds = (-1.0, 10.0) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=2, desired_x=[2]) + + def test_unknown_options(self): + c = np.array([-3, -2]) + A_ub = [[2, 1], [1, 1], [1, 0]] + b_ub = [10, 8, 4] + + def f(c, A_ub=None, b_ub=None, A_eq=None, + b_eq=None, bounds=None, options={}): + linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=options) + + o = {key: self.options[key] for key in self.options} + o['spam'] = 42 + + assert_warns(OptimizeWarning, f, + c, A_ub=A_ub, b_ub=b_ub, options=o) + + def test_integrality_without_highs(self): + # ensure that using `integrality` parameter without `method='highs'` + # raises warning and produces correct solution to relaxed problem + # source: https://en.wikipedia.org/wiki/Integer_programming#Example + A_ub = np.array([[-1, 1], [3, 2], [2, 3]]) + b_ub = np.array([1, 12, 12]) + c = -np.array([0, 1]) + + bounds = [(0, np.inf)] * len(c) + integrality = [1] * len(c) + + with np.testing.assert_warns(OptimizeWarning): + res = linprog(c=c, A_ub=A_ub, b_ub=b_ub, bounds=bounds, + method=self.method, integrality=integrality) + + np.testing.assert_allclose(res.x, [1.8, 2.8]) + np.testing.assert_allclose(res.fun, -2.8) + + def test_invalid_inputs(self): + + def f(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None): + linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + + # Test ill-formatted bounds + assert_raises(ValueError, f, [1, 2, 3], bounds=[(1, 2), (3, 4)]) + with np.testing.suppress_warnings() as sup: + sup.filter(VisibleDeprecationWarning, "Creating an ndarray from ragged") + assert_raises(ValueError, f, [1, 2, 3], bounds=[(1, 2), (3, 4), (3, 4, 5)]) + assert_raises(ValueError, 
f, [1, 2, 3], bounds=[(1, -2), (1, 2)])
+
+        # Test other invalid inputs
+        assert_raises(ValueError, f, [1, 2], A_ub=[[1, 2]], b_ub=[1, 2])
+        assert_raises(ValueError, f, [1, 2], A_ub=[[1]], b_ub=[1])
+        assert_raises(ValueError, f, [1, 2], A_eq=[[1, 2]], b_eq=[1, 2])
+        assert_raises(ValueError, f, [1, 2], A_eq=[[1]], b_eq=[1])
+        assert_raises(ValueError, f, [1, 2], A_eq=[1], b_eq=1)
+
+        # this last check doesn't make sense for sparse presolve
+        if ("_sparse_presolve" in self.options and
+                self.options["_sparse_presolve"]):
+            return
+            # there aren't 3-D sparse matrices
+
+        assert_raises(ValueError, f, [1, 2], A_ub=np.zeros((1, 1, 3)), b_eq=1)
+
+    def test_sparse_constraints(self):
+        # gh-13559: improve error message for sparse inputs when unsupported
+        def f(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None):
+            linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                    method=self.method, options=self.options)
+
+        np.random.seed(0)
+        m = 100
+        n = 150
+        A_eq = scipy.sparse.rand(m, n, 0.5)
+        x_valid = np.random.randn(n)
+        c = np.random.randn(n)
+        ub = x_valid + np.random.rand(n)
+        lb = x_valid - np.random.rand(n)
+        bounds = np.column_stack((lb, ub))
+        b_eq = A_eq * x_valid
+
+        if self.method in {'simplex', 'revised simplex'}:
+            # simplex and revised simplex should raise an error
+            with pytest.raises(ValueError, match=f"Method '{self.method}' "
+                               "does not support sparse constraint matrices."):
+                linprog(c=c, A_eq=A_eq, b_eq=b_eq, bounds=bounds,
+                        method=self.method, options=self.options)
+        else:
+            # other methods should succeed
+            options = {**self.options}
+            if self.method in {'interior-point'}:
+                options['sparse'] = True
+
+            res = linprog(c=c, A_eq=A_eq, b_eq=b_eq, bounds=bounds,
+                          method=self.method, options=options)
+            assert res.success
+
+    def test_maxiter(self):
+        # test iteration limit with the Enzo example
+        c = [4, 8, 3, 0, 0, 0]
+        A = [
+            [2, 5, 3, -1, 0, 0],
+            [3, 2.5, 8, 0, -1, 0],
+            [8, 10, 4, 0, 0, -1]]
+        b = [185, 155, 600]
+        np.random.seed(0)
+        maxiter = 3
+        res = linprog(c, A_eq=A, b_eq=b, method=self.method,
+                      options={"maxiter": maxiter})
+        _assert_iteration_limit_reached(res, maxiter)
+        assert_equal(res.nit, maxiter)
+
+    def test_bounds_fixed(self):
+
+        # Test fixed bounds (upper equal to lower)
+        # If presolve option True, test if solution found in presolve (i.e.
+        # number of iterations is 0).
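+        # The idea: when lb == ub the variable is fixed, so presolve can
+        # substitute it into the objective and constraints and return the
+        # answer without any solver iterations; e.g. min x s.t. 1 <= x <= 1
+        # is simply x = 1 with fun = 1, the first case below.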
+ do_presolve = self.options.get('presolve', True) + + res = linprog([1], bounds=(1, -2), method=self.method, options=self.options) + _assert_infeasible(res) + if do_presolve: + assert_equal(res.nit, 0) + + res = linprog([1], bounds=[(1, -2)], method=self.method, options=self.options) + _assert_infeasible(res) + if do_presolve: + assert_equal(res.nit, 0) + + res = linprog([1, 2, 3], bounds=[(5, 0), (1, 2), (3, 4)], + method=self.method, options=self.options) + _assert_infeasible(res) + if do_presolve: + assert_equal(res.nit, 0) + + def test_bounds_infeasible_2(self): + + # Test ill-valued bounds (lower inf, upper -inf) + # If presolve option True, test if solution found in presolve (i.e. + # number of iterations is 0). + # For the simplex method, the cases do not result in an + # infeasible status, but in a RuntimeWarning. This is a + # consequence of having _presolve() take care of feasibility + # checks. See issue gh-11618. + do_presolve = self.options.get('presolve', True) + simplex_without_presolve = not do_presolve and self.method == 'simplex' + + c = [1, 2, 3] + bounds_1 = [(1, 2), (np.inf, np.inf), (3, 4)] + bounds_2 = [(1, 2), (-np.inf, -np.inf), (3, 4)] + + if simplex_without_presolve: + def g(c, bounds): + res = linprog(c, bounds=bounds, + method=self.method, options=self.options) + return res + + with pytest.warns(RuntimeWarning): + with pytest.raises(IndexError): + g(c, bounds=bounds_1) + + with pytest.warns(RuntimeWarning): + with pytest.raises(IndexError): + g(c, bounds=bounds_2) + else: + res = linprog(c=c, bounds=bounds_1, + method=self.method, options=self.options) + _assert_infeasible(res) + if do_presolve: + assert_equal(res.nit, 0) + res = linprog(c=c, bounds=bounds_2, + method=self.method, options=self.options) + _assert_infeasible(res) + if do_presolve: + assert_equal(res.nit, 0) + + def test_empty_constraint_1(self): + c = [-1, -2] + res = linprog(c, method=self.method, options=self.options) + _assert_unbounded(res) + + def test_empty_constraint_2(self): + c = [-1, 1, -1, 1] + bounds = [(0, np.inf), (-np.inf, 0), (-1, 1), (-1, 1)] + res = linprog(c, bounds=bounds, + method=self.method, options=self.options) + _assert_unbounded(res) + # Unboundedness detected in presolve requires no iterations + if self.options.get('presolve', True): + assert_equal(res.nit, 0) + + def test_empty_constraint_3(self): + c = [1, -1, 1, -1] + bounds = [(0, np.inf), (-np.inf, 0), (-1, 1), (-1, 1)] + res = linprog(c, bounds=bounds, + method=self.method, options=self.options) + _assert_success(res, desired_x=[0, 0, -1, 1], desired_fun=-2) + + def test_inequality_constraints(self): + # Minimize linear function subject to linear inequality constraints. + # http://www.dam.brown.edu/people/huiwang/classes/am121/Archive/simplex_121_c.pdf + c = np.array([3, 2]) * -1 # maximize + A_ub = [[2, 1], + [1, 1], + [1, 0]] + b_ub = [10, 8, 4] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=-18, desired_x=[2, 6]) + + def test_inequality_constraints2(self): + # Minimize linear function subject to linear inequality constraints. 
+ # http://www.statslab.cam.ac.uk/~ff271/teaching/opt/notes/notes8.pdf + # (dead link) + c = [6, 3] + A_ub = [[0, 3], + [-1, -1], + [-2, 1]] + b_ub = [2, -1, -1] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=5, desired_x=[2 / 3, 1 / 3]) + + def test_bounds_simple(self): + c = [1, 2] + bounds = (1, 2) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_x=[1, 1]) + + bounds = [(1, 2), (1, 2)] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_x=[1, 1]) + + def test_bounded_below_only_1(self): + c = np.array([1.0]) + A_eq = np.array([[1.0]]) + b_eq = np.array([3.0]) + bounds = (1.0, None) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=3, desired_x=[3]) + + def test_bounded_below_only_2(self): + c = np.ones(3) + A_eq = np.eye(3) + b_eq = np.array([1, 2, 3]) + bounds = (0.5, np.inf) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_x=b_eq, desired_fun=np.sum(b_eq)) + + def test_bounded_above_only_1(self): + c = np.array([1.0]) + A_eq = np.array([[1.0]]) + b_eq = np.array([3.0]) + bounds = (None, 10.0) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=3, desired_x=[3]) + + def test_bounded_above_only_2(self): + c = np.ones(3) + A_eq = np.eye(3) + b_eq = np.array([1, 2, 3]) + bounds = (-np.inf, 4) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_x=b_eq, desired_fun=np.sum(b_eq)) + + def test_bounds_infinity(self): + c = np.ones(3) + A_eq = np.eye(3) + b_eq = np.array([1, 2, 3]) + bounds = (-np.inf, np.inf) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_x=b_eq, desired_fun=np.sum(b_eq)) + + def test_bounds_mixed(self): + # Problem has one unbounded variable and + # another with a negative lower bound. + c = np.array([-1, 4]) * -1 # maximize + A_ub = np.array([[-3, 1], + [1, 2]], dtype=np.float64) + b_ub = [6, 4] + x0_bounds = (-np.inf, np.inf) + x1_bounds = (-3, np.inf) + bounds = (x0_bounds, x1_bounds) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=-80 / 7, desired_x=[-8 / 7, 18 / 7]) + + def test_bounds_equal_but_infeasible(self): + c = [-4, 1] + A_ub = [[7, -2], [0, 1], [2, -2]] + b_ub = [14, 0, 3] + bounds = [(2, 2), (0, None)] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_infeasible(res) + + def test_bounds_equal_but_infeasible2(self): + c = [-4, 1] + A_eq = [[7, -2], [0, 1], [2, -2]] + b_eq = [14, 0, 3] + bounds = [(2, 2), (0, None)] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_infeasible(res) + + def test_bounds_equal_no_presolve(self): + # There was a bug when a lower and upper bound were equal but + # presolve was not on to eliminate the variable. The bound + # was being converted to an equality constraint, but the bound + # was not eliminated, leading to issues in postprocessing. 
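+        # Note the problem below is infeasible: with x1 fixed at 2 by its
+        # bounds, the first constraint forces x0 + 4 <= 4, i.e. x0 <= 0,
+        # which contradicts the bound x0 >= 1. The test checks that this is
+        # still detected when the fixed bound cannot be presolved away.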
+ c = [1, 2] + A_ub = [[1, 2], [1.1, 2.2]] + b_ub = [4, 8] + bounds = [(1, 2), (2, 2)] + + o = {key: self.options[key] for key in self.options} + o["presolve"] = False + + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=o) + _assert_infeasible(res) + + def test_zero_column_1(self): + m, n = 3, 4 + np.random.seed(0) + c = np.random.rand(n) + c[1] = 1 + A_eq = np.random.rand(m, n) + A_eq[:, 1] = 0 + b_eq = np.random.rand(m) + A_ub = [[1, 0, 1, 1]] + b_ub = 3 + bounds = [(-10, 10), (-10, 10), (-10, None), (None, None)] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=-9.7087836730413404) + + def test_zero_column_2(self): + if self.method in {'highs-ds', 'highs-ipm'}: + # See upstream issue https://github.com/ERGO-Code/HiGHS/issues/648 + pytest.xfail() + + np.random.seed(0) + m, n = 2, 4 + c = np.random.rand(n) + c[1] = -1 + A_eq = np.random.rand(m, n) + A_eq[:, 1] = 0 + b_eq = np.random.rand(m) + + A_ub = np.random.rand(m, n) + A_ub[:, 1] = 0 + b_ub = np.random.rand(m) + bounds = (None, None) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_unbounded(res) + # Unboundedness detected in presolve + if self.options.get('presolve', True) and "highs" not in self.method: + # HiGHS detects unboundedness or infeasibility in presolve + # It needs an iteration of simplex to be sure of unboundedness + # Other solvers report that the problem is unbounded if feasible + assert_equal(res.nit, 0) + + def test_zero_row_1(self): + c = [1, 2, 3] + A_eq = [[0, 0, 0], [1, 1, 1], [0, 0, 0]] + b_eq = [0, 3, 0] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=3) + + def test_zero_row_2(self): + A_ub = [[0, 0, 0], [1, 1, 1], [0, 0, 0]] + b_ub = [0, 3, 0] + c = [1, 2, 3] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=0) + + def test_zero_row_3(self): + m, n = 2, 4 + c = np.random.rand(n) + A_eq = np.random.rand(m, n) + A_eq[0, :] = 0 + b_eq = np.random.rand(m) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_infeasible(res) + + # Infeasibility detected in presolve + if self.options.get('presolve', True): + assert_equal(res.nit, 0) + + def test_zero_row_4(self): + m, n = 2, 4 + c = np.random.rand(n) + A_ub = np.random.rand(m, n) + A_ub[0, :] = 0 + b_ub = -np.random.rand(m) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_infeasible(res) + + # Infeasibility detected in presolve + if self.options.get('presolve', True): + assert_equal(res.nit, 0) + + def test_singleton_row_eq_1(self): + c = [1, 1, 1, 2] + A_eq = [[1, 0, 0, 0], [0, 2, 0, 0], [1, 0, 0, 0], [1, 1, 1, 1]] + b_eq = [1, 2, 2, 4] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_infeasible(res) + + # Infeasibility detected in presolve + if self.options.get('presolve', True): + assert_equal(res.nit, 0) + + def test_singleton_row_eq_2(self): + c = [1, 1, 1, 2] + A_eq = [[1, 0, 0, 0], [0, 2, 0, 0], [1, 0, 0, 0], [1, 1, 1, 1]] + b_eq = [1, 2, 1, 4] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=4) + + def test_singleton_row_ub_1(self): + c = [1, 1, 1, 2] + A_ub = [[1, 0, 0, 0], [0, 2, 0, 0], [-1, 
0, 0, 0], [1, 1, 1, 1]] + b_ub = [1, 2, -2, 4] + bounds = [(None, None), (0, None), (0, None), (0, None)] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_infeasible(res) + + # Infeasibility detected in presolve + if self.options.get('presolve', True): + assert_equal(res.nit, 0) + + def test_singleton_row_ub_2(self): + c = [1, 1, 1, 2] + A_ub = [[1, 0, 0, 0], [0, 2, 0, 0], [-1, 0, 0, 0], [1, 1, 1, 1]] + b_ub = [1, 2, -0.5, 4] + bounds = [(None, None), (0, None), (0, None), (0, None)] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=0.5) + + def test_infeasible(self): + # Test linprog response to an infeasible problem + c = [-1, -1] + A_ub = [[1, 0], + [0, 1], + [-1, -1]] + b_ub = [2, 2, -5] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_infeasible(res) + + def test_infeasible_inequality_bounds(self): + c = [1] + A_ub = [[2]] + b_ub = 4 + bounds = (5, 6) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_infeasible(res) + + # Infeasibility detected in presolve + if self.options.get('presolve', True): + assert_equal(res.nit, 0) + + def test_unbounded(self): + # Test linprog response to an unbounded problem + c = np.array([1, 1]) * -1 # maximize + A_ub = [[-1, 1], + [-1, -1]] + b_ub = [-1, -2] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_unbounded(res) + + def test_unbounded_below_no_presolve_corrected(self): + c = [1] + bounds = [(None, 1)] + + o = {key: self.options[key] for key in self.options} + o["presolve"] = False + + res = linprog(c=c, bounds=bounds, + method=self.method, + options=o) + if self.method == "revised simplex": + # Revised simplex has a special pathway for no constraints. + assert_equal(res.status, 5) + else: + _assert_unbounded(res) + + def test_unbounded_no_nontrivial_constraints_1(self): + """ + Test whether presolve pathway for detecting unboundedness after + constraint elimination is working. + """ + c = np.array([0, 0, 0, 1, -1, -1]) + A_ub = np.array([[1, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, -1]]) + b_ub = np.array([2, -2, 0]) + bounds = [(None, None), (None, None), (None, None), + (-1, 1), (-1, 1), (0, None)] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_unbounded(res) + if not self.method.lower().startswith("highs"): + assert_equal(res.x[-1], np.inf) + assert_equal(res.message[:36], + "The problem is (trivially) unbounded") + + def test_unbounded_no_nontrivial_constraints_2(self): + """ + Test whether presolve pathway for detecting unboundedness after + constraint elimination is working. 
+ """ + c = np.array([0, 0, 0, 1, -1, 1]) + A_ub = np.array([[1, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1]]) + b_ub = np.array([2, -2, 0]) + bounds = [(None, None), (None, None), (None, None), + (-1, 1), (-1, 1), (None, 0)] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_unbounded(res) + if not self.method.lower().startswith("highs"): + assert_equal(res.x[-1], -np.inf) + assert_equal(res.message[:36], + "The problem is (trivially) unbounded") + + def test_cyclic_recovery(self): + # Test linprogs recovery from cycling using the Klee-Minty problem + # Klee-Minty https://www.math.ubc.ca/~israel/m340/kleemin3.pdf + c = np.array([100, 10, 1]) * -1 # maximize + A_ub = [[1, 0, 0], + [20, 1, 0], + [200, 20, 1]] + b_ub = [1, 100, 10000] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_x=[0, 0, 10000], atol=5e-6, rtol=1e-7) + + def test_cyclic_bland(self): + # Test the effect of Bland's rule on a cycling problem + c = np.array([-10, 57, 9, 24.]) + A_ub = np.array([[0.5, -5.5, -2.5, 9], + [0.5, -1.5, -0.5, 1], + [1, 0, 0, 0]]) + b_ub = [0, 0, 1] + + # copy the existing options dictionary but change maxiter + maxiter = 100 + o = {key: val for key, val in self.options.items()} + o['maxiter'] = maxiter + + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=o) + + if self.method == 'simplex' and not self.options.get('bland'): + # simplex cycles without Bland's rule + _assert_iteration_limit_reached(res, o['maxiter']) + else: + # other methods, including simplex with Bland's rule, succeed + _assert_success(res, desired_x=[1, 0, 1, 0]) + # note that revised simplex skips this test because it may or may not + # cycle depending on the initial basis + + def test_remove_redundancy_infeasibility(self): + # mostly a test of redundancy removal, which is carefully tested in + # test__remove_redundancy.py + m, n = 10, 10 + c = np.random.rand(n) + A_eq = np.random.rand(m, n) + b_eq = np.random.rand(m) + A_eq[-1, :] = 2 * A_eq[-2, :] + b_eq[-1] *= -1 + with suppress_warnings() as sup: + sup.filter(OptimizeWarning, "A_eq does not appear...") + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_infeasible(res) + + ################# + # General Tests # + ################# + + def test_nontrivial_problem(self): + # Problem involves all constraint types, + # negative resource limits, and rounding issues. + c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem() + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=f_star, desired_x=x_star) + + def test_lpgen_problem(self): + # Test linprog with a rather large problem (400 variables, + # 40 constraints) generated by https://gist.github.com/denis-bz/8647461 + A_ub, b_ub, c = lpgen_2d(20, 20) + + with suppress_warnings() as sup: + sup.filter(OptimizeWarning, "Solving system with option 'sym_pos'") + sup.filter(RuntimeWarning, "invalid value encountered") + sup.filter(LinAlgWarning) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=-64.049494229) + + def test_network_flow(self): + # A network flow problem with supply and demand at nodes + # and with costs along directed edges. 
+ # https://www.princeton.edu/~rvdb/542/lectures/lec10.pdf + c = [2, 4, 9, 11, 4, 3, 8, 7, 0, 15, 16, 18] + n, p = -1, 1 + A_eq = [ + [n, n, p, 0, p, 0, 0, 0, 0, p, 0, 0], + [p, 0, 0, p, 0, p, 0, 0, 0, 0, 0, 0], + [0, 0, n, n, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, p, p, 0, 0, p, 0], + [0, 0, 0, 0, n, n, n, 0, p, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, n, n, 0, 0, p], + [0, 0, 0, 0, 0, 0, 0, 0, 0, n, n, n]] + b_eq = [0, 19, -16, 33, 0, 0, -36] + with suppress_warnings() as sup: + sup.filter(LinAlgWarning) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=755, atol=1e-6, rtol=1e-7) + + def test_network_flow_limited_capacity(self): + # A network flow problem with supply and demand at nodes + # and with costs and capacities along directed edges. + # http://blog.sommer-forst.de/2013/04/10/ + c = [2, 2, 1, 3, 1] + bounds = [ + [0, 4], + [0, 2], + [0, 2], + [0, 3], + [0, 5]] + n, p = -1, 1 + A_eq = [ + [n, n, 0, 0, 0], + [p, 0, n, n, 0], + [0, p, p, 0, n], + [0, 0, 0, p, p]] + b_eq = [-4, 0, 0, 4] + + with suppress_warnings() as sup: + # this is an UmfpackWarning but I had trouble importing it + if has_umfpack: + sup.filter(UmfpackWarning) + sup.filter(RuntimeWarning, "scipy.linalg.solve\nIll...") + sup.filter(OptimizeWarning, "A_eq does not appear...") + sup.filter(OptimizeWarning, "Solving system with option...") + sup.filter(LinAlgWarning) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=14) + + def test_simplex_algorithm_wikipedia_example(self): + # https://en.wikipedia.org/wiki/Simplex_algorithm#Example + c = [-2, -3, -4] + A_ub = [ + [3, 2, 1], + [2, 5, 3]] + b_ub = [10, 15] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=-20) + + def test_enzo_example(self): + # https://github.com/scipy/scipy/issues/1779 lp2.py + # + # Translated from Octave code at: + # http://www.ecs.shimane-u.ac.jp/~kyoshida/lpeng.htm + # and placed under MIT licence by Enzo Michelangeli + # with permission explicitly granted by the original author, + # Prof. 
Kazunobu Yoshida + c = [4, 8, 3, 0, 0, 0] + A_eq = [ + [2, 5, 3, -1, 0, 0], + [3, 2.5, 8, 0, -1, 0], + [8, 10, 4, 0, 0, -1]] + b_eq = [185, 155, 600] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=317.5, + desired_x=[66.25, 0, 17.5, 0, 183.75, 0], + atol=6e-6, rtol=1e-7) + + def test_enzo_example_b(self): + # rescued from https://github.com/scipy/scipy/pull/218 + c = [2.8, 6.3, 10.8, -2.8, -6.3, -10.8] + A_eq = [[-1, -1, -1, 0, 0, 0], + [0, 0, 0, 1, 1, 1], + [1, 0, 0, 1, 0, 0], + [0, 1, 0, 0, 1, 0], + [0, 0, 1, 0, 0, 1]] + b_eq = [-0.5, 0.4, 0.3, 0.3, 0.3] + + with suppress_warnings() as sup: + sup.filter(OptimizeWarning, "A_eq does not appear...") + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=-1.77, + desired_x=[0.3, 0.2, 0.0, 0.0, 0.1, 0.3]) + + def test_enzo_example_c_with_degeneracy(self): + # rescued from https://github.com/scipy/scipy/pull/218 + m = 20 + c = -np.ones(m) + tmp = 2 * np.pi * np.arange(1, m + 1) / (m + 1) + A_eq = np.vstack((np.cos(tmp) - 1, np.sin(tmp))) + b_eq = [0, 0] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=0, desired_x=np.zeros(m)) + + def test_enzo_example_c_with_unboundedness(self): + # rescued from https://github.com/scipy/scipy/pull/218 + m = 50 + c = -np.ones(m) + tmp = 2 * np.pi * np.arange(m) / (m + 1) + # This test relies on `cos(0) -1 == sin(0)`, so ensure that's true + # (SIMD code or -ffast-math may cause spurious failures otherwise) + row0 = np.cos(tmp) - 1 + row0[0] = 0.0 + row1 = np.sin(tmp) + row1[0] = 0.0 + A_eq = np.vstack((row0, row1)) + b_eq = [0, 0] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_unbounded(res) + + def test_enzo_example_c_with_infeasibility(self): + # rescued from https://github.com/scipy/scipy/pull/218 + m = 50 + c = -np.ones(m) + tmp = 2 * np.pi * np.arange(m) / (m + 1) + A_eq = np.vstack((np.cos(tmp) - 1, np.sin(tmp))) + b_eq = [1, 1] + + o = {key: self.options[key] for key in self.options} + o["presolve"] = False + + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=o) + _assert_infeasible(res) + + def test_basic_artificial_vars(self): + # Problem is chosen to test two phase simplex methods when at the end + # of phase 1 some artificial variables remain in the basis. + # Also, for `method='simplex'`, the row in the tableau corresponding + # with the artificial variables is not all zero. 
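+        # For reference: phase 1 of the two-phase method minimizes the sum
+        # of the artificial variables to find a basic feasible solution.
+        # Usually all artificials leave the basis by the end of phase 1; in
+        # degenerate problems like this one, some remain basic at value zero
+        # and phase 2 must handle them correctly.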
+ c = np.array([-0.1, -0.07, 0.004, 0.004, 0.004, 0.004]) + A_ub = np.array([[1.0, 0, 0, 0, 0, 0], [-1.0, 0, 0, 0, 0, 0], + [0, -1.0, 0, 0, 0, 0], [0, 1.0, 0, 0, 0, 0], + [1.0, 1.0, 0, 0, 0, 0]]) + b_ub = np.array([3.0, 3.0, 3.0, 3.0, 20.0]) + A_eq = np.array([[1.0, 0, -1, 1, -1, 1], [0, -1.0, -1, 1, -1, 1]]) + b_eq = np.array([0, 0]) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=0, desired_x=np.zeros_like(c), + atol=2e-6) + + def test_optimize_result(self): + # check all fields in OptimizeResult + c, A_ub, b_ub, A_eq, b_eq, bounds = very_random_gen(0) + res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, + bounds=bounds, method=self.method, options=self.options) + assert_(res.success) + assert_(res.nit) + assert_(not res.status) + if 'highs' not in self.method: + # HiGHS status/message tested separately + assert_(res.message == "Optimization terminated successfully.") + assert_allclose(c @ res.x, res.fun) + assert_allclose(b_eq - A_eq @ res.x, res.con, atol=1e-11) + assert_allclose(b_ub - A_ub @ res.x, res.slack, atol=1e-11) + for key in ['eqlin', 'ineqlin', 'lower', 'upper']: + if key in res.keys(): + assert isinstance(res[key]['marginals'], np.ndarray) + assert isinstance(res[key]['residual'], np.ndarray) + + ################# + # Bug Fix Tests # + ################# + + def test_bug_5400(self): + # https://github.com/scipy/scipy/issues/5400 + bounds = [ + (0, None), + (0, 100), (0, 100), (0, 100), (0, 100), (0, 100), (0, 100), + (0, 900), (0, 900), (0, 900), (0, 900), (0, 900), (0, 900), + (0, None), (0, None), (0, None), (0, None), (0, None), (0, None)] + + f = 1 / 9 + g = -1e4 + h = -3.1 + A_ub = np.array([ + [1, -2.99, 0, 0, -3, 0, 0, 0, -1, -1, 0, -1, -1, 1, 1, 0, 0, 0, 0], + [1, 0, -2.9, h, 0, -3, 0, -1, 0, 0, -1, 0, -1, 0, 0, 1, 1, 0, 0], + [1, 0, 0, h, 0, 0, -3, -1, -1, 0, -1, -1, 0, 0, 0, 0, 0, 1, 1], + [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1], + [0, 1.99, -1, -1, 0, 0, 0, -1, f, f, 0, 0, 0, g, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 2, -1, -1, 0, 0, 0, -1, f, f, 0, g, 0, 0, 0, 0], + [0, -1, 1.9, 2.1, 0, 0, 0, f, -1, -1, 0, 0, 0, 0, 0, g, 0, 0, 0], + [0, 0, 0, 0, -1, 2, -1, 0, 0, 0, f, -1, f, 0, 0, 0, g, 0, 0], + [0, -1, -1, 2.1, 0, 0, 0, f, f, -1, 0, 0, 0, 0, 0, 0, 0, g, 0], + [0, 0, 0, 0, -1, -1, 2, 0, 0, 0, f, f, -1, 0, 0, 0, 0, 0, g]]) + + b_ub = np.array([ + 0.0, 0, 0, 100, 100, 100, 100, 100, 100, 900, 900, 900, 900, 900, + 900, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0]) + + c = np.array([-1.0, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]) + with suppress_warnings() as sup: + sup.filter(OptimizeWarning, + "Solving system with option 'sym_pos'") + sup.filter(RuntimeWarning, "invalid value encountered") + sup.filter(LinAlgWarning) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=-106.63507541835018) + + def test_bug_6139(self): + # linprog(method='simplex') fails to find a basic feasible solution + # if phase 1 pseudo-objective function is outside the provided tol. + # https://github.com/scipy/scipy/issues/6139 + + # Note: This is not strictly a bug as the default tolerance determines + # if a result is "close enough" to zero and should not be expected + # to work for all cases. + + c = np.array([1, 1, 1]) + A_eq = np.array([[1., 0., 0.], [-1000., 0., - 1000.]]) + b_eq = np.array([5.00000000e+00, -1.00000000e+04]) + A_ub = -np.array([[0., 1000000., 1010000.]]) + b_ub = -np.array([10000000.]) + bounds = (None, None) + + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + + _assert_success(res, desired_fun=14.95, + desired_x=np.array([5, 4.95, 5])) + + def test_bug_6690(self): + # linprog simplex used to violate bound constraint despite reporting + # success. + # https://github.com/scipy/scipy/issues/6690 + + A_eq = np.array([[0, 0, 0, 0.93, 0, 0.65, 0, 0, 0.83, 0]]) + b_eq = np.array([0.9626]) + A_ub = np.array([ + [0, 0, 0, 1.18, 0, 0, 0, -0.2, 0, -0.22], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0.43, 0, 0, 0, 0, 0, 0], + [0, -1.22, -0.25, 0, 0, 0, -2.06, 0, 0, 1.37], + [0, 0, 0, 0, 0, 0, 0, -0.25, 0, 0] + ]) + b_ub = np.array([0.615, 0, 0.172, -0.869, -0.022]) + bounds = np.array([ + [-0.84, -0.97, 0.34, 0.4, -0.33, -0.74, 0.47, 0.09, -1.45, -0.73], + [0.37, 0.02, 2.86, 0.86, 1.18, 0.5, 1.76, 0.17, 0.32, -0.15] + ]).T + c = np.array([ + -1.64, 0.7, 1.8, -1.06, -1.16, 0.26, 2.13, 1.53, 0.66, 0.28 + ]) + + with suppress_warnings() as sup: + if has_umfpack: + sup.filter(UmfpackWarning) + sup.filter(OptimizeWarning, + "Solving system with option 'cholesky'") + sup.filter(OptimizeWarning, "Solving system with option 'sym_pos'") + sup.filter(RuntimeWarning, "invalid value encountered") + sup.filter(LinAlgWarning) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + + desired_fun = -1.19099999999 + desired_x = np.array([0.3700, -0.9700, 0.3400, 0.4000, 1.1800, + 0.5000, 0.4700, 0.0900, 0.3200, -0.7300]) + _assert_success(res, desired_fun=desired_fun, desired_x=desired_x) + + # Add small tol value to ensure arrays are less than or equal. + atol = 1e-6 + assert_array_less(bounds[:, 0] - atol, res.x) + assert_array_less(res.x, bounds[:, 1] + atol) + + def test_bug_7044(self): + # linprog simplex failed to "identify correct constraints" (?) + # leading to a non-optimal solution if A is rank-deficient. 
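+        # magic_square(3) naturally produces a rank-deficient A_eq: the
+        # row-sum, column-sum, and use-each-number-once constraints overlap
+        # (e.g. the row-sum rows and the column-sum rows both add up to the
+        # total of all entries), so some equality rows are linear
+        # combinations of the others.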
+ # https://github.com/scipy/scipy/issues/7044 + + A_eq, b_eq, c, _, _ = magic_square(3) + with suppress_warnings() as sup: + sup.filter(OptimizeWarning, "A_eq does not appear...") + sup.filter(RuntimeWarning, "invalid value encountered") + sup.filter(LinAlgWarning) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + + desired_fun = 1.730550597 + _assert_success(res, desired_fun=desired_fun) + assert_allclose(A_eq.dot(res.x), b_eq) + assert_array_less(np.zeros(res.x.size) - 1e-5, res.x) + + def test_bug_7237(self): + # https://github.com/scipy/scipy/issues/7237 + # linprog simplex "explodes" when the pivot value is very + # close to zero. + + c = np.array([-1, 0, 0, 0, 0, 0, 0, 0, 0]) + A_ub = np.array([ + [1., -724., 911., -551., -555., -896., 478., -80., -293.], + [1., 566., 42., 937., 233., 883., 392., -909., 57.], + [1., -208., -894., 539., 321., 532., -924., 942., 55.], + [1., 857., -859., 83., 462., -265., -971., 826., 482.], + [1., 314., -424., 245., -424., 194., -443., -104., -429.], + [1., 540., 679., 361., 149., -827., 876., 633., 302.], + [0., -1., -0., -0., -0., -0., -0., -0., -0.], + [0., -0., -1., -0., -0., -0., -0., -0., -0.], + [0., -0., -0., -1., -0., -0., -0., -0., -0.], + [0., -0., -0., -0., -1., -0., -0., -0., -0.], + [0., -0., -0., -0., -0., -1., -0., -0., -0.], + [0., -0., -0., -0., -0., -0., -1., -0., -0.], + [0., -0., -0., -0., -0., -0., -0., -1., -0.], + [0., -0., -0., -0., -0., -0., -0., -0., -1.], + [0., 1., 0., 0., 0., 0., 0., 0., 0.], + [0., 0., 1., 0., 0., 0., 0., 0., 0.], + [0., 0., 0., 1., 0., 0., 0., 0., 0.], + [0., 0., 0., 0., 1., 0., 0., 0., 0.], + [0., 0., 0., 0., 0., 1., 0., 0., 0.], + [0., 0., 0., 0., 0., 0., 1., 0., 0.], + [0., 0., 0., 0., 0., 0., 0., 1., 0.], + [0., 0., 0., 0., 0., 0., 0., 0., 1.] + ]) + b_ub = np.array([ + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1.]) + A_eq = np.array([[0., 1., 1., 1., 1., 1., 1., 1., 1.]]) + b_eq = np.array([[1.]]) + bounds = [(None, None)] * 9 + + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=108.568535, atol=1e-6) + + def test_bug_8174(self): + # https://github.com/scipy/scipy/issues/8174 + # The simplex method sometimes "explodes" if the pivot value is very + # close to zero. + A_ub = np.array([ + [22714, 1008, 13380, -2713.5, -1116], + [-4986, -1092, -31220, 17386.5, 684], + [-4986, 0, 0, -2713.5, 0], + [22714, 0, 0, 17386.5, 0]]) + b_ub = np.zeros(A_ub.shape[0]) + c = -np.ones(A_ub.shape[1]) + bounds = [(0, 1)] * A_ub.shape[1] + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "invalid value encountered") + sup.filter(LinAlgWarning) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + + if self.options.get('tol', 1e-9) < 1e-10 and self.method == 'simplex': + _assert_unable_to_find_basic_feasible_sol(res) + else: + _assert_success(res, desired_fun=-2.0080717488789235, atol=1e-6) + + def test_bug_8174_2(self): + # Test supplementary example from issue 8174. 
+ # https://github.com/scipy/scipy/issues/8174 + # https://stackoverflow.com/questions/47717012/linprog-in-scipy-optimize-checking-solution + c = np.array([1, 0, 0, 0, 0, 0, 0]) + A_ub = -np.identity(7) + b_ub = np.array([[-2], [-2], [-2], [-2], [-2], [-2], [-2]]) + A_eq = np.array([ + [1, 1, 1, 1, 1, 1, 0], + [0.3, 1.3, 0.9, 0, 0, 0, -1], + [0.3, 0, 0, 0, 0, 0, -2/3], + [0, 0.65, 0, 0, 0, 0, -1/15], + [0, 0, 0.3, 0, 0, 0, -1/15] + ]) + b_eq = np.array([[100], [0], [0], [0], [0]]) + + with suppress_warnings() as sup: + if has_umfpack: + sup.filter(UmfpackWarning) + sup.filter(OptimizeWarning, "A_eq does not appear...") + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=43.3333333331385) + + def test_bug_8561(self): + # Test that pivot row is chosen correctly when using Bland's rule + # This was originally written for the simplex method with + # Bland's rule only, but it doesn't hurt to test all methods/options + # https://github.com/scipy/scipy/issues/8561 + c = np.array([7, 0, -4, 1.5, 1.5]) + A_ub = np.array([ + [4, 5.5, 1.5, 1.0, -3.5], + [1, -2.5, -2, 2.5, 0.5], + [3, -0.5, 4, -12.5, -7], + [-1, 4.5, 2, -3.5, -2], + [5.5, 2, -4.5, -1, 9.5]]) + b_ub = np.array([0, 0, 0, 0, 1]) + res = linprog(c, A_ub=A_ub, b_ub=b_ub, options=self.options, + method=self.method) + _assert_success(res, desired_x=[0, 0, 19, 16/3, 29/3]) + + def test_bug_8662(self): + # linprog simplex used to report incorrect optimal results + # https://github.com/scipy/scipy/issues/8662 + c = [-10, 10, 6, 3] + A_ub = [[8, -8, -4, 6], + [-8, 8, 4, -6], + [-4, 4, 8, -4], + [3, -3, -3, -10]] + b_ub = [9, -9, -9, -4] + bounds = [(0, None), (0, None), (0, None), (0, None)] + desired_fun = 36.0000000000 + + with suppress_warnings() as sup: + if has_umfpack: + sup.filter(UmfpackWarning) + sup.filter(RuntimeWarning, "invalid value encountered") + sup.filter(LinAlgWarning) + res1 = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + + # Set boundary condition as a constraint + A_ub.append([0, 0, -1, 0]) + b_ub.append(0) + bounds[2] = (None, None) + + with suppress_warnings() as sup: + if has_umfpack: + sup.filter(UmfpackWarning) + sup.filter(RuntimeWarning, "invalid value encountered") + sup.filter(LinAlgWarning) + res2 = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + rtol = 1e-5 + _assert_success(res1, desired_fun=desired_fun, rtol=rtol) + _assert_success(res2, desired_fun=desired_fun, rtol=rtol) + + def test_bug_8663(self): + # exposed a bug in presolve + # https://github.com/scipy/scipy/issues/8663 + c = [1, 5] + A_eq = [[0, -7]] + b_eq = [-6] + bounds = [(0, None), (None, None)] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_x=[0, 6./7], desired_fun=5*6./7) + + def test_bug_8664(self): + # interior-point has trouble with this when presolve is off + # tested for interior-point with presolve off in TestLinprogIPSpecific + # https://github.com/scipy/scipy/issues/8664 + c = [4] + A_ub = [[2], [5]] + b_ub = [4, 4] + A_eq = [[0], [-8], [9]] + b_eq = [3, 2, 10] + with suppress_warnings() as sup: + sup.filter(RuntimeWarning) + sup.filter(OptimizeWarning, "Solving system with option...") + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_infeasible(res) + + def test_bug_8973(self): + """ + Test whether bug described at: + 
https://github.com/scipy/scipy/issues/8973 + was fixed. + """ + c = np.array([0, 0, 0, 1, -1]) + A_ub = np.array([[1, 0, 0, 0, 0], [0, 1, 0, 0, 0]]) + b_ub = np.array([2, -2]) + bounds = [(None, None), (None, None), (None, None), (-1, 1), (-1, 1)] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + # solution vector x is not unique + _assert_success(res, desired_fun=-2) + # HiGHS IPM had an issue where the following wasn't true! + assert_equal(c @ res.x, res.fun) + + def test_bug_8973_2(self): + """ + Additional test for: + https://github.com/scipy/scipy/issues/8973 + suggested in + https://github.com/scipy/scipy/pull/8985 + review by @antonior92 + """ + c = np.zeros(1) + A_ub = np.array([[1]]) + b_ub = np.array([-2]) + bounds = (None, None) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_x=[-2], desired_fun=0) + + def test_bug_10124(self): + """ + Test for linprog docstring problem + 'disp'=True caused revised simplex failure + """ + c = np.zeros(1) + A_ub = np.array([[1]]) + b_ub = np.array([-2]) + bounds = (None, None) + c = [-1, 4] + A_ub = [[-3, 1], [1, 2]] + b_ub = [6, 4] + bounds = [(None, None), (-3, None)] + o = {"disp": True} + o.update(self.options) + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=o) + _assert_success(res, desired_x=[10, -3], desired_fun=-22) + + def test_bug_10349(self): + """ + Test for redundancy removal tolerance issue + https://github.com/scipy/scipy/issues/10349 + """ + A_eq = np.array([[1, 1, 0, 0, 0, 0], + [0, 0, 1, 1, 0, 0], + [0, 0, 0, 0, 1, 1], + [1, 0, 1, 0, 0, 0], + [0, 0, 0, 1, 1, 0], + [0, 1, 0, 0, 0, 1]]) + b_eq = np.array([221, 210, 10, 141, 198, 102]) + c = np.concatenate((0, 1, np.zeros(4)), axis=None) + with suppress_warnings() as sup: + sup.filter(OptimizeWarning, "A_eq does not appear...") + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + _assert_success(res, desired_x=[129, 92, 12, 198, 0, 10], desired_fun=92) + + @pytest.mark.skipif(sys.platform == 'darwin', + reason=("Failing on some local macOS builds, " + "see gh-13846")) + def test_bug_10466(self): + """ + Test that autoscale fixes poorly-scaled problem + """ + c = [-8., -0., -8., -0., -8., -0., -0., -0., -0., -0., -0., -0., -0.] 
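+        # Note the scaling mismatch this test exercises: c is O(1) while
+        # b_eq below is O(1e8)-O(1e9), which is what makes the problem
+        # poorly scaled for the legacy solvers without autoscale.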
+        A_eq = [[1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
+                [0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
+                [0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0.],
+                [1., 0., 1., 0., 1., 0., -1., 0., 0., 0., 0., 0., 0.],
+                [1., 0., 1., 0., 1., 0., 0., 1., 0., 0., 0., 0., 0.],
+                [1., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0.],
+                [1., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0.],
+                [1., 0., 1., 0., 1., 0., 0., 0., 0., 0., 1., 0., 0.],
+                [0., 0., 1., 0., 1., 0., 0., 0., 0., 0., 0., 1., 0.],
+                [0., 0., 1., 0., 1., 0., 0., 0., 0., 0., 0., 0., 1.]]
+
+        b_eq = [3.14572800e+08, 4.19430400e+08, 5.24288000e+08,
+                1.00663296e+09, 1.07374182e+09, 1.07374182e+09,
+                1.07374182e+09, 1.07374182e+09, 1.07374182e+09,
+                1.07374182e+09]
+
+        o = {}
+        # HiGHS methods don't use the autoscale option
+        if not self.method.startswith("highs"):
+            o = {"autoscale": True}
+        o.update(self.options)
+
+        with suppress_warnings() as sup:
+            sup.filter(OptimizeWarning, "Solving system with option...")
+            if has_umfpack:
+                sup.filter(UmfpackWarning)
+            sup.filter(RuntimeWarning, "scipy.linalg.solve\nIll...")
+            sup.filter(RuntimeWarning, "divide by zero encountered...")
+            sup.filter(RuntimeWarning, "overflow encountered...")
+            sup.filter(RuntimeWarning, "invalid value encountered...")
+            sup.filter(LinAlgWarning, "Ill-conditioned matrix...")
+            res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                          method=self.method, options=o)
+        assert_allclose(res.fun, -8589934560)
+
+#########################
+# Method-specific Tests #
+#########################
+
+
+@pytest.mark.filterwarnings("ignore::DeprecationWarning")
+class LinprogSimplexTests(LinprogCommonTests):
+    method = "simplex"
+
+
+@pytest.mark.filterwarnings("ignore::DeprecationWarning")
+class LinprogIPTests(LinprogCommonTests):
+    method = "interior-point"
+
+    def test_bug_10466(self):
+        pytest.skip("Test is failing, but solver is deprecated.")
+
+
+@pytest.mark.filterwarnings("ignore::DeprecationWarning")
+class LinprogRSTests(LinprogCommonTests):
+    method = "revised simplex"
+
+    # Revised simplex does not reliably solve these problems.
+    # Failure is intermittent due to the random choice of elements to
+    # complete the basis after phase 1 terminates. In any case, linprog
+    # exits gracefully, reporting numerical difficulties. I do not think
+    # this should prevent revised simplex from being merged, as it solves
+    # the problems most of the time and solves a broader range of problems
+    # than the existing simplex implementation.
+    # I believe that the root cause is the same for all three and that this
+    # same issue prevents revised simplex from solving many other problems
+    # reliably. Somehow the pivoting rule allows the algorithm to pivot into
+    # a singular basis. I haven't been able to find a reference that
+    # acknowledges this possibility, suggesting that there is a bug. On the
+    # other hand, the pivoting rule is quite simple, and I can't find a
+    # mistake, which suggests that this is a possibility with the pivoting
+    # rule. Hopefully, a better pivoting rule will fix the issue.
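+    # The three skips below are the concrete instances of the intermittent
+    # failures described above; when they occur, linprog reports numerical
+    # difficulties (status 4) rather than returning an incorrect solution.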
+ + def test_bug_5400(self): + pytest.skip("Intermittent failure acceptable.") + + def test_bug_8662(self): + pytest.skip("Intermittent failure acceptable.") + + def test_network_flow(self): + pytest.skip("Intermittent failure acceptable.") + + +class LinprogHiGHSTests(LinprogCommonTests): + def test_callback(self): + # this is the problem from test_callback + def cb(res): + return None + c = np.array([-3, -2]) + A_ub = [[2, 1], [1, 1], [1, 0]] + b_ub = [10, 8, 4] + assert_raises(NotImplementedError, linprog, c, A_ub=A_ub, b_ub=b_ub, + callback=cb, method=self.method) + res = linprog(c, A_ub=A_ub, b_ub=b_ub, method=self.method) + _assert_success(res, desired_fun=-18.0, desired_x=[2, 6]) + + @pytest.mark.parametrize("options", + [{"maxiter": -1}, + {"disp": -1}, + {"presolve": -1}, + {"time_limit": -1}, + {"dual_feasibility_tolerance": -1}, + {"primal_feasibility_tolerance": -1}, + {"ipm_optimality_tolerance": -1}, + {"simplex_dual_edge_weight_strategy": "ekki"}, + ]) + def test_invalid_option_values(self, options): + def f(options): + linprog(1, method=self.method, options=options) + options.update(self.options) + assert_warns(OptimizeWarning, f, options=options) + + def test_crossover(self): + A_eq, b_eq, c, _, _ = magic_square(4) + bounds = (0, 1) + res = linprog(c, A_eq=A_eq, b_eq=b_eq, + bounds=bounds, method=self.method, options=self.options) + # there should be nonzero crossover iterations for IPM (only) + assert_equal(res.crossover_nit == 0, self.method != "highs-ipm") + + def test_marginals(self): + # Ensure lagrange multipliers are correct by comparing the derivative + # w.r.t. b_ub/b_eq/ub/lb to the reported duals. + c, A_ub, b_ub, A_eq, b_eq, bounds = very_random_gen(seed=0) + res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, + bounds=bounds, method=self.method, options=self.options) + lb, ub = bounds.T + + # sensitivity w.r.t. b_ub + def f_bub(x): + return linprog(c, A_ub, x, A_eq, b_eq, bounds, + method=self.method).fun + + dfdbub = approx_derivative(f_bub, b_ub, method='3-point', f0=res.fun) + assert_allclose(res.ineqlin.marginals, dfdbub) + + # sensitivity w.r.t. b_eq + def f_beq(x): + return linprog(c, A_ub, b_ub, A_eq, x, bounds, + method=self.method).fun + + dfdbeq = approx_derivative(f_beq, b_eq, method='3-point', f0=res.fun) + assert_allclose(res.eqlin.marginals, dfdbeq) + + # sensitivity w.r.t. lb + def f_lb(x): + bounds = np.array([x, ub]).T + return linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method).fun + + with np.errstate(invalid='ignore'): + # approx_derivative has trouble where lb is infinite + dfdlb = approx_derivative(f_lb, lb, method='3-point', f0=res.fun) + dfdlb[~np.isfinite(lb)] = 0 + + assert_allclose(res.lower.marginals, dfdlb) + + # sensitivity w.r.t. 
ub + def f_ub(x): + bounds = np.array([lb, x]).T + return linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method).fun + + with np.errstate(invalid='ignore'): + dfdub = approx_derivative(f_ub, ub, method='3-point', f0=res.fun) + dfdub[~np.isfinite(ub)] = 0 + + assert_allclose(res.upper.marginals, dfdub) + + def test_dual_feasibility(self): + # Ensure solution is dual feasible using marginals + c, A_ub, b_ub, A_eq, b_eq, bounds = very_random_gen(seed=42) + res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, + bounds=bounds, method=self.method, options=self.options) + + # KKT dual feasibility equation from Theorem 1 from + # http://www.personal.psu.edu/cxg286/LPKKT.pdf + resid = (-c + A_ub.T @ res.ineqlin.marginals + + A_eq.T @ res.eqlin.marginals + + res.upper.marginals + + res.lower.marginals) + assert_allclose(resid, 0, atol=1e-12) + + def test_complementary_slackness(self): + # Ensure that the complementary slackness condition is satisfied. + c, A_ub, b_ub, A_eq, b_eq, bounds = very_random_gen(seed=42) + res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, + bounds=bounds, method=self.method, options=self.options) + + # KKT complementary slackness equation from Theorem 1 from + # http://www.personal.psu.edu/cxg286/LPKKT.pdf modified for + # non-zero RHS + assert np.allclose(res.ineqlin.marginals @ (b_ub - A_ub @ res.x), 0) + + +################################ +# Simplex Option-Specific Tests# +################################ + + +class TestLinprogSimplexDefault(LinprogSimplexTests): + + def setup_method(self): + self.options = {} + + def test_bug_5400(self): + pytest.skip("Simplex fails on this problem.") + + def test_bug_7237_low_tol(self): + # Fails if the tolerance is too strict. Here, we test that + # even if the solution is wrong, the appropriate error is raised. + pytest.skip("Simplex fails on this problem.") + + def test_bug_8174_low_tol(self): + # Fails if the tolerance is too strict. Here, we test that + # even if the solution is wrong, the appropriate warning is issued. + self.options.update({'tol': 1e-12}) + with pytest.warns(OptimizeWarning): + super().test_bug_8174() + + +class TestLinprogSimplexBland(LinprogSimplexTests): + + def setup_method(self): + self.options = {'bland': True} + + def test_bug_5400(self): + pytest.skip("Simplex fails on this problem.") + + def test_bug_8174_low_tol(self): + # Fails if the tolerance is too strict. Here, we test that + # even if the solution is wrong, the appropriate error is raised. + self.options.update({'tol': 1e-12}) + with pytest.raises(AssertionError): + with pytest.warns(OptimizeWarning): + super().test_bug_8174() + + +class TestLinprogSimplexNoPresolve(LinprogSimplexTests): + + def setup_method(self): + self.options = {'presolve': False} + + is_32_bit = np.intp(0).itemsize < 8 + is_linux = sys.platform.startswith('linux') + + @pytest.mark.xfail( + condition=is_32_bit and is_linux, + reason='Fails with warning on 32-bit linux') + def test_bug_5400(self): + super().test_bug_5400() + + def test_bug_6139_low_tol(self): + # Linprog(method='simplex') fails to find a basic feasible solution + # if phase 1 pseudo-objective function is outside the provided tol. + # https://github.com/scipy/scipy/issues/6139 + # Without ``presolve`` eliminating such rows the result is incorrect. 
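+        # With tol tightened to 1e-12 the phase-1 pseudo-objective cannot be
+        # driven close enough to zero, so linprog reports status 4 and the
+        # _assert_success helper raises the AssertionError matched below.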
+ self.options.update({'tol': 1e-12}) + with pytest.raises(AssertionError, match='linprog status 4'): + return super().test_bug_6139() + + def test_bug_7237_low_tol(self): + pytest.skip("Simplex fails on this problem.") + + def test_bug_8174_low_tol(self): + # Fails if the tolerance is too strict. Here, we test that + # even if the solution is wrong, the appropriate warning is issued. + self.options.update({'tol': 1e-12}) + with pytest.warns(OptimizeWarning): + super().test_bug_8174() + + def test_unbounded_no_nontrivial_constraints_1(self): + pytest.skip("Tests behavior specific to presolve") + + def test_unbounded_no_nontrivial_constraints_2(self): + pytest.skip("Tests behavior specific to presolve") + + +####################################### +# Interior-Point Option-Specific Tests# +####################################### + + +class TestLinprogIPDense(LinprogIPTests): + options = {"sparse": False} + + # see https://github.com/scipy/scipy/issues/20216 for skip reason + @pytest.mark.skipif( + sys.platform == 'darwin', + reason="Fails on some macOS builds for reason not relevant to test" + ) + def test_bug_6139(self): + super().test_bug_6139() + +if has_cholmod: + class TestLinprogIPSparseCholmod(LinprogIPTests): + options = {"sparse": True, "cholesky": True} + + +if has_umfpack: + class TestLinprogIPSparseUmfpack(LinprogIPTests): + options = {"sparse": True, "cholesky": False} + + def test_network_flow_limited_capacity(self): + pytest.skip("Failing due to numerical issues on some platforms.") + + +class TestLinprogIPSparse(LinprogIPTests): + options = {"sparse": True, "cholesky": False, "sym_pos": False} + + @pytest.mark.xfail_on_32bit("This test is sensitive to machine epsilon level " + "perturbations in linear system solution in " + "_linprog_ip._sym_solve.") + def test_bug_6139(self): + super().test_bug_6139() + + @pytest.mark.xfail(reason='Fails with ATLAS, see gh-7877') + def test_bug_6690(self): + # Test defined in base class, but can't mark as xfail there + super().test_bug_6690() + + def test_magic_square_sparse_no_presolve(self): + # test linprog with a problem with a rank-deficient A_eq matrix + A_eq, b_eq, c, _, _ = magic_square(3) + bounds = (0, 1) + + with suppress_warnings() as sup: + if has_umfpack: + sup.filter(UmfpackWarning) + sup.filter(MatrixRankWarning, "Matrix is exactly singular") + sup.filter(OptimizeWarning, "Solving system with option...") + + o = {key: self.options[key] for key in self.options} + o["presolve"] = False + + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=o) + _assert_success(res, desired_fun=1.730550597) + + def test_sparse_solve_options(self): + # checking that problem is solved with all column permutation options + A_eq, b_eq, c, _, _ = magic_square(3) + with suppress_warnings() as sup: + sup.filter(OptimizeWarning, "A_eq does not appear...") + sup.filter(OptimizeWarning, "Invalid permc_spec option") + o = {key: self.options[key] for key in self.options} + permc_specs = ('NATURAL', 'MMD_ATA', 'MMD_AT_PLUS_A', + 'COLAMD', 'ekki-ekki-ekki') + # 'ekki-ekki-ekki' raises warning about invalid permc_spec option + # and uses default + for permc_spec in permc_specs: + o["permc_spec"] = permc_spec + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=o) + _assert_success(res, desired_fun=1.730550597) + + +class TestLinprogIPSparsePresolve(LinprogIPTests): + options = {"sparse": True, "_sparse_presolve": True} + + @pytest.mark.xfail_on_32bit("This test is sensitive to machine epsilon level " + 
"perturbations in linear system solution in " + "_linprog_ip._sym_solve.") + def test_bug_6139(self): + super().test_bug_6139() + + def test_enzo_example_c_with_infeasibility(self): + pytest.skip('_sparse_presolve=True incompatible with presolve=False') + + @pytest.mark.xfail(reason='Fails with ATLAS, see gh-7877') + def test_bug_6690(self): + # Test defined in base class, but can't mark as xfail there + super().test_bug_6690() + + +@pytest.mark.filterwarnings("ignore::DeprecationWarning") +class TestLinprogIPSpecific: + method = "interior-point" + # the following tests don't need to be performed separately for + # sparse presolve, sparse after presolve, and dense + + def test_solver_select(self): + # check that default solver is selected as expected + if has_cholmod: + options = {'sparse': True, 'cholesky': True} + elif has_umfpack: + options = {'sparse': True, 'cholesky': False} + else: + options = {'sparse': True, 'cholesky': False, 'sym_pos': False} + A, b, c = lpgen_2d(20, 20) + res1 = linprog(c, A_ub=A, b_ub=b, method=self.method, options=options) + res2 = linprog(c, A_ub=A, b_ub=b, method=self.method) # default solver + assert_allclose(res1.fun, res2.fun, + err_msg="linprog default solver unexpected result", + rtol=2e-15, atol=1e-15) + + def test_unbounded_below_no_presolve_original(self): + # formerly caused segfault in TravisCI w/ "cholesky":True + c = [-1] + bounds = [(None, 1)] + res = linprog(c=c, bounds=bounds, + method=self.method, + options={"presolve": False, "cholesky": True}) + _assert_success(res, desired_fun=-1) + + def test_cholesky(self): + # use cholesky factorization and triangular solves + A, b, c = lpgen_2d(20, 20) + res = linprog(c, A_ub=A, b_ub=b, method=self.method, + options={"cholesky": True}) # only for dense + _assert_success(res, desired_fun=-64.049494229) + + def test_alternate_initial_point(self): + # use "improved" initial point + A, b, c = lpgen_2d(20, 20) + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "scipy.linalg.solve\nIll...") + sup.filter(OptimizeWarning, "Solving system with option...") + sup.filter(LinAlgWarning, "Ill-conditioned matrix...") + res = linprog(c, A_ub=A, b_ub=b, method=self.method, + options={"ip": True, "disp": True}) + # ip code is independent of sparse/dense + _assert_success(res, desired_fun=-64.049494229) + + def test_bug_8664(self): + # interior-point has trouble with this when presolve is off + c = [4] + A_ub = [[2], [5]] + b_ub = [4, 4] + A_eq = [[0], [-8], [9]] + b_eq = [3, 2, 10] + with suppress_warnings() as sup: + sup.filter(RuntimeWarning) + sup.filter(OptimizeWarning, "Solving system with option...") + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options={"presolve": False}) + assert_(not res.success, "Incorrectly reported success") + + +######################################## +# Revised Simplex Option-Specific Tests# +######################################## + + +class TestLinprogRSCommon(LinprogRSTests): + options = {} + + def test_cyclic_bland(self): + pytest.skip("Intermittent failure acceptable.") + + def test_nontrivial_problem_with_guess(self): + c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem() + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options, x0=x_star) + _assert_success(res, desired_fun=f_star, desired_x=x_star) + assert_equal(res.nit, 0) + + def test_nontrivial_problem_with_unbounded_variables(self): + c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem() + bounds = [(None, None), (None, 
None), (0, None), (None, None)] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options, x0=x_star) + _assert_success(res, desired_fun=f_star, desired_x=x_star) + assert_equal(res.nit, 0) + + def test_nontrivial_problem_with_bounded_variables(self): + c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem() + bounds = [(None, 1), (1, None), (0, None), (.4, .6)] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options, x0=x_star) + _assert_success(res, desired_fun=f_star, desired_x=x_star) + assert_equal(res.nit, 0) + + def test_nontrivial_problem_with_negative_unbounded_variable(self): + c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem() + b_eq = [4] + x_star = np.array([-219/385, 582/385, 0, 4/10]) + f_star = 3951/385 + bounds = [(None, None), (1, None), (0, None), (.4, .6)] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options, x0=x_star) + _assert_success(res, desired_fun=f_star, desired_x=x_star) + assert_equal(res.nit, 0) + + def test_nontrivial_problem_with_bad_guess(self): + c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem() + bad_guess = [1, 2, 3, .5] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options, x0=bad_guess) + assert_equal(res.status, 6) + + def test_redundant_constraints_with_guess(self): + A, b, c, _, _ = magic_square(3) + p = np.random.rand(*c.shape) + with suppress_warnings() as sup: + sup.filter(OptimizeWarning, "A_eq does not appear...") + sup.filter(RuntimeWarning, "invalid value encountered") + sup.filter(LinAlgWarning) + res = linprog(c, A_eq=A, b_eq=b, method=self.method) + res2 = linprog(c, A_eq=A, b_eq=b, method=self.method, x0=res.x) + res3 = linprog(c + p, A_eq=A, b_eq=b, method=self.method, x0=res.x) + _assert_success(res2, desired_fun=1.730550597) + assert_equal(res2.nit, 0) + _assert_success(res3) + assert_(res3.nit < res.nit) # hot start reduces iterations + + +class TestLinprogRSBland(LinprogRSTests): + options = {"pivot": "bland"} + + +############################################ +# HiGHS-Simplex-Dual Option-Specific Tests # +############################################ + + +class TestLinprogHiGHSSimplexDual(LinprogHiGHSTests): + method = "highs-ds" + options = {} + + def test_lad_regression(self): + ''' + The scaled model should be optimal, i.e. not produce unscaled model + infeasible. See https://github.com/ERGO-Code/HiGHS/issues/494. 
+ ''' + # Test to ensure gh-13610 is resolved (mismatch between HiGHS scaled + # and unscaled model statuses) + c, A_ub, b_ub, bnds = l1_regression_prob() + res = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=bnds, + method=self.method, options=self.options) + assert_equal(res.status, 0) + assert_(res.x is not None) + assert_(np.all(res.slack > -1e-6)) + assert_(np.all(res.x <= [np.inf if ub is None else ub + for lb, ub in bnds])) + assert_(np.all(res.x >= [-np.inf if lb is None else lb - 1e-7 + for lb, ub in bnds])) + + +################################### +# HiGHS-IPM Option-Specific Tests # +################################### + + +class TestLinprogHiGHSIPM(LinprogHiGHSTests): + method = "highs-ipm" + options = {} + + +################################### +# HiGHS-MIP Option-Specific Tests # +################################### + + +class TestLinprogHiGHSMIP: + method = "highs" + options = {} + + @pytest.mark.xfail(condition=(sys.maxsize < 2 ** 32 and + platform.system() == "Linux"), + run=False, + reason="gh-16347") + def test_mip1(self): + # solve non-relaxed magic square problem (finally!) + # also check that values are all integers - they don't always + # come out of HiGHS that way + n = 4 + A, b, c, numbers, M = magic_square(n) + bounds = [(0, 1)] * len(c) + integrality = [1] * len(c) + + res = linprog(c=c*0, A_eq=A, b_eq=b, bounds=bounds, + method=self.method, integrality=integrality) + + s = (numbers.flatten() * res.x).reshape(n**2, n, n) + square = np.sum(s, axis=0) + np.testing.assert_allclose(square.sum(axis=0), M) + np.testing.assert_allclose(square.sum(axis=1), M) + np.testing.assert_allclose(np.diag(square).sum(), M) + np.testing.assert_allclose(np.diag(square[:, ::-1]).sum(), M) + + np.testing.assert_allclose(res.x, np.round(res.x), atol=1e-12) + + def test_mip2(self): + # solve MIP with inequality constraints and all integer constraints + # source: slide 5, + # https://www.cs.upc.edu/~erodri/webpage/cps/theory/lp/milp/slides.pdf + + # use all array inputs to test gh-16681 (integrality couldn't be array) + A_ub = np.array([[2, -2], [-8, 10]]) + b_ub = np.array([-1, 13]) + c = -np.array([1, 1]) + + bounds = np.array([(0, np.inf)] * len(c)) + integrality = np.ones_like(c) + + res = linprog(c=c, A_ub=A_ub, b_ub=b_ub, bounds=bounds, + method=self.method, integrality=integrality) + + np.testing.assert_allclose(res.x, [1, 2]) + np.testing.assert_allclose(res.fun, -3) + + def test_mip3(self): + # solve MIP with inequality constraints and all integer constraints + # source: https://en.wikipedia.org/wiki/Integer_programming#Example + A_ub = np.array([[-1, 1], [3, 2], [2, 3]]) + b_ub = np.array([1, 12, 12]) + c = -np.array([0, 1]) + + bounds = [(0, np.inf)] * len(c) + integrality = [1] * len(c) + + res = linprog(c=c, A_ub=A_ub, b_ub=b_ub, bounds=bounds, + method=self.method, integrality=integrality) + + np.testing.assert_allclose(res.fun, -2) + # two optimal solutions possible, just need one of them + assert np.allclose(res.x, [1, 2]) or np.allclose(res.x, [2, 2]) + + def test_mip4(self): + # solve MIP with inequality constraints and only one integer constraint + # source: https://www.mathworks.com/help/optim/ug/intlinprog.html + A_ub = np.array([[-1, -2], [-4, -1], [2, 1]]) + b_ub = np.array([14, -33, 20]) + c = np.array([8, 1]) + + bounds = [(0, np.inf)] * len(c) + integrality = [0, 1] + + res = linprog(c=c, A_ub=A_ub, b_ub=b_ub, bounds=bounds, + method=self.method, integrality=integrality) + + np.testing.assert_allclose(res.x, [6.5, 7]) + np.testing.assert_allclose(res.fun, 59) + + def 
test_mip5(self): + # solve MIP with inequality and equality constraints + # source: https://www.mathworks.com/help/optim/ug/intlinprog.html + A_ub = np.array([[1, 1, 1]]) + b_ub = np.array([7]) + A_eq = np.array([[4, 2, 1]]) + b_eq = np.array([12]) + c = np.array([-3, -2, -1]) + + bounds = [(0, np.inf), (0, np.inf), (0, 1)] + integrality = [0, 1, 0] + + res = linprog(c=c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, + bounds=bounds, method=self.method, + integrality=integrality) + + np.testing.assert_allclose(res.x, [0, 6, 0]) + np.testing.assert_allclose(res.fun, -12) + + # gh-16897: these fields were not present, ensure that they are now + assert res.get("mip_node_count", None) is not None + assert res.get("mip_dual_bound", None) is not None + assert res.get("mip_gap", None) is not None + + @pytest.mark.slow + @pytest.mark.timeout(120) # prerelease_deps_coverage_64bit_blas job + def test_mip6(self): + # solve a larger MIP with only equality constraints + # source: https://www.mathworks.com/help/optim/ug/intlinprog.html + A_eq = np.array([[22, 13, 26, 33, 21, 3, 14, 26], + [39, 16, 22, 28, 26, 30, 23, 24], + [18, 14, 29, 27, 30, 38, 26, 26], + [41, 26, 28, 36, 18, 38, 16, 26]]) + b_eq = np.array([7872, 10466, 11322, 12058]) + c = np.array([2, 10, 13, 17, 7, 5, 7, 3]) + + bounds = [(0, np.inf)]*8 + integrality = [1]*8 + + res = linprog(c=c, A_eq=A_eq, b_eq=b_eq, bounds=bounds, + method=self.method, integrality=integrality) + + np.testing.assert_allclose(res.fun, 1854) + + @pytest.mark.xslow + def test_mip_rel_gap_passdown(self): + # MIP taken from test_mip6, solved with different values of mip_rel_gap + # solve a larger MIP with only equality constraints + # source: https://www.mathworks.com/help/optim/ug/intlinprog.html + A_eq = np.array([[22, 13, 26, 33, 21, 3, 14, 26], + [39, 16, 22, 28, 26, 30, 23, 24], + [18, 14, 29, 27, 30, 38, 26, 26], + [41, 26, 28, 36, 18, 38, 16, 26]]) + b_eq = np.array([7872, 10466, 11322, 12058]) + c = np.array([2, 10, 13, 17, 7, 5, 7, 3]) + + bounds = [(0, np.inf)]*8 + integrality = [1]*8 + + mip_rel_gaps = [0.5, 0.25, 0.01, 0.001] + sol_mip_gaps = [] + for mip_rel_gap in mip_rel_gaps: + res = linprog(c=c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, + bounds=bounds, method=self.method, + integrality=integrality, + options={"mip_rel_gap": mip_rel_gap}) + final_mip_gap = res["mip_gap"] + # assert that the solution actually has mip_gap lower than the + # required mip_rel_gap supplied + assert final_mip_gap <= mip_rel_gap + sol_mip_gaps.append(final_mip_gap) + + # make sure that the mip_rel_gap parameter is actually doing something + # check that differences between solution gaps are declining + # monotonically with the mip_rel_gap parameter. np.diff does + # x[i+1] - x[i], so flip the array before differencing to get + # what should be a positive, monotone decreasing series of solution + # gaps + gap_diffs = np.diff(np.flip(sol_mip_gaps)) + assert np.all(gap_diffs >= 0) + assert not np.all(gap_diffs == 0) + + def test_semi_continuous(self): + # See issue #18106. This tests whether the solution is being + # checked correctly (status is 0) when integrality > 1: + # values are allowed to be 0 even if 0 is out of bounds.
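As a standalone illustration of those integrality codes (per the linprog documentation: 0 continuous, 1 integer, 2 semi-continuous, 3 semi-integer), a minimal sketch of the semi-continuous case, assuming method='highs':

import numpy as np
from scipy.optimize import linprog

c = np.array([1.0, -1.0])          # push x0 down, pull x1 up
bounds = [(0.5, 1.5), (0.5, 1.5)]
res = linprog(c, bounds=bounds, integrality=[2, 2], method="highs")
assert res.status == 0
np.testing.assert_allclose(res.x, [0.0, 1.5])  # x0 drops to 0, below its lb

A semi-continuous variable may be exactly 0 or anywhere within its bounds, but never in between, which is exactly what the test below asserts.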
+ + c = np.array([1., 1., -1, -1]) + bounds = np.array([[0.5, 1.5], [0.5, 1.5], [0.5, 1.5], [0.5, 1.5]]) + integrality = np.array([2, 3, 2, 3]) + + res = linprog(c, bounds=bounds, + integrality=integrality, method='highs') + + np.testing.assert_allclose(res.x, [0, 0, 1.5, 1]) + assert res.status == 0 + + +########################### +# Autoscale-Specific Tests# +########################### + + +@pytest.mark.filterwarnings("ignore::DeprecationWarning") +class AutoscaleTests: + options = {"autoscale": True} + + test_bug_6139 = LinprogCommonTests.test_bug_6139 + test_bug_6690 = LinprogCommonTests.test_bug_6690 + test_bug_7237 = LinprogCommonTests.test_bug_7237 + + +class TestAutoscaleIP(AutoscaleTests): + method = "interior-point" + + def test_bug_6139(self): + self.options['tol'] = 1e-10 + return AutoscaleTests.test_bug_6139(self) + + +class TestAutoscaleSimplex(AutoscaleTests): + method = "simplex" + + +class TestAutoscaleRS(AutoscaleTests): + method = "revised simplex" + + def test_nontrivial_problem_with_guess(self): + c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem() + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options, x0=x_star) + _assert_success(res, desired_fun=f_star, desired_x=x_star) + assert_equal(res.nit, 0) + + def test_nontrivial_problem_with_bad_guess(self): + c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem() + bad_guess = [1, 2, 3, .5] + res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options, x0=bad_guess) + assert_equal(res.status, 6) + + +########################### +# Redundancy Removal Tests# +########################### + + +@pytest.mark.filterwarnings("ignore::DeprecationWarning") +class RRTests: + method = "interior-point" + LCT = LinprogCommonTests + # these are a few of the existing tests that have redundancy + test_RR_infeasibility = LCT.test_remove_redundancy_infeasibility + test_bug_10349 = LCT.test_bug_10349 + test_bug_7044 = LCT.test_bug_7044 + test_NFLC = LCT.test_network_flow_limited_capacity + test_enzo_example_b = LCT.test_enzo_example_b + + +class TestRRSVD(RRTests): + options = {"rr_method": "SVD"} + + +class TestRRPivot(RRTests): + options = {"rr_method": "pivot"} + + +class TestRRID(RRTests): + options = {"rr_method": "ID"} diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_lsq_common.py b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_lsq_common.py new file mode 100644 index 0000000000000000000000000000000000000000..650deedce88b6babd8a3f2b62a5839f1a6cb966c --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_lsq_common.py @@ -0,0 +1,297 @@ +from numpy.testing import assert_, assert_allclose, assert_equal +from pytest import raises as assert_raises +import numpy as np + +from scipy.optimize._lsq.common import ( + step_size_to_bound, find_active_constraints, make_strictly_feasible, + CL_scaling_vector, intersect_trust_region, build_quadratic_1d, + minimize_quadratic_1d, evaluate_quadratic, reflective_transformation, + left_multiplied_operator, right_multiplied_operator) + + +class TestBounds: + def test_step_size_to_bounds(self): + lb = np.array([-1.0, 2.5, 10.0]) + ub = np.array([1.0, 5.0, 100.0]) + x = np.array([0.0, 2.5, 12.0]) + + s = np.array([0.1, 0.0, 0.0]) + step, hits = step_size_to_bound(x, s, lb, ub) + assert_equal(step, 10) + assert_equal(hits, [1, 0, 0]) + + s = np.array([0.01, 0.05, -1.0]) + step, hits = step_size_to_bound(x, s, lb, ub) + assert_equal(step, 2) + 
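The semantics asserted throughout this test can be re-derived with plain NumPy. The following sketch is an illustrative re-implementation (not the library's code) of what step_size_to_bound returns for the case just checked: the largest t >= 0 keeping x + t*s inside [lb, ub], plus per-component hit indicators (-1 lower bound, +1 upper bound, 0 neither):

import numpy as np

lb = np.array([-1.0, 2.5, 10.0])
ub = np.array([1.0, 5.0, 100.0])
x = np.array([0.0, 2.5, 12.0])
s = np.array([0.01, 0.05, -1.0])

# Per-component limit: distance to ub when moving up, to lb when moving down.
with np.errstate(divide="ignore", invalid="ignore"):
    limits = np.where(s > 0, (ub - x) / s, (lb - x) / s)
limits[s == 0] = np.inf
t = limits.min()
hit = np.where(limits == t, np.sign(s).astype(int), 0)
print(t, hit)  # 2.0 [ 0  0 -1], matching the surrounding assertions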
assert_equal(hits, [0, 0, -1]) + + s = np.array([10.0, -0.0001, 100.0]) + step, hits = step_size_to_bound(x, s, lb, ub) + assert_equal(step, np.array(-0)) + assert_equal(hits, [0, -1, 0]) + + s = np.array([1.0, 0.5, -2.0]) + step, hits = step_size_to_bound(x, s, lb, ub) + assert_equal(step, 1.0) + assert_equal(hits, [1, 0, -1]) + + s = np.zeros(3) + step, hits = step_size_to_bound(x, s, lb, ub) + assert_equal(step, np.inf) + assert_equal(hits, [0, 0, 0]) + + def test_find_active_constraints(self): + lb = np.array([0.0, -10.0, 1.0]) + ub = np.array([1.0, 0.0, 100.0]) + + x = np.array([0.5, -5.0, 2.0]) + active = find_active_constraints(x, lb, ub) + assert_equal(active, [0, 0, 0]) + + x = np.array([0.0, 0.0, 10.0]) + active = find_active_constraints(x, lb, ub) + assert_equal(active, [-1, 1, 0]) + + active = find_active_constraints(x, lb, ub, rtol=0) + assert_equal(active, [-1, 1, 0]) + + x = np.array([1e-9, -1e-8, 100 - 1e-9]) + active = find_active_constraints(x, lb, ub) + assert_equal(active, [0, 0, 1]) + + active = find_active_constraints(x, lb, ub, rtol=1.5e-9) + assert_equal(active, [-1, 0, 1]) + + lb = np.array([1.0, -np.inf, -np.inf]) + ub = np.array([np.inf, 10.0, np.inf]) + + x = np.ones(3) + active = find_active_constraints(x, lb, ub) + assert_equal(active, [-1, 0, 0]) + + # Handles out-of-bound cases. + x = np.array([0.0, 11.0, 0.0]) + active = find_active_constraints(x, lb, ub) + assert_equal(active, [-1, 1, 0]) + + active = find_active_constraints(x, lb, ub, rtol=0) + assert_equal(active, [-1, 1, 0]) + + def test_make_strictly_feasible(self): + lb = np.array([-0.5, -0.8, 2.0]) + ub = np.array([0.8, 1.0, 3.0]) + + x = np.array([-0.5, 0.0, 2 + 1e-10]) + + x_new = make_strictly_feasible(x, lb, ub, rstep=0) + assert_(x_new[0] > -0.5) + assert_equal(x_new[1:], x[1:]) + + x_new = make_strictly_feasible(x, lb, ub, rstep=1e-4) + assert_equal(x_new, [-0.5 + 1e-4, 0.0, 2 * (1 + 1e-4)]) + + x = np.array([-0.5, -1, 3.1]) + x_new = make_strictly_feasible(x, lb, ub) + assert_(np.all((x_new >= lb) & (x_new <= ub))) + + x_new = make_strictly_feasible(x, lb, ub, rstep=0) + assert_(np.all((x_new >= lb) & (x_new <= ub))) + + lb = np.array([-1, 100.0]) + ub = np.array([1, 100.0 + 1e-10]) + x = np.array([0, 100.0]) + x_new = make_strictly_feasible(x, lb, ub, rstep=1e-8) + assert_equal(x_new, [0, 100.0 + 0.5e-10]) + + def test_scaling_vector(self): + lb = np.array([-np.inf, -5.0, 1.0, -np.inf]) + ub = np.array([1.0, np.inf, 10.0, np.inf]) + x = np.array([0.5, 2.0, 5.0, 0.0]) + g = np.array([1.0, 0.1, -10.0, 0.0]) + v, dv = CL_scaling_vector(x, g, lb, ub) + assert_equal(v, [1.0, 7.0, 5.0, 1.0]) + assert_equal(dv, [0.0, 1.0, -1.0, 0.0]) + + +class TestQuadraticFunction: + def setup_method(self): + self.J = np.array([ + [0.1, 0.2], + [-1.0, 1.0], + [0.5, 0.2]]) + self.g = np.array([0.8, -2.0]) + self.diag = np.array([1.0, 2.0]) + + def test_build_quadratic_1d(self): + s = np.zeros(2) + a, b = build_quadratic_1d(self.J, self.g, s) + assert_equal(a, 0) + assert_equal(b, 0) + + a, b = build_quadratic_1d(self.J, self.g, s, diag=self.diag) + assert_equal(a, 0) + assert_equal(b, 0) + + s = np.array([1.0, -1.0]) + a, b = build_quadratic_1d(self.J, self.g, s) + assert_equal(a, 2.05) + assert_equal(b, 2.8) + + a, b = build_quadratic_1d(self.J, self.g, s, diag=self.diag) + assert_equal(a, 3.55) + assert_equal(b, 2.8) + + s0 = np.array([0.5, 0.5]) + a, b, c = build_quadratic_1d(self.J, self.g, s, diag=self.diag, s0=s0) + assert_equal(a, 3.55) + assert_allclose(b, 2.39) + assert_allclose(c, -0.1525) + + def 
test_minimize_quadratic_1d(self): + a = 5 + b = -1 + + t, y = minimize_quadratic_1d(a, b, 1, 2) + assert_equal(t, 1) + assert_allclose(y, a * t**2 + b * t, rtol=1e-15) + + t, y = minimize_quadratic_1d(a, b, -2, -1) + assert_equal(t, -1) + assert_allclose(y, a * t**2 + b * t, rtol=1e-15) + + t, y = minimize_quadratic_1d(a, b, -1, 1) + assert_equal(t, 0.1) + assert_allclose(y, a * t**2 + b * t, rtol=1e-15) + + c = 10 + t, y = minimize_quadratic_1d(a, b, -1, 1, c=c) + assert_equal(t, 0.1) + assert_allclose(y, a * t**2 + b * t + c, rtol=1e-15) + + t, y = minimize_quadratic_1d(a, b, -np.inf, np.inf, c=c) + assert_equal(t, 0.1) + assert_allclose(y, a * t ** 2 + b * t + c, rtol=1e-15) + + t, y = minimize_quadratic_1d(a, b, 0, np.inf, c=c) + assert_equal(t, 0.1) + assert_allclose(y, a * t ** 2 + b * t + c, rtol=1e-15) + + t, y = minimize_quadratic_1d(a, b, -np.inf, 0, c=c) + assert_equal(t, 0) + assert_allclose(y, a * t ** 2 + b * t + c, rtol=1e-15) + + a = -1 + b = 0.2 + t, y = minimize_quadratic_1d(a, b, -np.inf, np.inf) + assert_equal(y, -np.inf) + + t, y = minimize_quadratic_1d(a, b, 0, np.inf) + assert_equal(t, np.inf) + assert_equal(y, -np.inf) + + t, y = minimize_quadratic_1d(a, b, -np.inf, 0) + assert_equal(t, -np.inf) + assert_equal(y, -np.inf) + + def test_evaluate_quadratic(self): + s = np.array([1.0, -1.0]) + + value = evaluate_quadratic(self.J, self.g, s) + assert_equal(value, 4.85) + + value = evaluate_quadratic(self.J, self.g, s, diag=self.diag) + assert_equal(value, 6.35) + + s = np.array([[1.0, -1.0], + [1.0, 1.0], + [0.0, 0.0]]) + + values = evaluate_quadratic(self.J, self.g, s) + assert_allclose(values, [4.85, -0.91, 0.0]) + + values = evaluate_quadratic(self.J, self.g, s, diag=self.diag) + assert_allclose(values, [6.35, 0.59, 0.0]) + + +class TestTrustRegion: + def test_intersect(self): + Delta = 1.0 + + x = np.zeros(3) + s = np.array([1.0, 0.0, 0.0]) + t_neg, t_pos = intersect_trust_region(x, s, Delta) + assert_equal(t_neg, -1) + assert_equal(t_pos, 1) + + s = np.array([-1.0, 1.0, -1.0]) + t_neg, t_pos = intersect_trust_region(x, s, Delta) + assert_allclose(t_neg, -3**-0.5) + assert_allclose(t_pos, 3**-0.5) + + x = np.array([0.5, -0.5, 0]) + s = np.array([0, 0, 1.0]) + t_neg, t_pos = intersect_trust_region(x, s, Delta) + assert_allclose(t_neg, -2**-0.5) + assert_allclose(t_pos, 2**-0.5) + + x = np.ones(3) + assert_raises(ValueError, intersect_trust_region, x, s, Delta) + + x = np.zeros(3) + s = np.zeros(3) + assert_raises(ValueError, intersect_trust_region, x, s, Delta) + + +def test_reflective_transformation(): + lb = np.array([-1, -2], dtype=float) + ub = np.array([5, 3], dtype=float) + + y = np.array([0, 0]) + x, g = reflective_transformation(y, lb, ub) + assert_equal(x, y) + assert_equal(g, np.ones(2)) + + y = np.array([-4, 4], dtype=float) + + x, g = reflective_transformation(y, lb, np.array([np.inf, np.inf])) + assert_equal(x, [2, 4]) + assert_equal(g, [-1, 1]) + + x, g = reflective_transformation(y, np.array([-np.inf, -np.inf]), ub) + assert_equal(x, [-4, 2]) + assert_equal(g, [1, -1]) + + x, g = reflective_transformation(y, lb, ub) + assert_equal(x, [2, 2]) + assert_equal(g, [-1, -1]) + + lb = np.array([-np.inf, -2]) + ub = np.array([5, np.inf]) + y = np.array([10, 10], dtype=float) + x, g = reflective_transformation(y, lb, ub) + assert_equal(x, [0, 10]) + assert_equal(g, [-1, 1]) + + +def test_linear_operators(): + A = np.arange(6).reshape((3, 2)) + + d_left = np.array([-1, 2, 5]) + DA = np.diag(d_left).dot(A) + J_left = left_multiplied_operator(A, d_left) + + d_right 
= np.array([5, 10]) + AD = A.dot(np.diag(d_right)) + J_right = right_multiplied_operator(A, d_right) + + x = np.array([-2, 3]) + X = -2 * np.arange(2, 8).reshape((2, 3)) + xt = np.array([0, -2, 15]) + + assert_allclose(DA.dot(x), J_left.matvec(x)) + assert_allclose(DA.dot(X), J_left.matmat(X)) + assert_allclose(DA.T.dot(xt), J_left.rmatvec(xt)) + + assert_allclose(AD.dot(x), J_right.matvec(x)) + assert_allclose(AD.dot(X), J_right.matmat(X)) + assert_allclose(AD.T.dot(xt), J_right.rmatvec(xt)) diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_lsq_linear.py b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_lsq_linear.py new file mode 100644 index 0000000000000000000000000000000000000000..348f4fc1350c0c92f3652a594c61818c19e2cde2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_lsq_linear.py @@ -0,0 +1,284 @@ +import pytest + +import numpy as np +from numpy.linalg import lstsq +from numpy.testing import assert_allclose, assert_equal, assert_ + +from scipy.sparse import rand, coo_matrix +from scipy.sparse.linalg import aslinearoperator +from scipy.optimize import lsq_linear +from scipy.optimize._minimize import Bounds + + +A = np.array([ + [0.171, -0.057], + [-0.049, -0.248], + [-0.166, 0.054], +]) +b = np.array([0.074, 1.014, -0.383]) + + +class BaseMixin: + def setup_method(self): + self.rnd = np.random.RandomState(0) + + def test_dense_no_bounds(self): + for lsq_solver in self.lsq_solvers: + res = lsq_linear(A, b, method=self.method, lsq_solver=lsq_solver) + assert_allclose(res.x, lstsq(A, b, rcond=-1)[0]) + assert_allclose(res.x, res.unbounded_sol[0]) + + def test_dense_bounds(self): + # Solutions for comparison are taken from MATLAB. + lb = np.array([-1, -10]) + ub = np.array([1, 0]) + unbounded_sol = lstsq(A, b, rcond=-1)[0] + for lsq_solver in self.lsq_solvers: + res = lsq_linear(A, b, (lb, ub), method=self.method, + lsq_solver=lsq_solver) + assert_allclose(res.x, lstsq(A, b, rcond=-1)[0]) + assert_allclose(res.unbounded_sol[0], unbounded_sol) + + lb = np.array([0.0, -np.inf]) + for lsq_solver in self.lsq_solvers: + res = lsq_linear(A, b, (lb, np.inf), method=self.method, + lsq_solver=lsq_solver) + assert_allclose(res.x, np.array([0.0, -4.084174437334673]), + atol=1e-6) + assert_allclose(res.unbounded_sol[0], unbounded_sol) + + lb = np.array([-1, 0]) + for lsq_solver in self.lsq_solvers: + res = lsq_linear(A, b, (lb, np.inf), method=self.method, + lsq_solver=lsq_solver) + assert_allclose(res.x, np.array([0.448427311733504, 0]), + atol=1e-15) + assert_allclose(res.unbounded_sol[0], unbounded_sol) + + ub = np.array([np.inf, -5]) + for lsq_solver in self.lsq_solvers: + res = lsq_linear(A, b, (-np.inf, ub), method=self.method, + lsq_solver=lsq_solver) + assert_allclose(res.x, np.array([-0.105560998682388, -5])) + assert_allclose(res.unbounded_sol[0], unbounded_sol) + + ub = np.array([-1, np.inf]) + for lsq_solver in self.lsq_solvers: + res = lsq_linear(A, b, (-np.inf, ub), method=self.method, + lsq_solver=lsq_solver) + assert_allclose(res.x, np.array([-1, -4.181102129483254])) + assert_allclose(res.unbounded_sol[0], unbounded_sol) + + lb = np.array([0, -4]) + ub = np.array([1, 0]) + for lsq_solver in self.lsq_solvers: + res = lsq_linear(A, b, (lb, ub), method=self.method, + lsq_solver=lsq_solver) + assert_allclose(res.x, np.array([0.005236663400791, -4])) + assert_allclose(res.unbounded_sol[0], unbounded_sol) + + def test_bounds_variants(self): + x = np.array([1, 3]) + A = self.rnd.uniform(size=(2, 2)) + b = A@x + lb = 
np.array([1, 1]) + ub = np.array([2, 2]) + bounds_old = (lb, ub) + bounds_new = Bounds(lb, ub) + res_old = lsq_linear(A, b, bounds_old) + res_new = lsq_linear(A, b, bounds_new) + assert not np.allclose(res_new.x, res_new.unbounded_sol[0]) + assert_allclose(res_old.x, res_new.x) + + def test_np_matrix(self): + # gh-10711 + with np.testing.suppress_warnings() as sup: + sup.filter(PendingDeprecationWarning) + A = np.matrix([[20, -4, 0, 2, 3], [10, -2, 1, 0, -1]]) + k = np.array([20, 15]) + lsq_linear(A, k) + + def test_dense_rank_deficient(self): + A = np.array([[-0.307, -0.184]]) + b = np.array([0.773]) + lb = [-0.1, -0.1] + ub = [0.1, 0.1] + for lsq_solver in self.lsq_solvers: + res = lsq_linear(A, b, (lb, ub), method=self.method, + lsq_solver=lsq_solver) + assert_allclose(res.x, [-0.1, -0.1]) + assert_allclose(res.unbounded_sol[0], lstsq(A, b, rcond=-1)[0]) + + A = np.array([ + [0.334, 0.668], + [-0.516, -1.032], + [0.192, 0.384], + ]) + b = np.array([-1.436, 0.135, 0.909]) + lb = [0, -1] + ub = [1, -0.5] + for lsq_solver in self.lsq_solvers: + res = lsq_linear(A, b, (lb, ub), method=self.method, + lsq_solver=lsq_solver) + assert_allclose(res.optimality, 0, atol=1e-11) + assert_allclose(res.unbounded_sol[0], lstsq(A, b, rcond=-1)[0]) + + def test_full_result(self): + lb = np.array([0, -4]) + ub = np.array([1, 0]) + res = lsq_linear(A, b, (lb, ub), method=self.method) + + assert_allclose(res.x, [0.005236663400791, -4]) + assert_allclose(res.unbounded_sol[0], lstsq(A, b, rcond=-1)[0]) + + r = A.dot(res.x) - b + assert_allclose(res.cost, 0.5 * np.dot(r, r)) + assert_allclose(res.fun, r) + + assert_allclose(res.optimality, 0.0, atol=1e-12) + assert_equal(res.active_mask, [0, -1]) + assert_(res.nit < 15) + assert_(res.status == 1 or res.status == 3) + assert_(isinstance(res.message, str)) + assert_(res.success) + + # This is a test for issue #9982. 
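Stepping back from the regression cases: the contract these tests exercise is that lsq_linear minimizes ||A x - b||**2 over a box, and that, in SciPy versions exposing it, res.unbounded_sol carries the unconstrained lstsq solution alongside. A minimal sketch with a made-up 3x2 system:

import numpy as np
from numpy.linalg import lstsq
from scipy.optimize import lsq_linear

A = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
b = np.array([2.0, -3.0, 0.5])

free = lsq_linear(A, b)  # default bounds are (-inf, inf)
np.testing.assert_allclose(free.x, lstsq(A, b, rcond=None)[0])

boxed = lsq_linear(A, b, bounds=([0, -1], [1, 1]))
assert np.all(boxed.x >= np.array([0, -1]) - 1e-10)
assert np.all(boxed.x <= np.array([1, 1]) + 1e-10)
np.testing.assert_allclose(boxed.unbounded_sol[0], free.x)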
+ def test_almost_singular(self): + A = np.array( + [[0.8854232310355122, 0.0365312146937765, 0.0365312146836789], + [0.3742460132129041, 0.0130523214078376, 0.0130523214077873], + [0.9680633871281361, 0.0319366128718639, 0.0319366128718388]]) + + b = np.array( + [0.0055029366538097, 0.0026677442422208, 0.0066612514782381]) + + result = lsq_linear(A, b, method=self.method) + assert_(result.cost < 1.1e-8) + + @pytest.mark.xslow + def test_large_rank_deficient(self): + np.random.seed(0) + n, m = np.sort(np.random.randint(2, 1000, size=2)) + m *= 2 # make m >> n + A = 1.0 * np.random.randint(-99, 99, size=[m, n]) + b = 1.0 * np.random.randint(-99, 99, size=[m]) + bounds = 1.0 * np.sort(np.random.randint(-99, 99, size=(2, n)), axis=0) + bounds[1, :] += 1.0 # ensure ub > lb + + # Make the A matrix strongly rank deficient by replicating some columns + w = np.random.choice(n, n) # Select random columns with duplicates + A = A[:, w] + + x_bvls = lsq_linear(A, b, bounds=bounds, method='bvls').x + x_trf = lsq_linear(A, b, bounds=bounds, method='trf').x + + cost_bvls = np.sum((A @ x_bvls - b)**2) + cost_trf = np.sum((A @ x_trf - b)**2) + + assert_(abs(cost_bvls - cost_trf) < cost_trf*1e-10) + + def test_convergence_small_matrix(self): + A = np.array([[49.0, 41.0, -32.0], + [-19.0, -32.0, -8.0], + [-13.0, 10.0, 69.0]]) + b = np.array([-41.0, -90.0, 47.0]) + bounds = np.array([[31.0, -44.0, 26.0], + [54.0, -32.0, 28.0]]) + + x_bvls = lsq_linear(A, b, bounds=bounds, method='bvls').x + x_trf = lsq_linear(A, b, bounds=bounds, method='trf').x + + cost_bvls = np.sum((A @ x_bvls - b)**2) + cost_trf = np.sum((A @ x_trf - b)**2) + + assert_(abs(cost_bvls - cost_trf) < cost_trf*1e-10) + + +class SparseMixin: + def test_sparse_and_LinearOperator(self): + m = 5000 + n = 1000 + A = rand(m, n, random_state=0) + b = self.rnd.randn(m) + res = lsq_linear(A, b) + assert_allclose(res.optimality, 0, atol=1e-6) + + A = aslinearoperator(A) + res = lsq_linear(A, b) + assert_allclose(res.optimality, 0, atol=1e-6) + + def test_sparse_bounds(self): + m = 5000 + n = 1000 + A = rand(m, n, random_state=0) + b = self.rnd.randn(m) + lb = self.rnd.randn(n) + ub = lb + 1 + res = lsq_linear(A, b, (lb, ub)) + assert_allclose(res.optimality, 0.0, atol=1e-6) + + res = lsq_linear(A, b, (lb, ub), lsmr_tol=1e-13, + lsmr_maxiter=1500) + assert_allclose(res.optimality, 0.0, atol=1e-6) + + res = lsq_linear(A, b, (lb, ub), lsmr_tol='auto') + assert_allclose(res.optimality, 0.0, atol=1e-6) + + def test_sparse_ill_conditioned(self): + # Sparse matrix with condition number of ~4 million + data = np.array([1., 1., 1., 1.
+ 1e-6, 1.]) + row = np.array([0, 0, 1, 2, 2]) + col = np.array([0, 2, 1, 0, 2]) + A = coo_matrix((data, (row, col)), shape=(3, 3)) + + # Get the exact solution + exact_sol = lsq_linear(A.toarray(), b, lsq_solver='exact') + + # Default lsmr arguments should not fully converge the solution + default_lsmr_sol = lsq_linear(A, b, lsq_solver='lsmr') + with pytest.raises(AssertionError, match=""): + assert_allclose(exact_sol.x, default_lsmr_sol.x) + + # By increasing the maximum lsmr iters, it will converge + conv_lsmr = lsq_linear(A, b, lsq_solver='lsmr', lsmr_maxiter=10) + assert_allclose(exact_sol.x, conv_lsmr.x) + + +class TestTRF(BaseMixin, SparseMixin): + method = 'trf' + lsq_solvers = ['exact', 'lsmr'] + + +class TestBVLS(BaseMixin): + method = 'bvls' + lsq_solvers = ['exact'] + + +class TestErrorChecking: + def test_option_lsmr_tol(self): + # Should work with a positive float, string equal to 'auto', or None + _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_tol=1e-2) + _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_tol='auto') + _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_tol=None) + + # Should raise error with negative float, strings + # other than 'auto', and integers + err_message = "`lsmr_tol` must be None, 'auto', or positive float." + with pytest.raises(ValueError, match=err_message): + _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_tol=-0.1) + with pytest.raises(ValueError, match=err_message): + _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_tol='foo') + with pytest.raises(ValueError, match=err_message): + _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_tol=1) + + def test_option_lsmr_maxiter(self): + # Should work with positive integers or None + _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_maxiter=1) + _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_maxiter=None) + + # Should raise error with 0 or negative max iter + err_message = "`lsmr_maxiter` must be None or positive integer." + with pytest.raises(ValueError, match=err_message): + _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_maxiter=0) + with pytest.raises(ValueError, match=err_message): + _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_maxiter=-1) diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_milp.py b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_milp.py new file mode 100644 index 0000000000000000000000000000000000000000..0970a15a8bccc3deb7bc67f7b62763947c1b237c --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_milp.py @@ -0,0 +1,385 @@ +""" +Unit test for Mixed Integer Linear Programming +""" +import re + +import numpy as np +from numpy.testing import assert_allclose, assert_array_equal +import pytest + +from .test_linprog import magic_square +from scipy.optimize import milp, Bounds, LinearConstraint +from scipy import sparse + + +def test_milp_iv(): + + message = "`c` must be a dense array" + with pytest.raises(ValueError, match=message): + milp(sparse.coo_array([0, 0])) + + message = "`c` must be a one-dimensional array of finite numbers with" + with pytest.raises(ValueError, match=message): + milp(np.zeros((3, 4))) + with pytest.raises(ValueError, match=message): + milp([]) + with pytest.raises(ValueError, match=message): + milp(None) + + message = "`bounds` must be convertible into an instance of..." 
+ with pytest.raises(ValueError, match=message): + milp(1, bounds=10) + + message = "`constraints` (or each element within `constraints`) must be" + with pytest.raises(ValueError, match=re.escape(message)): + milp(1, constraints=10) + with pytest.raises(ValueError, match=re.escape(message)): + milp(np.zeros(3), constraints=([[1, 2, 3]], [2, 3], [2, 3])) + with pytest.raises(ValueError, match=re.escape(message)): + milp(np.zeros(2), constraints=([[1, 2]], [2], sparse.coo_array([2]))) + + message = "The shape of `A` must be (len(b_l), len(c))." + with pytest.raises(ValueError, match=re.escape(message)): + milp(np.zeros(3), constraints=([[1, 2]], [2], [2])) + + message = "`integrality` must be a dense array" + with pytest.raises(ValueError, match=message): + milp([1, 2], integrality=sparse.coo_array([1, 2])) + + message = ("`integrality` must contain integers 0-3 and be broadcastable " + "to `c.shape`.") + with pytest.raises(ValueError, match=message): + milp([1, 2, 3], integrality=[1, 2]) + with pytest.raises(ValueError, match=message): + milp([1, 2, 3], integrality=[1, 5, 3]) + + message = "Lower and upper bounds must be dense arrays." + with pytest.raises(ValueError, match=message): + milp([1, 2, 3], bounds=([1, 2], sparse.coo_array([3, 4]))) + + message = "`lb`, `ub`, and `keep_feasible` must be broadcastable." + with pytest.raises(ValueError, match=message): + milp([1, 2, 3], bounds=([1, 2], [3, 4, 5])) + with pytest.raises(ValueError, match=message): + milp([1, 2, 3], bounds=([1, 2, 3], [4, 5])) + + message = "`bounds.lb` and `bounds.ub` must contain reals and..." + with pytest.raises(ValueError, match=message): + milp([1, 2, 3], bounds=([1, 2], [3, 4])) + with pytest.raises(ValueError, match=message): + milp([1, 2, 3], bounds=([1, 2, 3], ["3+4", 4, 5])) + with pytest.raises(ValueError, match=message): + milp([1, 2, 3], bounds=([1, 2, 3], [set(), 4, 5])) + + +@pytest.mark.xfail(run=False, + reason="Needs to be fixed in `_highs_wrapper`") +def test_milp_options(capsys): + # run=False now because of gh-16347 + message = "Unrecognized options detected: {'ekki'}..." + options = {'ekki': True} + with pytest.warns(RuntimeWarning, match=message): + milp(1, options=options) + + A, b, c, numbers, M = magic_square(3) + options = {"disp": True, "presolve": False, "time_limit": 0.05} + res = milp(c=c, constraints=(A, b, b), bounds=(0, 1), integrality=1, + options=options) + + captured = capsys.readouterr() + assert "Presolve is switched off" in captured.out + assert "Time Limit Reached" in captured.out + assert not res.success + + +def test_result(): + A, b, c, numbers, M = magic_square(3) + res = milp(c=c, constraints=(A, b, b), bounds=(0, 1), integrality=1) + assert res.status == 0 + assert res.success + msg = "Optimization terminated successfully. (HiGHS Status 7:" + assert res.message.startswith(msg) + assert isinstance(res.x, np.ndarray) + assert isinstance(res.fun, float) + assert isinstance(res.mip_node_count, int) + assert isinstance(res.mip_dual_bound, float) + assert isinstance(res.mip_gap, float) + + A, b, c, numbers, M = magic_square(6) + res = milp(c=c*0, constraints=(A, b, b), bounds=(0, 1), integrality=1, + options={'time_limit': 0.05}) + assert res.status == 1 + assert not res.success + msg = "Time limit reached. (HiGHS Status 13:" + assert res.message.startswith(msg) + assert (res.fun is res.mip_dual_bound is res.mip_gap + is res.mip_node_count is res.x is None) + + res = milp(1, bounds=(1, -1)) + assert res.status == 2 + assert not res.success + msg = "The problem is infeasible. 
(HiGHS Status 8:" + assert res.message.startswith(msg) + assert (res.fun is res.mip_dual_bound is res.mip_gap + is res.mip_node_count is res.x is None) + + res = milp(-1) + assert res.status == 3 + assert not res.success + msg = "The problem is unbounded. (HiGHS Status 10:" + assert res.message.startswith(msg) + assert (res.fun is res.mip_dual_bound is res.mip_gap + is res.mip_node_count is res.x is None) + + +def test_milp_optional_args(): + # check that arguments other than `c` are indeed optional + res = milp(1) + assert res.fun == 0 + assert_array_equal(res.x, [0]) + + +def test_milp_1(): + # solve magic square problem + n = 3 + A, b, c, numbers, M = magic_square(n) + A = sparse.csc_array(A) # confirm that sparse arrays are accepted + res = milp(c=c*0, constraints=(A, b, b), bounds=(0, 1), integrality=1) + + # check that solution is a magic square + x = np.round(res.x) + s = (numbers.flatten() * x).reshape(n**2, n, n) + square = np.sum(s, axis=0) + np.testing.assert_allclose(square.sum(axis=0), M) + np.testing.assert_allclose(square.sum(axis=1), M) + np.testing.assert_allclose(np.diag(square).sum(), M) + np.testing.assert_allclose(np.diag(square[:, ::-1]).sum(), M) + + +def test_milp_2(): + # solve MIP with inequality constraints and all integer constraints + # source: slide 5, + # https://www.cs.upc.edu/~erodri/webpage/cps/theory/lp/milp/slides.pdf + # also check that `milp` accepts all valid ways of specifying constraints + c = -np.ones(2) + A = [[-2, 2], [-8, 10]] + b_l = [1, -np.inf] + b_u = [np.inf, 13] + linear_constraint = LinearConstraint(A, b_l, b_u) + + # solve original problem + res1 = milp(c=c, constraints=(A, b_l, b_u), integrality=True) + res2 = milp(c=c, constraints=linear_constraint, integrality=True) + res3 = milp(c=c, constraints=[(A, b_l, b_u)], integrality=True) + res4 = milp(c=c, constraints=[linear_constraint], integrality=True) + res5 = milp(c=c, integrality=True, + constraints=[(A[:1], b_l[:1], b_u[:1]), + (A[1:], b_l[1:], b_u[1:])]) + res6 = milp(c=c, integrality=True, + constraints=[LinearConstraint(A[:1], b_l[:1], b_u[:1]), + LinearConstraint(A[1:], b_l[1:], b_u[1:])]) + res7 = milp(c=c, integrality=True, + constraints=[(A[:1], b_l[:1], b_u[:1]), + LinearConstraint(A[1:], b_l[1:], b_u[1:])]) + xs = np.array([res1.x, res2.x, res3.x, res4.x, res5.x, res6.x, res7.x]) + funs = np.array([res1.fun, res2.fun, res3.fun, + res4.fun, res5.fun, res6.fun, res7.fun]) + np.testing.assert_allclose(xs, np.broadcast_to([1, 2], xs.shape)) + np.testing.assert_allclose(funs, -3) + + # solve relaxed problem + res = milp(c=c, constraints=(A, b_l, b_u)) + np.testing.assert_allclose(res.x, [4, 4.5]) + np.testing.assert_allclose(res.fun, -8.5) + + +def test_milp_3(): + # solve MIP with inequality constraints and all integer constraints + # source: https://en.wikipedia.org/wiki/Integer_programming#Example + c = [0, -1] + A = [[-1, 1], [3, 2], [2, 3]] + b_u = [1, 12, 12] + b_l = np.full_like(b_u, -np.inf, dtype=np.float64) + constraints = LinearConstraint(A, b_l, b_u) + + integrality = np.ones_like(c) + + # solve original problem + res = milp(c=c, constraints=constraints, integrality=integrality) + assert_allclose(res.fun, -2) + # two optimal solutions possible, just need one of them + assert np.allclose(res.x, [1, 2]) or np.allclose(res.x, [2, 2]) + + # solve relaxed problem + res = milp(c=c, constraints=constraints) + assert_allclose(res.fun, -2.8) + assert_allclose(res.x, [1.8, 2.8]) + + +def test_milp_4(): + # solve MIP with inequality constraints and only one integer constraint + 
# source: https://www.mathworks.com/help/optim/ug/intlinprog.html + c = [8, 1] + integrality = [0, 1] + A = [[1, 2], [-4, -1], [2, 1]] + b_l = [-14, -np.inf, -np.inf] + b_u = [np.inf, -33, 20] + constraints = LinearConstraint(A, b_l, b_u) + bounds = Bounds(-np.inf, np.inf) + + res = milp(c, integrality=integrality, bounds=bounds, + constraints=constraints) + assert_allclose(res.fun, 59) + assert_allclose(res.x, [6.5, 7]) + + +def test_milp_5(): + # solve MIP with inequality and equality constraints + # source: https://www.mathworks.com/help/optim/ug/intlinprog.html + c = [-3, -2, -1] + integrality = [0, 0, 1] + lb = [0, 0, 0] + ub = [np.inf, np.inf, 1] + bounds = Bounds(lb, ub) + A = [[1, 1, 1], [4, 2, 1]] + b_l = [-np.inf, 12] + b_u = [7, 12] + constraints = LinearConstraint(A, b_l, b_u) + + res = milp(c, integrality=integrality, bounds=bounds, + constraints=constraints) + # there are multiple solutions + assert_allclose(res.fun, -12) + + +@pytest.mark.slow +@pytest.mark.timeout(120) # prerelease_deps_coverage_64bit_blas job +def test_milp_6(): + # solve a larger MIP with only equality constraints + # source: https://www.mathworks.com/help/optim/ug/intlinprog.html + integrality = 1 + A_eq = np.array([[22, 13, 26, 33, 21, 3, 14, 26], + [39, 16, 22, 28, 26, 30, 23, 24], + [18, 14, 29, 27, 30, 38, 26, 26], + [41, 26, 28, 36, 18, 38, 16, 26]]) + b_eq = np.array([7872, 10466, 11322, 12058]) + c = np.array([2, 10, 13, 17, 7, 5, 7, 3]) + + res = milp(c=c, constraints=(A_eq, b_eq, b_eq), integrality=integrality) + + np.testing.assert_allclose(res.fun, 1854) + + +def test_infeasible_prob_16609(): + # Ensure presolve does not mark trivially infeasible problems + # as Optimal -- see gh-16609 + c = [1.0, 0.0] + integrality = [0, 1] + + lb = [0, -np.inf] + ub = [np.inf, np.inf] + bounds = Bounds(lb, ub) + + A_eq = [[0.0, 1.0]] + b_eq = [0.5] + constraints = LinearConstraint(A_eq, b_eq, b_eq) + + res = milp(c, integrality=integrality, bounds=bounds, + constraints=constraints) + np.testing.assert_equal(res.status, 2) + + +_msg_time = "Time limit reached. (HiGHS Status 13:" +_msg_iter = "Iteration limit reached. (HiGHS Status 14:" + + +@pytest.mark.skipif(np.intp(0).itemsize < 8, + reason="Unhandled 32-bit GCC FP bug") +@pytest.mark.slow +@pytest.mark.parametrize(["options", "msg"], [({"time_limit": 0.1}, _msg_time), + ({"node_limit": 1}, _msg_iter)]) +def test_milp_timeout_16545(options, msg): + # Ensure solution is not thrown away if MILP solver times out + # -- see gh-16545 + rng = np.random.default_rng(5123833489170494244) + A = rng.integers(0, 5, size=(100, 100)) + b_lb = np.full(100, fill_value=-np.inf) + b_ub = np.full(100, fill_value=25) + constraints = LinearConstraint(A, b_lb, b_ub) + variable_lb = np.zeros(100) + variable_ub = np.ones(100) + variable_bounds = Bounds(variable_lb, variable_ub) + integrality = np.ones(100) + c_vector = -np.ones(100) + res = milp( + c_vector, + integrality=integrality, + bounds=variable_bounds, + constraints=constraints, + options=options, + ) + + assert res.message.startswith(msg) + assert res["x"] is not None + + # ensure solution is feasible + x = res["x"] + tol = 1e-8 # sometimes needed due to finite numerical precision + assert np.all(b_lb - tol <= A @ x) and np.all(A @ x <= b_ub + tol) + assert np.all(variable_lb - tol <= x) and np.all(x <= variable_ub + tol) + assert np.allclose(x, np.round(x)) + + +def test_three_constraints_16878(): + # `milp` failed when exactly three constraints were passed + # Ensure that this is no longer the case. 
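The two constraint spellings compared below are interchangeable: a LinearConstraint(A, lb, ub) instance and a bare (A, lb, ub) tuple both encode lb <= A @ x <= ub (the tuple form also appears in test_milp_2 above). A minimal standalone sketch of that equivalence on a one-row LP relaxation:

import numpy as np
from scipy.optimize import Bounds, LinearConstraint, milp

A = np.array([[1.0, 1.0]])
c = -np.ones(2)
box = Bounds(0, 1)
r1 = milp(c, constraints=LinearConstraint(A, -np.inf, 1.5), bounds=box)
r2 = milp(c, constraints=(A, -np.inf, 1.5), bounds=box)
assert np.isclose(r1.fun, r2.fun) and np.isclose(r1.fun, -1.5)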
+ rng = np.random.default_rng(5123833489170494244) + A = rng.integers(0, 5, size=(6, 6)) + bl = np.full(6, fill_value=-np.inf) + bu = np.full(6, fill_value=10) + constraints = [LinearConstraint(A[:2], bl[:2], bu[:2]), + LinearConstraint(A[2:4], bl[2:4], bu[2:4]), + LinearConstraint(A[4:], bl[4:], bu[4:])] + constraints2 = [(A[:2], bl[:2], bu[:2]), + (A[2:4], bl[2:4], bu[2:4]), + (A[4:], bl[4:], bu[4:])] + lb = np.zeros(6) + ub = np.ones(6) + variable_bounds = Bounds(lb, ub) + c = -np.ones(6) + res1 = milp(c, bounds=variable_bounds, constraints=constraints) + res2 = milp(c, bounds=variable_bounds, constraints=constraints2) + ref = milp(c, bounds=variable_bounds, constraints=(A, bl, bu)) + assert res1.success and res2.success + assert_allclose(res1.x, ref.x) + assert_allclose(res2.x, ref.x) + + +@pytest.mark.xslow +def test_mip_rel_gap_passdown(): + # Solve problem with decreasing mip_gap to make sure mip_rel_gap decreases + # Adapted from test_linprog::TestLinprogHiGHSMIP::test_mip_rel_gap_passdown + # MIP taken from test_mip_6 above + A_eq = np.array([[22, 13, 26, 33, 21, 3, 14, 26], + [39, 16, 22, 28, 26, 30, 23, 24], + [18, 14, 29, 27, 30, 38, 26, 26], + [41, 26, 28, 36, 18, 38, 16, 26]]) + b_eq = np.array([7872, 10466, 11322, 12058]) + c = np.array([2, 10, 13, 17, 7, 5, 7, 3]) + + mip_rel_gaps = [0.25, 0.01, 0.001] + sol_mip_gaps = [] + for mip_rel_gap in mip_rel_gaps: + res = milp(c=c, bounds=(0, np.inf), constraints=(A_eq, b_eq, b_eq), + integrality=True, options={"mip_rel_gap": mip_rel_gap}) + # assert that the solution actually has mip_gap lower than the + # required mip_rel_gap supplied + assert res.mip_gap <= mip_rel_gap + # check that `res.mip_gap` is as defined in the documentation + assert res.mip_gap == (res.fun - res.mip_dual_bound)/res.fun + sol_mip_gaps.append(res.mip_gap) + + # make sure that the mip_rel_gap parameter is actually doing something + # check that differences between solution gaps are declining + # monotonically with the mip_rel_gap parameter. 
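A worked number makes the stopping rule concrete (the incumbent value 1860 here is hypothetical; 1854 is the known optimum from test_mip_6). With the gap defined as (fun - mip_dual_bound) / fun, as asserted above:

fun, dual_bound = 1860.0, 1854.0    # hypothetical incumbent vs. lower bound
rel_gap = (fun - dual_bound) / fun  # ~0.0032
assert 0.001 < rel_gap < 0.01
# mip_rel_gap=0.01 would already accept this incumbent and stop early;
# mip_rel_gap=0.001 forces the search to continue toward the optimum.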
+ assert np.all(np.diff(sol_mip_gaps) < 0) diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_minimize_constrained.py b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_minimize_constrained.py new file mode 100644 index 0000000000000000000000000000000000000000..6dad4bad5a3965c6c414be4a1cd0cb097ccaff1c --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_minimize_constrained.py @@ -0,0 +1,808 @@ +import numpy as np +import pytest +from scipy.linalg import block_diag +from scipy.sparse import csc_matrix +from numpy.testing import (TestCase, assert_array_almost_equal, + assert_array_less, assert_, assert_allclose, + suppress_warnings) +from scipy.optimize import (NonlinearConstraint, + LinearConstraint, + Bounds, + minimize, + BFGS, + SR1) + + +class Maratos: + """Problem 15.4 from Nocedal and Wright + + The following optimization problem: + minimize 2*(x[0]**2 + x[1]**2 - 1) - x[0] + Subject to: x[0]**2 + x[1]**2 - 1 = 0 + """ + + def __init__(self, degrees=60, constr_jac=None, constr_hess=None): + rads = degrees/180*np.pi + self.x0 = [np.cos(rads), np.sin(rads)] + self.x_opt = np.array([1.0, 0.0]) + self.constr_jac = constr_jac + self.constr_hess = constr_hess + self.bounds = None + + def fun(self, x): + return 2*(x[0]**2 + x[1]**2 - 1) - x[0] + + def grad(self, x): + return np.array([4*x[0]-1, 4*x[1]]) + + def hess(self, x): + return 4*np.eye(2) + + @property + def constr(self): + def fun(x): + return x[0]**2 + x[1]**2 + + if self.constr_jac is None: + def jac(x): + return [[2*x[0], 2*x[1]]] + else: + jac = self.constr_jac + + if self.constr_hess is None: + def hess(x, v): + return 2*v[0]*np.eye(2) + else: + hess = self.constr_hess + + return NonlinearConstraint(fun, 1, 1, jac, hess) + + +class MaratosTestArgs: + """Problem 15.4 from Nocedal and Wright + + The following optimization problem: + minimize 2*(x[0]**2 + x[1]**2 - 1) - x[0] + Subject to: x[0]**2 + x[1]**2 - 1 = 0 + """ + + def __init__(self, a, b, degrees=60, constr_jac=None, constr_hess=None): + rads = degrees/180*np.pi + self.x0 = [np.cos(rads), np.sin(rads)] + self.x_opt = np.array([1.0, 0.0]) + self.constr_jac = constr_jac + self.constr_hess = constr_hess + self.a = a + self.b = b + self.bounds = None + + def _test_args(self, a, b): + if self.a != a or self.b != b: + raise ValueError() + + def fun(self, x, a, b): + self._test_args(a, b) + return 2*(x[0]**2 + x[1]**2 - 1) - x[0] + + def grad(self, x, a, b): + self._test_args(a, b) + return np.array([4*x[0]-1, 4*x[1]]) + + def hess(self, x, a, b): + self._test_args(a, b) + return 4*np.eye(2) + + @property + def constr(self): + def fun(x): + return x[0]**2 + x[1]**2 + + if self.constr_jac is None: + def jac(x): + return [[4*x[0], 4*x[1]]] + else: + jac = self.constr_jac + + if self.constr_hess is None: + def hess(x, v): + return 2*v[0]*np.eye(2) + else: + hess = self.constr_hess + + return NonlinearConstraint(fun, 1, 1, jac, hess) + + +class MaratosGradInFunc: + """Problem 15.4 from Nocedal and Wright + + The following optimization problem: + minimize 2*(x[0]**2 + x[1]**2 - 1) - x[0] + Subject to: x[0]**2 + x[1]**2 - 1 = 0 + """ + + def __init__(self, degrees=60, constr_jac=None, constr_hess=None): + rads = degrees/180*np.pi + self.x0 = [np.cos(rads), np.sin(rads)] + self.x_opt = np.array([1.0, 0.0]) + self.constr_jac = constr_jac + self.constr_hess = constr_hess + self.bounds = None + + def fun(self, x): + return (2*(x[0]**2 + x[1]**2 - 1) - x[0], + np.array([4*x[0]-1, 4*x[1]])) + + @property + def grad(self): + 
return True + + def hess(self, x): + return 4*np.eye(2) + + @property + def constr(self): + def fun(x): + return x[0]**2 + x[1]**2 + + if self.constr_jac is None: + def jac(x): + return [[4*x[0], 4*x[1]]] + else: + jac = self.constr_jac + + if self.constr_hess is None: + def hess(x, v): + return 2*v[0]*np.eye(2) + else: + hess = self.constr_hess + + return NonlinearConstraint(fun, 1, 1, jac, hess) + + +class HyperbolicIneq: + """Problem 15.1 from Nocedal and Wright + + The following optimization problem: + minimize 1/2*(x[0] - 2)**2 + 1/2*(x[1] - 1/2)**2 + Subject to: 1/(x[0] + 1) - x[1] >= 1/4 + x[0] >= 0 + x[1] >= 0 + """ + def __init__(self, constr_jac=None, constr_hess=None): + self.x0 = [0, 0] + self.x_opt = [1.952823, 0.088659] + self.constr_jac = constr_jac + self.constr_hess = constr_hess + self.bounds = Bounds(0, np.inf) + + def fun(self, x): + return 1/2*(x[0] - 2)**2 + 1/2*(x[1] - 1/2)**2 + + def grad(self, x): + return [x[0] - 2, x[1] - 1/2] + + def hess(self, x): + return np.eye(2) + + @property + def constr(self): + def fun(x): + return 1/(x[0] + 1) - x[1] + + if self.constr_jac is None: + def jac(x): + return [[-1/(x[0] + 1)**2, -1]] + else: + jac = self.constr_jac + + if self.constr_hess is None: + def hess(x, v): + return 2*v[0]*np.array([[1/(x[0] + 1)**3, 0], + [0, 0]]) + else: + hess = self.constr_hess + + return NonlinearConstraint(fun, 0.25, np.inf, jac, hess) + + +class Rosenbrock: + """Rosenbrock function. + + The following optimization problem: + minimize sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0) + """ + + def __init__(self, n=2, random_state=0): + rng = np.random.RandomState(random_state) + self.x0 = rng.uniform(-1, 1, n) + self.x_opt = np.ones(n) + self.bounds = None + + def fun(self, x): + x = np.asarray(x) + r = np.sum(100.0 * (x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0, + axis=0) + return r + + def grad(self, x): + x = np.asarray(x) + xm = x[1:-1] + xm_m1 = x[:-2] + xm_p1 = x[2:] + der = np.zeros_like(x) + der[1:-1] = (200 * (xm - xm_m1**2) - + 400 * (xm_p1 - xm**2) * xm - 2 * (1 - xm)) + der[0] = -400 * x[0] * (x[1] - x[0]**2) - 2 * (1 - x[0]) + der[-1] = 200 * (x[-1] - x[-2]**2) + return der + + def hess(self, x): + x = np.atleast_1d(x) + H = np.diag(-400 * x[:-1], 1) - np.diag(400 * x[:-1], -1) + diagonal = np.zeros(len(x), dtype=x.dtype) + diagonal[0] = 1200 * x[0]**2 - 400 * x[1] + 2 + diagonal[-1] = 200 + diagonal[1:-1] = 202 + 1200 * x[1:-1]**2 - 400 * x[2:] + H = H + np.diag(diagonal) + return H + + @property + def constr(self): + return () + + +class IneqRosenbrock(Rosenbrock): + """Rosenbrock subject to inequality constraints. + + The following optimization problem: + minimize sum(100.0*(x[1] - x[0]**2)**2.0 + (1 - x[0])**2) + subject to: x[0] + 2 x[1] <= 1 + + Taken from matlab ``fmincon`` documentation. + """ + def __init__(self, random_state=0): + Rosenbrock.__init__(self, 2, random_state) + self.x0 = [-1, -0.5] + self.x_opt = [0.5022, 0.2489] + self.bounds = None + + @property + def constr(self): + A = [[1, 2]] + b = 1 + return LinearConstraint(A, -np.inf, b) + + +class BoundedRosenbrock(Rosenbrock): + """Rosenbrock subject to inequality constraints. + + The following optimization problem: + minimize sum(100.0*(x[1] - x[0]**2)**2.0 + (1 - x[0])**2) + subject to: -2 <= x[0] <= 0 + 0 <= x[1] <= 2 + + Taken from matlab ``fmincon`` documentation. 
+ """ + def __init__(self, random_state=0): + Rosenbrock.__init__(self, 2, random_state) + self.x0 = [-0.2, 0.2] + self.x_opt = None + self.bounds = Bounds([-2, 0], [0, 2]) + + +class EqIneqRosenbrock(Rosenbrock): + """Rosenbrock subject to equality and inequality constraints. + + The following optimization problem: + minimize sum(100.0*(x[1] - x[0]**2)**2.0 + (1 - x[0])**2) + subject to: x[0] + 2 x[1] <= 1 + 2 x[0] + x[1] = 1 + + Taken from matlab ``fimincon`` documentation. + """ + def __init__(self, random_state=0): + Rosenbrock.__init__(self, 2, random_state) + self.x0 = [-1, -0.5] + self.x_opt = [0.41494, 0.17011] + self.bounds = None + + @property + def constr(self): + A_ineq = [[1, 2]] + b_ineq = 1 + A_eq = [[2, 1]] + b_eq = 1 + return (LinearConstraint(A_ineq, -np.inf, b_ineq), + LinearConstraint(A_eq, b_eq, b_eq)) + + +class Elec: + """Distribution of electrons on a sphere. + + Problem no 2 from COPS collection [2]_. Find + the equilibrium state distribution (of minimal + potential) of the electrons positioned on a + conducting sphere. + + References + ---------- + .. [1] E. D. Dolan, J. J. Mor\'{e}, and T. S. Munson, + "Benchmarking optimization software with COPS 3.0.", + Argonne National Lab., Argonne, IL (US), 2004. + """ + def __init__(self, n_electrons=200, random_state=0, + constr_jac=None, constr_hess=None): + self.n_electrons = n_electrons + self.rng = np.random.RandomState(random_state) + # Initial Guess + phi = self.rng.uniform(0, 2 * np.pi, self.n_electrons) + theta = self.rng.uniform(-np.pi, np.pi, self.n_electrons) + x = np.cos(theta) * np.cos(phi) + y = np.cos(theta) * np.sin(phi) + z = np.sin(theta) + self.x0 = np.hstack((x, y, z)) + self.x_opt = None + self.constr_jac = constr_jac + self.constr_hess = constr_hess + self.bounds = None + + def _get_cordinates(self, x): + x_coord = x[:self.n_electrons] + y_coord = x[self.n_electrons:2 * self.n_electrons] + z_coord = x[2 * self.n_electrons:] + return x_coord, y_coord, z_coord + + def _compute_coordinate_deltas(self, x): + x_coord, y_coord, z_coord = self._get_cordinates(x) + dx = x_coord[:, None] - x_coord + dy = y_coord[:, None] - y_coord + dz = z_coord[:, None] - z_coord + return dx, dy, dz + + def fun(self, x): + dx, dy, dz = self._compute_coordinate_deltas(x) + with np.errstate(divide='ignore'): + dm1 = (dx**2 + dy**2 + dz**2) ** -0.5 + dm1[np.diag_indices_from(dm1)] = 0 + return 0.5 * np.sum(dm1) + + def grad(self, x): + dx, dy, dz = self._compute_coordinate_deltas(x) + + with np.errstate(divide='ignore'): + dm3 = (dx**2 + dy**2 + dz**2) ** -1.5 + dm3[np.diag_indices_from(dm3)] = 0 + + grad_x = -np.sum(dx * dm3, axis=1) + grad_y = -np.sum(dy * dm3, axis=1) + grad_z = -np.sum(dz * dm3, axis=1) + + return np.hstack((grad_x, grad_y, grad_z)) + + def hess(self, x): + dx, dy, dz = self._compute_coordinate_deltas(x) + d = (dx**2 + dy**2 + dz**2) ** 0.5 + + with np.errstate(divide='ignore'): + dm3 = d ** -3 + dm5 = d ** -5 + + i = np.arange(self.n_electrons) + dm3[i, i] = 0 + dm5[i, i] = 0 + + Hxx = dm3 - 3 * dx**2 * dm5 + Hxx[i, i] = -np.sum(Hxx, axis=1) + + Hxy = -3 * dx * dy * dm5 + Hxy[i, i] = -np.sum(Hxy, axis=1) + + Hxz = -3 * dx * dz * dm5 + Hxz[i, i] = -np.sum(Hxz, axis=1) + + Hyy = dm3 - 3 * dy**2 * dm5 + Hyy[i, i] = -np.sum(Hyy, axis=1) + + Hyz = -3 * dy * dz * dm5 + Hyz[i, i] = -np.sum(Hyz, axis=1) + + Hzz = dm3 - 3 * dz**2 * dm5 + Hzz[i, i] = -np.sum(Hzz, axis=1) + + H = np.vstack(( + np.hstack((Hxx, Hxy, Hxz)), + np.hstack((Hxy, Hyy, Hyz)), + np.hstack((Hxz, Hyz, Hzz)) + )) + + return H + + @property + def 
constr(self): + def fun(x): + x_coord, y_coord, z_coord = self._get_cordinates(x) + return x_coord**2 + y_coord**2 + z_coord**2 - 1 + + if self.constr_jac is None: + def jac(x): + x_coord, y_coord, z_coord = self._get_cordinates(x) + Jx = 2 * np.diag(x_coord) + Jy = 2 * np.diag(y_coord) + Jz = 2 * np.diag(z_coord) + return csc_matrix(np.hstack((Jx, Jy, Jz))) + else: + jac = self.constr_jac + + if self.constr_hess is None: + def hess(x, v): + D = 2 * np.diag(v) + return block_diag(D, D, D) + else: + hess = self.constr_hess + + return NonlinearConstraint(fun, -np.inf, 0, jac, hess) + + +class TestTrustRegionConstr(TestCase): + + @pytest.mark.slow + def test_list_of_problems(self): + list_of_problems = [Maratos(), + Maratos(constr_hess='2-point'), + Maratos(constr_hess=SR1()), + Maratos(constr_jac='2-point', constr_hess=SR1()), + MaratosGradInFunc(), + HyperbolicIneq(), + HyperbolicIneq(constr_hess='3-point'), + HyperbolicIneq(constr_hess=BFGS()), + HyperbolicIneq(constr_jac='3-point', + constr_hess=BFGS()), + Rosenbrock(), + IneqRosenbrock(), + EqIneqRosenbrock(), + BoundedRosenbrock(), + Elec(n_electrons=2), + Elec(n_electrons=2, constr_hess='2-point'), + Elec(n_electrons=2, constr_hess=SR1()), + Elec(n_electrons=2, constr_jac='3-point', + constr_hess=SR1())] + + for prob in list_of_problems: + for grad in (prob.grad, '3-point', False): + for hess in (prob.hess, + '3-point', + SR1(), + BFGS(exception_strategy='damp_update'), + BFGS(exception_strategy='skip_update')): + + # Remove exceptions + if grad in ('2-point', '3-point', 'cs', False) and \ + hess in ('2-point', '3-point', 'cs'): + continue + if prob.grad is True and grad in ('3-point', False): + continue + with suppress_warnings() as sup: + sup.filter(UserWarning, "delta_grad == 0.0") + result = minimize(prob.fun, prob.x0, + method='trust-constr', + jac=grad, hess=hess, + bounds=prob.bounds, + constraints=prob.constr) + + if prob.x_opt is not None: + assert_array_almost_equal(result.x, prob.x_opt, + decimal=5) + # gtol + if result.status == 1: + assert_array_less(result.optimality, 1e-8) + # xtol + if result.status == 2: + assert_array_less(result.tr_radius, 1e-8) + + if result.method == "tr_interior_point": + assert_array_less(result.barrier_parameter, 1e-8) + # max iter + if result.status in (0, 3): + raise RuntimeError("Invalid termination condition.") + + def test_default_jac_and_hess(self): + def fun(x): + return (x - 1) ** 2 + bounds = [(-2, 2)] + res = minimize(fun, x0=[-1.5], bounds=bounds, method='trust-constr') + assert_array_almost_equal(res.x, 1, decimal=5) + + def test_default_hess(self): + def fun(x): + return (x - 1) ** 2 + bounds = [(-2, 2)] + res = minimize(fun, x0=[-1.5], bounds=bounds, method='trust-constr', + jac='2-point') + assert_array_almost_equal(res.x, 1, decimal=5) + + def test_no_constraints(self): + prob = Rosenbrock() + result = minimize(prob.fun, prob.x0, + method='trust-constr', + jac=prob.grad, hess=prob.hess) + result1 = minimize(prob.fun, prob.x0, + method='L-BFGS-B', + jac='2-point') + + result2 = minimize(prob.fun, prob.x0, + method='L-BFGS-B', + jac='3-point') + assert_array_almost_equal(result.x, prob.x_opt, decimal=5) + assert_array_almost_equal(result1.x, prob.x_opt, decimal=5) + assert_array_almost_equal(result2.x, prob.x_opt, decimal=5) + + def test_hessp(self): + prob = Maratos() + + def hessp(x, p): + H = prob.hess(x) + return H.dot(p) + + result = minimize(prob.fun, prob.x0, + method='trust-constr', + jac=prob.grad, hessp=hessp, + bounds=prob.bounds, + constraints=prob.constr) + + if 
prob.x_opt is not None: + assert_array_almost_equal(result.x, prob.x_opt, decimal=2) + + # gtol + if result.status == 1: + assert_array_less(result.optimality, 1e-8) + # xtol + if result.status == 2: + assert_array_less(result.tr_radius, 1e-8) + + if result.method == "tr_interior_point": + assert_array_less(result.barrier_parameter, 1e-8) + # max iter + if result.status in (0, 3): + raise RuntimeError("Invalid termination condition.") + + def test_args(self): + prob = MaratosTestArgs("a", 234) + + result = minimize(prob.fun, prob.x0, ("a", 234), + method='trust-constr', + jac=prob.grad, hess=prob.hess, + bounds=prob.bounds, + constraints=prob.constr) + + if prob.x_opt is not None: + assert_array_almost_equal(result.x, prob.x_opt, decimal=2) + + # gtol + if result.status == 1: + assert_array_less(result.optimality, 1e-8) + # xtol + if result.status == 2: + assert_array_less(result.tr_radius, 1e-8) + if result.method == "tr_interior_point": + assert_array_less(result.barrier_parameter, 1e-8) + # max iter + if result.status in (0, 3): + raise RuntimeError("Invalid termination condition.") + + def test_raise_exception(self): + prob = Maratos() + message = "Whenever the gradient is estimated via finite-differences" + with pytest.raises(ValueError, match=message): + minimize(prob.fun, prob.x0, method='trust-constr', jac='2-point', + hess='2-point', constraints=prob.constr) + + def test_issue_9044(self): + # https://github.com/scipy/scipy/issues/9044 + # Test the returned `OptimizeResult` contains keys consistent with + # other solvers. + + def callback(x, info): + assert_('nit' in info) + assert_('niter' in info) + + result = minimize(lambda x: x**2, [0], jac=lambda x: 2*x, + hess=lambda x: 2, callback=callback, + method='trust-constr') + assert_(result.get('success')) + assert_(result.get('nit', -1) == 1) + + # Also check existence of the 'niter' attribute, for backward + # compatibility + assert_(result.get('niter', -1) == 1) + + def test_issue_15093(self): + # scipy docs define bounds as inclusive, so it shouldn't be + # an issue to set x0 on the bounds even if keep_feasible is + # True. Previously, trust-constr would treat bounds as + # exclusive. + + x0 = np.array([0., 0.5]) + + def obj(x): + x1 = x[0] + x2 = x[1] + return x1 ** 2 + x2 ** 2 + + bounds = Bounds(np.array([0., 0.]), np.array([1., 1.]), + keep_feasible=True) + + with suppress_warnings() as sup: + sup.filter(UserWarning, "delta_grad == 0.0") + result = minimize( + method='trust-constr', + fun=obj, + x0=x0, + bounds=bounds) + + assert result['success'] + +class TestEmptyConstraint(TestCase): + """ + Here we minimize x^2+y^2 subject to x^2-y^2>1. + The actual minimum is at (0, 0) which fails the constraint. + Therefore we will find a minimum on the boundary at (+/-1, 0). + + When minimizing on the boundary, optimize uses a set of + constraints that removes the constraint that sets that + boundary. In our case, there's only one constraint, so + the result is an empty constraint. + + This tests that the empty constraint works. 
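+
+    Analytically: minimizing x**2 + y**2 subject to x**2 - y**2 >= 1
+    forces y = 0 (nonzero y both raises the objective and reduces
+    x**2 - y**2), leaving |x| = 1, which is what the assertion below
+    verifies via abs(result.x).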
+ """ + def test_empty_constraint(self): + + def function(x): + return x[0]**2 + x[1]**2 + + def functionjacobian(x): + return np.array([2.*x[0], 2.*x[1]]) + + def functionhvp(x, v): + return 2.*v + + def constraint(x): + return np.array([x[0]**2 - x[1]**2]) + + def constraintjacobian(x): + return np.array([[2*x[0], -2*x[1]]]) + + def constraintlcoh(x, v): + return np.array([[2., 0.], [0., -2.]]) * v[0] + + constraint = NonlinearConstraint(constraint, 1., np.inf, + constraintjacobian, constraintlcoh) + + startpoint = [1., 2.] + + bounds = Bounds([-np.inf, -np.inf], [np.inf, np.inf]) + + result = minimize( + function, + startpoint, + method='trust-constr', + jac=functionjacobian, + hessp=functionhvp, + constraints=[constraint], + bounds=bounds, + ) + + assert_array_almost_equal(abs(result.x), np.array([1, 0]), decimal=4) + + +def test_bug_11886(): + def opt(x): + return x[0]**2+x[1]**2 + + with np.testing.suppress_warnings() as sup: + sup.filter(PendingDeprecationWarning) + A = np.matrix(np.diag([1, 1])) + lin_cons = LinearConstraint(A, -1, np.inf) + # just checking that there are no errors + minimize(opt, 2*[1], constraints = lin_cons) + + +# Remove xfail when gh-11649 is resolved +@pytest.mark.xfail(reason="Known bug in trust-constr; see gh-11649.", + strict=True) +def test_gh11649(): + bnds = Bounds(lb=[-1, -1], ub=[1, 1], keep_feasible=True) + + def assert_inbounds(x): + assert np.all(x >= bnds.lb) + assert np.all(x <= bnds.ub) + + def obj(x): + assert_inbounds(x) + return np.exp(x[0])*(4*x[0]**2 + 2*x[1]**2 + 4*x[0]*x[1] + 2*x[1] + 1) + + def nce(x): + assert_inbounds(x) + return x[0]**2 + x[1] + + def nci(x): + assert_inbounds(x) + return x[0]*x[1] + + x0 = np.array((0.99, -0.99)) + nlcs = [NonlinearConstraint(nci, -10, np.inf), + NonlinearConstraint(nce, 1, 1)] + + res = minimize(fun=obj, x0=x0, method='trust-constr', + bounds=bnds, constraints=nlcs) + assert res.success + assert_inbounds(res.x) + assert nlcs[0].lb < nlcs[0].fun(res.x) < nlcs[0].ub + assert_allclose(nce(res.x), nlcs[1].ub) + + ref = minimize(fun=obj, x0=x0, method='slsqp', + bounds=bnds, constraints=nlcs) + assert_allclose(res.fun, ref.fun) + + +class TestBoundedNelderMead: + + @pytest.mark.parametrize('bounds, x_opt', + [(Bounds(-np.inf, np.inf), Rosenbrock().x_opt), + (Bounds(-np.inf, -0.8), [-0.8, -0.8]), + (Bounds(3.0, np.inf), [3.0, 9.0]), + (Bounds([3.0, 1.0], [4.0, 5.0]), [3., 5.]), + ]) + def test_rosen_brock_with_bounds(self, bounds, x_opt): + prob = Rosenbrock() + with suppress_warnings() as sup: + sup.filter(UserWarning, "Initial guess is not within " + "the specified bounds") + result = minimize(prob.fun, [-10, -10], + method='Nelder-Mead', + bounds=bounds) + assert np.less_equal(bounds.lb, result.x).all() + assert np.less_equal(result.x, bounds.ub).all() + assert np.allclose(prob.fun(result.x), result.fun) + assert np.allclose(result.x, x_opt, atol=1.e-3) + + def test_equal_all_bounds(self): + prob = Rosenbrock() + bounds = Bounds([4.0, 5.0], [4.0, 5.0]) + with suppress_warnings() as sup: + sup.filter(UserWarning, "Initial guess is not within " + "the specified bounds") + result = minimize(prob.fun, [-10, 8], + method='Nelder-Mead', + bounds=bounds) + assert np.allclose(result.x, [4.0, 5.0]) + + def test_equal_one_bounds(self): + prob = Rosenbrock() + bounds = Bounds([4.0, 5.0], [4.0, 20.0]) + with suppress_warnings() as sup: + sup.filter(UserWarning, "Initial guess is not within " + "the specified bounds") + result = minimize(prob.fun, [-10, 8], + method='Nelder-Mead', + bounds=bounds) + assert 
np.allclose(result.x, [4.0, 16.0]) + + def test_invalid_bounds(self): + prob = Rosenbrock() + message = 'An upper bound is less than the corresponding lower bound.' + with pytest.raises(ValueError, match=message): + bounds = Bounds([-np.inf, 1.0], [4.0, -5.0]) + minimize(prob.fun, [-10, 3], + method='Nelder-Mead', + bounds=bounds) + + @pytest.mark.xfail(reason="Failing on Azure Linux and macOS builds, " + "see gh-13846") + def test_outside_bounds_warning(self): + prob = Rosenbrock() + message = "Initial guess is not within the specified bounds" + with pytest.warns(UserWarning, match=message): + bounds = Bounds([-np.inf, 1.0], [4.0, 5.0]) + minimize(prob.fun, [-10, 8], + method='Nelder-Mead', + bounds=bounds) diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_minpack.py b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_minpack.py new file mode 100644 index 0000000000000000000000000000000000000000..4b2ad1e3528400e0812a2d60c64a9724dea7fa6c --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_minpack.py @@ -0,0 +1,1099 @@ +""" +Unit tests for optimization routines from minpack.py. +""" +import warnings +import pytest + +from numpy.testing import (assert_, assert_almost_equal, assert_array_equal, + assert_array_almost_equal, assert_allclose, + assert_warns, suppress_warnings) +from pytest import raises as assert_raises +import numpy as np +from numpy import array, float64 +from multiprocessing.pool import ThreadPool + +from scipy import optimize, linalg +from scipy.special import lambertw +from scipy.optimize._minpack_py import leastsq, curve_fit, fixed_point +from scipy.optimize import OptimizeWarning +from scipy.optimize._minimize import Bounds + + +class ReturnShape: + """This class exists to create a callable that does not have a '__name__' attribute. + + __init__ takes the argument 'shape', which should be a tuple of ints. + When an instance is called with a single argument 'x', it returns numpy.ones(shape). + """ + + def __init__(self, shape): + self.shape = shape + + def __call__(self, x): + return np.ones(self.shape) + + +def dummy_func(x, shape): + """A function that returns an array of ones of the given shape. + `x` is ignored. + """ + return np.ones(shape) + + +def sequence_parallel(fs): + with ThreadPool(len(fs)) as pool: + return pool.map(lambda f: f(), fs) + + +# Function and Jacobian for tests of solvers for systems of nonlinear +# equations + + +def pressure_network(flow_rates, Qtot, k): + """Evaluate non-linear equation system representing + the pressures and flows in a system of n parallel pipes:: + + f_i = P_i - P_0, for i = 1..n + f_0 = sum(Q_i) - Qtot + + where Q_i is the flow rate in pipe i and P_i the pressure in that pipe. + Pressure is modeled as a P=kQ**2 where k is a valve coefficient and + Q is the flow rate. + + Parameters + ---------- + flow_rates : float + A 1-D array of n flow rates [kg/s]. + k : float + A 1-D array of n valve coefficients [1/kg m]. + Qtot : float + A scalar, the total input flow rate [kg/s]. + + Returns + ------- + F : float + A 1-D array, F[i] == f_i. + + """ + P = k * flow_rates**2 + F = np.hstack((P[1:] - P[0], flow_rates.sum() - Qtot)) + return F + + +def pressure_network_jacobian(flow_rates, Qtot, k): + """Return the jacobian of the equation system F(flow_rates) + computed by `pressure_network` with respect to + *flow_rates*. See `pressure_network` for the detailed + description of parameters. 
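+    (Nonzero pattern: for i < n-1, df_i/dQ_0 = -2*k[0]*Q_0 and
+    df_i/dQ_{i+1} = 2*k[i+1]*Q_{i+1}; the last row, which encodes the
+    total-flow equation, is all ones.)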
+
+    Returns
+    -------
+    jac : float
+        *n* by *n* matrix ``df_i/dQ_i`` where ``n = len(flow_rates)``
+        and *f_i* and *Q_i* are described in the doc for `pressure_network`
+    """
+    n = len(flow_rates)
+    pdiff = np.diag(flow_rates[1:] * 2 * k[1:])
+
+    jac = np.empty((n, n))
+    jac[:n-1, 1:] = pdiff
+    jac[:n-1, 0] = -2 * k[0] * flow_rates[0]
+    jac[n-1, :] = np.ones(n)
+
+    return jac
+
+
+def pressure_network_fun_and_grad(flow_rates, Qtot, k):
+    return (pressure_network(flow_rates, Qtot, k),
+            pressure_network_jacobian(flow_rates, Qtot, k))
+
+
+class TestFSolve:
+    def test_pressure_network_no_gradient(self):
+        # fsolve without gradient, equal pipes -> equal flows.
+        k = np.full(4, 0.5)
+        Qtot = 4
+        initial_guess = array([2., 0., 2., 0.])
+        final_flows, info, ier, mesg = optimize.fsolve(
+            pressure_network, initial_guess, args=(Qtot, k),
+            full_output=True)
+        assert_array_almost_equal(final_flows, np.ones(4))
+        assert_(ier == 1, mesg)
+
+    def test_pressure_network_with_gradient(self):
+        # fsolve with gradient, equal pipes -> equal flows
+        k = np.full(4, 0.5)
+        Qtot = 4
+        initial_guess = array([2., 0., 2., 0.])
+        final_flows = optimize.fsolve(
+            pressure_network, initial_guess, args=(Qtot, k),
+            fprime=pressure_network_jacobian)
+        assert_array_almost_equal(final_flows, np.ones(4))
+
+    def test_wrong_shape_func_callable(self):
+        func = ReturnShape(1)
+        # x0 is a list of two elements, but func will return an array with
+        # length 1, so this should result in a TypeError.
+        x0 = [1.5, 2.0]
+        assert_raises(TypeError, optimize.fsolve, func, x0)
+
+    def test_wrong_shape_func_function(self):
+        # x0 is a list of two elements, but func will return an array with
+        # length 1, so this should result in a TypeError.
+        x0 = [1.5, 2.0]
+        assert_raises(TypeError, optimize.fsolve, dummy_func, x0, args=((1,),))
+
+    def test_wrong_shape_fprime_callable(self):
+        func = ReturnShape(1)
+        deriv_func = ReturnShape((2,2))
+        assert_raises(TypeError, optimize.fsolve, func, x0=[0,1], fprime=deriv_func)
+
+    def test_wrong_shape_fprime_function(self):
+        def func(x):
+            return dummy_func(x, (2,))
+        def deriv_func(x):
+            return dummy_func(x, (3, 3))
+        assert_raises(TypeError, optimize.fsolve, func, x0=[0,1], fprime=deriv_func)
+
+    def test_func_can_raise(self):
+        def func(*args):
+            raise ValueError('I raised')
+
+        with assert_raises(ValueError, match='I raised'):
+            optimize.fsolve(func, x0=[0])
+
+    def test_Dfun_can_raise(self):
+        def func(x):
+            return x - np.array([10])
+
+        def deriv_func(*args):
+            raise ValueError('I raised')
+
+        with assert_raises(ValueError, match='I raised'):
+            optimize.fsolve(func, x0=[0], fprime=deriv_func)
+
+    def test_float32(self):
+        def func(x):
+            return np.array([x[0] - 100, x[1] - 1000], dtype=np.float32) ** 2
+        p = optimize.fsolve(func, np.array([1, 1], np.float32))
+        assert_allclose(func(p), [0, 0], atol=1e-3)
+
+    def test_reentrant_func(self):
+        def func(*args):
+            self.test_pressure_network_no_gradient()
+            return pressure_network(*args)
+
+        # fsolve without gradient, equal pipes -> equal flows.
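+        # `func` above re-enters fsolve (by running another fsolve-based
+        # test) on every residual evaluation, so this checks that the
+        # MINPACK wrapper keeps no global state across nested calls.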
+ k = np.full(4, 0.5) + Qtot = 4 + initial_guess = array([2., 0., 2., 0.]) + final_flows, info, ier, mesg = optimize.fsolve( + func, initial_guess, args=(Qtot, k), + full_output=True) + assert_array_almost_equal(final_flows, np.ones(4)) + assert_(ier == 1, mesg) + + def test_reentrant_Dfunc(self): + def deriv_func(*args): + self.test_pressure_network_with_gradient() + return pressure_network_jacobian(*args) + + # fsolve with gradient, equal pipes -> equal flows + k = np.full(4, 0.5) + Qtot = 4 + initial_guess = array([2., 0., 2., 0.]) + final_flows = optimize.fsolve( + pressure_network, initial_guess, args=(Qtot, k), + fprime=deriv_func) + assert_array_almost_equal(final_flows, np.ones(4)) + + def test_concurrent_no_gradient(self): + v = sequence_parallel([self.test_pressure_network_no_gradient] * 10) + assert all([result is None for result in v]) + + def test_concurrent_with_gradient(self): + v = sequence_parallel([self.test_pressure_network_with_gradient] * 10) + assert all([result is None for result in v]) + + +class TestRootHybr: + def test_pressure_network_no_gradient(self): + # root/hybr without gradient, equal pipes -> equal flows + k = np.full(4, 0.5) + Qtot = 4 + initial_guess = array([2., 0., 2., 0.]) + final_flows = optimize.root(pressure_network, initial_guess, + method='hybr', args=(Qtot, k)).x + assert_array_almost_equal(final_flows, np.ones(4)) + + def test_pressure_network_with_gradient(self): + # root/hybr with gradient, equal pipes -> equal flows + k = np.full(4, 0.5) + Qtot = 4 + initial_guess = array([[2., 0., 2., 0.]]) + final_flows = optimize.root(pressure_network, initial_guess, + args=(Qtot, k), method='hybr', + jac=pressure_network_jacobian).x + assert_array_almost_equal(final_flows, np.ones(4)) + + def test_pressure_network_with_gradient_combined(self): + # root/hybr with gradient and function combined, equal pipes -> equal + # flows + k = np.full(4, 0.5) + Qtot = 4 + initial_guess = array([2., 0., 2., 0.]) + final_flows = optimize.root(pressure_network_fun_and_grad, + initial_guess, args=(Qtot, k), + method='hybr', jac=True).x + assert_array_almost_equal(final_flows, np.ones(4)) + + +class TestRootLM: + def test_pressure_network_no_gradient(self): + # root/lm without gradient, equal pipes -> equal flows + k = np.full(4, 0.5) + Qtot = 4 + initial_guess = array([2., 0., 2., 0.]) + final_flows = optimize.root(pressure_network, initial_guess, + method='lm', args=(Qtot, k)).x + assert_array_almost_equal(final_flows, np.ones(4)) + + +class TestLeastSq: + def setup_method(self): + x = np.linspace(0, 10, 40) + a,b,c = 3.1, 42, -304.2 + self.x = x + self.abc = a,b,c + y_true = a*x**2 + b*x + c + np.random.seed(0) + self.y_meas = y_true + 0.01*np.random.standard_normal(y_true.shape) + + def residuals(self, p, y, x): + a,b,c = p + err = y-(a*x**2 + b*x + c) + return err + + def residuals_jacobian(self, _p, _y, x): + return -np.vstack([x**2, x, np.ones_like(x)]).T + + def test_basic(self): + p0 = array([0,0,0]) + params_fit, ier = leastsq(self.residuals, p0, + args=(self.y_meas, self.x)) + assert_(ier in (1,2,3,4), 'solution not found (ier=%d)' % ier) + # low precision due to random + assert_array_almost_equal(params_fit, self.abc, decimal=2) + + def test_basic_with_gradient(self): + p0 = array([0,0,0]) + params_fit, ier = leastsq(self.residuals, p0, + args=(self.y_meas, self.x), + Dfun=self.residuals_jacobian) + assert_(ier in (1,2,3,4), 'solution not found (ier=%d)' % ier) + # low precision due to random + assert_array_almost_equal(params_fit, self.abc, decimal=2) + + def 
test_full_output(self): + p0 = array([[0,0,0]]) + full_output = leastsq(self.residuals, p0, + args=(self.y_meas, self.x), + full_output=True) + params_fit, cov_x, infodict, mesg, ier = full_output + assert_(ier in (1,2,3,4), 'solution not found: %s' % mesg) + + def test_input_untouched(self): + p0 = array([0,0,0],dtype=float64) + p0_copy = array(p0, copy=True) + full_output = leastsq(self.residuals, p0, + args=(self.y_meas, self.x), + full_output=True) + params_fit, cov_x, infodict, mesg, ier = full_output + assert_(ier in (1,2,3,4), 'solution not found: %s' % mesg) + assert_array_equal(p0, p0_copy) + + def test_wrong_shape_func_callable(self): + func = ReturnShape(1) + # x0 is a list of two elements, but func will return an array with + # length 1, so this should result in a TypeError. + x0 = [1.5, 2.0] + assert_raises(TypeError, optimize.leastsq, func, x0) + + def test_wrong_shape_func_function(self): + # x0 is a list of two elements, but func will return an array with + # length 1, so this should result in a TypeError. + x0 = [1.5, 2.0] + assert_raises(TypeError, optimize.leastsq, dummy_func, x0, args=((1,),)) + + def test_wrong_shape_Dfun_callable(self): + func = ReturnShape(1) + deriv_func = ReturnShape((2,2)) + assert_raises(TypeError, optimize.leastsq, func, x0=[0,1], Dfun=deriv_func) + + def test_wrong_shape_Dfun_function(self): + def func(x): + return dummy_func(x, (2,)) + def deriv_func(x): + return dummy_func(x, (3, 3)) + assert_raises(TypeError, optimize.leastsq, func, x0=[0,1], Dfun=deriv_func) + + def test_float32(self): + # Regression test for gh-1447 + def func(p,x,y): + q = p[0]*np.exp(-(x-p[1])**2/(2.0*p[2]**2))+p[3] + return q - y + + x = np.array([1.475,1.429,1.409,1.419,1.455,1.519,1.472, 1.368,1.286, + 1.231], dtype=np.float32) + y = np.array([0.0168,0.0193,0.0211,0.0202,0.0171,0.0151,0.0185,0.0258, + 0.034,0.0396], dtype=np.float32) + p0 = np.array([1.0,1.0,1.0,1.0]) + p1, success = optimize.leastsq(func, p0, args=(x,y)) + + assert_(success in [1,2,3,4]) + assert_((func(p1,x,y)**2).sum() < 1e-4 * (func(p0,x,y)**2).sum()) + + def test_func_can_raise(self): + def func(*args): + raise ValueError('I raised') + + with assert_raises(ValueError, match='I raised'): + optimize.leastsq(func, x0=[0]) + + def test_Dfun_can_raise(self): + def func(x): + return x - np.array([10]) + + def deriv_func(*args): + raise ValueError('I raised') + + with assert_raises(ValueError, match='I raised'): + optimize.leastsq(func, x0=[0], Dfun=deriv_func) + + def test_reentrant_func(self): + def func(*args): + self.test_basic() + return self.residuals(*args) + + p0 = array([0,0,0]) + params_fit, ier = leastsq(func, p0, + args=(self.y_meas, self.x)) + assert_(ier in (1,2,3,4), 'solution not found (ier=%d)' % ier) + # low precision due to random + assert_array_almost_equal(params_fit, self.abc, decimal=2) + + def test_reentrant_Dfun(self): + def deriv_func(*args): + self.test_basic() + return self.residuals_jacobian(*args) + + p0 = array([0,0,0]) + params_fit, ier = leastsq(self.residuals, p0, + args=(self.y_meas, self.x), + Dfun=deriv_func) + assert_(ier in (1,2,3,4), 'solution not found (ier=%d)' % ier) + # low precision due to random + assert_array_almost_equal(params_fit, self.abc, decimal=2) + + def test_concurrent_no_gradient(self): + v = sequence_parallel([self.test_basic] * 10) + assert all([result is None for result in v]) + + def test_concurrent_with_gradient(self): + v = sequence_parallel([self.test_basic_with_gradient] * 10) + assert all([result is None for result in v]) + + def 
test_func_input_output_length_check(self): + + def func(x): + return 2 * (x[0] - 3) ** 2 + 1 + + with assert_raises(TypeError, + match='Improper input: func input vector length N='): + optimize.leastsq(func, x0=[0, 1]) + + +class TestCurveFit: + def setup_method(self): + self.y = array([1.0, 3.2, 9.5, 13.7]) + self.x = array([1.0, 2.0, 3.0, 4.0]) + + def test_one_argument(self): + def func(x,a): + return x**a + popt, pcov = curve_fit(func, self.x, self.y) + assert_(len(popt) == 1) + assert_(pcov.shape == (1,1)) + assert_almost_equal(popt[0], 1.9149, decimal=4) + assert_almost_equal(pcov[0,0], 0.0016, decimal=4) + + # Test if we get the same with full_output. Regression test for #1415. + # Also test if check_finite can be turned off. + res = curve_fit(func, self.x, self.y, + full_output=1, check_finite=False) + (popt2, pcov2, infodict, errmsg, ier) = res + assert_array_almost_equal(popt, popt2) + + def test_two_argument(self): + def func(x, a, b): + return b*x**a + popt, pcov = curve_fit(func, self.x, self.y) + assert_(len(popt) == 2) + assert_(pcov.shape == (2,2)) + assert_array_almost_equal(popt, [1.7989, 1.1642], decimal=4) + assert_array_almost_equal(pcov, [[0.0852, -0.1260], [-0.1260, 0.1912]], + decimal=4) + + def test_func_is_classmethod(self): + class test_self: + """This class tests if curve_fit passes the correct number of + arguments when the model function is a class instance method. + """ + + def func(self, x, a, b): + return b * x**a + + test_self_inst = test_self() + popt, pcov = curve_fit(test_self_inst.func, self.x, self.y) + assert_(pcov.shape == (2,2)) + assert_array_almost_equal(popt, [1.7989, 1.1642], decimal=4) + assert_array_almost_equal(pcov, [[0.0852, -0.1260], [-0.1260, 0.1912]], + decimal=4) + + def test_regression_2639(self): + # This test fails if epsfcn in leastsq is too large. 
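+        # `epsfcn` sets the step used for MINPACK's forward-difference
+        # Jacobian approximation; too large a step gives an inaccurate
+        # Jacobian for this sharply peaked double-Gaussian model and the
+        # fit stalls short of the optimum.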
+ x = [574.14200000000005, 574.154, 574.16499999999996, + 574.17700000000002, 574.18799999999999, 574.19899999999996, + 574.21100000000001, 574.22199999999998, 574.23400000000004, + 574.245] + y = [859.0, 997.0, 1699.0, 2604.0, 2013.0, 1964.0, 2435.0, + 1550.0, 949.0, 841.0] + guess = [574.1861428571428, 574.2155714285715, 1302.0, 1302.0, + 0.0035019999999983615, 859.0] + good = [5.74177150e+02, 5.74209188e+02, 1.74187044e+03, 1.58646166e+03, + 1.0068462e-02, 8.57450661e+02] + + def f_double_gauss(x, x0, x1, A0, A1, sigma, c): + return (A0*np.exp(-(x-x0)**2/(2.*sigma**2)) + + A1*np.exp(-(x-x1)**2/(2.*sigma**2)) + c) + popt, pcov = curve_fit(f_double_gauss, x, y, guess, maxfev=10000) + assert_allclose(popt, good, rtol=1e-5) + + def test_pcov(self): + xdata = np.array([0, 1, 2, 3, 4, 5]) + ydata = np.array([1, 1, 5, 7, 8, 12]) + sigma = np.array([1, 2, 1, 2, 1, 2]) + + def f(x, a, b): + return a*x + b + + for method in ['lm', 'trf', 'dogbox']: + popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=sigma, + method=method) + perr_scaled = np.sqrt(np.diag(pcov)) + assert_allclose(perr_scaled, [0.20659803, 0.57204404], rtol=1e-3) + + popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=3*sigma, + method=method) + perr_scaled = np.sqrt(np.diag(pcov)) + assert_allclose(perr_scaled, [0.20659803, 0.57204404], rtol=1e-3) + + popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=sigma, + absolute_sigma=True, method=method) + perr = np.sqrt(np.diag(pcov)) + assert_allclose(perr, [0.30714756, 0.85045308], rtol=1e-3) + + popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=3*sigma, + absolute_sigma=True, method=method) + perr = np.sqrt(np.diag(pcov)) + assert_allclose(perr, [3*0.30714756, 3*0.85045308], rtol=1e-3) + + # infinite variances + + def f_flat(x, a, b): + return a*x + + pcov_expected = np.array([np.inf]*4).reshape(2, 2) + + with suppress_warnings() as sup: + sup.filter(OptimizeWarning, + "Covariance of the parameters could not be estimated") + popt, pcov = curve_fit(f_flat, xdata, ydata, p0=[2, 0], sigma=sigma) + popt1, pcov1 = curve_fit(f, xdata[:2], ydata[:2], p0=[2, 0]) + + assert_(pcov.shape == (2, 2)) + assert_array_equal(pcov, pcov_expected) + + assert_(pcov1.shape == (2, 2)) + assert_array_equal(pcov1, pcov_expected) + + def test_array_like(self): + # Test sequence input. Regression test for gh-3037. 
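+        # Plain Python lists work because curve_fit coerces xdata and ydata
+        # to float arrays internally before calling the solver.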
+ def f_linear(x, a, b): + return a*x + b + + x = [1, 2, 3, 4] + y = [3, 5, 7, 9] + assert_allclose(curve_fit(f_linear, x, y)[0], [2, 1], atol=1e-10) + + def test_indeterminate_covariance(self): + # Test that a warning is returned when pcov is indeterminate + xdata = np.array([1, 2, 3, 4, 5, 6]) + ydata = np.array([1, 2, 3, 4, 5.5, 6]) + assert_warns(OptimizeWarning, curve_fit, + lambda x, a, b: a*x, xdata, ydata) + + def test_NaN_handling(self): + # Test for correct handling of NaNs in input data: gh-3422 + + # create input with NaNs + xdata = np.array([1, np.nan, 3]) + ydata = np.array([1, 2, 3]) + + assert_raises(ValueError, curve_fit, + lambda x, a, b: a*x + b, xdata, ydata) + assert_raises(ValueError, curve_fit, + lambda x, a, b: a*x + b, ydata, xdata) + + assert_raises(ValueError, curve_fit, lambda x, a, b: a*x + b, + xdata, ydata, **{"check_finite": True}) + + @staticmethod + def _check_nan_policy(f, xdata_with_nan, xdata_without_nan, + ydata_with_nan, ydata_without_nan, method): + kwargs = {'f': f, 'xdata': xdata_with_nan, 'ydata': ydata_with_nan, + 'method': method, 'check_finite': False} + # propagate test + error_msg = ("`nan_policy='propagate'` is not supported " + "by this function.") + with assert_raises(ValueError, match=error_msg): + curve_fit(**kwargs, nan_policy="propagate", maxfev=2000) + + # raise test + with assert_raises(ValueError, match="The input contains nan"): + curve_fit(**kwargs, nan_policy="raise") + + # omit test + result_with_nan, _ = curve_fit(**kwargs, nan_policy="omit") + kwargs['xdata'] = xdata_without_nan + kwargs['ydata'] = ydata_without_nan + result_without_nan, _ = curve_fit(**kwargs) + assert_allclose(result_with_nan, result_without_nan) + + # not valid policy test + error_msg = ("nan_policy must be one of " + "{'None', 'raise', 'omit'}") + with assert_raises(ValueError, match=error_msg): + curve_fit(**kwargs, nan_policy="hi") + + @pytest.mark.parametrize('method', ["lm", "trf", "dogbox"]) + def test_nan_policy_1d(self, method): + def f(x, a, b): + return a*x + b + + xdata_with_nan = np.array([2, 3, np.nan, 4, 4, np.nan]) + ydata_with_nan = np.array([1, 2, 5, 3, np.nan, 7]) + xdata_without_nan = np.array([2, 3, 4]) + ydata_without_nan = np.array([1, 2, 3]) + + self._check_nan_policy(f, xdata_with_nan, xdata_without_nan, + ydata_with_nan, ydata_without_nan, method) + + @pytest.mark.parametrize('method', ["lm", "trf", "dogbox"]) + def test_nan_policy_2d(self, method): + def f(x, a, b): + x1 = x[0, :] + x2 = x[1, :] + return a*x1 + b + x2 + + xdata_with_nan = np.array([[2, 3, np.nan, 4, 4, np.nan, 5], + [2, 3, np.nan, np.nan, 4, np.nan, 7]]) + ydata_with_nan = np.array([1, 2, 5, 3, np.nan, 7, 10]) + xdata_without_nan = np.array([[2, 3, 5], [2, 3, 7]]) + ydata_without_nan = np.array([1, 2, 10]) + + self._check_nan_policy(f, xdata_with_nan, xdata_without_nan, + ydata_with_nan, ydata_without_nan, method) + + @pytest.mark.parametrize('n', [2, 3]) + @pytest.mark.parametrize('method', ["lm", "trf", "dogbox"]) + def test_nan_policy_2_3d(self, n, method): + def f(x, a, b): + x1 = x[..., 0, :].squeeze() + x2 = x[..., 1, :].squeeze() + return a*x1 + b + x2 + + xdata_with_nan = np.array([[[2, 3, np.nan, 4, 4, np.nan, 5], + [2, 3, np.nan, np.nan, 4, np.nan, 7]]]) + xdata_with_nan = xdata_with_nan.squeeze() if n == 2 else xdata_with_nan + ydata_with_nan = np.array([1, 2, 5, 3, np.nan, 7, 10]) + xdata_without_nan = np.array([[[2, 3, 5], [2, 3, 7]]]) + ydata_without_nan = np.array([1, 2, 10]) + + self._check_nan_policy(f, xdata_with_nan, xdata_without_nan, + 
ydata_with_nan, ydata_without_nan, method) + + def test_empty_inputs(self): + # Test both with and without bounds (regression test for gh-9864) + assert_raises(ValueError, curve_fit, lambda x, a: a*x, [], []) + assert_raises(ValueError, curve_fit, lambda x, a: a*x, [], [], + bounds=(1, 2)) + assert_raises(ValueError, curve_fit, lambda x, a: a*x, [1], []) + assert_raises(ValueError, curve_fit, lambda x, a: a*x, [2], [], + bounds=(1, 2)) + + def test_function_zero_params(self): + # Fit args is zero, so "Unable to determine number of fit parameters." + assert_raises(ValueError, curve_fit, lambda x: x, [1, 2], [3, 4]) + + def test_None_x(self): # Added in GH10196 + popt, pcov = curve_fit(lambda _, a: a * np.arange(10), + None, 2 * np.arange(10)) + assert_allclose(popt, [2.]) + + def test_method_argument(self): + def f(x, a, b): + return a * np.exp(-b*x) + + xdata = np.linspace(0, 1, 11) + ydata = f(xdata, 2., 2.) + + for method in ['trf', 'dogbox', 'lm', None]: + popt, pcov = curve_fit(f, xdata, ydata, method=method) + assert_allclose(popt, [2., 2.]) + + assert_raises(ValueError, curve_fit, f, xdata, ydata, method='unknown') + + def test_full_output(self): + def f(x, a, b): + return a * np.exp(-b * x) + + xdata = np.linspace(0, 1, 11) + ydata = f(xdata, 2., 2.) + + for method in ['trf', 'dogbox', 'lm', None]: + popt, pcov, infodict, errmsg, ier = curve_fit( + f, xdata, ydata, method=method, full_output=True) + assert_allclose(popt, [2., 2.]) + assert "nfev" in infodict + assert "fvec" in infodict + if method == 'lm' or method is None: + assert "fjac" in infodict + assert "ipvt" in infodict + assert "qtf" in infodict + assert isinstance(errmsg, str) + assert ier in (1, 2, 3, 4) + + def test_bounds(self): + def f(x, a, b): + return a * np.exp(-b*x) + + xdata = np.linspace(0, 1, 11) + ydata = f(xdata, 2., 2.) + + # The minimum w/out bounds is at [2., 2.], + # and with bounds it's at [1.5, smth]. + lb = [1., 0] + ub = [1.5, 3.] + + # Test that both variants of the bounds yield the same result + bounds = (lb, ub) + bounds_class = Bounds(lb, ub) + for method in [None, 'trf', 'dogbox']: + popt, pcov = curve_fit(f, xdata, ydata, bounds=bounds, + method=method) + assert_allclose(popt[0], 1.5) + + popt_class, pcov_class = curve_fit(f, xdata, ydata, + bounds=bounds_class, + method=method) + assert_allclose(popt_class, popt) + + # With bounds, the starting estimate is feasible. + popt, pcov = curve_fit(f, xdata, ydata, method='trf', + bounds=([0., 0], [0.6, np.inf])) + assert_allclose(popt[0], 0.6) + + # method='lm' doesn't support bounds. + assert_raises(ValueError, curve_fit, f, xdata, ydata, bounds=bounds, + method='lm') + + def test_bounds_p0(self): + # This test is for issue #5719. The problem was that an initial guess + # was ignored when 'trf' or 'dogbox' methods were invoked. + def f(x, a): + return np.sin(x + a) + + xdata = np.linspace(-2*np.pi, 2*np.pi, 40) + ydata = np.sin(xdata) + bounds = (-3 * np.pi, 3 * np.pi) + for method in ['trf', 'dogbox']: + popt_1, _ = curve_fit(f, xdata, ydata, p0=2.1*np.pi) + popt_2, _ = curve_fit(f, xdata, ydata, p0=2.1*np.pi, + bounds=bounds, method=method) + + # If the initial guess is ignored, then popt_2 would be close 0. + assert_allclose(popt_1, popt_2) + + def test_jac(self): + # Test that Jacobian callable is handled correctly and + # weighted if sigma is provided. + def f(x, a, b): + return a * np.exp(-b*x) + + def jac(x, a, b): + e = np.exp(-b*x) + return np.vstack((e, -a * x * e)).T + + xdata = np.linspace(0, 1, 11) + ydata = f(xdata, 2., 2.) 
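+        # `jac` may be either a callable returning the (len(xdata), n_params)
+        # Jacobian of the model, or a finite-difference scheme name
+        # ('2-point', '3-point', 'cs') understood by the least_squares
+        # backend, as exercised below.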
+ + # Test numerical options for least_squares backend. + for method in ['trf', 'dogbox']: + for scheme in ['2-point', '3-point', 'cs']: + popt, pcov = curve_fit(f, xdata, ydata, jac=scheme, + method=method) + assert_allclose(popt, [2, 2]) + + # Test the analytic option. + for method in ['lm', 'trf', 'dogbox']: + popt, pcov = curve_fit(f, xdata, ydata, method=method, jac=jac) + assert_allclose(popt, [2, 2]) + + # Now add an outlier and provide sigma. + ydata[5] = 100 + sigma = np.ones(xdata.shape[0]) + sigma[5] = 200 + for method in ['lm', 'trf', 'dogbox']: + popt, pcov = curve_fit(f, xdata, ydata, sigma=sigma, method=method, + jac=jac) + # Still the optimization process is influenced somehow, + # have to set rtol=1e-3. + assert_allclose(popt, [2, 2], rtol=1e-3) + + def test_maxfev_and_bounds(self): + # gh-6340: with no bounds, curve_fit accepts parameter maxfev (via leastsq) + # but with bounds, the parameter is `max_nfev` (via least_squares) + x = np.arange(0, 10) + y = 2*x + popt1, _ = curve_fit(lambda x,p: p*x, x, y, bounds=(0, 3), maxfev=100) + popt2, _ = curve_fit(lambda x,p: p*x, x, y, bounds=(0, 3), max_nfev=100) + + assert_allclose(popt1, 2, atol=1e-14) + assert_allclose(popt2, 2, atol=1e-14) + + def test_curvefit_simplecovariance(self): + + def func(x, a, b): + return a * np.exp(-b*x) + + def jac(x, a, b): + e = np.exp(-b*x) + return np.vstack((e, -a * x * e)).T + + np.random.seed(0) + xdata = np.linspace(0, 4, 50) + y = func(xdata, 2.5, 1.3) + ydata = y + 0.2 * np.random.normal(size=len(xdata)) + + sigma = np.zeros(len(xdata)) + 0.2 + covar = np.diag(sigma**2) + + for jac1, jac2 in [(jac, jac), (None, None)]: + for absolute_sigma in [False, True]: + popt1, pcov1 = curve_fit(func, xdata, ydata, sigma=sigma, + jac=jac1, absolute_sigma=absolute_sigma) + popt2, pcov2 = curve_fit(func, xdata, ydata, sigma=covar, + jac=jac2, absolute_sigma=absolute_sigma) + + assert_allclose(popt1, popt2, atol=1e-14) + assert_allclose(pcov1, pcov2, atol=1e-14) + + def test_curvefit_covariance(self): + + def funcp(x, a, b): + rotn = np.array([[1./np.sqrt(2), -1./np.sqrt(2), 0], + [1./np.sqrt(2), 1./np.sqrt(2), 0], + [0, 0, 1.0]]) + return rotn.dot(a * np.exp(-b*x)) + + def jacp(x, a, b): + rotn = np.array([[1./np.sqrt(2), -1./np.sqrt(2), 0], + [1./np.sqrt(2), 1./np.sqrt(2), 0], + [0, 0, 1.0]]) + e = np.exp(-b*x) + return rotn.dot(np.vstack((e, -a * x * e)).T) + + def func(x, a, b): + return a * np.exp(-b*x) + + def jac(x, a, b): + e = np.exp(-b*x) + return np.vstack((e, -a * x * e)).T + + np.random.seed(0) + xdata = np.arange(1, 4) + y = func(xdata, 2.5, 1.0) + ydata = y + 0.2 * np.random.normal(size=len(xdata)) + sigma = np.zeros(len(xdata)) + 0.2 + covar = np.diag(sigma**2) + # Get a rotation matrix, and obtain ydatap = R ydata + # Chisq = ydata^T C^{-1} ydata + # = ydata^T R^T R C^{-1} R^T R ydata + # = ydatap^T Cp^{-1} ydatap + # Cp^{-1} = R C^{-1} R^T + # Cp = R C R^T, since R^-1 = R^T + rotn = np.array([[1./np.sqrt(2), -1./np.sqrt(2), 0], + [1./np.sqrt(2), 1./np.sqrt(2), 0], + [0, 0, 1.0]]) + ydatap = rotn.dot(ydata) + covarp = rotn.dot(covar).dot(rotn.T) + + for jac1, jac2 in [(jac, jacp), (None, None)]: + for absolute_sigma in [False, True]: + popt1, pcov1 = curve_fit(func, xdata, ydata, sigma=sigma, + jac=jac1, absolute_sigma=absolute_sigma) + popt2, pcov2 = curve_fit(funcp, xdata, ydatap, sigma=covarp, + jac=jac2, absolute_sigma=absolute_sigma) + + assert_allclose(popt1, popt2, rtol=1.2e-7, atol=1e-14) + assert_allclose(pcov1, pcov2, rtol=1.2e-7, atol=1e-14) + + 
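+    # A minimal doctest-style sketch of the basic behaviour the covariance
+    # tests above build on (the model and the values 2.5/1.3 here are
+    # illustrative choices, not taken from the test data):
+    #
+    #   >>> import numpy as np
+    #   >>> from scipy.optimize import curve_fit
+    #   >>> x = np.linspace(0, 4, 50)
+    #   >>> y = 2.5 * np.exp(-1.3 * x)
+    #   >>> popt, pcov = curve_fit(lambda x, a, b: a * np.exp(-b * x), x, y)
+    #   >>> bool(np.allclose(popt, [2.5, 1.3]))
+    #   True
+    #
+    # Passing `sigma` as a 1-D array of per-point errors or as the full
+    # covariance matrix `np.diag(sigma**2)` yields the same popt/pcov,
+    # which is what test_curvefit_simplecovariance checks above.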
@pytest.mark.parametrize("absolute_sigma", [False, True]) + def test_curvefit_scalar_sigma(self, absolute_sigma): + def func(x, a, b): + return a * x + b + + x, y = self.x, self.y + _, pcov1 = curve_fit(func, x, y, sigma=2, absolute_sigma=absolute_sigma) + # Explicitly building the sigma 1D array + _, pcov2 = curve_fit( + func, x, y, sigma=np.full_like(y, 2), absolute_sigma=absolute_sigma + ) + assert np.all(pcov1 == pcov2) + + def test_dtypes(self): + # regression test for gh-9581: curve_fit fails if x and y dtypes differ + x = np.arange(-3, 5) + y = 1.5*x + 3.0 + 0.5*np.sin(x) + + def func(x, a, b): + return a*x + b + + for method in ['lm', 'trf', 'dogbox']: + for dtx in [np.float32, np.float64]: + for dty in [np.float32, np.float64]: + x = x.astype(dtx) + y = y.astype(dty) + + with warnings.catch_warnings(): + warnings.simplefilter("error", OptimizeWarning) + p, cov = curve_fit(func, x, y, method=method) + + assert np.isfinite(cov).all() + assert not np.allclose(p, 1) # curve_fit's initial value + + def test_dtypes2(self): + # regression test for gh-7117: curve_fit fails if + # both inputs are float32 + def hyperbola(x, s_1, s_2, o_x, o_y, c): + b_2 = (s_1 + s_2) / 2 + b_1 = (s_2 - s_1) / 2 + return o_y + b_1*(x-o_x) + b_2*np.sqrt((x-o_x)**2 + c**2/4) + + min_fit = np.array([-3.0, 0.0, -2.0, -10.0, 0.0]) + max_fit = np.array([0.0, 3.0, 3.0, 0.0, 10.0]) + guess = np.array([-2.5/3.0, 4/3.0, 1.0, -4.0, 0.5]) + + params = [-2, .4, -1, -5, 9.5] + xdata = np.array([-32, -16, -8, 4, 4, 8, 16, 32]) + ydata = hyperbola(xdata, *params) + + # run optimization twice, with xdata being float32 and float64 + popt_64, _ = curve_fit(f=hyperbola, xdata=xdata, ydata=ydata, p0=guess, + bounds=(min_fit, max_fit)) + + xdata = xdata.astype(np.float32) + ydata = hyperbola(xdata, *params) + + popt_32, _ = curve_fit(f=hyperbola, xdata=xdata, ydata=ydata, p0=guess, + bounds=(min_fit, max_fit)) + + assert_allclose(popt_32, popt_64, atol=2e-5) + + def test_broadcast_y(self): + xdata = np.arange(10) + target = 4.7 * xdata ** 2 + 3.5 * xdata + np.random.rand(len(xdata)) + def fit_func(x, a, b): + return a * x ** 2 + b * x - target + for method in ['lm', 'trf', 'dogbox']: + popt0, pcov0 = curve_fit(fit_func, + xdata=xdata, + ydata=np.zeros_like(xdata), + method=method) + popt1, pcov1 = curve_fit(fit_func, + xdata=xdata, + ydata=0, + method=method) + assert_allclose(pcov0, pcov1) + + def test_args_in_kwargs(self): + # Ensure that `args` cannot be passed as keyword argument to `curve_fit` + + def func(x, a, b): + return a * x + b + + with assert_raises(ValueError): + curve_fit(func, + xdata=[1, 2, 3, 4], + ydata=[5, 9, 13, 17], + p0=[1], + args=(1,)) + + def test_data_point_number_validation(self): + def func(x, a, b, c, d, e): + return a * np.exp(-b * x) + c + d + e + + with assert_raises(TypeError, match="The number of func parameters="): + curve_fit(func, + xdata=[1, 2, 3, 4], + ydata=[5, 9, 13, 17]) + + @pytest.mark.filterwarnings('ignore::RuntimeWarning') + def test_gh4555(self): + # gh-4555 reported that covariance matrices returned by `leastsq` + # can have negative diagonal elements and eigenvalues. (In fact, + # they can also be asymmetric.) This shows up in the output of + # `scipy.optimize.curve_fit`. 
Check that it has been resolved.
+        def f(x, a, b, c, d, e):
+            return a*np.log(x + 1 + b) + c*np.log(x + 1 + d) + e
+
+        rng = np.random.default_rng(408113519974467917)
+        n = 100
+        x = np.arange(n)
+        y = np.linspace(2, 7, n) + rng.random(n)
+        p, cov = optimize.curve_fit(f, x, y, maxfev=100000)
+        assert np.all(np.diag(cov) > 0)
+        eigs = linalg.eigh(cov)[0]  # separate line for debugging
+        # some platforms see a small negative eigenvalue
+        assert np.all(eigs > -1e-2)
+        assert_allclose(cov, cov.T)
+
+    def test_gh4555b(self):
+        # check that PR gh-17247 did not significantly change covariance matrix
+        # for simple cases
+        rng = np.random.default_rng(408113519974467917)
+
+        def func(x, a, b, c):
+            return a * np.exp(-b * x) + c
+
+        xdata = np.linspace(0, 4, 50)
+        y = func(xdata, 2.5, 1.3, 0.5)
+        y_noise = 0.2 * rng.normal(size=xdata.size)
+        ydata = y + y_noise
+        _, res = curve_fit(func, xdata, ydata)
+        # reference from commit 1d80a2f254380d2b45733258ca42eb6b55c8755b
+        ref = [[+0.0158972536486215, 0.0069207183284242, -0.0007474400714749],
+               [+0.0069207183284242, 0.0205057958128679, +0.0053997711275403],
+               [-0.0007474400714749, 0.0053997711275403, +0.0027833930320877]]
+        # Linux_Python_38_32bit_full fails with default tolerance
+        assert_allclose(res, ref, 2e-7)
+
+    def test_gh13670(self):
+        # gh-13670 reported that `curve_fit` executes callables
+        # with the same values of the parameters at the beginning of
+        # optimization. Check that this has been resolved.
+
+        rng = np.random.default_rng(8250058582555444926)
+        x = np.linspace(0, 3, 101)
+        y = 2 * x + 1 + rng.normal(size=101) * 0.5
+
+        def line(x, *p):
+            assert not np.all(line.last_p == p)
+            line.last_p = p
+            return x * p[0] + p[1]
+
+        def jac(x, *p):
+            assert not np.all(jac.last_p == p)
+            jac.last_p = p
+            return np.array([x, np.ones_like(x)]).T
+
+        line.last_p = None
+        jac.last_p = None
+        p0 = np.array([1.0, 5.0])
+        curve_fit(line, x, y, p0, method='lm', jac=jac)
+
+
+class TestFixedPoint:
+
+    def test_scalar_trivial(self):
+        # f(x) = 2x; fixed point should be x=0
+        def func(x):
+            return 2.0*x
+        x0 = 1.0
+        x = fixed_point(func, x0)
+        assert_almost_equal(x, 0.0)
+
+    def test_scalar_basic1(self):
+        # f(x) = x**2; x0=1.05; fixed point should be x=1
+        def func(x):
+            return x**2
+        x0 = 1.05
+        x = fixed_point(func, x0)
+        assert_almost_equal(x, 1.0)
+
+    def test_scalar_basic2(self):
+        # f(x) = x**0.5; x0=1.05; fixed point should be x=1
+        def func(x):
+            return x**0.5
+        x0 = 1.05
+        x = fixed_point(func, x0)
+        assert_almost_equal(x, 1.0)
+
+    def test_array_trivial(self):
+        def func(x):
+            return 2.0*x
+        x0 = [0.3, 0.15]
+        with np.errstate(all='ignore'):
+            x = fixed_point(func, x0)
+        assert_almost_equal(x, [0.0, 0.0])
+
+    def test_array_basic1(self):
+        # f(x) = c * x**2; fixed point should be x=1/c
+        def func(x, c):
+            return c * x**2
+        c = array([0.75, 1.0, 1.25])
+        x0 = [1.1, 1.15, 0.9]
+        with np.errstate(all='ignore'):
+            x = fixed_point(func, x0, args=(c,))
+        assert_almost_equal(x, 1.0/c)
+
+    def test_array_basic2(self):
+        # f(x) = c * x**0.5; fixed point should be x=c**2
+        def func(x, c):
+            return c * x**0.5
+        c = array([0.75, 1.0, 1.25])
+        x0 = [0.8, 1.1, 1.1]
+        x = fixed_point(func, x0, args=(c,))
+        assert_almost_equal(x, c**2)
+
+    def test_lambertw(self):
+        # python-list/2010-December/594592.html
+        xxroot = fixed_point(lambda xx: np.exp(-2.0*xx)/2.0, 1.0,
+                             args=(), xtol=1e-12, maxiter=500)
+        assert_allclose(xxroot, np.exp(-2.0*xxroot)/2.0)
+        assert_allclose(xxroot, lambertw(1)/2)
+
+    def test_no_acceleration(self):
+        # github issue 5460
+        ks = 2
+
kl = 6 + m = 1.3 + n0 = 1.001 + i0 = ((m-1)/m)*(kl/ks/m)**(1/(m-1)) + + def func(n): + return np.log(kl/ks/n) / np.log(i0*n/(n - 1)) + 1 + + n = fixed_point(func, n0, method='iteration') + assert_allclose(n, m) diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_optimize.py b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_optimize.py new file mode 100644 index 0000000000000000000000000000000000000000..84a3d8115aa067d46a2e10d8581165dc838c4d6b --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_optimize.py @@ -0,0 +1,3163 @@ +""" +Unit tests for optimization routines from optimize.py + +Authors: + Ed Schofield, Nov 2005 + Andrew Straw, April 2008 + +To run it in its simplest form:: + nosetests test_optimize.py + +""" +import itertools +import platform +import numpy as np +from numpy.testing import (assert_allclose, assert_equal, + assert_almost_equal, + assert_no_warnings, assert_warns, + assert_array_less, suppress_warnings) +import pytest +from pytest import raises as assert_raises + +from scipy import optimize +from scipy.optimize._minimize import Bounds, NonlinearConstraint +from scipy.optimize._minimize import (MINIMIZE_METHODS, + MINIMIZE_METHODS_NEW_CB, + MINIMIZE_SCALAR_METHODS) +from scipy.optimize._linprog import LINPROG_METHODS +from scipy.optimize._root import ROOT_METHODS +from scipy.optimize._root_scalar import ROOT_SCALAR_METHODS +from scipy.optimize._qap import QUADRATIC_ASSIGNMENT_METHODS +from scipy.optimize._differentiable_functions import ScalarFunction, FD_METHODS +from scipy.optimize._optimize import MemoizeJac, show_options, OptimizeResult +from scipy.optimize import rosen, rosen_der, rosen_hess + +from scipy.sparse import (coo_matrix, csc_matrix, csr_matrix, coo_array, + csr_array, csc_array) + +def test_check_grad(): + # Verify if check_grad is able to estimate the derivative of the + # expit (logistic sigmoid) function. + + def expit(x): + return 1 / (1 + np.exp(-x)) + + def der_expit(x): + return np.exp(-x) / (1 + np.exp(-x))**2 + + x0 = np.array([1.5]) + + r = optimize.check_grad(expit, der_expit, x0) + assert_almost_equal(r, 0) + r = optimize.check_grad(expit, der_expit, x0, + direction='random', seed=1234) + assert_almost_equal(r, 0) + + r = optimize.check_grad(expit, der_expit, x0, epsilon=1e-6) + assert_almost_equal(r, 0) + r = optimize.check_grad(expit, der_expit, x0, epsilon=1e-6, + direction='random', seed=1234) + assert_almost_equal(r, 0) + + # Check if the epsilon parameter is being considered. + r = abs(optimize.check_grad(expit, der_expit, x0, epsilon=1e-1) - 0) + assert r > 1e-7 + r = abs(optimize.check_grad(expit, der_expit, x0, epsilon=1e-1, + direction='random', seed=1234) - 0) + assert r > 1e-7 + + def x_sinx(x): + return (x*np.sin(x)).sum() + + def der_x_sinx(x): + return np.sin(x) + x*np.cos(x) + + x0 = np.arange(0, 2, 0.2) + + r = optimize.check_grad(x_sinx, der_x_sinx, x0, + direction='random', seed=1234) + assert_almost_equal(r, 0) + + assert_raises(ValueError, optimize.check_grad, + x_sinx, der_x_sinx, x0, + direction='random_projection', seed=1234) + + # checking can be done for derivatives of vector valued functions + r = optimize.check_grad(himmelblau_grad, himmelblau_hess, himmelblau_x0, + direction='all', seed=1234) + assert r < 5e-7 + + +class CheckOptimize: + """ Base test case for a simple constrained entropy maximization problem + (the machine translation example of Berger et al in + Computational Linguistics, vol 22, num 1, pp 39--72, 1996.) 
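+
+    The objective implemented in ``func`` below is the convex dual
+    f(x) = log(sum(exp(F @ x))) - K @ x, whose gradient is
+    F.T @ p - K, with p = exp(F @ x - logZ) the fitted distribution.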
+ """ + + def setup_method(self): + self.F = np.array([[1, 1, 1], + [1, 1, 0], + [1, 0, 1], + [1, 0, 0], + [1, 0, 0]]) + self.K = np.array([1., 0.3, 0.5]) + self.startparams = np.zeros(3, np.float64) + self.solution = np.array([0., -0.524869316, 0.487525860]) + self.maxiter = 1000 + self.funccalls = 0 + self.gradcalls = 0 + self.trace = [] + + def func(self, x): + self.funccalls += 1 + if self.funccalls > 6000: + raise RuntimeError("too many iterations in optimization routine") + log_pdot = np.dot(self.F, x) + logZ = np.log(sum(np.exp(log_pdot))) + f = logZ - np.dot(self.K, x) + self.trace.append(np.copy(x)) + return f + + def grad(self, x): + self.gradcalls += 1 + log_pdot = np.dot(self.F, x) + logZ = np.log(sum(np.exp(log_pdot))) + p = np.exp(log_pdot - logZ) + return np.dot(self.F.transpose(), p) - self.K + + def hess(self, x): + log_pdot = np.dot(self.F, x) + logZ = np.log(sum(np.exp(log_pdot))) + p = np.exp(log_pdot - logZ) + return np.dot(self.F.T, + np.dot(np.diag(p), self.F - np.dot(self.F.T, p))) + + def hessp(self, x, p): + return np.dot(self.hess(x), p) + + +class CheckOptimizeParameterized(CheckOptimize): + + def test_cg(self): + # conjugate gradient optimization routine + if self.use_wrapper: + opts = {'maxiter': self.maxiter, 'disp': self.disp, + 'return_all': False} + res = optimize.minimize(self.func, self.startparams, args=(), + method='CG', jac=self.grad, + options=opts) + params, fopt, func_calls, grad_calls, warnflag = \ + res['x'], res['fun'], res['nfev'], res['njev'], res['status'] + else: + retval = optimize.fmin_cg(self.func, self.startparams, + self.grad, (), maxiter=self.maxiter, + full_output=True, disp=self.disp, + retall=False) + (params, fopt, func_calls, grad_calls, warnflag) = retval + + assert_allclose(self.func(params), self.func(self.solution), + atol=1e-6) + + # Ensure that function call counts are 'known good'; these are from + # SciPy 0.7.0. Don't allow them to increase. + assert self.funccalls == 9, self.funccalls + assert self.gradcalls == 7, self.gradcalls + + # Ensure that the function behaves the same; this is from SciPy 0.7.0 + assert_allclose(self.trace[2:4], + [[0, -0.5, 0.5], + [0, -5.05700028e-01, 4.95985862e-01]], + atol=1e-14, rtol=1e-7) + + def test_cg_cornercase(self): + def f(r): + return 2.5 * (1 - np.exp(-1.5*(r - 0.5)))**2 + + # Check several initial guesses. (Too far away from the + # minimum, the function ends up in the flat region of exp.) + for x0 in np.linspace(-0.75, 3, 71): + sol = optimize.minimize(f, [x0], method='CG') + assert sol.success + assert_allclose(sol.x, [0.5], rtol=1e-5) + + def test_bfgs(self): + # Broyden-Fletcher-Goldfarb-Shanno optimization routine + if self.use_wrapper: + opts = {'maxiter': self.maxiter, 'disp': self.disp, + 'return_all': False} + res = optimize.minimize(self.func, self.startparams, + jac=self.grad, method='BFGS', args=(), + options=opts) + + params, fopt, gopt, Hopt, func_calls, grad_calls, warnflag = ( + res['x'], res['fun'], res['jac'], res['hess_inv'], + res['nfev'], res['njev'], res['status']) + else: + retval = optimize.fmin_bfgs(self.func, self.startparams, self.grad, + args=(), maxiter=self.maxiter, + full_output=True, disp=self.disp, + retall=False) + (params, fopt, gopt, Hopt, + func_calls, grad_calls, warnflag) = retval + + assert_allclose(self.func(params), self.func(self.solution), + atol=1e-6) + + # Ensure that function call counts are 'known good'; these are from + # SciPy 0.7.0. Don't allow them to increase. 
+ assert self.funccalls == 10, self.funccalls + assert self.gradcalls == 8, self.gradcalls + + # Ensure that the function behaves the same; this is from SciPy 0.7.0 + assert_allclose(self.trace[6:8], + [[0, -5.25060743e-01, 4.87748473e-01], + [0, -5.24885582e-01, 4.87530347e-01]], + atol=1e-14, rtol=1e-7) + + def test_bfgs_hess_inv0_neg(self): + # Ensure that BFGS does not accept neg. def. initial inverse + # Hessian estimate. + with pytest.raises(ValueError, match="'hess_inv0' matrix isn't " + "positive definite."): + x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2]) + opts = {'disp': self.disp, 'hess_inv0': -np.eye(5)} + optimize.minimize(optimize.rosen, x0=x0, method='BFGS', args=(), + options=opts) + + def test_bfgs_hess_inv0_semipos(self): + # Ensure that BFGS does not accept semi pos. def. initial inverse + # Hessian estimate. + with pytest.raises(ValueError, match="'hess_inv0' matrix isn't " + "positive definite."): + x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2]) + hess_inv0 = np.eye(5) + hess_inv0[0, 0] = 0 + opts = {'disp': self.disp, 'hess_inv0': hess_inv0} + optimize.minimize(optimize.rosen, x0=x0, method='BFGS', args=(), + options=opts) + + def test_bfgs_hess_inv0_sanity(self): + # Ensure that BFGS handles `hess_inv0` parameter correctly. + fun = optimize.rosen + x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2]) + opts = {'disp': self.disp, 'hess_inv0': 1e-2 * np.eye(5)} + res = optimize.minimize(fun, x0=x0, method='BFGS', args=(), + options=opts) + res_true = optimize.minimize(fun, x0=x0, method='BFGS', args=(), + options={'disp': self.disp}) + assert_allclose(res.fun, res_true.fun, atol=1e-6) + + @pytest.mark.filterwarnings('ignore::UserWarning') + def test_bfgs_infinite(self): + # Test corner case where -Inf is the minimum. See gh-2019. + def func(x): + return -np.e ** (-x) + def fprime(x): + return -func(x) + x0 = [0] + with np.errstate(over='ignore'): + if self.use_wrapper: + opts = {'disp': self.disp} + x = optimize.minimize(func, x0, jac=fprime, method='BFGS', + args=(), options=opts)['x'] + else: + x = optimize.fmin_bfgs(func, x0, fprime, disp=self.disp) + assert not np.isfinite(func(x)) + + def test_bfgs_xrtol(self): + # test for #17345 to test xrtol parameter + x0 = [1.3, 0.7, 0.8, 1.9, 1.2] + res = optimize.minimize(optimize.rosen, + x0, method='bfgs', options={'xrtol': 1e-3}) + ref = optimize.minimize(optimize.rosen, + x0, method='bfgs', options={'gtol': 1e-3}) + assert res.nit != ref.nit + + def test_bfgs_c1(self): + # test for #18977 insufficiently low value of c1 leads to precision loss + # for poor starting parameters + x0 = [10.3, 20.7, 10.8, 1.9, -1.2] + res_c1_small = optimize.minimize(optimize.rosen, + x0, method='bfgs', options={'c1': 1e-8}) + res_c1_big = optimize.minimize(optimize.rosen, + x0, method='bfgs', options={'c1': 1e-1}) + + assert res_c1_small.nfev > res_c1_big.nfev + + def test_bfgs_c2(self): + # test that modification of c2 parameter + # results in different number of iterations + x0 = [1.3, 0.7, 0.8, 1.9, 1.2] + res_default = optimize.minimize(optimize.rosen, + x0, method='bfgs', options={'c2': .9}) + res_mod = optimize.minimize(optimize.rosen, + x0, method='bfgs', options={'c2': 1e-2}) + assert res_default.nit > res_mod.nit + + @pytest.mark.parametrize(["c1", "c2"], [[0.5, 2], + [-0.1, 0.1], + [0.2, 0.1]]) + def test_invalid_c1_c2(self, c1, c2): + with pytest.raises(ValueError, match="'c1' and 'c2'"): + x0 = [10.3, 20.7, 10.8, 1.9, -1.2] + optimize.minimize(optimize.rosen, x0, method='cg', + options={'c1': c1, 'c2': c2}) + + def test_powell(self): + # Powell 
+        # Powell (direction set) optimization routine
+        if self.use_wrapper:
+            opts = {'maxiter': self.maxiter, 'disp': self.disp,
+                    'return_all': False}
+            res = optimize.minimize(self.func, self.startparams, args=(),
+                                    method='Powell', options=opts)
+            params, fopt, direc, numiter, func_calls, warnflag = (
+                res['x'], res['fun'], res['direc'], res['nit'],
+                res['nfev'], res['status'])
+        else:
+            retval = optimize.fmin_powell(self.func, self.startparams,
+                                          args=(), maxiter=self.maxiter,
+                                          full_output=True, disp=self.disp,
+                                          retall=False)
+            (params, fopt, direc, numiter, func_calls, warnflag) = retval
+
+        assert_allclose(self.func(params), self.func(self.solution),
+                        atol=1e-6)
+        # params[0] does not affect the objective function
+        assert_allclose(params[1:], self.solution[1:], atol=5e-6)
+
+        # Ensure that function call counts are 'known good'; these are from
+        # SciPy 0.7.0. Don't allow them to increase.
+        #
+        # However, some leeway must be added: the exact evaluation count is
+        # sensitive to numerical error, floating-point computations are not
+        # bit-for-bit reproducible across machines, and factors such as the
+        # use of MKL and data alignment change the rounding error.
+        #
+        assert self.funccalls <= 116 + 20, self.funccalls
+        assert self.gradcalls == 0, self.gradcalls
+
+    @pytest.mark.xfail(reason="This part of test_powell fails on some "
+                              "platforms, but the solution returned by "
+                              "powell is still valid.")
+    def test_powell_gh14014(self):
+        # This part of test_powell started failing on some CI platforms;
+        # see gh-14014. Since the solution is still correct and the comments
+        # in test_powell suggest that small differences in the bits are known
+        # to change the "trace" of the solution, it seems safe to xfail it to
+        # get CI green now and investigate later.
+
+        # Powell (direction set) optimization routine
+        if self.use_wrapper:
+            opts = {'maxiter': self.maxiter, 'disp': self.disp,
+                    'return_all': False}
+            res = optimize.minimize(self.func, self.startparams, args=(),
+                                    method='Powell', options=opts)
+            params, fopt, direc, numiter, func_calls, warnflag = (
+                res['x'], res['fun'], res['direc'], res['nit'],
+                res['nfev'], res['status'])
+        else:
+            retval = optimize.fmin_powell(self.func, self.startparams,
+                                          args=(), maxiter=self.maxiter,
+                                          full_output=True, disp=self.disp,
+                                          retall=False)
+            (params, fopt, direc, numiter, func_calls, warnflag) = retval
+
+        # Ensure that the function behaves the same; this is from SciPy 0.7.0
+        assert_allclose(self.trace[34:39],
+                        [[0.72949016, -0.44156936, 0.47100962],
+                         [0.72949016, -0.44156936, 0.48052496],
+                         [1.45898031, -0.88313872, 0.95153458],
+                         [0.72949016, -0.44156936, 0.47576729],
+                         [1.72949016, -0.44156936, 0.47576729]],
+                        atol=1e-14, rtol=1e-7)
+
+    def test_powell_bounded(self):
+        # Powell (direction set) optimization routine
+        # same as test_powell above, but with bounds
+        bounds = [(-np.pi, np.pi) for _ in self.startparams]
+        if self.use_wrapper:
+            opts = {'maxiter': self.maxiter, 'disp': self.disp,
+                    'return_all': False}
+            res = optimize.minimize(self.func, self.startparams, args=(),
+                                    bounds=bounds,
+                                    method='Powell', options=opts)
+            params, func_calls = (res['x'], res['nfev'])
+
+            assert func_calls == self.funccalls
+            assert_allclose(self.func(params), self.func(self.solution),
+                            atol=1e-6, rtol=1e-5)
+
+            # The exact evaluation count is sensitive to numerical error;
+            # floating-point computations are not bit-for-bit reproducible
+            # across machines, and factors such as the use of MKL and data
+            # alignment change the rounding error.
+            # It takes 155 calls on my machine, but we can add the same +20
+            # margin as is used in `test_powell`
+            assert self.funccalls <= 155 + 20
+            assert self.gradcalls == 0
+
+    def test_neldermead(self):
+        # Nelder-Mead simplex algorithm
+        if self.use_wrapper:
+            opts = {'maxiter': self.maxiter, 'disp': self.disp,
+                    'return_all': False}
+            res = optimize.minimize(self.func, self.startparams, args=(),
+                                    method='Nelder-mead', options=opts)
+            params, fopt, numiter, func_calls, warnflag = (
+                res['x'], res['fun'], res['nit'], res['nfev'],
+                res['status'])
+        else:
+            retval = optimize.fmin(self.func, self.startparams,
+                                   args=(), maxiter=self.maxiter,
+                                   full_output=True, disp=self.disp,
+                                   retall=False)
+            (params, fopt, numiter, func_calls, warnflag) = retval
+
+        assert_allclose(self.func(params), self.func(self.solution),
+                        atol=1e-6)
+
+        # Ensure that function call counts are 'known good'; these are from
+        # SciPy 0.7.0. Don't allow them to increase.
+        assert self.funccalls == 167, self.funccalls
+        assert self.gradcalls == 0, self.gradcalls
+
+        # Ensure that the function behaves the same; this is from SciPy 0.7.0
+        assert_allclose(self.trace[76:78],
+                        [[0.1928968, -0.62780447, 0.35166118],
+                         [0.19572515, -0.63648426, 0.35838135]],
+                        atol=1e-14, rtol=1e-7)
+
+    def test_neldermead_initial_simplex(self):
+        # Nelder-Mead simplex algorithm
+        simplex = np.zeros((4, 3))
+        simplex[...] = self.startparams
+        for j in range(3):
+            simplex[j+1, j] += 0.1
+
+        if self.use_wrapper:
+            opts = {'maxiter': self.maxiter, 'disp': False,
+                    'return_all': True, 'initial_simplex': simplex}
+            res = optimize.minimize(self.func, self.startparams, args=(),
+                                    method='Nelder-mead', options=opts)
+            params, fopt, numiter, func_calls, warnflag = (res['x'],
+                                                           res['fun'],
+                                                           res['nit'],
+                                                           res['nfev'],
+                                                           res['status'])
+            assert_allclose(res['allvecs'][0], simplex[0])
+        else:
+            retval = optimize.fmin(self.func, self.startparams,
+                                   args=(), maxiter=self.maxiter,
+                                   full_output=True, disp=False, retall=False,
+                                   initial_simplex=simplex)
+
+            (params, fopt, numiter, func_calls, warnflag) = retval
+
+        assert_allclose(self.func(params), self.func(self.solution),
+                        atol=1e-6)
+
+        # Ensure that function call counts are 'known good'; these are from
+        # SciPy 0.17.0. Don't allow them to increase.
+        assert self.funccalls == 100, self.funccalls
+        assert self.gradcalls == 0, self.gradcalls
+
+        # Ensure that the function behaves the same; this is from SciPy 0.15.0
+        assert_allclose(self.trace[50:52],
+                        [[0.14687474, -0.5103282, 0.48252111],
+                         [0.14474003, -0.5282084, 0.48743951]],
+                        atol=1e-14, rtol=1e-7)
+
+    def test_neldermead_initial_simplex_bad(self):
+        # Check that it fails when given bad simplices
+        bad_simplices = []
+
+        simplex = np.zeros((3, 2))
+        simplex[...]
= self.startparams[:2] + for j in range(2): + simplex[j+1, j] += 0.1 + bad_simplices.append(simplex) + + simplex = np.zeros((3, 3)) + bad_simplices.append(simplex) + + for simplex in bad_simplices: + if self.use_wrapper: + opts = {'maxiter': self.maxiter, 'disp': False, + 'return_all': False, 'initial_simplex': simplex} + assert_raises(ValueError, + optimize.minimize, + self.func, + self.startparams, + args=(), + method='Nelder-mead', + options=opts) + else: + assert_raises(ValueError, optimize.fmin, + self.func, self.startparams, + args=(), maxiter=self.maxiter, + full_output=True, disp=False, retall=False, + initial_simplex=simplex) + + def test_neldermead_x0_ub(self): + # checks whether minimisation occurs correctly for entries where + # x0 == ub + # gh19991 + def quad(x): + return np.sum(x**2) + + res = optimize.minimize( + quad, + [1], + bounds=[(0, 1.)], + method='nelder-mead' + ) + assert_allclose(res.x, [0]) + + res = optimize.minimize( + quad, + [1, 2], + bounds=[(0, 1.), (1, 3.)], + method='nelder-mead' + ) + assert_allclose(res.x, [0, 1]) + + def test_ncg_negative_maxiter(self): + # Regression test for gh-8241 + opts = {'maxiter': -1} + result = optimize.minimize(self.func, self.startparams, + method='Newton-CG', jac=self.grad, + args=(), options=opts) + assert result.status == 1 + + def test_ncg_zero_xtol(self): + # Regression test for gh-20214 + def cosine(x): + return np.cos(x[0]) + + def jac(x): + return -np.sin(x[0]) + + x0 = [0.1] + xtol = 0 + result = optimize.minimize(cosine, + x0=x0, + jac=jac, + method="newton-cg", + options=dict(xtol=xtol)) + assert result.status == 0 + assert_almost_equal(result.x[0], np.pi) + + def test_ncg(self): + # line-search Newton conjugate gradient optimization routine + if self.use_wrapper: + opts = {'maxiter': self.maxiter, 'disp': self.disp, + 'return_all': False} + retval = optimize.minimize(self.func, self.startparams, + method='Newton-CG', jac=self.grad, + args=(), options=opts)['x'] + else: + retval = optimize.fmin_ncg(self.func, self.startparams, self.grad, + args=(), maxiter=self.maxiter, + full_output=False, disp=self.disp, + retall=False) + + params = retval + + assert_allclose(self.func(params), self.func(self.solution), + atol=1e-6) + + # Ensure that function call counts are 'known good'; these are from + # SciPy 0.7.0. Don't allow them to increase. + assert self.funccalls == 7, self.funccalls + assert self.gradcalls <= 22, self.gradcalls # 0.13.0 + # assert self.gradcalls <= 18, self.gradcalls # 0.9.0 + # assert self.gradcalls == 18, self.gradcalls # 0.8.0 + # assert self.gradcalls == 22, self.gradcalls # 0.7.0 + + # Ensure that the function behaves the same; this is from SciPy 0.7.0 + assert_allclose(self.trace[3:5], + [[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01], + [-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]], + atol=1e-6, rtol=1e-7) + + def test_ncg_hess(self): + # Newton conjugate gradient with Hessian + if self.use_wrapper: + opts = {'maxiter': self.maxiter, 'disp': self.disp, + 'return_all': False} + retval = optimize.minimize(self.func, self.startparams, + method='Newton-CG', jac=self.grad, + hess=self.hess, + args=(), options=opts)['x'] + else: + retval = optimize.fmin_ncg(self.func, self.startparams, self.grad, + fhess=self.hess, + args=(), maxiter=self.maxiter, + full_output=False, disp=self.disp, + retall=False) + + params = retval + + assert_allclose(self.func(params), self.func(self.solution), + atol=1e-6) + + # Ensure that function call counts are 'known good'; these are from + # SciPy 0.7.0. 
+        # Don't allow them to increase.
+        assert self.funccalls <= 7, self.funccalls  # gh10673
+        assert self.gradcalls <= 18, self.gradcalls  # 0.9.0
+        # assert self.gradcalls == 18, self.gradcalls  # 0.8.0
+        # assert self.gradcalls == 22, self.gradcalls  # 0.7.0
+
+        # Ensure that the function behaves the same; this is from SciPy 0.7.0
+        assert_allclose(self.trace[3:5],
+                        [[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01],
+                         [-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]],
+                        atol=1e-6, rtol=1e-7)
+
+    def test_ncg_hessp(self):
+        # Newton conjugate gradient with Hessian times a vector p.
+        if self.use_wrapper:
+            opts = {'maxiter': self.maxiter, 'disp': self.disp,
+                    'return_all': False}
+            retval = optimize.minimize(self.func, self.startparams,
+                                       method='Newton-CG', jac=self.grad,
+                                       hessp=self.hessp,
+                                       args=(), options=opts)['x']
+        else:
+            retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
+                                       fhess_p=self.hessp,
+                                       args=(), maxiter=self.maxiter,
+                                       full_output=False, disp=self.disp,
+                                       retall=False)
+
+        params = retval
+
+        assert_allclose(self.func(params), self.func(self.solution),
+                        atol=1e-6)
+
+        # Ensure that function call counts are 'known good'; these are from
+        # SciPy 0.7.0. Don't allow them to increase.
+        assert self.funccalls <= 7, self.funccalls  # gh10673
+        assert self.gradcalls <= 18, self.gradcalls  # 0.9.0
+        # assert self.gradcalls == 18, self.gradcalls  # 0.8.0
+        # assert self.gradcalls == 22, self.gradcalls  # 0.7.0
+
+        # Ensure that the function behaves the same; this is from SciPy 0.7.0
+        assert_allclose(self.trace[3:5],
+                        [[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01],
+                         [-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]],
+                        atol=1e-6, rtol=1e-7)
+
+
+def test_maxfev_test():
+    rng = np.random.default_rng(271707100830272976862395227613146332411)
+
+    def cost(x):
+        return rng.random(1) * 1000  # a problem that never converges
+
+    for imaxfev in [1, 10, 50]:
+        # "TNC" and "L-BFGS-B" also support a maximum number of function
+        # evaluations, but they may exceed the limit when evaluating
+        # gradients by numerical differentiation. See the discussion in
+        # PR #14805.
+        for method in ['Powell', 'Nelder-Mead']:
+            result = optimize.minimize(cost, rng.random(10),
+                                       method=method,
+                                       options={'maxfev': imaxfev})
+            assert result["nfev"] == imaxfev
+
+
+def test_wrap_scalar_function_with_validation():
+
+    def func_(x):
+        return x
+
+    fcalls, func = optimize._optimize.\
+        _wrap_scalar_function_maxfun_validation(func_, np.asarray(1), 5)
+
+    for i in range(5):
+        func(np.asarray(i))
+        assert fcalls[0] == i+1
+
+    msg = "Too many function calls"
+    with assert_raises(optimize._optimize._MaxFuncCallError, match=msg):
+        func(np.asarray(i))  # exceeded maximum function call
+
+    fcalls, func = optimize._optimize.\
+        _wrap_scalar_function_maxfun_validation(func_, np.asarray(1), 5)
+
+    msg = "The user-provided objective function must return a scalar value."
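+    # a vector-valued return (below, via func(np.array([1, 1]))) must now
+    # trigger the validation error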
+    with assert_raises(ValueError, match=msg):
+        func(np.array([1, 1]))
+
+
+def test_obj_func_returns_scalar():
+    match = ("The user-provided "
+             "objective function must "
+             "return a scalar value.")
+    with assert_raises(ValueError, match=match):
+        optimize.minimize(lambda x: x, np.array([1, 1]), method='BFGS')
+
+
+def test_neldermead_iteration_num():
+    x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2])
+    res = optimize._minimize._minimize_neldermead(optimize.rosen, x0,
+                                                  xatol=1e-8)
+    assert res.nit <= 339
+
+
+def test_neldermead_respect_fp():
+    # Nelder-Mead should respect the fp type of the input and of the
+    # objective function
+    x0 = np.array([5.0, 4.0]).astype(np.float32)
+
+    def rosen_(x):
+        assert x.dtype == np.float32
+        return optimize.rosen(x)
+
+    optimize.minimize(rosen_, x0, method='Nelder-Mead')
+
+
+def test_neldermead_xatol_fatol():
+    # gh4484
+    # test we can call with fatol, xatol specified
+    def func(x):
+        return x[0] ** 2 + x[1] ** 2
+
+    optimize._minimize._minimize_neldermead(func, [1, 1], maxiter=2,
+                                            xatol=1e-3, fatol=1e-3)
+
+
+def test_neldermead_adaptive():
+    def func(x):
+        return np.sum(x ** 2)
+
+    p0 = [0.15746215, 0.48087031, 0.44519198, 0.4223638, 0.61505159,
+          0.32308456, 0.9692297, 0.4471682, 0.77411992, 0.80441652,
+          0.35994957, 0.75487856, 0.99973421, 0.65063887, 0.09626474]
+
+    res = optimize.minimize(func, p0, method='Nelder-Mead')
+    assert_equal(res.success, False)
+
+    res = optimize.minimize(func, p0, method='Nelder-Mead',
+                            options={'adaptive': True})
+    assert_equal(res.success, True)
+
+
+def test_bounded_powell_outsidebounds():
+    # With the bounded Powell method, if you start outside the bounds the
+    # final solution should still be within the bounds (provided that the
+    # user doesn't make a bad choice for the `direc` argument).
+    def func(x):
+        return np.sum(x ** 2)
+
+    bounds = (-1, 1), (-1, 1), (-1, 1)
+    x0 = [-4, .5, -.8]
+
+    # we're starting outside the bounds, so we should get a warning
+    with assert_warns(optimize.OptimizeWarning):
+        res = optimize.minimize(func, x0, bounds=bounds, method="Powell")
+    assert_allclose(res.x, np.array([0.] * len(x0)), atol=1e-6)
+    assert_equal(res.success, True)
+    assert_equal(res.status, 0)
+
+    # However, now if we change the `direc` argument such that the
+    # set of vectors does not span the parameter space, then we may
+    # not end up back within the bounds. Here we see that the first
+    # parameter cannot be updated!
+    direc = [[0, 0, 0], [0, 1, 0], [0, 0, 1]]
+    # we're starting outside the bounds, so we should get a warning
+    with assert_warns(optimize.OptimizeWarning):
+        res = optimize.minimize(func, x0,
+                                bounds=bounds, method="Powell",
+                                options={'direc': direc})
+    assert_allclose(res.x, np.array([-4., 0, 0]), atol=1e-6)
+    assert_equal(res.success, False)
+    assert_equal(res.status, 4)
+
+
+def test_bounded_powell_vs_powell():
+    # here we test an example where the bounded Powell method
+    # will return a different result than the standard Powell
+    # method.
+
+    # first we test a simple example where the minimum is at
+    # the origin and the minimum that is within the bounds is
+    # larger than the minimum at the origin.
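+    # (For func(x) = sum(x**2) the bounded minimizer is just the projection
+    # of the origin onto the box; illustrative check, ours:
+    #     np.clip(0, [-5, -10, 1, -4, -15.9], [-1, -0.1, 9.2, 7.6, -2])
+    # gives the reference point `p` used below.)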
+    def func(x):
+        return np.sum(x ** 2)
+
+    bounds = (-5, -1), (-10, -0.1), (1, 9.2), (-4, 7.6), (-15.9, -2)
+    x0 = [-2.1, -5.2, 1.9, 0, -2]
+
+    options = {'ftol': 1e-10, 'xtol': 1e-10}
+
+    res_powell = optimize.minimize(func, x0, method="Powell", options=options)
+    assert_allclose(res_powell.x, 0., atol=1e-6)
+    assert_allclose(res_powell.fun, 0., atol=1e-6)
+
+    res_bounded_powell = optimize.minimize(func, x0, options=options,
+                                           bounds=bounds,
+                                           method="Powell")
+    p = np.array([-1, -0.1, 1, 0, -2])
+    assert_allclose(res_bounded_powell.x, p, atol=1e-6)
+    assert_allclose(res_bounded_powell.fun, func(p), atol=1e-6)
+
+    # now we test bounded Powell but with a mix of inf bounds.
+    bounds = (None, -1), (-np.inf, -.1), (1, np.inf), (-4, None), (-15.9, -2)
+    res_bounded_powell = optimize.minimize(func, x0, options=options,
+                                           bounds=bounds,
+                                           method="Powell")
+    p = np.array([-1, -0.1, 1, 0, -2])
+    assert_allclose(res_bounded_powell.x, p, atol=1e-6)
+    assert_allclose(res_bounded_powell.fun, func(p), atol=1e-6)
+
+    # next we test an example where the global minimum is within
+    # the bounds, but the bounded Powell method performs better
+    # than the standard Powell method.
+    def func(x):
+        t = np.sin(-x[0]) * np.cos(x[1]) * np.sin(-x[0] * x[1]) * np.cos(x[1])
+        t -= np.cos(np.sin(x[1] * x[2]) * np.cos(x[2]))
+        return t**2
+
+    bounds = [(-2, 5)] * 3
+    x0 = [-0.5, -0.5, -0.5]
+
+    res_powell = optimize.minimize(func, x0, method="Powell")
+    res_bounded_powell = optimize.minimize(func, x0,
+                                           bounds=bounds,
+                                           method="Powell")
+    assert_allclose(res_powell.fun, 0.007136253919761627, atol=1e-6)
+    assert_allclose(res_bounded_powell.fun, 0, atol=1e-6)
+
+    # next we test the previous example, but we provide Powell with
+    # (-inf, inf) bounds and compare it to providing Powell with no
+    # bounds. They should end up the same.
+    bounds = [(-np.inf, np.inf)] * 3
+
+    res_bounded_powell = optimize.minimize(func, x0,
+                                           bounds=bounds,
+                                           method="Powell")
+    assert_allclose(res_powell.fun, res_bounded_powell.fun, atol=1e-6)
+    assert_allclose(res_powell.nfev, res_bounded_powell.nfev, atol=1e-6)
+    assert_allclose(res_powell.x, res_bounded_powell.x, atol=1e-6)
+
+    # now test when x0 starts outside of the bounds.
+    x0 = [45.46254415, -26.52351498, 31.74830248]
+    bounds = [(-2, 5)] * 3
+    # we're starting outside the bounds, so we should get a warning
+    with assert_warns(optimize.OptimizeWarning):
+        res_bounded_powell = optimize.minimize(func, x0,
+                                               bounds=bounds,
+                                               method="Powell")
+    assert_allclose(res_bounded_powell.fun, 0, atol=1e-6)
+
+
+def test_onesided_bounded_powell_stability():
+    # When the Powell method is bounded on only one side, a
+    # np.tan transform is done in order to convert it into a
+    # completely bounded problem. Here we do some simple tests
+    # of one-sided bounded Powell where the optimal solutions
+    # are large, to test the stability of the transformation.
+    kwargs = {'method': 'Powell',
+              'bounds': [(-np.inf, 1e6)] * 3,
+              'options': {'ftol': 1e-8, 'xtol': 1e-8}}
+    x0 = [1, 1, 1]
+
+    # df/dx is constant.
+    def f(x):
+        return -np.sum(x)
+
+    res = optimize.minimize(f, x0, **kwargs)
+    assert_allclose(res.fun, -3e6, atol=1e-4)
+
+    # df/dx gets smaller and smaller.
+    def f(x):
+        return -np.abs(np.sum(x)) ** (0.1) * (1 if np.all(x > 0) else -1)
+
+    res = optimize.minimize(f, x0, **kwargs)
+    assert_allclose(res.fun, -(3e6) ** (0.1))
+
+    # df/dx gets larger and larger.
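+    # (Illustration of the idea, ours; not necessarily the exact mapping
+    # used internally: np.tan gives a smooth bijection from a finite
+    # interval onto the real line, e.g.
+    #     x = np.tan(z)       # maps (-pi/2, pi/2) onto (-inf, inf)
+    #     z = np.arctan(x)    # inverse, maps back into the interval
+    # so a one-sided-bounded variable can be optimized unconstrained.)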
+    def f(x):
+        return -np.abs(np.sum(x)) ** 10 * (1 if np.all(x > 0) else -1)
+
+    res = optimize.minimize(f, x0, **kwargs)
+    assert_allclose(res.fun, -(3e6) ** 10, rtol=1e-7)
+
+    # df/dx gets larger for some of the variables and smaller for others.
+    def f(x):
+        t = -np.abs(np.sum(x[:2])) ** 5 - np.abs(np.sum(x[2:])) ** (0.1)
+        t *= (1 if np.all(x > 0) else -1)
+        return t
+
+    kwargs['bounds'] = [(-np.inf, 1e3)] * 3
+    res = optimize.minimize(f, x0, **kwargs)
+    assert_allclose(res.fun, -(2e3) ** 5 - (1e6) ** (0.1), rtol=1e-7)
+
+
+class TestOptimizeWrapperDisp(CheckOptimizeParameterized):
+    use_wrapper = True
+    disp = True
+
+
+class TestOptimizeWrapperNoDisp(CheckOptimizeParameterized):
+    use_wrapper = True
+    disp = False
+
+
+class TestOptimizeNoWrapperDisp(CheckOptimizeParameterized):
+    use_wrapper = False
+    disp = True
+
+
+class TestOptimizeNoWrapperNoDisp(CheckOptimizeParameterized):
+    use_wrapper = False
+    disp = False
+
+
+class TestOptimizeSimple(CheckOptimize):
+
+    def test_bfgs_nan(self):
+        # Test corner case where nan is fed to the optimizer. See gh-2067.
+        def func(x):
+            return x
+
+        def fprime(x):
+            return np.ones_like(x)
+
+        x0 = [np.nan]
+        with np.errstate(over='ignore', invalid='ignore'):
+            x = optimize.fmin_bfgs(func, x0, fprime, disp=False)
+            assert np.isnan(func(x))
+
+    def test_bfgs_nan_return(self):
+        # Test corner cases where fun returns NaN. See gh-4793.
+
+        # First case: NaN from first call.
+        def func(x):
+            return np.nan
+
+        with np.errstate(invalid='ignore'):
+            result = optimize.minimize(func, 0)
+
+        assert np.isnan(result['fun'])
+        assert result['success'] is False
+
+        # Second case: NaN from second call.
+        def func(x):
+            return 0 if x == 0 else np.nan
+
+        def fprime(x):
+            return np.ones_like(x)  # Steer away from zero.
+
+        with np.errstate(invalid='ignore'):
+            result = optimize.minimize(func, 0, jac=fprime)
+
+        assert np.isnan(result['fun'])
+        assert result['success'] is False
+
+    def test_bfgs_numerical_jacobian(self):
+        # BFGS with numerical Jacobian and a vector epsilon parameter.
+        # define the epsilon parameter using a random vector
+        epsilon = np.sqrt(np.spacing(1.)) * np.random.rand(len(self.solution))
+
+        params = optimize.fmin_bfgs(self.func, self.startparams,
+                                    epsilon=epsilon, args=(),
+                                    maxiter=self.maxiter, disp=False)
+
+        assert_allclose(self.func(params), self.func(self.solution),
+                        atol=1e-6)
+
+    def test_finite_differences_jac(self):
+        methods = ['BFGS', 'CG', 'TNC']
+        jacs = ['2-point', '3-point', None]
+        for method, jac in itertools.product(methods, jacs):
+            result = optimize.minimize(self.func, self.startparams,
+                                       method=method, jac=jac)
+            assert_allclose(self.func(result.x), self.func(self.solution),
+                            atol=1e-6)
+
+    def test_finite_differences_hess(self):
+        # test that all the methods that require hess can use
+        # finite-difference approximations. For Newton-CG, trust-ncg and
+        # trust-krylov the FD-estimated Hessian is wrapped in a hessp
+        # function. dogleg and trust-exact actually require true Hessians
+        # at the moment, so they're excluded.
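+        # (standalone sketch of the same idea, ours:
+        #     optimize.minimize(optimize.rosen, [1.1, 1.1],
+        #                       jac=optimize.rosen_der, hess='2-point',
+        #                       method='trust-ncg')
+        # converges without an analytic Hessian.)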
+ methods = ['trust-constr', 'Newton-CG', 'trust-ncg', 'trust-krylov'] + hesses = FD_METHODS + (optimize.BFGS,) + for method, hess in itertools.product(methods, hesses): + if hess is optimize.BFGS: + hess = hess() + result = optimize.minimize(self.func, self.startparams, + method=method, jac=self.grad, + hess=hess) + assert result.success + + # check that the methods demand some sort of Hessian specification + # Newton-CG creates its own hessp, and trust-constr doesn't need a hess + # specified either + methods = ['trust-ncg', 'trust-krylov', 'dogleg', 'trust-exact'] + for method in methods: + with pytest.raises(ValueError): + optimize.minimize(self.func, self.startparams, + method=method, jac=self.grad, + hess=None) + + def test_bfgs_gh_2169(self): + def f(x): + if x < 0: + return 1.79769313e+308 + else: + return x + 1./x + xs = optimize.fmin_bfgs(f, [10.], disp=False) + assert_allclose(xs, 1.0, rtol=1e-4, atol=1e-4) + + def test_bfgs_double_evaluations(self): + # check BFGS does not evaluate twice in a row at same point + def f(x): + xp = x[0] + assert xp not in seen + seen.add(xp) + return 10*x**2, 20*x + + seen = set() + optimize.minimize(f, -100, method='bfgs', jac=True, tol=1e-7) + + def test_l_bfgs_b(self): + # limited-memory bound-constrained BFGS algorithm + retval = optimize.fmin_l_bfgs_b(self.func, self.startparams, + self.grad, args=(), + maxiter=self.maxiter) + + (params, fopt, d) = retval + + assert_allclose(self.func(params), self.func(self.solution), + atol=1e-6) + + # Ensure that function call counts are 'known good'; these are from + # SciPy 0.7.0. Don't allow them to increase. + assert self.funccalls == 7, self.funccalls + assert self.gradcalls == 5, self.gradcalls + + # Ensure that the function behaves the same; this is from SciPy 0.7.0 + # test fixed in gh10673 + assert_allclose(self.trace[3:5], + [[8.117083e-16, -5.196198e-01, 4.897617e-01], + [0., -0.52489628, 0.48753042]], + atol=1e-14, rtol=1e-7) + + def test_l_bfgs_b_numjac(self): + # L-BFGS-B with numerical Jacobian + retval = optimize.fmin_l_bfgs_b(self.func, self.startparams, + approx_grad=True, + maxiter=self.maxiter) + + (params, fopt, d) = retval + + assert_allclose(self.func(params), self.func(self.solution), + atol=1e-6) + + def test_l_bfgs_b_funjac(self): + # L-BFGS-B with combined objective function and Jacobian + def fun(x): + return self.func(x), self.grad(x) + + retval = optimize.fmin_l_bfgs_b(fun, self.startparams, + maxiter=self.maxiter) + + (params, fopt, d) = retval + + assert_allclose(self.func(params), self.func(self.solution), + atol=1e-6) + + def test_l_bfgs_b_maxiter(self): + # gh7854 + # Ensure that not more than maxiters are ever run. + class Callback: + def __init__(self): + self.nit = 0 + self.fun = None + self.x = None + + def __call__(self, x): + self.x = x + self.fun = optimize.rosen(x) + self.nit += 1 + + c = Callback() + res = optimize.minimize(optimize.rosen, [0., 0.], method='l-bfgs-b', + callback=c, options={'maxiter': 5}) + + assert_equal(res.nit, 5) + assert_almost_equal(res.x, c.x) + assert_almost_equal(res.fun, c.fun) + assert_equal(res.status, 1) + assert res.success is False + assert_equal(res.message, + 'STOP: TOTAL NO. 
of ITERATIONS REACHED LIMIT') + + def test_minimize_l_bfgs_b(self): + # Minimize with L-BFGS-B method + opts = {'disp': False, 'maxiter': self.maxiter} + r = optimize.minimize(self.func, self.startparams, + method='L-BFGS-B', jac=self.grad, + options=opts) + assert_allclose(self.func(r.x), self.func(self.solution), + atol=1e-6) + assert self.gradcalls == r.njev + + self.funccalls = self.gradcalls = 0 + # approximate jacobian + ra = optimize.minimize(self.func, self.startparams, + method='L-BFGS-B', options=opts) + # check that function evaluations in approximate jacobian are counted + # assert_(ra.nfev > r.nfev) + assert self.funccalls == ra.nfev + assert_allclose(self.func(ra.x), self.func(self.solution), + atol=1e-6) + + self.funccalls = self.gradcalls = 0 + # approximate jacobian + ra = optimize.minimize(self.func, self.startparams, jac='3-point', + method='L-BFGS-B', options=opts) + assert self.funccalls == ra.nfev + assert_allclose(self.func(ra.x), self.func(self.solution), + atol=1e-6) + + def test_minimize_l_bfgs_b_ftol(self): + # Check that the `ftol` parameter in l_bfgs_b works as expected + v0 = None + for tol in [1e-1, 1e-4, 1e-7, 1e-10]: + opts = {'disp': False, 'maxiter': self.maxiter, 'ftol': tol} + sol = optimize.minimize(self.func, self.startparams, + method='L-BFGS-B', jac=self.grad, + options=opts) + v = self.func(sol.x) + + if v0 is None: + v0 = v + else: + assert v < v0 + + assert_allclose(v, self.func(self.solution), rtol=tol) + + def test_minimize_l_bfgs_maxls(self): + # check that the maxls is passed down to the Fortran routine + sol = optimize.minimize(optimize.rosen, np.array([-1.2, 1.0]), + method='L-BFGS-B', jac=optimize.rosen_der, + options={'disp': False, 'maxls': 1}) + assert not sol.success + + def test_minimize_l_bfgs_b_maxfun_interruption(self): + # gh-6162 + f = optimize.rosen + g = optimize.rosen_der + values = [] + x0 = np.full(7, 1000) + + def objfun(x): + value = f(x) + values.append(value) + return value + + # Look for an interesting test case. + # Request a maxfun that stops at a particularly bad function + # evaluation somewhere between 100 and 300 evaluations. + low, medium, high = 30, 100, 300 + optimize.fmin_l_bfgs_b(objfun, x0, fprime=g, maxfun=high) + v, k = max((y, i) for i, y in enumerate(values[medium:])) + maxfun = medium + k + # If the minimization strategy is reasonable, + # the minimize() result should not be worse than the best + # of the first 30 function evaluations. + target = min(values[:low]) + xmin, fmin, d = optimize.fmin_l_bfgs_b(f, x0, fprime=g, maxfun=maxfun) + assert_array_less(fmin, target) + + def test_custom(self): + # This function comes from the documentation example. 
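+        # (a callable passed as `method` receives (fun, x0, args, **options)
+        # and must return an optimize.OptimizeResult; `custmin` below follows
+        # that contract)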
+ def custmin(fun, x0, args=(), maxfev=None, stepsize=0.1, + maxiter=100, callback=None, **options): + bestx = x0 + besty = fun(x0) + funcalls = 1 + niter = 0 + improved = True + stop = False + + while improved and not stop and niter < maxiter: + improved = False + niter += 1 + for dim in range(np.size(x0)): + for s in [bestx[dim] - stepsize, bestx[dim] + stepsize]: + testx = np.copy(bestx) + testx[dim] = s + testy = fun(testx, *args) + funcalls += 1 + if testy < besty: + besty = testy + bestx = testx + improved = True + if callback is not None: + callback(bestx) + if maxfev is not None and funcalls >= maxfev: + stop = True + break + + return optimize.OptimizeResult(fun=besty, x=bestx, nit=niter, + nfev=funcalls, success=(niter > 1)) + + x0 = [1.35, 0.9, 0.8, 1.1, 1.2] + res = optimize.minimize(optimize.rosen, x0, method=custmin, + options=dict(stepsize=0.05)) + assert_allclose(res.x, 1.0, rtol=1e-4, atol=1e-4) + + def test_gh10771(self): + # check that minimize passes bounds and constraints to a custom + # minimizer without altering them. + bounds = [(-2, 2), (0, 3)] + constraints = 'constraints' + + def custmin(fun, x0, **options): + assert options['bounds'] is bounds + assert options['constraints'] is constraints + return optimize.OptimizeResult() + + x0 = [1, 1] + optimize.minimize(optimize.rosen, x0, method=custmin, + bounds=bounds, constraints=constraints) + + def test_minimize_tol_parameter(self): + # Check that the minimize() tol= argument does something + def func(z): + x, y = z + return x**2*y**2 + x**4 + 1 + + def dfunc(z): + x, y = z + return np.array([2*x*y**2 + 4*x**3, 2*x**2*y]) + + for method in ['nelder-mead', 'powell', 'cg', 'bfgs', + 'newton-cg', 'l-bfgs-b', 'tnc', + 'cobyla', 'slsqp']: + if method in ('nelder-mead', 'powell', 'cobyla'): + jac = None + else: + jac = dfunc + + sol1 = optimize.minimize(func, [1, 1], jac=jac, tol=1e-10, + method=method) + sol2 = optimize.minimize(func, [1, 1], jac=jac, tol=1.0, + method=method) + assert func(sol1.x) < func(sol2.x), \ + f"{method}: {func(sol1.x)} vs. 
{func(sol2.x)}" + + @pytest.mark.filterwarnings('ignore::UserWarning') + @pytest.mark.filterwarnings('ignore::RuntimeWarning') # See gh-18547 + @pytest.mark.parametrize('method', + ['fmin', 'fmin_powell', 'fmin_cg', 'fmin_bfgs', + 'fmin_ncg', 'fmin_l_bfgs_b', 'fmin_tnc', + 'fmin_slsqp'] + MINIMIZE_METHODS) + def test_minimize_callback_copies_array(self, method): + # Check that arrays passed to callbacks are not modified + # inplace by the optimizer afterward + + if method in ('fmin_tnc', 'fmin_l_bfgs_b'): + def func(x): + return optimize.rosen(x), optimize.rosen_der(x) + else: + func = optimize.rosen + jac = optimize.rosen_der + hess = optimize.rosen_hess + + x0 = np.zeros(10) + + # Set options + kwargs = {} + if method.startswith('fmin'): + routine = getattr(optimize, method) + if method == 'fmin_slsqp': + kwargs['iter'] = 5 + elif method == 'fmin_tnc': + kwargs['maxfun'] = 100 + elif method in ('fmin', 'fmin_powell'): + kwargs['maxiter'] = 3500 + else: + kwargs['maxiter'] = 5 + else: + def routine(*a, **kw): + kw['method'] = method + return optimize.minimize(*a, **kw) + + if method == 'tnc': + kwargs['options'] = dict(maxfun=100) + else: + kwargs['options'] = dict(maxiter=5) + + if method in ('fmin_ncg',): + kwargs['fprime'] = jac + elif method in ('newton-cg',): + kwargs['jac'] = jac + elif method in ('trust-krylov', 'trust-exact', 'trust-ncg', 'dogleg', + 'trust-constr'): + kwargs['jac'] = jac + kwargs['hess'] = hess + + # Run with callback + results = [] + + def callback(x, *args, **kwargs): + assert not isinstance(x, optimize.OptimizeResult) + results.append((x, np.copy(x))) + + routine(func, x0, callback=callback, **kwargs) + + # Check returned arrays coincide with their copies + # and have no memory overlap + assert len(results) > 2 + assert all(np.all(x == y) for x, y in results) + combinations = itertools.combinations(results, 2) + assert not any(np.may_share_memory(x[0], y[0]) for x, y in combinations) + + @pytest.mark.parametrize('method', ['nelder-mead', 'powell', 'cg', + 'bfgs', 'newton-cg', 'l-bfgs-b', + 'tnc', 'cobyla', 'slsqp']) + def test_no_increase(self, method): + # Check that the solver doesn't return a value worse than the + # initial point. 
+
+        def func(x):
+            return (x - 1)**2
+
+        def bad_grad(x):
+            # purposefully invalid gradient function; simulates a case
+            # where line searches start failing
+            return 2*(x - 1) * (-1) - 2
+
+        x0 = np.array([2.0])
+        f0 = func(x0)
+        jac = bad_grad
+        options = dict(maxfun=20) if method == 'tnc' else dict(maxiter=20)
+        if method in ['nelder-mead', 'powell', 'cobyla']:
+            jac = None
+        sol = optimize.minimize(func, x0, jac=jac, method=method,
+                                options=options)
+        assert_equal(func(sol.x), sol.fun)
+
+        if method == 'slsqp':
+            pytest.xfail("SLSQP returns slightly worse")
+        assert func(sol.x) <= f0
+
+    def test_slsqp_respect_bounds(self):
+        # Regression test for gh-3108
+        def f(x):
+            return sum((x - np.array([1., 2., 3., 4.]))**2)
+
+        def cons(x):
+            a = np.array([[-1, -1, -1, -1], [-3, -3, -2, -1]])
+            return np.concatenate([np.dot(a, x) + np.array([5, 10]), x])
+
+        x0 = np.array([0.5, 1., 1.5, 2.])
+        res = optimize.minimize(f, x0, method='slsqp',
+                                constraints={'type': 'ineq', 'fun': cons})
+        assert_allclose(res.x, np.array([0., 2, 5, 8])/3, atol=1e-12)
+
+    @pytest.mark.parametrize('method', ['Nelder-Mead', 'Powell', 'CG', 'BFGS',
+                                        'Newton-CG', 'L-BFGS-B', 'SLSQP',
+                                        'trust-constr', 'dogleg', 'trust-ncg',
+                                        'trust-exact', 'trust-krylov'])
+    def test_respect_maxiter(self, method):
+        # Check that the number of iterations equals maxiter, assuming
+        # convergence isn't reached earlier
+        MAXITER = 4
+
+        x0 = np.zeros(10)
+
+        sf = ScalarFunction(optimize.rosen, x0, (), optimize.rosen_der,
+                            optimize.rosen_hess, None, None)
+
+        # Set options
+        kwargs = {'method': method, 'options': dict(maxiter=MAXITER)}
+
+        if method in ('Newton-CG',):
+            kwargs['jac'] = sf.grad
+        elif method in ('trust-krylov', 'trust-exact', 'trust-ncg', 'dogleg',
+                        'trust-constr'):
+            kwargs['jac'] = sf.grad
+            kwargs['hess'] = sf.hess
+
+        sol = optimize.minimize(sf.fun, x0, **kwargs)
+        assert sol.nit == MAXITER
+        assert sol.nfev >= sf.nfev
+        if hasattr(sol, 'njev'):
+            assert sol.njev >= sf.ngev
+
+        # method specific tests
+        if method == 'SLSQP':
+            assert sol.status == 9  # Iteration limit reached
+
+    @pytest.mark.parametrize('method', ['Nelder-Mead', 'Powell',
+                                        'fmin', 'fmin_powell'])
+    def test_runtime_warning(self, method):
+        x0 = np.zeros(10)
+        sf = ScalarFunction(optimize.rosen, x0, (), optimize.rosen_der,
+                            optimize.rosen_hess, None, None)
+        options = {"maxiter": 1, "disp": True}
+        with pytest.warns(RuntimeWarning,
+                          match=r'Maximum number of iterations'):
+            if method.startswith('fmin'):
+                routine = getattr(optimize, method)
+                routine(sf.fun, x0, **options)
+            else:
+                optimize.minimize(sf.fun, x0, method=method, options=options)
+
+    def test_respect_maxiter_trust_constr_ineq_constraints(self):
+        # special case of minimization with trust-constr and inequality
+        # constraints, to check that the maxiter limit is obeyed when using
+        # the internal method 'tr_interior_point'
+        MAXITER = 4
+        f = optimize.rosen
+        jac = optimize.rosen_der
+        hess = optimize.rosen_hess
+
+        def fun(x):
+            return np.array([0.2 * x[0] - 0.4 * x[1] - 0.33 * x[2]])
+
+        cons = ({'type': 'ineq',
+                 'fun': fun},)
+
+        x0 = np.zeros(10)
+        sol = optimize.minimize(f, x0, constraints=cons, jac=jac, hess=hess,
+                                method='trust-constr',
+                                options=dict(maxiter=MAXITER))
+        assert sol.nit == MAXITER
+
+    def test_minimize_automethod(self):
+        def f(x):
+            return x**2
+
+        def cons(x):
+            return x - 2
+
+        x0 = np.array([10.])
+        sol_0 = optimize.minimize(f, x0)
+        sol_1 = optimize.minimize(f, x0, constraints=[{'type': 'ineq',
+                                                       'fun': cons}])
+        sol_2 = optimize.minimize(f, x0, bounds=[(5, 10)])
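+        # (for reference: with no constraints or bounds, minimize defaults to
+        # BFGS; with bounds only, to L-BFGS-B; with constraints, to SLSQP, so
+        # sol_0..sol_4 exercise all three auto-selected methods)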
+        sol_3 = optimize.minimize(f, x0,
+                                  constraints=[{'type': 'ineq', 'fun': cons}],
+                                  bounds=[(5, 10)])
+        sol_4 = optimize.minimize(f, x0,
+                                  constraints=[{'type': 'ineq', 'fun': cons}],
+                                  bounds=[(1, 10)])
+        for sol in [sol_0, sol_1, sol_2, sol_3, sol_4]:
+            assert sol.success
+        assert_allclose(sol_0.x, 0, atol=1e-7)
+        assert_allclose(sol_1.x, 2, atol=1e-7)
+        assert_allclose(sol_2.x, 5, atol=1e-7)
+        assert_allclose(sol_3.x, 5, atol=1e-7)
+        assert_allclose(sol_4.x, 2, atol=1e-7)
+
+    def test_minimize_coerce_args_param(self):
+        # Regression test for gh-3503
+        def Y(x, c):
+            return np.sum((x-c)**2)
+
+        def dY_dx(x, c=None):
+            return 2*(x-c)
+
+        c = np.array([3, 1, 4, 1, 5, 9, 2, 6, 5, 3, 5])
+        xinit = np.random.randn(len(c))
+        optimize.minimize(Y, xinit, jac=dY_dx, args=(c), method="BFGS")
+
+    def test_initial_step_scaling(self):
+        # Check that the optimizer's initial step is not huge even if the
+        # function and gradients are
+        scales = [1e-50, 1, 1e50]
+        methods = ['CG', 'BFGS', 'L-BFGS-B', 'Newton-CG']
+
+        def f(x):
+            if first_step_size[0] is None and x[0] != x0[0]:
+                first_step_size[0] = abs(x[0] - x0[0])
+            if abs(x).max() > 1e4:
+                raise AssertionError("Optimization stepped far away!")
+            return scale*(x[0] - 1)**2
+
+        def g(x):
+            return np.array([scale*(x[0] - 1)])
+
+        for scale, method in itertools.product(scales, methods):
+            if method in ('CG', 'BFGS'):
+                options = dict(gtol=scale*1e-8)
+            else:
+                options = dict()
+
+            if scale < 1e-10 and method in ('L-BFGS-B', 'Newton-CG'):
+                # XXX: these methods return the initial point if they see
+                # a small gradient
+                continue
+
+            x0 = [-1.0]
+            first_step_size = [None]
+            res = optimize.minimize(f, x0, jac=g, method=method,
+                                    options=options)
+
+            err_msg = f"{method} {scale}: {first_step_size}: {res}"
+
+            assert res.success, err_msg
+            assert_allclose(res.x, [1.0], err_msg=err_msg)
+            assert res.nit <= 3, err_msg
+
+            if scale > 1e-10:
+                if method in ('CG', 'BFGS'):
+                    assert_allclose(first_step_size[0], 1.01, err_msg=err_msg)
+                else:
+                    # Newton-CG and L-BFGS-B use different logic for the first
+                    # step, but are both scaling invariant with step sizes ~ 1
+                    assert first_step_size[0] > 0.5 and first_step_size[0] < 3, err_msg
+            else:
+                # step size has an upper bound of ||grad||, so the line
+                # search makes many small steps
+                pass
+
+    @pytest.mark.parametrize('method', ['nelder-mead', 'powell', 'cg', 'bfgs',
+                                        'newton-cg', 'l-bfgs-b', 'tnc',
+                                        'cobyla', 'slsqp', 'trust-constr',
+                                        'dogleg', 'trust-ncg', 'trust-exact',
+                                        'trust-krylov'])
+    def test_nan_values(self, method):
+        # Check that NaN values result in a failed exit status
+        np.random.seed(1234)
+
+        count = [0]
+
+        def func(x):
+            return np.nan
+
+        def func2(x):
+            count[0] += 1
+            if count[0] > 2:
+                return np.nan
+            else:
+                return np.random.rand()
+
+        def grad(x):
+            return np.array([1.0])
+
+        def hess(x):
+            return np.array([[1.0]])
+
+        x0 = np.array([1.0])
+
+        needs_grad = method in ('newton-cg', 'trust-krylov', 'trust-exact',
+                                'trust-ncg', 'dogleg')
+        needs_hess = method in ('trust-krylov', 'trust-exact', 'trust-ncg',
+                                'dogleg')
+
+        funcs = [func, func2]
+        grads = [grad] if needs_grad else [grad, None]
+        hesss = [hess] if needs_hess else [hess, None]
+        options = dict(maxfun=20) if method == 'tnc' else dict(maxiter=20)
+
+        with np.errstate(invalid='ignore'), suppress_warnings() as sup:
+            sup.filter(UserWarning, "delta_grad == 0.*")
+            sup.filter(RuntimeWarning, ".*does not use Hessian.*")
+            sup.filter(RuntimeWarning, ".*does not use gradient.*")
+
+            for f, g, h in itertools.product(funcs, grads, hesss):
+                count = [0]
+                sol = optimize.minimize(f, x0,
jac=g, hess=h, method=method, + options=options) + assert_equal(sol.success, False) + + @pytest.mark.parametrize('method', ['nelder-mead', 'cg', 'bfgs', + 'l-bfgs-b', 'tnc', + 'cobyla', 'slsqp', 'trust-constr', + 'dogleg', 'trust-ncg', 'trust-exact', + 'trust-krylov']) + def test_duplicate_evaluations(self, method): + # check that there are no duplicate evaluations for any methods + jac = hess = None + if method in ('newton-cg', 'trust-krylov', 'trust-exact', + 'trust-ncg', 'dogleg'): + jac = self.grad + if method in ('trust-krylov', 'trust-exact', 'trust-ncg', + 'dogleg'): + hess = self.hess + + with np.errstate(invalid='ignore'), suppress_warnings() as sup: + # for trust-constr + sup.filter(UserWarning, "delta_grad == 0.*") + optimize.minimize(self.func, self.startparams, + method=method, jac=jac, hess=hess) + + for i in range(1, len(self.trace)): + if np.array_equal(self.trace[i - 1], self.trace[i]): + raise RuntimeError( + f"Duplicate evaluations made by {method}") + + @pytest.mark.filterwarnings('ignore::RuntimeWarning') + @pytest.mark.parametrize('method', MINIMIZE_METHODS_NEW_CB) + @pytest.mark.parametrize('new_cb_interface', [0, 1, 2]) + def test_callback_stopiteration(self, method, new_cb_interface): + # Check that if callback raises StopIteration, optimization + # terminates with the same result as if iterations were limited + + def f(x): + f.flag = False # check that f isn't called after StopIteration + return optimize.rosen(x) + f.flag = False + + def g(x): + f.flag = False + return optimize.rosen_der(x) + + def h(x): + f.flag = False + return optimize.rosen_hess(x) + + maxiter = 5 + + if new_cb_interface == 1: + def callback_interface(*, intermediate_result): + assert intermediate_result.fun == f(intermediate_result.x) + callback() + elif new_cb_interface == 2: + class Callback: + def __call__(self, intermediate_result: OptimizeResult): + assert intermediate_result.fun == f(intermediate_result.x) + callback() + callback_interface = Callback() + else: + def callback_interface(xk, *args): # type: ignore[misc] + callback() + + def callback(): + callback.i += 1 + callback.flag = False + if callback.i == maxiter: + callback.flag = True + raise StopIteration() + callback.i = 0 + callback.flag = False + + kwargs = {'x0': [1.1]*5, 'method': method, + 'fun': f, 'jac': g, 'hess': h} + + res = optimize.minimize(**kwargs, callback=callback_interface) + if method == 'nelder-mead': + maxiter = maxiter + 1 # nelder-mead counts differently + ref = optimize.minimize(**kwargs, options={'maxiter': maxiter}) + assert res.fun == ref.fun + assert_equal(res.x, ref.x) + assert res.nit == ref.nit == maxiter + assert res.status == (3 if method == 'trust-constr' else 99) + + def test_ndim_error(self): + msg = "'x0' must only have one dimension." + with assert_raises(ValueError, match=msg): + optimize.minimize(lambda x: x, np.ones((2, 1))) + + @pytest.mark.parametrize('method', ('nelder-mead', 'l-bfgs-b', 'tnc', + 'powell', 'cobyla', 'trust-constr')) + def test_minimize_invalid_bounds(self, method): + def f(x): + return np.sum(x**2) + + bounds = Bounds([1, 2], [3, 4]) + msg = 'The number of bounds is not compatible with the length of `x0`.' + with pytest.raises(ValueError, match=msg): + optimize.minimize(f, x0=[1, 2, 3], method=method, bounds=bounds) + + bounds = Bounds([1, 6, 1], [3, 4, 2]) + msg = 'An upper bound is less than the corresponding lower bound.' 
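+        # (here lb[1] == 6 exceeds ub[1] == 4, which must trigger the
+        # validation error below)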
+ with pytest.raises(ValueError, match=msg): + optimize.minimize(f, x0=[1, 2, 3], method=method, bounds=bounds) + + @pytest.mark.parametrize('method', ['bfgs', 'cg', 'newton-cg', 'powell']) + def test_minimize_warnings_gh1953(self, method): + # test that minimize methods produce warnings rather than just using + # `print`; see gh-1953. + kwargs = {} if method=='powell' else {'jac': optimize.rosen_der} + warning_type = (RuntimeWarning if method=='powell' + else optimize.OptimizeWarning) + + options = {'disp': True, 'maxiter': 10} + with pytest.warns(warning_type, match='Maximum number'): + optimize.minimize(lambda x: optimize.rosen(x), [0, 0], + method=method, options=options, **kwargs) + + options['disp'] = False + optimize.minimize(lambda x: optimize.rosen(x), [0, 0], + method=method, options=options, **kwargs) + + +@pytest.mark.parametrize( + 'method', + ['l-bfgs-b', 'tnc', 'Powell', 'Nelder-Mead'] +) +def test_minimize_with_scalar(method): + # checks that minimize works with a scalar being provided to it. + def f(x): + return np.sum(x ** 2) + + res = optimize.minimize(f, 17, bounds=[(-100, 100)], method=method) + assert res.success + assert_allclose(res.x, [0.0], atol=1e-5) + + +class TestLBFGSBBounds: + def setup_method(self): + self.bounds = ((1, None), (None, None)) + self.solution = (1, 0) + + def fun(self, x, p=2.0): + return 1.0 / p * (x[0]**p + x[1]**p) + + def jac(self, x, p=2.0): + return x**(p - 1) + + def fj(self, x, p=2.0): + return self.fun(x, p), self.jac(x, p) + + def test_l_bfgs_b_bounds(self): + x, f, d = optimize.fmin_l_bfgs_b(self.fun, [0, -1], + fprime=self.jac, + bounds=self.bounds) + assert d['warnflag'] == 0, d['task'] + assert_allclose(x, self.solution, atol=1e-6) + + def test_l_bfgs_b_funjac(self): + # L-BFGS-B with fun and jac combined and extra arguments + x, f, d = optimize.fmin_l_bfgs_b(self.fj, [0, -1], args=(2.0, ), + bounds=self.bounds) + assert d['warnflag'] == 0, d['task'] + assert_allclose(x, self.solution, atol=1e-6) + + def test_minimize_l_bfgs_b_bounds(self): + # Minimize with method='L-BFGS-B' with bounds + res = optimize.minimize(self.fun, [0, -1], method='L-BFGS-B', + jac=self.jac, bounds=self.bounds) + assert res['success'], res['message'] + assert_allclose(res.x, self.solution, atol=1e-6) + + @pytest.mark.parametrize('bounds', [ + ([(10, 1), (1, 10)]), + ([(1, 10), (10, 1)]), + ([(10, 1), (10, 1)]) + ]) + def test_minimize_l_bfgs_b_incorrect_bounds(self, bounds): + with pytest.raises(ValueError, match='.*bound.*'): + optimize.minimize(self.fun, [0, -1], method='L-BFGS-B', + jac=self.jac, bounds=bounds) + + def test_minimize_l_bfgs_b_bounds_FD(self): + # test that initial starting value outside bounds doesn't raise + # an error (done with clipping). 
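+        # (e.g. x0 = [0, -1] with bounds ((1, None), (None, None)) is first
+        # clipped to [1, -1] before the optimizer starts)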
+ # test all different finite differences combos, with and without args + + jacs = ['2-point', '3-point', None] + argss = [(2.,), ()] + for jac, args in itertools.product(jacs, argss): + res = optimize.minimize(self.fun, [0, -1], args=args, + method='L-BFGS-B', + jac=jac, bounds=self.bounds, + options={'finite_diff_rel_step': None}) + assert res['success'], res['message'] + assert_allclose(res.x, self.solution, atol=1e-6) + + +class TestOptimizeScalar: + def setup_method(self): + self.solution = 1.5 + + def fun(self, x, a=1.5): + """Objective function""" + return (x - a)**2 - 0.8 + + def test_brent(self): + x = optimize.brent(self.fun) + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.brent(self.fun, brack=(-3, -2)) + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.brent(self.fun, full_output=True) + assert_allclose(x[0], self.solution, atol=1e-6) + + x = optimize.brent(self.fun, brack=(-15, -1, 15)) + assert_allclose(x, self.solution, atol=1e-6) + + message = r"\(f\(xb\) < f\(xa\)\) and \(f\(xb\) < f\(xc\)\)" + with pytest.raises(ValueError, match=message): + optimize.brent(self.fun, brack=(-1, 0, 1)) + + message = r"\(xa < xb\) and \(xb < xc\)" + with pytest.raises(ValueError, match=message): + optimize.brent(self.fun, brack=(0, -1, 1)) + + @pytest.mark.filterwarnings('ignore::UserWarning') + def test_golden(self): + x = optimize.golden(self.fun) + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.golden(self.fun, brack=(-3, -2)) + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.golden(self.fun, full_output=True) + assert_allclose(x[0], self.solution, atol=1e-6) + + x = optimize.golden(self.fun, brack=(-15, -1, 15)) + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.golden(self.fun, tol=0) + assert_allclose(x, self.solution) + + maxiter_test_cases = [0, 1, 5] + for maxiter in maxiter_test_cases: + x0 = optimize.golden(self.fun, maxiter=0, full_output=True) + x = optimize.golden(self.fun, maxiter=maxiter, full_output=True) + nfev0, nfev = x0[2], x[2] + assert_equal(nfev - nfev0, maxiter) + + message = r"\(f\(xb\) < f\(xa\)\) and \(f\(xb\) < f\(xc\)\)" + with pytest.raises(ValueError, match=message): + optimize.golden(self.fun, brack=(-1, 0, 1)) + + message = r"\(xa < xb\) and \(xb < xc\)" + with pytest.raises(ValueError, match=message): + optimize.golden(self.fun, brack=(0, -1, 1)) + + def test_fminbound(self): + x = optimize.fminbound(self.fun, 0, 1) + assert_allclose(x, 1, atol=1e-4) + + x = optimize.fminbound(self.fun, 1, 5) + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.fminbound(self.fun, np.array([1]), np.array([5])) + assert_allclose(x, self.solution, atol=1e-6) + assert_raises(ValueError, optimize.fminbound, self.fun, 5, 1) + + def test_fminbound_scalar(self): + with pytest.raises(ValueError, match='.*must be finite scalars.*'): + optimize.fminbound(self.fun, np.zeros((1, 2)), 1) + + x = optimize.fminbound(self.fun, 1, np.array(5)) + assert_allclose(x, self.solution, atol=1e-6) + + def test_gh11207(self): + def fun(x): + return x**2 + optimize.fminbound(fun, 0, 0) + + def test_minimize_scalar(self): + # combine all tests above for the minimize_scalar wrapper + x = optimize.minimize_scalar(self.fun).x + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.minimize_scalar(self.fun, method='Brent') + assert x.success + + x = optimize.minimize_scalar(self.fun, method='Brent', + options=dict(maxiter=3)) + assert not x.success + + x = optimize.minimize_scalar(self.fun, bracket=(-3, -2), + 
args=(1.5, ), method='Brent').x + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.minimize_scalar(self.fun, method='Brent', + args=(1.5,)).x + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.minimize_scalar(self.fun, bracket=(-15, -1, 15), + args=(1.5, ), method='Brent').x + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.minimize_scalar(self.fun, bracket=(-3, -2), + args=(1.5, ), method='golden').x + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.minimize_scalar(self.fun, method='golden', + args=(1.5,)).x + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.minimize_scalar(self.fun, bracket=(-15, -1, 15), + args=(1.5, ), method='golden').x + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.minimize_scalar(self.fun, bounds=(0, 1), args=(1.5,), + method='Bounded').x + assert_allclose(x, 1, atol=1e-4) + + x = optimize.minimize_scalar(self.fun, bounds=(1, 5), args=(1.5, ), + method='bounded').x + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.minimize_scalar(self.fun, bounds=(np.array([1]), + np.array([5])), + args=(np.array([1.5]), ), + method='bounded').x + assert_allclose(x, self.solution, atol=1e-6) + + assert_raises(ValueError, optimize.minimize_scalar, self.fun, + bounds=(5, 1), method='bounded', args=(1.5, )) + + assert_raises(ValueError, optimize.minimize_scalar, self.fun, + bounds=(np.zeros(2), 1), method='bounded', args=(1.5, )) + + x = optimize.minimize_scalar(self.fun, bounds=(1, np.array(5)), + method='bounded').x + assert_allclose(x, self.solution, atol=1e-6) + + def test_minimize_scalar_custom(self): + # This function comes from the documentation example. + def custmin(fun, bracket, args=(), maxfev=None, stepsize=0.1, + maxiter=100, callback=None, **options): + bestx = (bracket[1] + bracket[0]) / 2.0 + besty = fun(bestx) + funcalls = 1 + niter = 0 + improved = True + stop = False + + while improved and not stop and niter < maxiter: + improved = False + niter += 1 + for testx in [bestx - stepsize, bestx + stepsize]: + testy = fun(testx, *args) + funcalls += 1 + if testy < besty: + besty = testy + bestx = testx + improved = True + if callback is not None: + callback(bestx) + if maxfev is not None and funcalls >= maxfev: + stop = True + break + + return optimize.OptimizeResult(fun=besty, x=bestx, nit=niter, + nfev=funcalls, success=(niter > 1)) + + res = optimize.minimize_scalar(self.fun, bracket=(0, 4), + method=custmin, + options=dict(stepsize=0.05)) + assert_allclose(res.x, self.solution, atol=1e-6) + + def test_minimize_scalar_coerce_args_param(self): + # Regression test for gh-3503 + optimize.minimize_scalar(self.fun, args=1.5) + + @pytest.mark.parametrize('method', ['brent', 'bounded', 'golden']) + def test_disp(self, method): + # test that all minimize_scalar methods accept a disp option. 
+        for disp in [0, 1, 2, 3]:
+            optimize.minimize_scalar(self.fun, options={"disp": disp})
+
+    @pytest.mark.parametrize('method', ['brent', 'bounded', 'golden'])
+    def test_result_attributes(self, method):
+        kwargs = {"bounds": [-10, 10]} if method == 'bounded' else {}
+        result = optimize.minimize_scalar(self.fun, method=method, **kwargs)
+        assert hasattr(result, "x")
+        assert hasattr(result, "success")
+        assert hasattr(result, "message")
+        assert hasattr(result, "fun")
+        assert hasattr(result, "nfev")
+        assert hasattr(result, "nit")
+
+    @pytest.mark.filterwarnings('ignore::UserWarning')
+    @pytest.mark.parametrize('method', ['brent', 'bounded', 'golden'])
+    def test_nan_values(self, method):
+        # Check that NaN values result in a failed exit status
+        np.random.seed(1234)
+
+        count = [0]
+
+        def func(x):
+            count[0] += 1
+            if count[0] > 4:
+                return np.nan
+            else:
+                return x**2 + 0.1 * np.sin(x)
+
+        bracket = (-1, 0, 1)
+        bounds = (-1, 1)
+
+        with np.errstate(invalid='ignore'), suppress_warnings() as sup:
+            sup.filter(UserWarning, "delta_grad == 0.*")
+            sup.filter(RuntimeWarning, ".*does not use Hessian.*")
+            sup.filter(RuntimeWarning, ".*does not use gradient.*")
+
+            count = [0]
+
+            kwargs = {"bounds": bounds} if method == 'bounded' else {}
+            sol = optimize.minimize_scalar(func, bracket=bracket,
+                                           **kwargs, method=method,
+                                           options=dict(maxiter=20))
+            assert_equal(sol.success, False)
+
+    def test_minimize_scalar_defaults_gh10911(self):
+        # Previously, bounds were silently ignored unless `method='bounded'`
+        # was chosen. See gh-10911. Check that this is no longer the case.
+        def f(x):
+            return x**2
+
+        res = optimize.minimize_scalar(f)
+        assert_allclose(res.x, 0, atol=1e-8)
+
+        res = optimize.minimize_scalar(f, bounds=(1, 100),
+                                       options={'xatol': 1e-10})
+        assert_allclose(res.x, 1)
+
+    def test_minimize_non_finite_bounds_gh10911(self):
+        # Previously, minimize_scalar misbehaved with infinite bounds.
+        # See gh-10911. Check that it now raises an error instead.
+        msg = "Optimization bounds must be finite scalars."
+        with pytest.raises(ValueError, match=msg):
+            optimize.minimize_scalar(np.sin, bounds=(1, np.inf))
+        with pytest.raises(ValueError, match=msg):
+            optimize.minimize_scalar(np.sin, bounds=(np.nan, 1))
+
+    @pytest.mark.parametrize("method", ['brent', 'golden'])
+    def test_minimize_unbounded_method_with_bounds_gh10911(self, method):
+        # Previously, `bounds` were silently ignored when `method='brent'` or
+        # `method='golden'`. See gh-10911. Check that an error is now raised.
+        msg = "Use of `bounds` is incompatible with..."
+        with pytest.raises(ValueError, match=msg):
+            optimize.minimize_scalar(np.sin, method=method, bounds=(1, 2))
+
+    @pytest.mark.filterwarnings('ignore::RuntimeWarning')
+    @pytest.mark.parametrize("method", MINIMIZE_SCALAR_METHODS)
+    @pytest.mark.parametrize("tol", [1, 1e-6])
+    @pytest.mark.parametrize("fshape", [(), (1,), (1, 1)])
+    def test_minimize_scalar_dimensionality_gh16196(self, method, tol, fshape):
+        # gh-16196 reported that the output shape of `minimize_scalar` was not
+        # consistent when an objective function returned an array. Check that
+        # `res.fun` and `res.x` are now consistent.
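+        # (e.g. with fshape == (1, 1) both res.x and res.fun come back with
+        # shape (1, 1), matching f(res.x))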
+ def f(x): + return np.array(x**4).reshape(fshape) + + a, b = -0.1, 0.2 + kwargs = (dict(bracket=(a, b)) if method != "bounded" + else dict(bounds=(a, b))) + kwargs.update(dict(method=method, tol=tol)) + + res = optimize.minimize_scalar(f, **kwargs) + assert res.x.shape == res.fun.shape == f(res.x).shape == fshape + + @pytest.mark.parametrize('method', ['bounded', 'brent', 'golden']) + def test_minimize_scalar_warnings_gh1953(self, method): + # test that minimize_scalar methods produce warnings rather than just + # using `print`; see gh-1953. + def f(x): + return (x - 1)**2 + + kwargs = {} + kwd = 'bounds' if method == 'bounded' else 'bracket' + kwargs[kwd] = [-2, 10] + + options = {'disp': True, 'maxiter': 3} + with pytest.warns(optimize.OptimizeWarning, match='Maximum number'): + optimize.minimize_scalar(f, method=method, options=options, + **kwargs) + + options['disp'] = False + optimize.minimize_scalar(f, method=method, options=options, **kwargs) + + +class TestBracket: + + @pytest.mark.filterwarnings('ignore::RuntimeWarning') + def test_errors_and_status_false(self): + # Check that `bracket` raises the errors it is supposed to + def f(x): # gh-14858 + return x**2 if ((-1 < x) & (x < 1)) else 100.0 + + message = "The algorithm terminated without finding a valid bracket." + with pytest.raises(RuntimeError, match=message): + optimize.bracket(f, -1, 1) + with pytest.raises(RuntimeError, match=message): + optimize.bracket(f, -1, np.inf) + with pytest.raises(RuntimeError, match=message): + optimize.brent(f, brack=(-1, 1)) + with pytest.raises(RuntimeError, match=message): + optimize.golden(f, brack=(-1, 1)) + + def f(x): # gh-5899 + return -5 * x**5 + 4 * x**4 - 12 * x**3 + 11 * x**2 - 2 * x + 1 + + message = "No valid bracket was found before the iteration limit..." + with pytest.raises(RuntimeError, match=message): + optimize.bracket(f, -0.5, 0.5, maxiter=10) + + @pytest.mark.parametrize('method', ('brent', 'golden')) + def test_minimize_scalar_success_false(self, method): + # Check that status information from `bracket` gets to minimize_scalar + def f(x): # gh-14858 + return x**2 if ((-1 < x) & (x < 1)) else 100.0 + + message = "The algorithm terminated without finding a valid bracket." 
+ + res = optimize.minimize_scalar(f, bracket=(-1, 1), method=method) + assert not res.success + assert message in res.message + assert res.nfev == 3 + assert res.nit == 0 + assert res.fun == 100 + + +def test_brent_negative_tolerance(): + assert_raises(ValueError, optimize.brent, np.cos, tol=-.01) + + +class TestNewtonCg: + def test_rosenbrock(self): + x0 = np.array([-1.2, 1.0]) + sol = optimize.minimize(optimize.rosen, x0, + jac=optimize.rosen_der, + hess=optimize.rosen_hess, + tol=1e-5, + method='Newton-CG') + assert sol.success, sol.message + assert_allclose(sol.x, np.array([1, 1]), rtol=1e-4) + + def test_himmelblau(self): + x0 = np.array(himmelblau_x0) + sol = optimize.minimize(himmelblau, + x0, + jac=himmelblau_grad, + hess=himmelblau_hess, + method='Newton-CG', + tol=1e-6) + assert sol.success, sol.message + assert_allclose(sol.x, himmelblau_xopt, rtol=1e-4) + assert_allclose(sol.fun, himmelblau_min, atol=1e-4) + + def test_finite_difference(self): + x0 = np.array([-1.2, 1.0]) + sol = optimize.minimize(optimize.rosen, x0, + jac=optimize.rosen_der, + hess='2-point', + tol=1e-5, + method='Newton-CG') + assert sol.success, sol.message + assert_allclose(sol.x, np.array([1, 1]), rtol=1e-4) + + def test_hessian_update_strategy(self): + x0 = np.array([-1.2, 1.0]) + sol = optimize.minimize(optimize.rosen, x0, + jac=optimize.rosen_der, + hess=optimize.BFGS(), + tol=1e-5, + method='Newton-CG') + assert sol.success, sol.message + assert_allclose(sol.x, np.array([1, 1]), rtol=1e-4) + + +def test_line_for_search(): + # _line_for_search is only used in _linesearch_powell, which is also + # tested below. Thus there are more tests of _line_for_search in the + # test_linesearch_powell_bounded function. + + line_for_search = optimize._optimize._line_for_search + # args are x0, alpha, lower_bound, upper_bound + # returns lmin, lmax + + lower_bound = np.array([-5.3, -1, -1.5, -3]) + upper_bound = np.array([1.9, 1, 2.8, 3]) + + # test when starting in the bounds + x0 = np.array([0., 0, 0, 0]) + # and when starting outside of the bounds + x1 = np.array([0., 2, -3, 0]) + + all_tests = ( + (x0, np.array([1., 0, 0, 0]), -5.3, 1.9), + (x0, np.array([0., 1, 0, 0]), -1, 1), + (x0, np.array([0., 0, 1, 0]), -1.5, 2.8), + (x0, np.array([0., 0, 0, 1]), -3, 3), + (x0, np.array([1., 1, 0, 0]), -1, 1), + (x0, np.array([1., 0, -1, 2]), -1.5, 1.5), + (x0, np.array([2., 0, -1, 2]), -1.5, 0.95), + (x1, np.array([1., 0, 0, 0]), -5.3, 1.9), + (x1, np.array([0., 1, 0, 0]), -3, -1), + (x1, np.array([0., 0, 1, 0]), 1.5, 5.8), + (x1, np.array([0., 0, 0, 1]), -3, 3), + (x1, np.array([1., 1, 0, 0]), -3, -1), + (x1, np.array([1., 0, -1, 0]), -5.3, -1.5), + ) + + for x, alpha, lmin, lmax in all_tests: + mi, ma = line_for_search(x, alpha, lower_bound, upper_bound) + assert_allclose(mi, lmin, atol=1e-6) + assert_allclose(ma, lmax, atol=1e-6) + + # now with infinite bounds + lower_bound = np.array([-np.inf, -1, -np.inf, -3]) + upper_bound = np.array([np.inf, 1, 2.8, np.inf]) + + all_tests = ( + (x0, np.array([1., 0, 0, 0]), -np.inf, np.inf), + (x0, np.array([0., 1, 0, 0]), -1, 1), + (x0, np.array([0., 0, 1, 0]), -np.inf, 2.8), + (x0, np.array([0., 0, 0, 1]), -3, np.inf), + (x0, np.array([1., 1, 0, 0]), -1, 1), + (x0, np.array([1., 0, -1, 2]), -1.5, np.inf), + (x1, np.array([1., 0, 0, 0]), -np.inf, np.inf), + (x1, np.array([0., 1, 0, 0]), -3, -1), + (x1, np.array([0., 0, 1, 0]), -np.inf, 5.8), + (x1, np.array([0., 0, 0, 1]), -3, np.inf), + (x1, np.array([1., 1, 0, 0]), -3, -1), + (x1, np.array([1., 0, -1, 0]), -5.8, np.inf), + ) + + 
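+ # --- Illustrative sketch (editorial addition; not part of the upstream test file) ---
+ # A re-derivation of what `_line_for_search` computes (helper name below is
+ # invented here, not SciPy's private implementation): the interval of step
+ # sizes t for which lb <= x + t*d <= ub holds componentwise. Each coordinate
+ # with d[i] != 0 bounds t between (lb[i]-x[i])/d[i] and (ub[i]-x[i])/d[i]
+ # (swapped when d[i] < 0); the admissible interval is their intersection.
+ import numpy as np
+ def step_interval(x, d, lb, ub):
+     with np.errstate(divide='ignore', invalid='ignore'):
+         lo, hi = (lb - x) / d, (ub - x) / d
+     nz = d != 0                        # only moving coordinates constrain t
+     tmin = np.minimum(lo, hi)[nz].max() if nz.any() else -np.inf
+     tmax = np.maximum(lo, hi)[nz].min() if nz.any() else np.inf
+     return tmin, tmax
+ # e.g. step_interval(np.zeros(4), np.array([2., 0, -1, 2]),
+ #                    np.array([-5.3, -1, -1.5, -3]),
+ #                    np.array([1.9, 1, 2.8, 3])) -> (-1.5, 0.95), matching
+ # the corresponding case in `all_tests` above.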
for x, alpha, lmin, lmax in all_tests: + mi, ma = line_for_search(x, alpha, lower_bound, upper_bound) + assert_allclose(mi, lmin, atol=1e-6) + assert_allclose(ma, lmax, atol=1e-6) + + +def test_linesearch_powell(): + # helper function in optimize.py, not a public function. + linesearch_powell = optimize._optimize._linesearch_powell + # args are func, p, xi, fval, lower_bound=None, upper_bound=None, tol=1e-3 + # returns new_fval, p + direction, direction + def func(x): + return np.sum((x - np.array([-1.0, 2.0, 1.5, -0.4])) ** 2) + p0 = np.array([0., 0, 0, 0]) + fval = func(p0) + lower_bound = np.array([-np.inf] * 4) + upper_bound = np.array([np.inf] * 4) + + all_tests = ( + (np.array([1., 0, 0, 0]), -1), + (np.array([0., 1, 0, 0]), 2), + (np.array([0., 0, 1, 0]), 1.5), + (np.array([0., 0, 0, 1]), -.4), + (np.array([-1., 0, 1, 0]), 1.25), + (np.array([0., 0, 1, 1]), .55), + (np.array([2., 0, -1, 1]), -.65), + ) + + for xi, l in all_tests: + f, p, direction = linesearch_powell(func, p0, xi, + fval=fval, tol=1e-5) + assert_allclose(f, func(l * xi), atol=1e-6) + assert_allclose(p, l * xi, atol=1e-6) + assert_allclose(direction, l * xi, atol=1e-6) + + f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5, + lower_bound=lower_bound, + upper_bound=upper_bound, + fval=fval) + assert_allclose(f, func(l * xi), atol=1e-6) + assert_allclose(p, l * xi, atol=1e-6) + assert_allclose(direction, l * xi, atol=1e-6) + + +def test_linesearch_powell_bounded(): + # helper function in optimize.py, not a public function. + linesearch_powell = optimize._optimize._linesearch_powell + # args are func, p, xi, fval, lower_bound=None, upper_bound=None, tol=1e-3 + # returns new_fval, p+direction, direction + def func(x): + return np.sum((x - np.array([-1.0, 2.0, 1.5, -0.4])) ** 2) + p0 = np.array([0., 0, 0, 0]) + fval = func(p0) + + # first choose bounds such that the same tests from + # test_linesearch_powell should pass. 
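+ # --- Illustrative sketch (editorial addition; not part of the upstream test file) ---
+ # The idea behind `_linesearch_powell` (helper name below is invented here):
+ # minimize the 1-D restriction g(t) = func(p + t*xi). Unbounded searches can
+ # use Brent; with finite step limits, 'bounded' searches only feasible steps.
+ from scipy import optimize
+ def line_search_along(func, p, xi, tmin=None, tmax=None):
+     g = lambda t: func(p + t * xi)
+     if tmin is None:                   # unbounded: plain Brent
+         res = optimize.minimize_scalar(g, method='brent')
+     else:                              # feasible steps only (cf. sketch above)
+         res = optimize.minimize_scalar(g, bounds=(tmin, tmax),
+                                        method='bounded')
+     # mirror the (new_fval, p + direction, direction) triple used in the tests
+     return res.fun, p + res.x * xi, res.x * xi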
+ lower_bound = np.array([-2.]*4) + upper_bound = np.array([2.]*4) + + all_tests = ( + (np.array([1., 0, 0, 0]), -1), + (np.array([0., 1, 0, 0]), 2), + (np.array([0., 0, 1, 0]), 1.5), + (np.array([0., 0, 0, 1]), -.4), + (np.array([-1., 0, 1, 0]), 1.25), + (np.array([0., 0, 1, 1]), .55), + (np.array([2., 0, -1, 1]), -.65), + ) + + for xi, l in all_tests: + f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5, + lower_bound=lower_bound, + upper_bound=upper_bound, + fval=fval) + assert_allclose(f, func(l * xi), atol=1e-6) + assert_allclose(p, l * xi, atol=1e-6) + assert_allclose(direction, l * xi, atol=1e-6) + + # now choose bounds such that unbounded vs bounded gives different results + lower_bound = np.array([-.3]*3 + [-1]) + upper_bound = np.array([.45]*3 + [.9]) + + all_tests = ( + (np.array([1., 0, 0, 0]), -.3), + (np.array([0., 1, 0, 0]), .45), + (np.array([0., 0, 1, 0]), .45), + (np.array([0., 0, 0, 1]), -.4), + (np.array([-1., 0, 1, 0]), .3), + (np.array([0., 0, 1, 1]), .45), + (np.array([2., 0, -1, 1]), -.15), + ) + + for xi, l in all_tests: + f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5, + lower_bound=lower_bound, + upper_bound=upper_bound, + fval=fval) + assert_allclose(f, func(l * xi), atol=1e-6) + assert_allclose(p, l * xi, atol=1e-6) + assert_allclose(direction, l * xi, atol=1e-6) + + # now choose as above but start outside the bounds + p0 = np.array([-1., 0, 0, 2]) + fval = func(p0) + + all_tests = ( + (np.array([1., 0, 0, 0]), .7), + (np.array([0., 1, 0, 0]), .45), + (np.array([0., 0, 1, 0]), .45), + (np.array([0., 0, 0, 1]), -2.4), + ) + + for xi, l in all_tests: + f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5, + lower_bound=lower_bound, + upper_bound=upper_bound, + fval=fval) + assert_allclose(f, func(p0 + l * xi), atol=1e-6) + assert_allclose(p, p0 + l * xi, atol=1e-6) + assert_allclose(direction, l * xi, atol=1e-6) + + # now mix in inf + p0 = np.array([0., 0, 0, 0]) + fval = func(p0) + + # now choose bounds that mix inf + lower_bound = np.array([-.3, -np.inf, -np.inf, -1]) + upper_bound = np.array([np.inf, .45, np.inf, .9]) + + all_tests = ( + (np.array([1., 0, 0, 0]), -.3), + (np.array([0., 1, 0, 0]), .45), + (np.array([0., 0, 1, 0]), 1.5), + (np.array([0., 0, 0, 1]), -.4), + (np.array([-1., 0, 1, 0]), .3), + (np.array([0., 0, 1, 1]), .55), + (np.array([2., 0, -1, 1]), -.15), + ) + + for xi, l in all_tests: + f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5, + lower_bound=lower_bound, + upper_bound=upper_bound, + fval=fval) + assert_allclose(f, func(l * xi), atol=1e-6) + assert_allclose(p, l * xi, atol=1e-6) + assert_allclose(direction, l * xi, atol=1e-6) + + # now choose as above but start outside the bounds + p0 = np.array([-1., 0, 0, 2]) + fval = func(p0) + + all_tests = ( + (np.array([1., 0, 0, 0]), .7), + (np.array([0., 1, 0, 0]), .45), + (np.array([0., 0, 1, 0]), 1.5), + (np.array([0., 0, 0, 1]), -2.4), + ) + + for xi, l in all_tests: + f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5, + lower_bound=lower_bound, + upper_bound=upper_bound, + fval=fval) + assert_allclose(f, func(p0 + l * xi), atol=1e-6) + assert_allclose(p, p0 + l * xi, atol=1e-6) + assert_allclose(direction, l * xi, atol=1e-6) + + +def test_powell_limits(): + # gh15342 - powell was going outside bounds for some function evaluations. 
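+ # --- Illustrative sketch (editorial addition; not part of the upstream test file) ---
+ # The contract exercised by the test below, stated directly: when `bounds` is
+ # given, Powell should never evaluate the objective outside the box, which is
+ # why an assertion inside the objective works as the regression check.
+ from scipy.optimize import minimize, Bounds
+ box = Bounds(lb=[0.0, 0.0], ub=[0.6, 20.0])
+ res = minimize(lambda x: x[0]**2 + x[1]**2, x0=[0.5, 10.0],
+                method='Powell', bounds=box)
+ assert (res.x >= box.lb).all() and (res.x <= box.ub).all()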
+ bounds = optimize.Bounds([0, 0], [0.6, 20]) + + def fun(x): + a, b = x + assert (x >= bounds.lb).all() and (x <= bounds.ub).all() + return a ** 2 + b ** 2 + + optimize.minimize(fun, x0=[0.6, 20], method='Powell', bounds=bounds) + + # Another test from the original report - gh-13411 + bounds = optimize.Bounds(lb=[0,], ub=[1,], keep_feasible=[True,]) + + def func(x): + assert x >= 0 and x <= 1 + return np.exp(x) + + optimize.minimize(fun=func, x0=[0.5], method='powell', bounds=bounds) + + +class TestRosen: + + def test_hess(self): + # Compare rosen_hess(x) times p with rosen_hess_prod(x,p). See gh-1775. + x = np.array([3, 4, 5]) + p = np.array([2, 2, 2]) + hp = optimize.rosen_hess_prod(x, p) + dothp = np.dot(optimize.rosen_hess(x), p) + assert_equal(hp, dothp) + + +def himmelblau(p): + """ + R^2 -> R^1 test function for optimization. The function has four local + minima where himmelblau(xopt) == 0. + """ + x, y = p + a = x*x + y - 11 + b = x + y*y - 7 + return a*a + b*b + + +def himmelblau_grad(p): + x, y = p + return np.array([4*x**3 + 4*x*y - 42*x + 2*y**2 - 14, + 2*x**2 + 4*x*y + 4*y**3 - 26*y - 22]) + + +def himmelblau_hess(p): + x, y = p + return np.array([[12*x**2 + 4*y - 42, 4*x + 4*y], + [4*x + 4*y, 4*x + 12*y**2 - 26]]) + + +himmelblau_x0 = [-0.27, -0.9] +himmelblau_xopt = [3, 2] +himmelblau_min = 0.0 + + +def test_minimize_multiple_constraints(): + # Regression test for gh-4240. + def func(x): + return np.array([25 - 0.2 * x[0] - 0.4 * x[1] - 0.33 * x[2]]) + + def func1(x): + return np.array([x[1]]) + + def func2(x): + return np.array([x[2]]) + + cons = ({'type': 'ineq', 'fun': func}, + {'type': 'ineq', 'fun': func1}, + {'type': 'ineq', 'fun': func2}) + + def f(x): + return -1 * (x[0] + x[1] + x[2]) + + res = optimize.minimize(f, [0, 0, 0], method='SLSQP', constraints=cons) + assert_allclose(res.x, [125, 0, 0], atol=1e-10) + + +class TestOptimizeResultAttributes: + # Test that all minimizers return an OptimizeResult containing + # all the OptimizeResult attributes + def setup_method(self): + self.x0 = [5, 5] + self.func = optimize.rosen + self.jac = optimize.rosen_der + self.hess = optimize.rosen_hess + self.hessp = optimize.rosen_hess_prod + self.bounds = [(0., 10.), (0., 10.)] + + def test_attributes_present(self): + attributes = ['nit', 'nfev', 'x', 'success', 'status', 'fun', + 'message'] + skip = {'cobyla': ['nit']} + for method in MINIMIZE_METHODS: + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, + ("Method .+ does not use (gradient|Hessian.*)" + " information")) + res = optimize.minimize(self.func, self.x0, method=method, + jac=self.jac, hess=self.hess, + hessp=self.hessp) + for attribute in attributes: + if method in skip and attribute in skip[method]: + continue + + assert hasattr(res, attribute) + assert attribute in dir(res) + + # gh13001, OptimizeResult.message should be a str + assert isinstance(res.message, str) + + +def f1(z, *params): + x, y = z + a, b, c, d, e, f, g, h, i, j, k, l, scale = params + return (a * x**2 + b * x * y + c * y**2 + d*x + e*y + f) + + +def f2(z, *params): + x, y = z + a, b, c, d, e, f, g, h, i, j, k, l, scale = params + return (-g*np.exp(-((x-h)**2 + (y-i)**2) / scale)) + + +def f3(z, *params): + x, y = z + a, b, c, d, e, f, g, h, i, j, k, l, scale = params + return (-j*np.exp(-((x-k)**2 + (y-l)**2) / scale)) + + +def brute_func(z, *params): + return f1(z, *params) + f2(z, *params) + f3(z, *params) + + +class TestBrute: + # Test the "brute force" method + def setup_method(self): + self.params = (2, 3, 7, 8, 9, 10, 44, -1, 2, 
26, 1, -2, 0.5) + self.rranges = (slice(-4, 4, 0.25), slice(-4, 4, 0.25)) + self.solution = np.array([-1.05665192, 1.80834843]) + + def brute_func(self, z, *params): + # an instance-method wrapper around brute_func + return brute_func(z, *params) + + def test_brute(self): + # test fmin + resbrute = optimize.brute(brute_func, self.rranges, args=self.params, + full_output=True, finish=optimize.fmin) + assert_allclose(resbrute[0], self.solution, atol=1e-3) + assert_allclose(resbrute[1], brute_func(self.solution, *self.params), + atol=1e-3) + + # test minimize + resbrute = optimize.brute(brute_func, self.rranges, args=self.params, + full_output=True, + finish=optimize.minimize) + assert_allclose(resbrute[0], self.solution, atol=1e-3) + assert_allclose(resbrute[1], brute_func(self.solution, *self.params), + atol=1e-3) + + # test that brute can optimize an instance method (the other tests use + # a non-class-based function) + resbrute = optimize.brute(self.brute_func, self.rranges, + args=self.params, full_output=True, + finish=optimize.minimize) + assert_allclose(resbrute[0], self.solution, atol=1e-3) + + def test_1D(self): + # test that for a 1-D problem the test function is passed an array, + # not a scalar. + def f(x): + assert len(x.shape) == 1 + assert x.shape[0] == 1 + return x ** 2 + + optimize.brute(f, [(-1, 1)], Ns=3, finish=None) + + def test_workers(self): + # check that parallel evaluation works + resbrute = optimize.brute(brute_func, self.rranges, args=self.params, + full_output=True, finish=None) + + resbrute1 = optimize.brute(brute_func, self.rranges, args=self.params, + full_output=True, finish=None, workers=2) + + assert_allclose(resbrute1[-1], resbrute[-1]) + assert_allclose(resbrute1[0], resbrute[0]) + + def test_runtime_warning(self, capsys): + rng = np.random.default_rng(1234) + + def func(z, *params): + return rng.random(1) * 1000 # a problem that never converges + + msg = "final optimization did not succeed.*|Maximum number of function eval.*" + with pytest.warns(RuntimeWarning, match=msg): + optimize.brute(func, self.rranges, args=self.params, disp=True) + + def test_coerce_args_param(self): + # optimize.brute should coerce non-iterable args to a tuple. + def f(x, *args): + return x ** args[0] + + resbrute = optimize.brute(f, (slice(-4, 4, .25),), args=2) + assert_allclose(resbrute, 0) + + +def test_cobyla_threadsafe(): + + # Verify that cobyla is threadsafe. Will segfault if it is not. + + import concurrent.futures + import time + + def objective1(x): + time.sleep(0.1) + return x[0]**2 + + def objective2(x): + time.sleep(0.1) + return (x[0]-1)**2 + + min_method = "COBYLA" + + def minimizer1(): + return optimize.minimize(objective1, + [0.0], + method=min_method) + + def minimizer2(): + return optimize.minimize(objective2, + [0.0], + method=min_method) + + with concurrent.futures.ThreadPoolExecutor() as pool: + tasks = [] + tasks.append(pool.submit(minimizer1)) + tasks.append(pool.submit(minimizer2)) + for t in tasks: + t.result() + + +class TestIterationLimits: + # Tests that optimisation does not give up before trying the requested + # number of iterations or evaluations, and that it does not succeed + # by exceeding the limits. 
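+ # --- Illustrative sketch (editorial addition; not part of the upstream test file) ---
+ # The contract this class checks, in miniature: when the evaluation budget
+ # (rather than the convergence test) ends the run, `success` is False and the
+ # reported counter has reached the budget.
+ from scipy import optimize
+ res = optimize.minimize(optimize.rosen, [2.0, 2.0], method='Nelder-Mead',
+                         options={'maxfev': 10})
+ assert not res.success and res.nfev >= 10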
+ def setup_method(self): + self.funcalls = 0 + + def slow_func(self, v): + self.funcalls += 1 + r, t = np.sqrt(v[0]**2+v[1]**2), np.arctan2(v[0], v[1]) + return np.sin(r*20 + t)+r*0.5 + + def test_neldermead_limit(self): + self.check_limits("Nelder-Mead", 200) + + def test_powell_limit(self): + self.check_limits("powell", 1000) + + def check_limits(self, method, default_iters): + for start_v in [[0.1, 0.1], [1, 1], [2, 2]]: + for mfev in [50, 500, 5000]: + self.funcalls = 0 + res = optimize.minimize(self.slow_func, start_v, + method=method, + options={"maxfev": mfev}) + assert self.funcalls == res["nfev"] + if res["success"]: + assert res["nfev"] < mfev + else: + assert res["nfev"] >= mfev + for mit in [50, 500, 5000]: + res = optimize.minimize(self.slow_func, start_v, + method=method, + options={"maxiter": mit}) + if res["success"]: + assert res["nit"] <= mit + else: + assert res["nit"] >= mit + for mfev, mit in [[50, 50], [5000, 5000], [5000, np.inf]]: + self.funcalls = 0 + res = optimize.minimize(self.slow_func, start_v, + method=method, + options={"maxiter": mit, + "maxfev": mfev}) + assert self.funcalls == res["nfev"] + if res["success"]: + assert res["nfev"] < mfev and res["nit"] <= mit + else: + assert res["nfev"] >= mfev or res["nit"] >= mit + for mfev, mit in [[np.inf, None], [None, np.inf]]: + self.funcalls = 0 + res = optimize.minimize(self.slow_func, start_v, + method=method, + options={"maxiter": mit, + "maxfev": mfev}) + assert self.funcalls == res["nfev"] + if res["success"]: + if mfev is None: + assert res["nfev"] < default_iters*2 + else: + assert res["nit"] <= default_iters*2 + else: + assert (res["nfev"] >= default_iters*2 + or res["nit"] >= default_iters*2) + + +def test_result_x_shape_when_len_x_is_one(): + def fun(x): + return x * x + + def jac(x): + return 2. 
* x + + def hess(x): + return np.array([[2.]]) + + methods = ['Nelder-Mead', 'Powell', 'CG', 'BFGS', 'L-BFGS-B', 'TNC', + 'COBYLA', 'SLSQP'] + for method in methods: + res = optimize.minimize(fun, np.array([0.1]), method=method) + assert res.x.shape == (1,) + + # use jac + hess + methods = ['trust-constr', 'dogleg', 'trust-ncg', 'trust-exact', + 'trust-krylov', 'Newton-CG'] + for method in methods: + res = optimize.minimize(fun, np.array([0.1]), method=method, jac=jac, + hess=hess) + assert res.x.shape == (1,) + + +class FunctionWithGradient: + def __init__(self): + self.number_of_calls = 0 + + def __call__(self, x): + self.number_of_calls += 1 + return np.sum(x**2), 2 * x + + +@pytest.fixture +def function_with_gradient(): + return FunctionWithGradient() + + +def test_memoize_jac_function_before_gradient(function_with_gradient): + memoized_function = MemoizeJac(function_with_gradient) + + x0 = np.array([1.0, 2.0]) + assert_allclose(memoized_function(x0), 5.0) + assert function_with_gradient.number_of_calls == 1 + + assert_allclose(memoized_function.derivative(x0), 2 * x0) + assert function_with_gradient.number_of_calls == 1, \ + "function is not recomputed " \ + "if gradient is requested after function value" + + assert_allclose( + memoized_function(2 * x0), 20.0, + err_msg="different input triggers new computation") + assert function_with_gradient.number_of_calls == 2, \ + "different input triggers new computation" + + +def test_memoize_jac_gradient_before_function(function_with_gradient): + memoized_function = MemoizeJac(function_with_gradient) + + x0 = np.array([1.0, 2.0]) + assert_allclose(memoized_function.derivative(x0), 2 * x0) + assert function_with_gradient.number_of_calls == 1 + + assert_allclose(memoized_function(x0), 5.0) + assert function_with_gradient.number_of_calls == 1, \ + "function is not recomputed " \ + "if function value is requested after gradient" + + assert_allclose( + memoized_function.derivative(2 * x0), 4 * x0, + err_msg="different input triggers new computation") + assert function_with_gradient.number_of_calls == 2, \ + "different input triggers new computation" + + +def test_memoize_jac_with_bfgs(function_with_gradient): + """ Tests that using MemoizedJac in combination with ScalarFunction + and BFGS does not lead to repeated function evaluations. + Tests changes made in response to GH11868. 
+ """ + memoized_function = MemoizeJac(function_with_gradient) + jac = memoized_function.derivative + hess = optimize.BFGS() + + x0 = np.array([1.0, 0.5]) + scalar_function = ScalarFunction( + memoized_function, x0, (), jac, hess, None, None) + assert function_with_gradient.number_of_calls == 1 + + scalar_function.fun(x0 + 0.1) + assert function_with_gradient.number_of_calls == 2 + + scalar_function.fun(x0 + 0.2) + assert function_with_gradient.number_of_calls == 3 + + +def test_gh12696(): + # Test that optimize doesn't throw warning gh-12696 + with assert_no_warnings(): + optimize.fminbound( + lambda x: np.array([x**2]), -np.pi, np.pi, disp=False) + + +# --- Test minimize with equal upper and lower bounds --- # + +def setup_test_equal_bounds(): + + np.random.seed(0) + x0 = np.random.rand(4) + lb = np.array([0, 2, -1, -1.0]) + ub = np.array([3, 2, 2, -1.0]) + i_eb = (lb == ub) + + def check_x(x, check_size=True, check_values=True): + if check_size: + assert x.size == 4 + if check_values: + assert_allclose(x[i_eb], lb[i_eb]) + + def func(x): + check_x(x) + return optimize.rosen(x) + + def grad(x): + check_x(x) + return optimize.rosen_der(x) + + def callback(x, *args): + check_x(x) + + def constraint1(x): + check_x(x, check_values=False) + return x[0:1] - 1 + + def jacobian1(x): + check_x(x, check_values=False) + dc = np.zeros_like(x) + dc[0] = 1 + return dc + + def constraint2(x): + check_x(x, check_values=False) + return x[2:3] - 0.5 + + def jacobian2(x): + check_x(x, check_values=False) + dc = np.zeros_like(x) + dc[2] = 1 + return dc + + c1a = NonlinearConstraint(constraint1, -np.inf, 0) + c1b = NonlinearConstraint(constraint1, -np.inf, 0, jacobian1) + c2a = NonlinearConstraint(constraint2, -np.inf, 0) + c2b = NonlinearConstraint(constraint2, -np.inf, 0, jacobian2) + + # test using the three methods that accept bounds, use derivatives, and + # have some trouble when bounds fix variables + methods = ('L-BFGS-B', 'SLSQP', 'TNC') + + # test w/out gradient, w/ gradient, and w/ combined objective/gradient + kwds = ({"fun": func, "jac": False}, + {"fun": func, "jac": grad}, + {"fun": (lambda x: (func(x), grad(x))), + "jac": True}) + + # test with both old- and new-style bounds + bound_types = (lambda lb, ub: list(zip(lb, ub)), + Bounds) + + # Test for many combinations of constraints w/ and w/out jacobian + # Pairs in format: (test constraints, reference constraints) + # (always use analytical jacobian in reference) + constraints = ((None, None), ([], []), + (c1a, c1b), (c2b, c2b), + ([c1b], [c1b]), ([c2a], [c2b]), + ([c1a, c2a], [c1b, c2b]), + ([c1a, c2b], [c1b, c2b]), + ([c1b, c2b], [c1b, c2b])) + + # test with and without callback function + callbacks = (None, callback) + + data = {"methods": methods, "kwds": kwds, "bound_types": bound_types, + "constraints": constraints, "callbacks": callbacks, + "lb": lb, "ub": ub, "x0": x0, "i_eb": i_eb} + + return data + + +eb_data = setup_test_equal_bounds() + + +# This test is about handling fixed variables, not the accuracy of the solvers +@pytest.mark.xfail_on_32bit("Failures due to floating point issues, not logic") +@pytest.mark.parametrize('method', eb_data["methods"]) +@pytest.mark.parametrize('kwds', eb_data["kwds"]) +@pytest.mark.parametrize('bound_type', eb_data["bound_types"]) +@pytest.mark.parametrize('constraints', eb_data["constraints"]) +@pytest.mark.parametrize('callback', eb_data["callbacks"]) +def test_equal_bounds(method, kwds, bound_type, constraints, callback): + """ + Tests that minimizers still work if (bounds.lb == 
bounds.ub).any() + gh12502 - Divide by zero in Jacobian numerical differentiation when + equality bounds constraints are used + """ + # GH-15051; slightly more skips than necessary; hopefully fixed by GH-14882 + if (platform.machine() == 'aarch64' and method == "TNC" + and kwds["jac"] is False and callback is not None): + pytest.skip('Tolerance violation on aarch') + + lb, ub = eb_data["lb"], eb_data["ub"] + x0, i_eb = eb_data["x0"], eb_data["i_eb"] + + test_constraints, reference_constraints = constraints + if test_constraints and not method == 'SLSQP': + pytest.skip('Only SLSQP supports nonlinear constraints') + # reference constraints always have analytical jacobian + # if test constraints are not the same, we'll need finite differences + fd_needed = (test_constraints != reference_constraints) + + bounds = bound_type(lb, ub) # old- or new-style + + kwds.update({"x0": x0, "method": method, "bounds": bounds, + "constraints": test_constraints, "callback": callback}) + res = optimize.minimize(**kwds) + + expected = optimize.minimize(optimize.rosen, x0, method=method, + jac=optimize.rosen_der, bounds=bounds, + constraints=reference_constraints) + + # compare the output of a solution with FD vs that of an analytic grad + assert res.success + assert_allclose(res.fun, expected.fun, rtol=1.5e-6) + assert_allclose(res.x, expected.x, rtol=5e-4) + + if fd_needed or kwds['jac'] is False: + expected.jac[i_eb] = np.nan + assert res.jac.shape[0] == 4 + assert_allclose(res.jac[i_eb], expected.jac[i_eb], rtol=1e-6) + + if not (kwds['jac'] or test_constraints or isinstance(bounds, Bounds)): + # compare the output to an equivalent FD minimization that doesn't + # need factorization + def fun(x): + new_x = np.array([np.nan, 2, np.nan, -1]) + new_x[[0, 2]] = x + return optimize.rosen(new_x) + + fd_res = optimize.minimize(fun, + x0[[0, 2]], + method=method, + bounds=bounds[::2]) + assert_allclose(res.fun, fd_res.fun) + # TODO this test should really be equivalent to factorized version + # above, down to res.nfev. However, testing found that when TNC is + # called with or without a callback the output is different. The two + # should be the same! This indicates that the TNC callback may be + # mutating something when it shouldn't. + assert_allclose(res.x[[0, 2]], fd_res.x, rtol=2e-6) + + +@pytest.mark.parametrize('method', eb_data["methods"]) +def test_all_bounds_equal(method): + # this only tests methods that have parameters factored out when lb==ub + # it does not test other methods that work with bounds + def f(x, p1=1): + return np.linalg.norm(x) + p1 + + bounds = [(1, 1), (2, 2)] + x0 = (1.0, 3.0) + res = optimize.minimize(f, x0, bounds=bounds, method=method) + assert res.success + assert_allclose(res.fun, f([1.0, 2.0])) + assert res.nfev == 1 + assert res.message == 'All independent variables were fixed by bounds.' 
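+ # --- Illustrative sketch (editorial addition; not part of the upstream test file) ---
+ # One way to picture the fixed-variable handling these tests cover: where
+ # lb == ub the variable is pinned and only the remaining coordinates go to the
+ # solver. SciPy's internal factoring is more involved; this helper (name
+ # invented here) just shows the shape of the idea, assuming at least one free
+ # coordinate remains.
+ import numpy as np
+ from scipy import optimize
+ def minimize_with_fixed(fun, x0, lb, ub):
+     lb, ub = np.asarray(lb, float), np.asarray(ub, float)
+     fixed = lb == ub
+     def wrapped(z):                    # re-insert the pinned values
+         x = lb.copy()
+         x[~fixed] = z
+         return fun(x)
+     res = optimize.minimize(wrapped, np.asarray(x0, float)[~fixed],
+                             bounds=list(zip(lb[~fixed], ub[~fixed])))
+     x = lb.copy()
+     x[~fixed] = res.x
+     return x, res.fun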
+ + args = (2,) + res = optimize.minimize(f, x0, bounds=bounds, method=method, args=args) + assert res.success + assert_allclose(res.fun, f([1.0, 2.0], 2)) + + if method.upper() == 'SLSQP': + def con(x): + return np.sum(x) + nlc = NonlinearConstraint(con, -np.inf, 0.0) + res = optimize.minimize( + f, x0, bounds=bounds, method=method, constraints=[nlc] + ) + assert res.success is False + assert_allclose(res.fun, f([1.0, 2.0])) + assert res.nfev == 1 + message = "All independent variables were fixed by bounds, but" + assert res.message.startswith(message) + + nlc = NonlinearConstraint(con, -np.inf, 4) + res = optimize.minimize( + f, x0, bounds=bounds, method=method, constraints=[nlc] + ) + assert res.success is True + assert_allclose(res.fun, f([1.0, 2.0])) + assert res.nfev == 1 + message = "All independent variables were fixed by bounds at values" + assert res.message.startswith(message) + + +def test_eb_constraints(): + # make sure constraint functions aren't overwritten when equal bounds + # are employed, and a parameter is factored out. GH14859 + def f(x): + return x[0]**3 + x[1]**2 + x[2]*x[3] + + def cfun(x): + return x[0] + x[1] + x[2] + x[3] - 40 + + constraints = [{'type': 'ineq', 'fun': cfun}] + + bounds = [(0, 20)] * 4 + bounds[1] = (5, 5) + optimize.minimize( + f, + x0=[1, 2, 3, 4], + method='SLSQP', + bounds=bounds, + constraints=constraints, + ) + assert constraints[0]['fun'] == cfun + + +def test_show_options(): + solver_methods = { + 'minimize': MINIMIZE_METHODS, + 'minimize_scalar': MINIMIZE_SCALAR_METHODS, + 'root': ROOT_METHODS, + 'root_scalar': ROOT_SCALAR_METHODS, + 'linprog': LINPROG_METHODS, + 'quadratic_assignment': QUADRATIC_ASSIGNMENT_METHODS, + } + for solver, methods in solver_methods.items(): + for method in methods: + # testing that `show_options` works without error + show_options(solver, method) + + unknown_solver_method = { + 'minimize': "ekki", # unknown method + 'maximize': "cg", # unknown solver + 'maximize_scalar': "ekki", # unknown solver and method + } + for solver, method in unknown_solver_method.items(): + # testing that `show_options` raises ValueError + assert_raises(ValueError, show_options, solver, method) + + +def test_bounds_with_list(): + # gh13501. Bounds created with lists weren't working for Powell. + bounds = optimize.Bounds(lb=[5., 5.], ub=[10., 10.]) + optimize.minimize( + optimize.rosen, x0=np.array([9, 9]), method='Powell', bounds=bounds + ) + + +def test_x_overwritten_user_function(): + # if the user overwrites the x-array in the user function it's likely + # that the minimizer stops working properly. 
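+ # --- Illustrative sketch (editorial addition; not part of the upstream test file) ---
+ # A defensive wrapper for the failure mode described above (helper name
+ # invented here): hand the user function a private copy of x, so in-place
+ # edits cannot corrupt the optimizer's own iterate.
+ import numpy as np
+ from scipy import optimize
+ def protect_x(fun):
+     def safe(x, *args):
+         return fun(np.array(x, copy=True), *args)
+     return safe
+ def mutating(x):
+     x *= x                             # would corrupt the caller's array...
+     return np.sum(x)
+ res = optimize.minimize(protect_x(mutating), [3.0, -2.0],
+                         method='Nelder-Mead')   # ...but only the copy changes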
+ # gh13740 + def fquad(x): + a = np.arange(np.size(x)) + x -= a + x *= x + return np.sum(x) + + def fquad_jac(x): + a = np.arange(np.size(x)) + x *= 2 + x -= 2 * a + return x + + def fquad_hess(x): + return np.eye(np.size(x)) * 2.0 + + meth_jac = [ + 'newton-cg', 'dogleg', 'trust-ncg', 'trust-exact', + 'trust-krylov', 'trust-constr' + ] + meth_hess = [ + 'dogleg', 'trust-ncg', 'trust-exact', 'trust-krylov', 'trust-constr' + ] + + x0 = np.ones(5) * 1.5 + + for meth in MINIMIZE_METHODS: + jac = None + hess = None + if meth in meth_jac: + jac = fquad_jac + if meth in meth_hess: + hess = fquad_hess + res = optimize.minimize(fquad, x0, method=meth, jac=jac, hess=hess) + assert_allclose(res.x, np.arange(np.size(x0)), atol=2e-4) + + +class TestGlobalOptimization: + + def test_optimize_result_attributes(self): + def func(x): + return x ** 2 + + # Note that `brute` solver does not return `OptimizeResult` + results = [optimize.basinhopping(func, x0=1), + optimize.differential_evolution(func, [(-4, 4)]), + optimize.shgo(func, [(-4, 4)]), + optimize.dual_annealing(func, [(-4, 4)]), + optimize.direct(func, [(-4, 4)]), + ] + + for result in results: + assert isinstance(result, optimize.OptimizeResult) + assert hasattr(result, "x") + assert hasattr(result, "success") + assert hasattr(result, "message") + assert hasattr(result, "fun") + assert hasattr(result, "nfev") + assert hasattr(result, "nit") + + +def test_approx_fprime(): + # check that approx_fprime (serviced by approx_derivative) works for + # jac and hess + g = optimize.approx_fprime(himmelblau_x0, himmelblau) + assert_allclose(g, himmelblau_grad(himmelblau_x0), rtol=5e-6) + + h = optimize.approx_fprime(himmelblau_x0, himmelblau_grad) + assert_allclose(h, himmelblau_hess(himmelblau_x0), rtol=5e-6) + + +def test_gh12594(): + # gh-12594 reported an error in `_linesearch_powell` and + # `_line_for_search` when `Bounds` was passed lists instead of arrays. + # Check that results are the same whether the inputs are lists or arrays. + + def f(x): + return x[0]**2 + (x[1] - 1)**2 + + bounds = Bounds(lb=[-10, -10], ub=[10, 10]) + res = optimize.minimize(f, x0=(0, 0), method='Powell', bounds=bounds) + bounds = Bounds(lb=np.array([-10, -10]), ub=np.array([10, 10])) + ref = optimize.minimize(f, x0=(0, 0), method='Powell', bounds=bounds) + + assert_allclose(res.fun, ref.fun) + assert_allclose(res.x, ref.x) + + +@pytest.mark.parametrize('method', ['Newton-CG', 'trust-constr']) +@pytest.mark.parametrize('sparse_type', [coo_matrix, csc_matrix, csr_matrix, + coo_array, csr_array, csc_array]) +def test_sparse_hessian(method, sparse_type): + # gh-8792 reported an error for minimization with `newton_cg` when `hess` + # returns a sparse matrix. Check that results are the same whether `hess` + # returns a dense or sparse matrix for optimization methods that accept + # sparse Hessian matrices. + + def sparse_rosen_hess(x): + return sparse_type(rosen_hess(x)) + + x0 = [2., 2.] 
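+ # --- Illustrative sketch (editorial addition; not part of the upstream test file) ---
+ # The scheme behind `approx_fprime` (tested further above), written out: by
+ # default it uses one-sided 2-point differences,
+ #     df/dx_i ~= (f(x + h*e_i) - f(x)) / h.
+ # The helper name is invented here; this is not SciPy's implementation.
+ import numpy as np
+ def fd_grad(f, x, h=np.sqrt(np.finfo(float).eps)):
+     x = np.asarray(x, dtype=float)
+     f0, g = f(x), np.empty_like(x)
+     for i in range(x.size):
+         xp = x.copy()
+         xp[i] += h
+         g[i] = (f(xp) - f0) / h
+     return g
+ # fd_grad(himmelblau, himmelblau_x0) agrees with himmelblau_grad(himmelblau_x0)
+ # to roughly sqrt(machine epsilon).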
+ + res_sparse = optimize.minimize(rosen, x0, method=method, + jac=rosen_der, hess=sparse_rosen_hess) + res_dense = optimize.minimize(rosen, x0, method=method, + jac=rosen_der, hess=rosen_hess) + + assert_allclose(res_dense.fun, res_sparse.fun) + assert_allclose(res_dense.x, res_sparse.x) + assert res_dense.nfev == res_sparse.nfev + assert res_dense.njev == res_sparse.njev + assert res_dense.nhev == res_sparse.nhev diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_quadratic_assignment.py b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_quadratic_assignment.py new file mode 100644 index 0000000000000000000000000000000000000000..6f476be1604a21573b15b5dedfa3ceb47974e2d8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_quadratic_assignment.py @@ -0,0 +1,431 @@ +import pytest +import numpy as np +from scipy.optimize import quadratic_assignment, OptimizeWarning +from scipy.optimize._qap import _calc_score as _score +from numpy.testing import assert_equal, assert_, assert_warns + + +################ +# Common Tests # +################ + +def chr12c(): + A = [ + [0, 90, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [90, 0, 0, 23, 0, 0, 0, 0, 0, 0, 0, 0], + [10, 0, 0, 0, 43, 0, 0, 0, 0, 0, 0, 0], + [0, 23, 0, 0, 0, 88, 0, 0, 0, 0, 0, 0], + [0, 0, 43, 0, 0, 0, 26, 0, 0, 0, 0, 0], + [0, 0, 0, 88, 0, 0, 0, 16, 0, 0, 0, 0], + [0, 0, 0, 0, 26, 0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 16, 0, 0, 0, 96, 0, 0], + [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 29, 0], + [0, 0, 0, 0, 0, 0, 0, 96, 0, 0, 0, 37], + [0, 0, 0, 0, 0, 0, 0, 0, 29, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 37, 0, 0], + ] + B = [ + [0, 36, 54, 26, 59, 72, 9, 34, 79, 17, 46, 95], + [36, 0, 73, 35, 90, 58, 30, 78, 35, 44, 79, 36], + [54, 73, 0, 21, 10, 97, 58, 66, 69, 61, 54, 63], + [26, 35, 21, 0, 93, 12, 46, 40, 37, 48, 68, 85], + [59, 90, 10, 93, 0, 64, 5, 29, 76, 16, 5, 76], + [72, 58, 97, 12, 64, 0, 96, 55, 38, 54, 0, 34], + [9, 30, 58, 46, 5, 96, 0, 83, 35, 11, 56, 37], + [34, 78, 66, 40, 29, 55, 83, 0, 44, 12, 15, 80], + [79, 35, 69, 37, 76, 38, 35, 44, 0, 64, 39, 33], + [17, 44, 61, 48, 16, 54, 11, 12, 64, 0, 70, 86], + [46, 79, 54, 68, 5, 0, 56, 15, 39, 70, 0, 18], + [95, 36, 63, 85, 76, 34, 37, 80, 33, 86, 18, 0], + ] + A, B = np.array(A), np.array(B) + n = A.shape[0] + + opt_perm = np.array([7, 5, 1, 3, 10, 4, 8, 6, 9, 11, 2, 12]) - [1] * n + + return A, B, opt_perm + + +class QAPCommonTests: + """ + Base class for `quadratic_assignment` tests. 
+ """ + def setup_method(self): + np.random.seed(0) + + # Test global optima of problem from Umeyama IVB + # https://pcl.sitehost.iu.edu/rgoldsto/papers/weighted%20graph%20match2.pdf + # Graph matching maximum is in the paper + # QAP minimum determined by brute force + def test_accuracy_1(self): + # besides testing accuracy, check that A and B can be lists + A = [[0, 3, 4, 2], + [0, 0, 1, 2], + [1, 0, 0, 1], + [0, 0, 1, 0]] + + B = [[0, 4, 2, 4], + [0, 0, 1, 0], + [0, 2, 0, 2], + [0, 1, 2, 0]] + + res = quadratic_assignment(A, B, method=self.method, + options={"rng": 0, "maximize": False}) + assert_equal(res.fun, 10) + assert_equal(res.col_ind, np.array([1, 2, 3, 0])) + + res = quadratic_assignment(A, B, method=self.method, + options={"rng": 0, "maximize": True}) + + if self.method == 'faq': + # Global optimum is 40, but FAQ gets 37 + assert_equal(res.fun, 37) + assert_equal(res.col_ind, np.array([0, 2, 3, 1])) + else: + assert_equal(res.fun, 40) + assert_equal(res.col_ind, np.array([0, 3, 1, 2])) + + res = quadratic_assignment(A, B, method=self.method, + options={"rng": 0, "maximize": True}) + + # Test global optima of problem from Umeyama IIIB + # https://pcl.sitehost.iu.edu/rgoldsto/papers/weighted%20graph%20match2.pdf + # Graph matching maximum is in the paper + # QAP minimum determined by brute force + def test_accuracy_2(self): + + A = np.array([[0, 5, 8, 6], + [5, 0, 5, 1], + [8, 5, 0, 2], + [6, 1, 2, 0]]) + + B = np.array([[0, 1, 8, 4], + [1, 0, 5, 2], + [8, 5, 0, 5], + [4, 2, 5, 0]]) + + res = quadratic_assignment(A, B, method=self.method, + options={"rng": 0, "maximize": False}) + if self.method == 'faq': + # Global optimum is 176, but FAQ gets 178 + assert_equal(res.fun, 178) + assert_equal(res.col_ind, np.array([1, 0, 3, 2])) + else: + assert_equal(res.fun, 176) + assert_equal(res.col_ind, np.array([1, 2, 3, 0])) + + res = quadratic_assignment(A, B, method=self.method, + options={"rng": 0, "maximize": True}) + assert_equal(res.fun, 286) + assert_equal(res.col_ind, np.array([2, 3, 0, 1])) + + def test_accuracy_3(self): + + A, B, opt_perm = chr12c() + + # basic minimization + res = quadratic_assignment(A, B, method=self.method, + options={"rng": 0}) + assert_(11156 <= res.fun < 21000) + assert_equal(res.fun, _score(A, B, res.col_ind)) + + # basic maximization + res = quadratic_assignment(A, B, method=self.method, + options={"rng": 0, 'maximize': True}) + assert_(74000 <= res.fun < 85000) + assert_equal(res.fun, _score(A, B, res.col_ind)) + + # check ofv with strictly partial match + seed_cost = np.array([4, 8, 10]) + seed = np.asarray([seed_cost, opt_perm[seed_cost]]).T + res = quadratic_assignment(A, B, method=self.method, + options={'partial_match': seed}) + assert_(11156 <= res.fun < 21000) + assert_equal(res.col_ind[seed_cost], opt_perm[seed_cost]) + + # check performance when partial match is the global optimum + seed = np.asarray([np.arange(len(A)), opt_perm]).T + res = quadratic_assignment(A, B, method=self.method, + options={'partial_match': seed}) + assert_equal(res.col_ind, seed[:, 1].T) + assert_equal(res.fun, 11156) + assert_equal(res.nit, 0) + + # check performance with zero sized matrix inputs + empty = np.empty((0, 0)) + res = quadratic_assignment(empty, empty, method=self.method, + options={"rng": 0}) + assert_equal(res.nit, 0) + assert_equal(res.fun, 0) + + def test_unknown_options(self): + A, B, opt_perm = chr12c() + + def f(): + quadratic_assignment(A, B, method=self.method, + options={"ekki-ekki": True}) + assert_warns(OptimizeWarning, f) + + +class 
TestFAQ(QAPCommonTests): + method = "faq" + + def test_options(self): + # cost and distance matrices of QAPLIB instance chr12c + A, B, opt_perm = chr12c() + n = len(A) + + # check that maxiter is obeyed when given a low input value + res = quadratic_assignment(A, B, + options={'maxiter': 5}) + assert_equal(res.nit, 5) + + # test with shuffle + res = quadratic_assignment(A, B, + options={'shuffle_input': True}) + assert_(11156 <= res.fun < 21000) + + # test with randomized init + res = quadratic_assignment(A, B, + options={'rng': 1, 'P0': "randomized"}) + assert_(11156 <= res.fun < 21000) + + # check with specified P0 + K = np.ones((n, n)) / float(n) + K = _doubly_stochastic(K) + res = quadratic_assignment(A, B, + options={'P0': K}) + assert_(11156 <= res.fun < 21000) + + def test_specific_input_validation(self): + + A = np.identity(2) + B = A + + # method is implicitly faq + + # ValueError Checks: making sure single value parameters are of + # correct value + with pytest.raises(ValueError, match="Invalid 'P0' parameter"): + quadratic_assignment(A, B, options={'P0': "random"}) + with pytest.raises( + ValueError, match="'maxiter' must be a positive integer"): + quadratic_assignment(A, B, options={'maxiter': -1}) + with pytest.raises(ValueError, match="'tol' must be a positive float"): + quadratic_assignment(A, B, options={'tol': -1}) + + # TypeError Checks: making sure single value parameters are of + # correct type + with pytest.raises(TypeError): + quadratic_assignment(A, B, options={'maxiter': 1.5}) + + # test P0 matrix input + with pytest.raises( + ValueError, + match="`P0` matrix must have shape m' x m', where m'=n-m"): + quadratic_assignment( + np.identity(4), np.identity(4), + options={'P0': np.ones((3, 3))} + ) + + K = [[0.4, 0.2, 0.3], + [0.3, 0.6, 0.2], + [0.2, 0.2, 0.7]] + # matrix that isn't quite doubly stochastic + with pytest.raises( + ValueError, match="`P0` matrix must be doubly stochastic"): + quadratic_assignment( + np.identity(3), np.identity(3), options={'P0': K} + ) + + +class Test2opt(QAPCommonTests): + method = "2opt" + + def test_deterministic(self): + # np.random.seed(0) runs before every test (see setup_method) + n = 20 + + A = np.random.rand(n, n) + B = np.random.rand(n, n) + res1 = quadratic_assignment(A, B, method=self.method) + + np.random.seed(0) + + A = np.random.rand(n, n) + B = np.random.rand(n, n) + res2 = quadratic_assignment(A, B, method=self.method) + + assert_equal(res1.nit, res2.nit) + + def test_partial_guess(self): + n = 5 + A = np.random.rand(n, n) + B = np.random.rand(n, n) + + res1 = quadratic_assignment(A, B, method=self.method, + options={'rng': 0}) + guess = np.array([np.arange(5), res1.col_ind]).T + res2 = quadratic_assignment(A, B, method=self.method, + options={'rng': 0, 'partial_guess': guess}) + fix = [2, 4] + match = np.array([np.arange(5)[fix], res1.col_ind[fix]]).T + res3 = quadratic_assignment(A, B, method=self.method, + options={'rng': 0, 'partial_guess': guess, + 'partial_match': match}) + assert_(res1.nit != n*(n+1)/2) + assert_equal(res2.nit, n*(n+1)/2) # tests each swap exactly once + assert_equal(res3.nit, (n-2)*(n-1)/2) # tests free swaps exactly once + + def test_specific_input_validation(self): + # can't have more seed nodes than cost/dist nodes + _rm = _range_matrix + with pytest.raises( + ValueError, + match="`partial_guess` can have only as many entries as"): + quadratic_assignment(np.identity(3), np.identity(3), + method=self.method, + options={'partial_guess': _rm(5, 2)}) + # test for only two seed columns + with pytest.raises( + 
ValueError, match="`partial_guess` must have two columns"): + quadratic_assignment( + np.identity(3), np.identity(3), method=self.method, + options={'partial_guess': _range_matrix(2, 3)} + ) + # test that seed has no more than two dimensions + with pytest.raises( + ValueError, match="`partial_guess` must have exactly two"): + quadratic_assignment( + np.identity(3), np.identity(3), method=self.method, + options={'partial_guess': np.random.rand(3, 2, 2)} + ) + # seeds cannot be negative valued + with pytest.raises( + ValueError, match="`partial_guess` must contain only pos"): + quadratic_assignment( + np.identity(3), np.identity(3), method=self.method, + options={'partial_guess': -1 * _range_matrix(2, 2)} + ) + # seeds can't have values greater than number of nodes + with pytest.raises( + ValueError, + match="`partial_guess` entries must be less than number"): + quadratic_assignment( + np.identity(5), np.identity(5), method=self.method, + options={'partial_guess': 2 * _range_matrix(4, 2)} + ) + # columns of seed matrix must be unique + with pytest.raises( + ValueError, + match="`partial_guess` column entries must be unique"): + quadratic_assignment( + np.identity(3), np.identity(3), method=self.method, + options={'partial_guess': np.ones((2, 2))} + ) + + +class TestQAPOnce: + def setup_method(self): + np.random.seed(0) + + # these don't need to be repeated for each method + def test_common_input_validation(self): + # test that non square matrices return error + with pytest.raises(ValueError, match="`A` must be square"): + quadratic_assignment( + np.random.random((3, 4)), + np.random.random((3, 3)), + ) + with pytest.raises(ValueError, match="`B` must be square"): + quadratic_assignment( + np.random.random((3, 3)), + np.random.random((3, 4)), + ) + # test that cost and dist matrices have no more than two dimensions + with pytest.raises( + ValueError, match="`A` and `B` must have exactly two"): + quadratic_assignment( + np.random.random((3, 3, 3)), + np.random.random((3, 3, 3)), + ) + # test that cost and dist matrices of different sizes return error + with pytest.raises( + ValueError, + match="`A` and `B` matrices must be of equal size"): + quadratic_assignment( + np.random.random((3, 3)), + np.random.random((4, 4)), + ) + # can't have more seed nodes than cost/dist nodes + _rm = _range_matrix + with pytest.raises( + ValueError, + match="`partial_match` can have only as many seeds as"): + quadratic_assignment(np.identity(3), np.identity(3), + options={'partial_match': _rm(5, 2)}) + # test for only two seed columns + with pytest.raises( + ValueError, match="`partial_match` must have two columns"): + quadratic_assignment( + np.identity(3), np.identity(3), + options={'partial_match': _range_matrix(2, 3)} + ) + # test that seed has no more than two dimensions + with pytest.raises( + ValueError, match="`partial_match` must have exactly two"): + quadratic_assignment( + np.identity(3), np.identity(3), + options={'partial_match': np.random.rand(3, 2, 2)} + ) + # seeds cannot be negative valued + with pytest.raises( + ValueError, match="`partial_match` must contain only pos"): + quadratic_assignment( + np.identity(3), np.identity(3), + options={'partial_match': -1 * _range_matrix(2, 2)} + ) + # seeds can't have values greater than number of nodes + with pytest.raises( + ValueError, + match="`partial_match` entries must be less than number"): + quadratic_assignment( + np.identity(5), np.identity(5), + options={'partial_match': 2 * _range_matrix(4, 2)} + ) + # columns of seed matrix must be unique + 
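+ # --- Illustrative sketch (editorial addition; not part of the upstream test file) ---
+ # For contrast with the invalid inputs rejected in these checks, a *valid*
+ # partial match: an (m, 2) array of non-negative, per-column-unique indices,
+ # column 0 indexing nodes of A and column 1 their fixed matches in B.
+ import numpy as np
+ from scipy.optimize import quadratic_assignment
+ rng = np.random.default_rng(0)
+ A, B = rng.random((5, 5)), rng.random((5, 5))
+ seed = np.array([[0, 3],               # node 0 of A is pinned to node 3 of B
+                  [2, 1]])              # node 2 of A is pinned to node 1 of B
+ res = quadratic_assignment(A, B, options={'partial_match': seed})
+ assert res.col_ind[0] == 3 and res.col_ind[2] == 1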
with pytest.raises( + ValueError, + match="`partial_match` column entries must be unique"): + quadratic_assignment( + np.identity(3), np.identity(3), + options={'partial_match': np.ones((2, 2))} + ) + + +def _range_matrix(a, b): + mat = np.zeros((a, b)) + for i in range(b): + mat[:, i] = np.arange(a) + return mat + + +def _doubly_stochastic(P, tol=1e-3): + # cleaner implementation of btaba/sinkhorn_knopp + + max_iter = 1000 + c = 1 / P.sum(axis=0) + r = 1 / (P @ c) + P_eps = P + + for it in range(max_iter): + if ((np.abs(P_eps.sum(axis=1) - 1) < tol).all() and + (np.abs(P_eps.sum(axis=0) - 1) < tol).all()): + # All column/row sums ~= 1 within threshold + break + + c = 1 / (r @ P) + r = 1 / (P @ c) + P_eps = r[:, None] * P * c + + return P_eps diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_regression.py b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_regression.py new file mode 100644 index 0000000000000000000000000000000000000000..44916ba96293db19756b8222422e76945aa48ebb --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_regression.py @@ -0,0 +1,40 @@ +"""Regression tests for optimize. + +""" +import numpy as np +from numpy.testing import assert_almost_equal +from pytest import raises as assert_raises + +import scipy.optimize + + +class TestRegression: + + def test_newton_x0_is_0(self): + # Regression test for gh-1601 + tgt = 1 + res = scipy.optimize.newton(lambda x: x - 1, 0) + assert_almost_equal(res, tgt) + + def test_newton_integers(self): + # Regression test for gh-1741 + root = scipy.optimize.newton(lambda x: x**2 - 1, x0=2, + fprime=lambda x: 2*x) + assert_almost_equal(root, 1.0) + + def test_lmdif_errmsg(self): + # This shouldn't cause a crash on Python 3 + class SomeError(Exception): + pass + counter = [0] + + def func(x): + counter[0] += 1 + if counter[0] < 3: + return x**2 - np.array([9, 10, 11]) + else: + raise SomeError() + assert_raises(SomeError, + scipy.optimize.leastsq, + func, [1, 2, 3]) + diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_slsqp.py b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_slsqp.py new file mode 100644 index 0000000000000000000000000000000000000000..cab46291b91e53a6b5f55cc0185741ca966ba514 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_slsqp.py @@ -0,0 +1,608 @@ +""" +Unit test for SLSQP optimization. +""" +from numpy.testing import (assert_, assert_array_almost_equal, + assert_allclose, assert_equal) +from pytest import raises as assert_raises +import pytest +import numpy as np + +from scipy.optimize import fmin_slsqp, minimize, Bounds, NonlinearConstraint + + +class MyCallBack: + """pass a custom callback function + + This makes sure it's being used. + """ + def __init__(self): + self.been_called = False + self.ncalls = 0 + + def __call__(self, x): + self.been_called = True + self.ncalls += 1 + + +class TestSLSQP: + """ + Test SLSQP algorithm using Example 14.4 from Numerical Methods for + Engineers by Steven Chapra and Raymond Canale. + This example maximizes the function f(x) = 2*x*y + 2*x - x**2 - 2*y**2, + which has a maximum at x=2, y=1. + """ + def setup_method(self): + self.opts = {'disp': False} + + def fun(self, d, sign=1.0): + """ + Arguments: + d - A list of two elements, where d[0] represents x and d[1] represents y + in the following equation. + sign - A multiplier for f. 
Since we want to optimize it, and the SciPy + optimizers can only minimize functions, we need to multiply it by + -1 to achieve the desired solution + Returns: + 2*x*y + 2*x - x**2 - 2*y**2 + + """ + x = d[0] + y = d[1] + return sign*(2*x*y + 2*x - x**2 - 2*y**2) + + def jac(self, d, sign=1.0): + """ + This is the derivative of fun, returning a NumPy array + representing df/dx and df/dy. + + """ + x = d[0] + y = d[1] + dfdx = sign*(-2*x + 2*y + 2) + dfdy = sign*(2*x - 4*y) + return np.array([dfdx, dfdy], float) + + def fun_and_jac(self, d, sign=1.0): + return self.fun(d, sign), self.jac(d, sign) + + def f_eqcon(self, x, sign=1.0): + """ Equality constraint """ + return np.array([x[0] - x[1]]) + + def fprime_eqcon(self, x, sign=1.0): + """ Equality constraint, derivative """ + return np.array([[1, -1]]) + + def f_eqcon_scalar(self, x, sign=1.0): + """ Scalar equality constraint """ + return self.f_eqcon(x, sign)[0] + + def fprime_eqcon_scalar(self, x, sign=1.0): + """ Scalar equality constraint, derivative """ + return self.fprime_eqcon(x, sign)[0].tolist() + + def f_ieqcon(self, x, sign=1.0): + """ Inequality constraint """ + return np.array([x[0] - x[1] - 1.0]) + + def fprime_ieqcon(self, x, sign=1.0): + """ Inequality constraint, derivative """ + return np.array([[1, -1]]) + + def f_ieqcon2(self, x): + """ Vector inequality constraint """ + return np.asarray(x) + + def fprime_ieqcon2(self, x): + """ Vector inequality constraint, derivative """ + return np.identity(x.shape[0]) + + # minimize + def test_minimize_unbounded_approximated(self): + # Minimize, method='SLSQP': unbounded, approximated jacobian. + jacs = [None, False, '2-point', '3-point'] + for jac in jacs: + res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ), + jac=jac, method='SLSQP', + options=self.opts) + assert_(res['success'], res['message']) + assert_allclose(res.x, [2, 1]) + + def test_minimize_unbounded_given(self): + # Minimize, method='SLSQP': unbounded, given Jacobian. + res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ), + jac=self.jac, method='SLSQP', options=self.opts) + assert_(res['success'], res['message']) + assert_allclose(res.x, [2, 1]) + + def test_minimize_bounded_approximated(self): + # Minimize, method='SLSQP': bounded, approximated jacobian. + jacs = [None, False, '2-point', '3-point'] + for jac in jacs: + with np.errstate(invalid='ignore'): + res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ), + jac=jac, + bounds=((2.5, None), (None, 0.5)), + method='SLSQP', options=self.opts) + assert_(res['success'], res['message']) + assert_allclose(res.x, [2.5, 0.5]) + assert_(2.5 <= res.x[0]) + assert_(res.x[1] <= 0.5) + + def test_minimize_unbounded_combined(self): + # Minimize, method='SLSQP': unbounded, combined function and Jacobian. + res = minimize(self.fun_and_jac, [-1.0, 1.0], args=(-1.0, ), + jac=True, method='SLSQP', options=self.opts) + assert_(res['success'], res['message']) + assert_allclose(res.x, [2, 1]) + + def test_minimize_equality_approximated(self): + # Minimize with method='SLSQP': equality constraint, approx. jacobian. + jacs = [None, False, '2-point', '3-point'] + for jac in jacs: + res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ), + jac=jac, + constraints={'type': 'eq', + 'fun': self.f_eqcon, + 'args': (-1.0, )}, + method='SLSQP', options=self.opts) + assert_(res['success'], res['message']) + assert_allclose(res.x, [1, 1]) + + def test_minimize_equality_given(self): + # Minimize with method='SLSQP': equality constraint, given Jacobian. 
+ res = minimize(self.fun, [-1.0, 1.0], jac=self.jac, + method='SLSQP', args=(-1.0,), + constraints={'type': 'eq', 'fun':self.f_eqcon, + 'args': (-1.0, )}, + options=self.opts) + assert_(res['success'], res['message']) + assert_allclose(res.x, [1, 1]) + + def test_minimize_equality_given2(self): + # Minimize with method='SLSQP': equality constraint, given Jacobian + # for fun and const. + res = minimize(self.fun, [-1.0, 1.0], method='SLSQP', + jac=self.jac, args=(-1.0,), + constraints={'type': 'eq', + 'fun': self.f_eqcon, + 'args': (-1.0, ), + 'jac': self.fprime_eqcon}, + options=self.opts) + assert_(res['success'], res['message']) + assert_allclose(res.x, [1, 1]) + + def test_minimize_equality_given_cons_scalar(self): + # Minimize with method='SLSQP': scalar equality constraint, given + # Jacobian for fun and const. + res = minimize(self.fun, [-1.0, 1.0], method='SLSQP', + jac=self.jac, args=(-1.0,), + constraints={'type': 'eq', + 'fun': self.f_eqcon_scalar, + 'args': (-1.0, ), + 'jac': self.fprime_eqcon_scalar}, + options=self.opts) + assert_(res['success'], res['message']) + assert_allclose(res.x, [1, 1]) + + def test_minimize_inequality_given(self): + # Minimize with method='SLSQP': inequality constraint, given Jacobian. + res = minimize(self.fun, [-1.0, 1.0], method='SLSQP', + jac=self.jac, args=(-1.0, ), + constraints={'type': 'ineq', + 'fun': self.f_ieqcon, + 'args': (-1.0, )}, + options=self.opts) + assert_(res['success'], res['message']) + assert_allclose(res.x, [2, 1], atol=1e-3) + + def test_minimize_inequality_given_vector_constraints(self): + # Minimize with method='SLSQP': vector inequality constraint, given + # Jacobian. + res = minimize(self.fun, [-1.0, 1.0], jac=self.jac, + method='SLSQP', args=(-1.0,), + constraints={'type': 'ineq', + 'fun': self.f_ieqcon2, + 'jac': self.fprime_ieqcon2}, + options=self.opts) + assert_(res['success'], res['message']) + assert_allclose(res.x, [2, 1]) + + def test_minimize_bounded_constraint(self): + # when the constraint makes the solver go up against a parameter + # bound make sure that the numerical differentiation of the + # jacobian doesn't try to exceed that bound using a finite difference. + # gh11403 + def c(x): + assert 0 <= x[0] <= 1 and 0 <= x[1] <= 1, x + return x[0] ** 0.5 + x[1] + + def f(x): + assert 0 <= x[0] <= 1 and 0 <= x[1] <= 1, x + return -x[0] ** 2 + x[1] ** 2 + + cns = [NonlinearConstraint(c, 0, 1.5)] + x0 = np.asarray([0.9, 0.5]) + bnd = Bounds([0., 0.], [1.0, 1.0]) + minimize(f, x0, method='SLSQP', bounds=bnd, constraints=cns) + + def test_minimize_bound_equality_given2(self): + # Minimize with method='SLSQP': bounds, eq. const., given jac. for + # fun. and const. + res = minimize(self.fun, [-1.0, 1.0], method='SLSQP', + jac=self.jac, args=(-1.0, ), + bounds=[(-0.8, 1.), (-1, 0.8)], + constraints={'type': 'eq', + 'fun': self.f_eqcon, + 'args': (-1.0, ), + 'jac': self.fprime_eqcon}, + options=self.opts) + assert_(res['success'], res['message']) + assert_allclose(res.x, [0.8, 0.8], atol=1e-3) + assert_(-0.8 <= res.x[0] <= 1) + assert_(-1 <= res.x[1] <= 0.8) + + # fmin_slsqp + def test_unbounded_approximated(self): + # SLSQP: unbounded, approximated Jacobian. + res = fmin_slsqp(self.fun, [-1.0, 1.0], args=(-1.0, ), + iprint = 0, full_output = 1) + x, fx, its, imode, smode = res + assert_(imode == 0, imode) + assert_array_almost_equal(x, [2, 1]) + + def test_unbounded_given(self): + # SLSQP: unbounded, given Jacobian. 
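+ # --- Illustrative sketch (editorial addition; not part of the upstream test file) ---
+ # The constraint-dictionary convention these tests rely on: for SLSQP,
+ # 'eq' means fun(x) == 0 and 'ineq' means fun(x) >= 0; 'jac' and 'args'
+ # entries are optional.
+ from scipy.optimize import minimize
+ cons = ({'type': 'eq',   'fun': lambda x: x[0] + x[1] - 1},
+         {'type': 'ineq', 'fun': lambda x: x[0] - 0.25})
+ res = minimize(lambda x: x[0]**2 + x[1]**2, [1.0, 1.0],
+                method='SLSQP', constraints=cons)
+ # -> x ~= [0.5, 0.5]; the inequality holds strictly there (inactive)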
+ res = fmin_slsqp(self.fun, [-1.0, 1.0], args=(-1.0, ), + fprime = self.jac, iprint = 0, + full_output = 1) + x, fx, its, imode, smode = res + assert_(imode == 0, imode) + assert_array_almost_equal(x, [2, 1]) + + def test_equality_approximated(self): + # SLSQP: equality constraint, approximated Jacobian. + res = fmin_slsqp(self.fun,[-1.0,1.0], args=(-1.0,), + eqcons = [self.f_eqcon], + iprint = 0, full_output = 1) + x, fx, its, imode, smode = res + assert_(imode == 0, imode) + assert_array_almost_equal(x, [1, 1]) + + def test_equality_given(self): + # SLSQP: equality constraint, given Jacobian. + res = fmin_slsqp(self.fun, [-1.0, 1.0], + fprime=self.jac, args=(-1.0,), + eqcons = [self.f_eqcon], iprint = 0, + full_output = 1) + x, fx, its, imode, smode = res + assert_(imode == 0, imode) + assert_array_almost_equal(x, [1, 1]) + + def test_equality_given2(self): + # SLSQP: equality constraint, given Jacobian for fun and const. + res = fmin_slsqp(self.fun, [-1.0, 1.0], + fprime=self.jac, args=(-1.0,), + f_eqcons = self.f_eqcon, + fprime_eqcons = self.fprime_eqcon, + iprint = 0, + full_output = 1) + x, fx, its, imode, smode = res + assert_(imode == 0, imode) + assert_array_almost_equal(x, [1, 1]) + + def test_inequality_given(self): + # SLSQP: inequality constraint, given Jacobian. + res = fmin_slsqp(self.fun, [-1.0, 1.0], + fprime=self.jac, args=(-1.0, ), + ieqcons = [self.f_ieqcon], + iprint = 0, full_output = 1) + x, fx, its, imode, smode = res + assert_(imode == 0, imode) + assert_array_almost_equal(x, [2, 1], decimal=3) + + def test_bound_equality_given2(self): + # SLSQP: bounds, eq. const., given jac. for fun. and const. + res = fmin_slsqp(self.fun, [-1.0, 1.0], + fprime=self.jac, args=(-1.0, ), + bounds = [(-0.8, 1.), (-1, 0.8)], + f_eqcons = self.f_eqcon, + fprime_eqcons = self.fprime_eqcon, + iprint = 0, full_output = 1) + x, fx, its, imode, smode = res + assert_(imode == 0, imode) + assert_array_almost_equal(x, [0.8, 0.8], decimal=3) + assert_(-0.8 <= x[0] <= 1) + assert_(-1 <= x[1] <= 0.8) + + def test_scalar_constraints(self): + # Regression test for gh-2182 + x = fmin_slsqp(lambda z: z**2, [3.], + ieqcons=[lambda z: z[0] - 1], + iprint=0) + assert_array_almost_equal(x, [1.]) + + x = fmin_slsqp(lambda z: z**2, [3.], + f_ieqcons=lambda z: [z[0] - 1], + iprint=0) + assert_array_almost_equal(x, [1.]) + + def test_integer_bounds(self): + # This should not raise an exception + fmin_slsqp(lambda z: z**2 - 1, [0], bounds=[[0, 1]], iprint=0) + + def test_array_bounds(self): + # NumPy used to treat n-dimensional 1-element arrays as scalars + # in some cases. The handling of `bounds` by `fmin_slsqp` still + # supports this behavior. + bounds = [(-np.inf, np.inf), (np.array([2]), np.array([3]))] + x = fmin_slsqp(lambda z: np.sum(z**2 - 1), [2.5, 2.5], bounds=bounds, + iprint=0) + assert_array_almost_equal(x, [0, 2]) + + def test_obj_must_return_scalar(self): + # Regression test for Github Issue #5433 + # If objective function does not return a scalar, raises ValueError + with assert_raises(ValueError): + fmin_slsqp(lambda x: [0, 1], [1, 2, 3]) + + def test_obj_returns_scalar_in_list(self): + # Test for Github Issue #5433 and PR #6691 + # Objective function should be able to return length-1 Python list + # containing the scalar + fmin_slsqp(lambda x: [0], [1, 2, 3], iprint=0) + + def test_callback(self): + # Minimize, method='SLSQP': unbounded, approximated jacobian. 
Check for callback + callback = MyCallBack() + res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ), + method='SLSQP', callback=callback, options=self.opts) + assert_(res['success'], res['message']) + assert_(callback.been_called) + assert_equal(callback.ncalls, res['nit']) + + def test_inconsistent_linearization(self): + # SLSQP must be able to solve this problem, even if the + # linearized problem at the starting point is infeasible. + + # Linearized constraints are + # + # 2*x0[0]*x[0] >= 1 + # + # At x0 = [0, 1], the second constraint is clearly infeasible. + # This triggers a call with n2==1 in the LSQ subroutine. + x = [0, 1] + def f1(x): + return x[0] + x[1] - 2 + def f2(x): + return x[0] ** 2 - 1 + sol = minimize( + lambda x: x[0]**2 + x[1]**2, + x, + constraints=({'type':'eq','fun': f1}, + {'type':'ineq','fun': f2}), + bounds=((0,None), (0,None)), + method='SLSQP') + x = sol.x + + assert_allclose(f1(x), 0, atol=1e-8) + assert_(f2(x) >= -1e-8) + assert_(sol.success, sol) + + def test_regression_5743(self): + # SLSQP must not indicate success for this problem, + # which is infeasible. + x = [1, 2] + sol = minimize( + lambda x: x[0]**2 + x[1]**2, + x, + constraints=({'type':'eq','fun': lambda x: x[0]+x[1]-1}, + {'type':'ineq','fun': lambda x: x[0]-2}), + bounds=((0,None), (0,None)), + method='SLSQP') + assert_(not sol.success, sol) + + def test_gh_6676(self): + def func(x): + return (x[0] - 1)**2 + 2*(x[1] - 1)**2 + 0.5*(x[2] - 1)**2 + + sol = minimize(func, [0, 0, 0], method='SLSQP') + assert_(sol.jac.shape == (3,)) + + def test_invalid_bounds(self): + # Raise correct error when lower bound is greater than upper bound. + # See Github issue 6875. + bounds_list = [ + ((1, 2), (2, 1)), + ((2, 1), (1, 2)), + ((2, 1), (2, 1)), + ((np.inf, 0), (np.inf, 0)), + ((1, -np.inf), (0, 1)), + ] + for bounds in bounds_list: + with assert_raises(ValueError): + minimize(self.fun, [-1.0, 1.0], bounds=bounds, method='SLSQP') + + def test_bounds_clipping(self): + # + # SLSQP returns bogus results for initial guess out of bounds, gh-6859 + # + def f(x): + return (x[0] - 1)**2 + + sol = minimize(f, [10], method='slsqp', bounds=[(None, 0)]) + assert_(sol.success) + assert_allclose(sol.x, 0, atol=1e-10) + + sol = minimize(f, [-10], method='slsqp', bounds=[(2, None)]) + assert_(sol.success) + assert_allclose(sol.x, 2, atol=1e-10) + + sol = minimize(f, [-10], method='slsqp', bounds=[(None, 0)]) + assert_(sol.success) + assert_allclose(sol.x, 0, atol=1e-10) + + sol = minimize(f, [10], method='slsqp', bounds=[(2, None)]) + assert_(sol.success) + assert_allclose(sol.x, 2, atol=1e-10) + + sol = minimize(f, [-0.5], method='slsqp', bounds=[(-1, 0)]) + assert_(sol.success) + assert_allclose(sol.x, 0, atol=1e-10) + + sol = minimize(f, [10], method='slsqp', bounds=[(-1, 0)]) + assert_(sol.success) + assert_allclose(sol.x, 0, atol=1e-10) + + def test_infeasible_initial(self): + # Check SLSQP behavior with infeasible initial point + def f(x): + x, = x + return x*x - 2*x + 1 + + cons_u = [{'type': 'ineq', 'fun': lambda x: 0 - x}] + cons_l = [{'type': 'ineq', 'fun': lambda x: x - 2}] + cons_ul = [{'type': 'ineq', 'fun': lambda x: 0 - x}, + {'type': 'ineq', 'fun': lambda x: x + 1}] + + sol = minimize(f, [10], method='slsqp', constraints=cons_u) + assert_(sol.success) + assert_allclose(sol.x, 0, atol=1e-10) + + sol = minimize(f, [-10], method='slsqp', constraints=cons_l) + assert_(sol.success) + assert_allclose(sol.x, 2, atol=1e-10) + + sol = minimize(f, [-10], method='slsqp', constraints=cons_u) + assert_(sol.success) + 
assert_allclose(sol.x, 0, atol=1e-10) + + sol = minimize(f, [10], method='slsqp', constraints=cons_l) + assert_(sol.success) + assert_allclose(sol.x, 2, atol=1e-10) + + sol = minimize(f, [-0.5], method='slsqp', constraints=cons_ul) + assert_(sol.success) + assert_allclose(sol.x, 0, atol=1e-10) + + sol = minimize(f, [10], method='slsqp', constraints=cons_ul) + assert_(sol.success) + assert_allclose(sol.x, 0, atol=1e-10) + + def test_inconsistent_inequalities(self): + # gh-7618 + + def cost(x): + return -1 * x[0] + 4 * x[1] + + def ineqcons1(x): + return x[1] - x[0] - 1 + + def ineqcons2(x): + return x[0] - x[1] + + # The inequalities are inconsistent, so no solution can exist: + # + # x1 >= x0 + 1 + # x0 >= x1 + + x0 = (1,5) + bounds = ((-5, 5), (-5, 5)) + cons = (dict(type='ineq', fun=ineqcons1), dict(type='ineq', fun=ineqcons2)) + res = minimize(cost, x0, method='SLSQP', bounds=bounds, constraints=cons) + + assert_(not res.success) + + def test_new_bounds_type(self): + def f(x): + return x[0] ** 2 + x[1] ** 2 + bounds = Bounds([1, 0], [np.inf, np.inf]) + sol = minimize(f, [0, 0], method='slsqp', bounds=bounds) + assert_(sol.success) + assert_allclose(sol.x, [1, 0]) + + def test_nested_minimization(self): + + class NestedProblem: + + def __init__(self): + self.F_outer_count = 0 + + def F_outer(self, x): + self.F_outer_count += 1 + if self.F_outer_count > 1000: + raise Exception("Nested minimization failed to terminate.") + inner_res = minimize(self.F_inner, (3, 4), method="SLSQP") + assert_(inner_res.success) + assert_allclose(inner_res.x, [1, 1]) + return x[0]**2 + x[1]**2 + x[2]**2 + + def F_inner(self, x): + return (x[0] - 1)**2 + (x[1] - 1)**2 + + def solve(self): + outer_res = minimize(self.F_outer, (5, 5, 5), method="SLSQP") + assert_(outer_res.success) + assert_allclose(outer_res.x, [0, 0, 0]) + + problem = NestedProblem() + problem.solve() + + def test_gh1758(self): + # the test suggested in gh1758 + # https://nlopt.readthedocs.io/en/latest/NLopt_Tutorial/ + # implement two equality constraints, in R^2. + def fun(x): + return np.sqrt(x[1]) + + def f_eqcon(x): + """ Equality constraint """ + return x[1] - (2 * x[0]) ** 3 + + def f_eqcon2(x): + """ Equality constraint """ + return x[1] - (-x[0] + 1) ** 3 + + c1 = {'type': 'eq', 'fun': f_eqcon} + c2 = {'type': 'eq', 'fun': f_eqcon2} + + res = minimize(fun, [8, 0.25], method='SLSQP', + constraints=[c1, c2], bounds=[(-0.5, 1), (0, 8)]) + + np.testing.assert_allclose(res.fun, 0.5443310539518) + np.testing.assert_allclose(res.x, [0.33333333, 0.2962963]) + assert res.success + + def test_gh9640(self): + np.random.seed(10) + cons = ({'type': 'ineq', 'fun': lambda x: -x[0] - x[1] - 3}, + {'type': 'ineq', 'fun': lambda x: x[1] + x[2] - 2}) + bnds = ((-2, 2), (-2, 2), (-2, 2)) + + def target(x): + return 1 + x0 = [-1.8869783504471584, -0.640096352696244, -0.8174212253407696] + res = minimize(target, x0, method='SLSQP', bounds=bnds, constraints=cons, + options={'disp':False, 'maxiter':10000}) + + # The problem is infeasible, so it cannot succeed + assert not res.success + + def test_parameters_stay_within_bounds(self): + # gh11403. For some problems the SLSQP Fortran code suggests a step + # outside one of the lower/upper bounds. When this happens + # approx_derivative complains because it's being asked to evaluate + # a gradient outside its domain. 
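+        # Added note (illustrative, not from the upstream comment): a common
+        # defensive pattern for objectives only defined on the feasible box
+        # is to clip the incoming point, e.g. x = np.clip(x, bounds.lb,
+        # bounds.ub), inside `f`; the test below deliberately does not do
+        # this, so the assertion and the RuntimeWarning exercise the solver.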
+ np.random.seed(1) + bounds = Bounds(np.array([0.1]), np.array([1.0])) + n_inputs = len(bounds.lb) + x0 = np.array(bounds.lb + (bounds.ub - bounds.lb) * + np.random.random(n_inputs)) + + def f(x): + assert (x >= bounds.lb).all() + return np.linalg.norm(x) + + with pytest.warns(RuntimeWarning, match='x were outside bounds'): + res = minimize(f, x0, method='SLSQP', bounds=bounds) + assert res.success diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_tnc.py b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_tnc.py new file mode 100644 index 0000000000000000000000000000000000000000..2cde9837bfd08e62916660a9750d833629b6b547 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_tnc.py @@ -0,0 +1,345 @@ +""" +Unit tests for TNC optimization routine from tnc.py +""" +import pytest +from numpy.testing import assert_allclose, assert_equal + +import numpy as np +from math import pow + +from scipy import optimize + + +class TestTnc: + """TNC non-linear optimization. + + These tests are taken from Prof. K. Schittkowski's test examples + for constrained non-linear programming. + + http://www.uni-bayreuth.de/departments/math/~kschittkowski/home.htm + + """ + def setup_method(self): + # options for minimize + self.opts = {'disp': False, 'maxfun': 200} + + # objective functions and Jacobian for each test + def f1(self, x, a=100.0): + return a * pow((x[1] - pow(x[0], 2)), 2) + pow(1.0 - x[0], 2) + + def g1(self, x, a=100.0): + dif = [0, 0] + dif[1] = 2 * a * (x[1] - pow(x[0], 2)) + dif[0] = -2.0 * (x[0] * (dif[1] - 1.0) + 1.0) + return dif + + def fg1(self, x, a=100.0): + return self.f1(x, a), self.g1(x, a) + + def f3(self, x): + return x[1] + pow(x[1] - x[0], 2) * 1.0e-5 + + def g3(self, x): + dif = [0, 0] + dif[0] = -2.0 * (x[1] - x[0]) * 1.0e-5 + dif[1] = 1.0 - dif[0] + return dif + + def fg3(self, x): + return self.f3(x), self.g3(x) + + def f4(self, x): + return pow(x[0] + 1.0, 3) / 3.0 + x[1] + + def g4(self, x): + dif = [0, 0] + dif[0] = pow(x[0] + 1.0, 2) + dif[1] = 1.0 + return dif + + def fg4(self, x): + return self.f4(x), self.g4(x) + + def f5(self, x): + return np.sin(x[0] + x[1]) + pow(x[0] - x[1], 2) - \ + 1.5 * x[0] + 2.5 * x[1] + 1.0 + + def g5(self, x): + dif = [0, 0] + v1 = np.cos(x[0] + x[1]) + v2 = 2.0*(x[0] - x[1]) + + dif[0] = v1 + v2 - 1.5 + dif[1] = v1 - v2 + 2.5 + return dif + + def fg5(self, x): + return self.f5(x), self.g5(x) + + def f38(self, x): + return (100.0 * pow(x[1] - pow(x[0], 2), 2) + + pow(1.0 - x[0], 2) + 90.0 * pow(x[3] - pow(x[2], 2), 2) + + pow(1.0 - x[2], 2) + 10.1 * (pow(x[1] - 1.0, 2) + + pow(x[3] - 1.0, 2)) + + 19.8 * (x[1] - 1.0) * (x[3] - 1.0)) * 1.0e-5 + + def g38(self, x): + dif = [0, 0, 0, 0] + dif[0] = (-400.0 * x[0] * (x[1] - pow(x[0], 2)) - + 2.0 * (1.0 - x[0])) * 1.0e-5 + dif[1] = (200.0 * (x[1] - pow(x[0], 2)) + 20.2 * (x[1] - 1.0) + + 19.8 * (x[3] - 1.0)) * 1.0e-5 + dif[2] = (- 360.0 * x[2] * (x[3] - pow(x[2], 2)) - + 2.0 * (1.0 - x[2])) * 1.0e-5 + dif[3] = (180.0 * (x[3] - pow(x[2], 2)) + 20.2 * (x[3] - 1.0) + + 19.8 * (x[1] - 1.0)) * 1.0e-5 + return dif + + def fg38(self, x): + return self.f38(x), self.g38(x) + + def f45(self, x): + return 2.0 - x[0] * x[1] * x[2] * x[3] * x[4] / 120.0 + + def g45(self, x): + dif = [0] * 5 + dif[0] = - x[1] * x[2] * x[3] * x[4] / 120.0 + dif[1] = - x[0] * x[2] * x[3] * x[4] / 120.0 + dif[2] = - x[0] * x[1] * x[3] * x[4] / 120.0 + dif[3] = - x[0] * x[1] * x[2] * x[4] / 120.0 + dif[4] = - x[0] * x[1] * x[2] * x[3] / 120.0 + return dif + + def fg45(self, x): + 
return self.f45(x), self.g45(x) + + # tests + # minimize with method=TNC + def test_minimize_tnc1(self): + x0, bnds = [-2, 1], ([-np.inf, None], [-1.5, None]) + xopt = [1, 1] + iterx = [] # to test callback + + res = optimize.minimize(self.f1, x0, method='TNC', jac=self.g1, + bounds=bnds, options=self.opts, + callback=iterx.append) + assert_allclose(res.fun, self.f1(xopt), atol=1e-8) + assert_equal(len(iterx), res.nit) + + def test_minimize_tnc1b(self): + x0, bnds = np.array([-2, 1]), ([-np.inf, None], [-1.5, None]) + xopt = [1, 1] + x = optimize.minimize(self.f1, x0, method='TNC', + bounds=bnds, options=self.opts).x + assert_allclose(self.f1(x), self.f1(xopt), atol=1e-4) + + def test_minimize_tnc1c(self): + x0, bnds = [-2, 1], ([-np.inf, None],[-1.5, None]) + xopt = [1, 1] + x = optimize.minimize(self.fg1, x0, method='TNC', + jac=True, bounds=bnds, + options=self.opts).x + assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8) + + def test_minimize_tnc2(self): + x0, bnds = [-2, 1], ([-np.inf, None], [1.5, None]) + xopt = [-1.2210262419616387, 1.5] + x = optimize.minimize(self.f1, x0, method='TNC', + jac=self.g1, bounds=bnds, + options=self.opts).x + assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8) + + def test_minimize_tnc3(self): + x0, bnds = [10, 1], ([-np.inf, None], [0.0, None]) + xopt = [0, 0] + x = optimize.minimize(self.f3, x0, method='TNC', + jac=self.g3, bounds=bnds, + options=self.opts).x + assert_allclose(self.f3(x), self.f3(xopt), atol=1e-8) + + def test_minimize_tnc4(self): + x0,bnds = [1.125, 0.125], [(1, None), (0, None)] + xopt = [1, 0] + x = optimize.minimize(self.f4, x0, method='TNC', + jac=self.g4, bounds=bnds, + options=self.opts).x + assert_allclose(self.f4(x), self.f4(xopt), atol=1e-8) + + def test_minimize_tnc5(self): + x0, bnds = [0, 0], [(-1.5, 4),(-3, 3)] + xopt = [-0.54719755119659763, -1.5471975511965976] + x = optimize.minimize(self.f5, x0, method='TNC', + jac=self.g5, bounds=bnds, + options=self.opts).x + assert_allclose(self.f5(x), self.f5(xopt), atol=1e-8) + + def test_minimize_tnc38(self): + x0, bnds = np.array([-3, -1, -3, -1]), [(-10, 10)]*4 + xopt = [1]*4 + x = optimize.minimize(self.f38, x0, method='TNC', + jac=self.g38, bounds=bnds, + options=self.opts).x + assert_allclose(self.f38(x), self.f38(xopt), atol=1e-8) + + def test_minimize_tnc45(self): + x0, bnds = [2] * 5, [(0, 1), (0, 2), (0, 3), (0, 4), (0, 5)] + xopt = [1, 2, 3, 4, 5] + x = optimize.minimize(self.f45, x0, method='TNC', + jac=self.g45, bounds=bnds, + options=self.opts).x + assert_allclose(self.f45(x), self.f45(xopt), atol=1e-8) + + # fmin_tnc + def test_tnc1(self): + fg, x, bounds = self.fg1, [-2, 1], ([-np.inf, None], [-1.5, None]) + xopt = [1, 1] + + x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds, args=(100.0, ), + messages=optimize._tnc.MSG_NONE, + maxfun=200) + + assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8, + err_msg="TNC failed with status: " + + optimize._tnc.RCSTRINGS[rc]) + + def test_tnc1b(self): + x, bounds = [-2, 1], ([-np.inf, None], [-1.5, None]) + xopt = [1, 1] + + x, nf, rc = optimize.fmin_tnc(self.f1, x, approx_grad=True, + bounds=bounds, + messages=optimize._tnc.MSG_NONE, + maxfun=200) + + assert_allclose(self.f1(x), self.f1(xopt), atol=1e-4, + err_msg="TNC failed with status: " + + optimize._tnc.RCSTRINGS[rc]) + + def test_tnc1c(self): + x, bounds = [-2, 1], ([-np.inf, None], [-1.5, None]) + xopt = [1, 1] + + x, nf, rc = optimize.fmin_tnc(self.f1, x, fprime=self.g1, + bounds=bounds, + messages=optimize._tnc.MSG_NONE, + maxfun=200) + + 
assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8, + err_msg="TNC failed with status: " + + optimize._tnc.RCSTRINGS[rc]) + + def test_tnc2(self): + fg, x, bounds = self.fg1, [-2, 1], ([-np.inf, None], [1.5, None]) + xopt = [-1.2210262419616387, 1.5] + + x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds, + messages=optimize._tnc.MSG_NONE, + maxfun=200) + + assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8, + err_msg="TNC failed with status: " + + optimize._tnc.RCSTRINGS[rc]) + + def test_tnc3(self): + fg, x, bounds = self.fg3, [10, 1], ([-np.inf, None], [0.0, None]) + xopt = [0, 0] + + x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds, + messages=optimize._tnc.MSG_NONE, + maxfun=200) + + assert_allclose(self.f3(x), self.f3(xopt), atol=1e-8, + err_msg="TNC failed with status: " + + optimize._tnc.RCSTRINGS[rc]) + + def test_tnc4(self): + fg, x, bounds = self.fg4, [1.125, 0.125], [(1, None), (0, None)] + xopt = [1, 0] + + x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds, + messages=optimize._tnc.MSG_NONE, + maxfun=200) + + assert_allclose(self.f4(x), self.f4(xopt), atol=1e-8, + err_msg="TNC failed with status: " + + optimize._tnc.RCSTRINGS[rc]) + + def test_tnc5(self): + fg, x, bounds = self.fg5, [0, 0], [(-1.5, 4),(-3, 3)] + xopt = [-0.54719755119659763, -1.5471975511965976] + + x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds, + messages=optimize._tnc.MSG_NONE, + maxfun=200) + + assert_allclose(self.f5(x), self.f5(xopt), atol=1e-8, + err_msg="TNC failed with status: " + + optimize._tnc.RCSTRINGS[rc]) + + def test_tnc38(self): + fg, x, bounds = self.fg38, np.array([-3, -1, -3, -1]), [(-10, 10)]*4 + xopt = [1]*4 + + x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds, + messages=optimize._tnc.MSG_NONE, + maxfun=200) + + assert_allclose(self.f38(x), self.f38(xopt), atol=1e-8, + err_msg="TNC failed with status: " + + optimize._tnc.RCSTRINGS[rc]) + + def test_tnc45(self): + fg, x, bounds = self.fg45, [2] * 5, [(0, 1), (0, 2), (0, 3), + (0, 4), (0, 5)] + xopt = [1, 2, 3, 4, 5] + + x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds, + messages=optimize._tnc.MSG_NONE, + maxfun=200) + + assert_allclose(self.f45(x), self.f45(xopt), atol=1e-8, + err_msg="TNC failed with status: " + + optimize._tnc.RCSTRINGS[rc]) + + def test_raising_exceptions(self): + # tnc was ported to cython from hand-crafted cpython code + # check that Exception handling works. + def myfunc(x): + raise RuntimeError("myfunc") + + def myfunc1(x): + return optimize.rosen(x) + + def callback(x): + raise ValueError("callback") + + with pytest.raises(RuntimeError): + optimize.minimize(myfunc, [0, 1], method="TNC") + + with pytest.raises(ValueError): + optimize.minimize( + myfunc1, [0, 1], method="TNC", callback=callback + ) + + def test_callback_shouldnt_affect_minimization(self): + # gh14879. The output of a TNC minimization was different depending + # on whether a callback was used or not. The two should be equivalent. + # The issue was that TNC was unscaling/scaling x, and this process was + # altering x in the process. Now the callback uses an unscaled + # temporary copy of x. + def callback(x): + pass + + fun = optimize.rosen + bounds = [(0, 10)] * 4 + x0 = [1, 2, 3, 4.] 
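+        # optimize.rosen is the 4-D Rosenbrock function here (minimum at
+        # [1, 1, 1, 1], inside the (0, 10) bounds); the assertions below do
+        # not check optimality, only that the run with a callback matches
+        # the run without one in x, fun, and nfev.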
+ res = optimize.minimize( + fun, x0, bounds=bounds, method="TNC", options={"maxfun": 1000} + ) + res2 = optimize.minimize( + fun, x0, bounds=bounds, method="TNC", options={"maxfun": 1000}, + callback=callback + ) + assert_allclose(res2.x, res.x) + assert_allclose(res2.fun, res.fun) + assert_equal(res2.nfev, res.nfev) diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_trustregion_krylov.py b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_trustregion_krylov.py new file mode 100644 index 0000000000000000000000000000000000000000..b130362323c9ba4a126019fb13974a37b35d7a28 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_trustregion_krylov.py @@ -0,0 +1,171 @@ +""" +Unit tests for Krylov space trust-region subproblem solver. + +To run it in its simplest form:: + nosetests test_optimize.py + +""" +import numpy as np +from scipy.optimize._trlib import (get_trlib_quadratic_subproblem) +from numpy.testing import (assert_, + assert_almost_equal, + assert_equal, assert_array_almost_equal) + +KrylovQP = get_trlib_quadratic_subproblem(tol_rel_i=1e-8, tol_rel_b=1e-6) +KrylovQP_disp = get_trlib_quadratic_subproblem(tol_rel_i=1e-8, tol_rel_b=1e-6, + disp=True) + +class TestKrylovQuadraticSubproblem: + + def test_for_the_easy_case(self): + + # `H` is chosen such that `g` is not orthogonal to the + # eigenvector associated with the smallest eigenvalue. + H = np.array([[1.0, 0.0, 4.0], + [0.0, 2.0, 0.0], + [4.0, 0.0, 3.0]]) + g = np.array([5.0, 0.0, 4.0]) + + # Trust Radius + trust_radius = 1.0 + + # Solve Subproblem + subprob = KrylovQP(x=0, + fun=lambda x: 0, + jac=lambda x: g, + hess=lambda x: None, + hessp=lambda x, y: H.dot(y)) + p, hits_boundary = subprob.solve(trust_radius) + + assert_array_almost_equal(p, np.array([-1.0, 0.0, 0.0])) + assert_equal(hits_boundary, True) + # check kkt satisfaction + assert_almost_equal( + np.linalg.norm(H.dot(p) + subprob.lam * p + g), + 0.0) + # check trust region constraint + assert_almost_equal(np.linalg.norm(p), trust_radius) + + trust_radius = 0.5 + p, hits_boundary = subprob.solve(trust_radius) + + assert_array_almost_equal(p, + np.array([-0.46125446, 0., -0.19298788])) + assert_equal(hits_boundary, True) + # check kkt satisfaction + assert_almost_equal( + np.linalg.norm(H.dot(p) + subprob.lam * p + g), + 0.0) + # check trust region constraint + assert_almost_equal(np.linalg.norm(p), trust_radius) + + def test_for_the_hard_case(self): + + # `H` is chosen such that `g` is orthogonal to the + # eigenvector associated with the smallest eigenvalue. 
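+        # In the hard case the unconstrained stationary point of the model
+        # has norm below the trust radius, so the minimizer must include a
+        # component along that eigenvector to reach the boundary. The KKT
+        # conditions checked below are (H + lam*I) p = -g with lam >= 0 and
+        # ||p|| = trust_radius when the boundary is active.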
+ H = np.array([[1.0, 0.0, 4.0], + [0.0, 2.0, 0.0], + [4.0, 0.0, 3.0]]) + g = np.array([0.0, 2.0, 0.0]) + + # Trust Radius + trust_radius = 1.0 + + # Solve Subproblem + subprob = KrylovQP(x=0, + fun=lambda x: 0, + jac=lambda x: g, + hess=lambda x: None, + hessp=lambda x, y: H.dot(y)) + p, hits_boundary = subprob.solve(trust_radius) + + assert_array_almost_equal(p, np.array([0.0, -1.0, 0.0])) + # check kkt satisfaction + assert_almost_equal( + np.linalg.norm(H.dot(p) + subprob.lam * p + g), + 0.0) + # check trust region constraint + assert_almost_equal(np.linalg.norm(p), trust_radius) + + trust_radius = 0.5 + p, hits_boundary = subprob.solve(trust_radius) + + assert_array_almost_equal(p, np.array([0.0, -0.5, 0.0])) + # check kkt satisfaction + assert_almost_equal( + np.linalg.norm(H.dot(p) + subprob.lam * p + g), + 0.0) + # check trust region constraint + assert_almost_equal(np.linalg.norm(p), trust_radius) + + def test_for_interior_convergence(self): + + H = np.array([[1.812159, 0.82687265, 0.21838879, -0.52487006, 0.25436988], + [0.82687265, 2.66380283, 0.31508988, -0.40144163, 0.08811588], + [0.21838879, 0.31508988, 2.38020726, -0.3166346, 0.27363867], + [-0.52487006, -0.40144163, -0.3166346, 1.61927182, -0.42140166], + [0.25436988, 0.08811588, 0.27363867, -0.42140166, 1.33243101]]) + g = np.array([0.75798952, 0.01421945, 0.33847612, 0.83725004, -0.47909534]) + trust_radius = 1.1 + + # Solve Subproblem + subprob = KrylovQP(x=0, + fun=lambda x: 0, + jac=lambda x: g, + hess=lambda x: None, + hessp=lambda x, y: H.dot(y)) + p, hits_boundary = subprob.solve(trust_radius) + + # check kkt satisfaction + assert_almost_equal( + np.linalg.norm(H.dot(p) + subprob.lam * p + g), + 0.0) + + assert_array_almost_equal(p, [-0.68585435, 0.1222621, -0.22090999, + -0.67005053, 0.31586769]) + assert_array_almost_equal(hits_boundary, False) + + def test_for_very_close_to_zero(self): + + H = np.array([[0.88547534, 2.90692271, 0.98440885, -0.78911503, -0.28035809], + [2.90692271, -0.04618819, 0.32867263, -0.83737945, 0.17116396], + [0.98440885, 0.32867263, -0.87355957, -0.06521957, -1.43030957], + [-0.78911503, -0.83737945, -0.06521957, -1.645709, -0.33887298], + [-0.28035809, 0.17116396, -1.43030957, -0.33887298, -1.68586978]]) + g = np.array([0, 0, 0, 0, 1e-6]) + trust_radius = 1.1 + + # Solve Subproblem + subprob = KrylovQP(x=0, + fun=lambda x: 0, + jac=lambda x: g, + hess=lambda x: None, + hessp=lambda x, y: H.dot(y)) + p, hits_boundary = subprob.solve(trust_radius) + + # check kkt satisfaction + assert_almost_equal( + np.linalg.norm(H.dot(p) + subprob.lam * p + g), + 0.0) + # check trust region constraint + assert_almost_equal(np.linalg.norm(p), trust_radius) + + assert_array_almost_equal(p, [0.06910534, -0.01432721, + -0.65311947, -0.23815972, + -0.84954934]) + assert_array_almost_equal(hits_boundary, True) + + def test_disp(self, capsys): + H = -np.eye(5) + g = np.array([0, 0, 0, 0, 1e-6]) + trust_radius = 1.1 + + subprob = KrylovQP_disp(x=0, + fun=lambda x: 0, + jac=lambda x: g, + hess=lambda x: None, + hessp=lambda x, y: H.dot(y)) + p, hits_boundary = subprob.solve(trust_radius) + out, err = capsys.readouterr() + assert_(out.startswith(' TR Solving trust region problem'), repr(out)) + diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_zeros.py b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_zeros.py new file mode 100644 index 0000000000000000000000000000000000000000..86606d8c4318cb26825a8fa955b2ef7647f4009c --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/scipy/optimize/tests/test_zeros.py @@ -0,0 +1,939 @@
+import pytest
+
+from functools import lru_cache
+
+from numpy.testing import (assert_warns, assert_,
+                           assert_allclose,
+                           assert_equal,
+                           assert_array_equal,
+                           suppress_warnings)
+import numpy as np
+from numpy import finfo, power, nan, isclose, sqrt, exp, sin, cos
+
+from scipy import optimize
+from scipy.optimize import (_zeros_py as zeros, newton, root_scalar,
+                            OptimizeResult)
+
+from scipy._lib._util import getfullargspec_no_self as _getfullargspec
+
+# Import testing parameters
+from scipy.optimize._tstutils import get_tests, functions as tstutils_functions
+
+TOL = 4*np.finfo(float).eps  # tolerance
+
+_FLOAT_EPS = finfo(float).eps
+
+bracket_methods = [zeros.bisect, zeros.ridder, zeros.brentq, zeros.brenth,
+                   zeros.toms748]
+gradient_methods = [zeros.newton]
+all_methods = bracket_methods + gradient_methods
+
+# A few test functions used frequently:
+# A simple quadratic, x**2 - 2*x - 1 = (x - 1)**2 - 2, with roots 1 +/- sqrt(2)
+def f1(x):
+    return x ** 2 - 2 * x - 1
+
+
+def f1_1(x):
+    return 2 * x - 2
+
+
+def f1_2(x):
+    return 2.0 + 0 * x
+
+
+def f1_and_p_and_pp(x):
+    return f1(x), f1_1(x), f1_2(x)
+
+
+# Simple transcendental function
+def f2(x):
+    return exp(x) - cos(x)
+
+
+def f2_1(x):
+    return exp(x) + sin(x)
+
+
+def f2_2(x):
+    return exp(x) + cos(x)
+
+
+# lru cached function
+@lru_cache
+def f_lrucached(x):
+    return x
+
+
+class TestScalarRootFinders:
+    # Basic tests for all scalar root finders
+
+    xtol = 4 * np.finfo(float).eps
+    rtol = 4 * np.finfo(float).eps
+
+    def _run_one_test(self, tc, method, sig_args_keys=None,
+                      sig_kwargs_keys=None, **kwargs):
+        method_args = []
+        for k in sig_args_keys or []:
+            if k not in tc:
+                # If a, b not present use x0, x1. Similarly for f and func
+                k = {'a': 'x0', 'b': 'x1', 'func': 'f'}.get(k, k)
+            method_args.append(tc[k])
+
+        method_kwargs = dict(**kwargs)
+        method_kwargs.update({'full_output': True, 'disp': False})
+        for k in sig_kwargs_keys or []:
+            method_kwargs[k] = tc[k]
+
+        root = tc.get('root')
+        func_args = tc.get('args', ())
+
+        try:
+            r, rr = method(*method_args, args=func_args, **method_kwargs)
+            return root, rr, tc
+        except Exception:
+            return root, zeros.RootResults(nan, -1, -1, zeros._EVALUEERR, method), tc
+
+    def run_tests(self, tests, method, name, known_fail=None, **kwargs):
+        r"""Run test-cases using the specified method and the supplied signature.
+
+        Extract the arguments for the method call from the test case
+        dictionary using the supplied keys for the method's signature."""
+        # The methods have one of two base signatures:
+        # (f, a, b, **kwargs)    # bisect/brentq/...
+        # (func, x0, **kwargs)   # newton
+
+        # FullArgSpec with args, varargs, varkw, defaults, ...
+        sig = _getfullargspec(method)
+        assert_(not sig.kwonlyargs)
+        nDefaults = len(sig.defaults)
+        nRequired = len(sig.args) - nDefaults
+        sig_args_keys = sig.args[:nRequired]
+        sig_kwargs_keys = []
+        if name in ['secant', 'newton', 'halley']:
+            if name in ['newton', 'halley']:
+                sig_kwargs_keys.append('fprime')
+                if name in ['halley']:
+                    sig_kwargs_keys.append('fprime2')
+            kwargs['tol'] = self.xtol
+        else:
+            kwargs['xtol'] = self.xtol
+            kwargs['rtol'] = self.rtol
+
+        results = [list(self._run_one_test(
+            tc, method, sig_args_keys=sig_args_keys,
+            sig_kwargs_keys=sig_kwargs_keys, **kwargs)) for tc in tests]
+        # results = [[true root, full output, tc], ...]
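+        # For orientation, a test case `tc` is a dict from
+        # scipy.optimize._tstutils; the keys relied on above are 'ID', 'f',
+        # and optionally 'args' and 'root', plus either ('a', 'b') or
+        # ('x0', 'x1') for the bracket / starting points. A hypothetical
+        # (illustrative, not from the collections) entry:
+        # {'ID': 'aps.01.00', 'f': f1, 'a': 2.0, 'b': 3.0, 'root': 2.414}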
+
+        known_fail = known_fail or []
+        notcvgd = [elt for elt in results if not elt[1].converged]
+        notcvgd = [elt for elt in notcvgd if elt[-1]['ID'] not in known_fail]
+        notcvged_IDS = [elt[-1]['ID'] for elt in notcvgd]
+        assert_equal([len(notcvged_IDS), notcvged_IDS], [0, []])
+
+        # The usable xtol and rtol depend on the test
+        tols = {'xtol': self.xtol, 'rtol': self.rtol}
+        tols.update(**kwargs)
+        rtol = tols['rtol']
+        atol = tols.get('tol', tols['xtol'])
+
+        cvgd = [elt for elt in results if elt[1].converged]
+        approx = [elt[1].root for elt in cvgd]
+        correct = [elt[0] for elt in cvgd]
+        # See if the root matches the reference value
+        notclose = [[a] + elt for a, c, elt in zip(approx, correct, cvgd) if
+                    not isclose(a, c, rtol=rtol, atol=atol)
+                    and elt[-1]['ID'] not in known_fail]
+        # If not, evaluate the function and see if it is 0 at the purported root
+        fvs = [tc['f'](aroot, *tc.get('args', tuple()))
+               for aroot, c, fullout, tc in notclose]
+        notclose = [[fv] + elt for fv, elt in zip(fvs, notclose) if fv != 0]
+        assert_equal([notclose, len(notclose)], [[], 0])
+        method_from_result = [result[1].method for result in results]
+        expected_method = [name for _ in results]
+        assert_equal(method_from_result, expected_method)
+
+    def run_collection(self, collection, method, name, smoothness=None,
+                       known_fail=None, **kwargs):
+        r"""Run a collection of tests using the specified method.
+
+        The name is used to determine some optional arguments."""
+        tests = get_tests(collection, smoothness=smoothness)
+        self.run_tests(tests, method, name, known_fail=known_fail, **kwargs)
+
+
+class TestBracketMethods(TestScalarRootFinders):
+    @pytest.mark.parametrize('method', bracket_methods)
+    @pytest.mark.parametrize('function', tstutils_functions)
+    def test_basic_root_scalar(self, method, function):
+        # Tests bracketing root finders called via `root_scalar` on a small
+        # set of simple problems, each of which has a root at `x=1`. Checks for
+        # converged status and that the root was found.
+        a, b = .5, sqrt(3)
+
+        r = root_scalar(function, method=method.__name__, bracket=[a, b], x0=a,
+                        xtol=self.xtol, rtol=self.rtol)
+        assert r.converged
+        assert_allclose(r.root, 1.0, atol=self.xtol, rtol=self.rtol)
+        assert r.method == method.__name__
+
+    @pytest.mark.parametrize('method', bracket_methods)
+    @pytest.mark.parametrize('function', tstutils_functions)
+    def test_basic_individual(self, method, function):
+        # Tests individual bracketing root finders on a small set of simple
+        # problems, each of which has a root at `x=1`. Checks for converged
+        # status and that the root was found.
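+        # Bracketing requires sign(f(a)) != sign(f(b)); every function in the
+        # collection has its root at x = 1, which lies strictly inside
+        # (0.5, sqrt(3)), so the bracket chosen below is valid for all of them.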
+ a, b = .5, sqrt(3) + root, r = method(function, a, b, xtol=self.xtol, rtol=self.rtol, + full_output=True) + + assert r.converged + assert_allclose(root, 1.0, atol=self.xtol, rtol=self.rtol) + + @pytest.mark.parametrize('method', bracket_methods) + def test_aps_collection(self, method): + self.run_collection('aps', method, method.__name__, smoothness=1) + + @pytest.mark.parametrize('method', [zeros.bisect, zeros.ridder, + zeros.toms748]) + def test_chandrupatla_collection(self, method): + known_fail = {'fun7.4'} if method == zeros.ridder else {} + self.run_collection('chandrupatla', method, method.__name__, + known_fail=known_fail) + + @pytest.mark.parametrize('method', bracket_methods) + def test_lru_cached_individual(self, method): + # check that https://github.com/scipy/scipy/issues/10846 is fixed + # (`root_scalar` failed when passed a function that was `@lru_cache`d) + a, b = -1, 1 + root, r = method(f_lrucached, a, b, full_output=True) + assert r.converged + assert_allclose(root, 0) + + +class TestNewton(TestScalarRootFinders): + def test_newton_collections(self): + known_fail = ['aps.13.00'] + known_fail += ['aps.12.05', 'aps.12.17'] # fails under Windows Py27 + for collection in ['aps', 'complex']: + self.run_collection(collection, zeros.newton, 'newton', + smoothness=2, known_fail=known_fail) + + def test_halley_collections(self): + known_fail = ['aps.12.06', 'aps.12.07', 'aps.12.08', 'aps.12.09', + 'aps.12.10', 'aps.12.11', 'aps.12.12', 'aps.12.13', + 'aps.12.14', 'aps.12.15', 'aps.12.16', 'aps.12.17', + 'aps.12.18', 'aps.13.00'] + for collection in ['aps', 'complex']: + self.run_collection(collection, zeros.newton, 'halley', + smoothness=2, known_fail=known_fail) + + def test_newton(self): + for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]: + x = zeros.newton(f, 3, tol=1e-6) + assert_allclose(f(x), 0, atol=1e-6) + x = zeros.newton(f, 3, x1=5, tol=1e-6) # secant, x0 and x1 + assert_allclose(f(x), 0, atol=1e-6) + x = zeros.newton(f, 3, fprime=f_1, tol=1e-6) # newton + assert_allclose(f(x), 0, atol=1e-6) + x = zeros.newton(f, 3, fprime=f_1, fprime2=f_2, tol=1e-6) # halley + assert_allclose(f(x), 0, atol=1e-6) + + def test_newton_by_name(self): + r"""Invoke newton through root_scalar()""" + for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]: + r = root_scalar(f, method='newton', x0=3, fprime=f_1, xtol=1e-6) + assert_allclose(f(r.root), 0, atol=1e-6) + for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]: + r = root_scalar(f, method='newton', x0=3, xtol=1e-6) # without f' + assert_allclose(f(r.root), 0, atol=1e-6) + + def test_secant_by_name(self): + r"""Invoke secant through root_scalar()""" + for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]: + r = root_scalar(f, method='secant', x0=3, x1=2, xtol=1e-6) + assert_allclose(f(r.root), 0, atol=1e-6) + r = root_scalar(f, method='secant', x0=3, x1=5, xtol=1e-6) + assert_allclose(f(r.root), 0, atol=1e-6) + for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]: + r = root_scalar(f, method='secant', x0=3, xtol=1e-6) # without x1 + assert_allclose(f(r.root), 0, atol=1e-6) + + def test_halley_by_name(self): + r"""Invoke halley through root_scalar()""" + for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]: + r = root_scalar(f, method='halley', x0=3, + fprime=f_1, fprime2=f_2, xtol=1e-6) + assert_allclose(f(r.root), 0, atol=1e-6) + + def test_root_scalar_fail(self): + message = 'fprime2 must be specified for halley' + with pytest.raises(ValueError, match=message): + root_scalar(f1, method='halley', fprime=f1_1, x0=3, 
xtol=1e-6) # no fprime2 + message = 'fprime must be specified for halley' + with pytest.raises(ValueError, match=message): + root_scalar(f1, method='halley', fprime2=f1_2, x0=3, xtol=1e-6) # no fprime + + def test_array_newton(self): + """test newton with array""" + + def f1(x, *a): + b = a[0] + x * a[3] + return a[1] - a[2] * (np.exp(b / a[5]) - 1.0) - b / a[4] - x + + def f1_1(x, *a): + b = a[3] / a[5] + return -a[2] * np.exp(a[0] / a[5] + x * b) * b - a[3] / a[4] - 1 + + def f1_2(x, *a): + b = a[3] / a[5] + return -a[2] * np.exp(a[0] / a[5] + x * b) * b**2 + + a0 = np.array([ + 5.32725221, 5.48673747, 5.49539973, + 5.36387202, 4.80237316, 1.43764452, + 5.23063958, 5.46094772, 5.50512718, + 5.42046290 + ]) + a1 = (np.sin(range(10)) + 1.0) * 7.0 + args = (a0, a1, 1e-09, 0.004, 10, 0.27456) + x0 = [7.0] * 10 + x = zeros.newton(f1, x0, f1_1, args) + x_expected = ( + 6.17264965, 11.7702805, 12.2219954, + 7.11017681, 1.18151293, 0.143707955, + 4.31928228, 10.5419107, 12.7552490, + 8.91225749 + ) + assert_allclose(x, x_expected) + # test halley's + x = zeros.newton(f1, x0, f1_1, args, fprime2=f1_2) + assert_allclose(x, x_expected) + # test secant + x = zeros.newton(f1, x0, args=args) + assert_allclose(x, x_expected) + + def test_array_newton_complex(self): + def f(x): + return x + 1+1j + + def fprime(x): + return 1.0 + + t = np.full(4, 1j) + x = zeros.newton(f, t, fprime=fprime) + assert_allclose(f(x), 0.) + + # should work even if x0 is not complex + t = np.ones(4) + x = zeros.newton(f, t, fprime=fprime) + assert_allclose(f(x), 0.) + + x = zeros.newton(f, t) + assert_allclose(f(x), 0.) + + def test_array_secant_active_zero_der(self): + """test secant doesn't continue to iterate zero derivatives""" + x = zeros.newton(lambda x, *a: x*x - a[0], x0=[4.123, 5], + args=[np.array([17, 25])]) + assert_allclose(x, (4.123105625617661, 5.0)) + + def test_array_newton_integers(self): + # test secant with float + x = zeros.newton(lambda y, z: z - y ** 2, [4.0] * 2, + args=([15.0, 17.0],)) + assert_allclose(x, (3.872983346207417, 4.123105625617661)) + # test integer becomes float + x = zeros.newton(lambda y, z: z - y ** 2, [4] * 2, args=([15, 17],)) + assert_allclose(x, (3.872983346207417, 4.123105625617661)) + + def test_array_newton_zero_der_failures(self): + # test derivative zero warning + assert_warns(RuntimeWarning, zeros.newton, + lambda y: y**2 - 2, [0., 0.], lambda y: 2 * y) + # test failures and zero_der + with pytest.warns(RuntimeWarning): + results = zeros.newton(lambda y: y**2 - 2, [0., 0.], + lambda y: 2*y, full_output=True) + assert_allclose(results.root, 0) + assert results.zero_der.all() + assert not results.converged.any() + + def test_newton_combined(self): + def f1(x): + return x ** 2 - 2 * x - 1 + def f1_1(x): + return 2 * x - 2 + def f1_2(x): + return 2.0 + 0 * x + + def f1_and_p_and_pp(x): + return x**2 - 2*x-1, 2*x-2, 2.0 + + sol0 = root_scalar(f1, method='newton', x0=3, fprime=f1_1) + sol = root_scalar(f1_and_p_and_pp, method='newton', x0=3, fprime=True) + assert_allclose(sol0.root, sol.root, atol=1e-8) + assert_equal(2*sol.function_calls, sol0.function_calls) + + sol0 = root_scalar(f1, method='halley', x0=3, fprime=f1_1, fprime2=f1_2) + sol = root_scalar(f1_and_p_and_pp, method='halley', x0=3, fprime2=True) + assert_allclose(sol0.root, sol.root, atol=1e-8) + assert_equal(3*sol.function_calls, sol0.function_calls) + + def test_newton_full_output(self, capsys): + # Test the full_output capability, both when converging and not. 
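+        # derivs == 0 exercises secant, derivs == 1 Newton, derivs == 2
+        # Halley; each iteration of the derivative-based variants costs
+        # (derivs + 1) function evaluations, which is what expected_counts
+        # and the assertions below encode.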
+        # Use simple polynomials to avoid hitting platform dependencies
+        # (e.g., exp & trig) in number of iterations
+
+        x0 = 3
+        expected_counts = [(6, 7), (5, 10), (3, 9)]
+
+        for derivs in range(3):
+            kwargs = {'tol': 1e-6, 'full_output': True, }
+            for k, v in [['fprime', f1_1], ['fprime2', f1_2]][:derivs]:
+                kwargs[k] = v
+
+            x, r = zeros.newton(f1, x0, disp=False, **kwargs)
+            assert_(r.converged)
+            assert_equal(x, r.root)
+            assert_equal((r.iterations, r.function_calls), expected_counts[derivs])
+            if derivs == 0:
+                assert r.function_calls <= r.iterations + 1
+            else:
+                assert_equal(r.function_calls, (derivs + 1) * r.iterations)
+
+            # Now repeat, allowing one fewer iteration to force convergence failure
+            iters = r.iterations - 1
+            x, r = zeros.newton(f1, x0, maxiter=iters, disp=False, **kwargs)
+            assert_(not r.converged)
+            assert_equal(x, r.root)
+            assert_equal(r.iterations, iters)
+
+            if derivs == 1:
+                # Check that the correct exception is raised and
+                # validate the start of the message.
+                msg = 'Failed to converge after %d iterations, value is .*' % (iters)
+                with pytest.raises(RuntimeError, match=msg):
+                    x, r = zeros.newton(f1, x0, maxiter=iters, disp=True, **kwargs)
+
+    def test_deriv_zero_warning(self):
+        def func(x):
+            return x ** 2 - 2.0
+        def dfunc(x):
+            return 2 * x
+        assert_warns(RuntimeWarning, zeros.newton, func, 0.0, dfunc, disp=False)
+        with pytest.raises(RuntimeError, match='Derivative was zero'):
+            zeros.newton(func, 0.0, dfunc)
+
+    def test_newton_does_not_modify_x0(self):
+        # https://github.com/scipy/scipy/issues/9964
+        x0 = np.array([0.1, 3])
+        x0_copy = x0.copy()  # Copy to test for equality.
+        newton(np.sin, x0, np.cos)
+        assert_array_equal(x0, x0_copy)
+
+    def test_gh17570_defaults(self):
+        # Previously, when fprime was not specified, root_scalar would default
+        # to secant. When x1 was not specified, secant failed.
+        # Check that without fprime, the default is secant if x1 is specified
+        # and newton otherwise.
+        res_newton_default = root_scalar(f1, method='newton', x0=3, xtol=1e-6)
+        res_secant_default = root_scalar(f1, method='secant', x0=3, x1=2,
+                                         xtol=1e-6)
+        # `newton` uses the secant method when `x0` and `x1` are specified
+        res_secant = newton(f1, x0=3, x1=2, tol=1e-6, full_output=True)[1]
+
+        # all three found a root
+        assert_allclose(f1(res_newton_default.root), 0, atol=1e-6)
+        assert res_newton_default.root.shape == tuple()
+        assert_allclose(f1(res_secant_default.root), 0, atol=1e-6)
+        assert res_secant_default.root.shape == tuple()
+        assert_allclose(f1(res_secant.root), 0, atol=1e-6)
+        assert res_secant.root.shape == tuple()
+
+        # Defaults are correct
+        assert (res_secant_default.root
+                == res_secant.root
+                != res_newton_default.iterations)
+        assert (res_secant_default.iterations
+                == res_secant_default.function_calls - 1  # true for secant
+                == res_secant.iterations
+                != res_newton_default.iterations
+                == res_newton_default.function_calls/2)  # newton 2-point diff
+
+    @pytest.mark.parametrize('kwargs', [dict(), {'method': 'newton'}])
+    def test_args_gh19090(self, kwargs):
+        def f(x, a, b):
+            assert a == 3
+            assert b == 1
+            return (x ** a - b)
+
+        res = optimize.root_scalar(f, x0=3, args=(3, 1), **kwargs)
+        assert res.converged
+        assert_allclose(res.root, 1)
+
+    @pytest.mark.parametrize('method', ['secant', 'newton'])
+    def test_int_x0_gh19280(self, method):
+        # Originally, `newton` ensured that only floats were passed to the
+        # callable. This was inadvertently changed by gh-17669. Check that
+        # it has been changed back.
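+        # Background: NumPy integer scalars refuse negative integer powers
+        # (np.int64(2) ** -2 raises ValueError: "Integers to negative
+        # integer powers are not allowed."), so x0 must be converted to
+        # float before `f` is evaluated.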
+        def f(x):
+            # an integer raised to a negative integer power would fail
+            return x**-2 - 2
+
+        res = optimize.root_scalar(f, x0=1, method=method)
+        assert res.converged
+        assert_allclose(abs(res.root), 2**-0.5)
+        assert res.root.dtype == np.dtype(np.float64)
+
+
+def test_gh_5555():
+    root = 0.1
+
+    def f(x):
+        return x - root
+
+    methods = [zeros.bisect, zeros.ridder]
+    xtol = rtol = TOL
+    for method in methods:
+        res = method(f, -1e8, 1e7, xtol=xtol, rtol=rtol)
+        assert_allclose(root, res, atol=xtol, rtol=rtol,
+                        err_msg='method %s' % method.__name__)
+
+
+def test_gh_5557():
+    # Show that without the changes in gh-5557, brentq and brenth might
+    # only achieve a tolerance of 2*(xtol + rtol*|res|).
+
+    # f linearly interpolates (0, -0.1), (0.5, -0.1), and (1,
+    # 0.4). The important parts are that |f(0)| < |f(1)| (so that
+    # brent takes 0 as the initial guess), |f(0)| < atol (so that
+    # brent accepts 0 as the root), and that the exact root of f lies
+    # more than atol away from 0 (so that brent doesn't achieve the
+    # desired tolerance).
+    def f(x):
+        if x < 0.5:
+            return -0.1
+        else:
+            return x - 0.6
+
+    atol = 0.51
+    rtol = 4 * _FLOAT_EPS
+    methods = [zeros.brentq, zeros.brenth]
+    for method in methods:
+        res = method(f, 0, 1, xtol=atol, rtol=rtol)
+        assert_allclose(0.6, res, atol=atol, rtol=rtol)
+
+
+def test_brent_underflow_in_root_bracketing():
+    # Testing if an interval [a, b] brackets a zero of a function
+    # by checking f(a)*f(b) < 0 is not reliable when the product
+    # underflows/overflows. (reported in gh-13737)
+
+    underflow_scenario = (-450.0, -350.0, -400.0)
+    overflow_scenario = (350.0, 450.0, 400.0)
+
+    for a, b, root in [underflow_scenario, overflow_scenario]:
+        c = np.exp(root)
+        for method in [zeros.brenth, zeros.brentq]:
+            res = method(lambda x: np.exp(x)-c, a, b)
+            assert_allclose(root, res)
+
+
+class TestRootResults:
+    r = zeros.RootResults(root=1.0, iterations=44, function_calls=46, flag=0,
+                          method="newton")
+
+    def test_repr(self):
+        expected_repr = (" converged: True\n flag: converged"
+                         "\n function_calls: 46\n iterations: 44\n"
+                         " root: 1.0\n method: newton")
+        assert_equal(repr(self.r), expected_repr)
+
+    def test_type(self):
+        assert isinstance(self.r, OptimizeResult)
+
+
+def test_complex_halley():
+    """Test that Halley's method works with complex roots"""
+    def f(x, *a):
+        return a[0] * x**2 + a[1] * x + a[2]
+
+    def f_1(x, *a):
+        return 2 * a[0] * x + a[1]
+
+    def f_2(x, *a):
+        retval = 2 * a[0]
+        try:
+            size = len(x)
+        except TypeError:
+            return retval
+        else:
+            return [retval] * size
+
+    z = complex(1.0, 2.0)
+    coeffs = (2.0, 3.0, 4.0)
+    y = zeros.newton(f, z, args=coeffs, fprime=f_1, fprime2=f_2, tol=1e-6)
+    # (-0.75000000000000078+1.1989578808281789j)
+    assert_allclose(f(y, *coeffs), 0, atol=1e-6)
+    z = [z] * 10
+    coeffs = (2.0, 3.0, 4.0)
+    y = zeros.newton(f, z, args=coeffs, fprime=f_1, fprime2=f_2, tol=1e-6)
+    assert_allclose(f(y, *coeffs), 0, atol=1e-6)
+
+
+def test_zero_der_nz_dp(capsys):
+    """Test the secant method with a non-zero dp but an infinite Newton step"""
+    # Pick a symmetric function, e.g. f = (x - 100)**2, which has a root at
+    # x = 100 and is symmetric about the line x = 100, then choose p0 so that
+    # the two secant points p0 and p1 = p0 * (1 + dx) + dx lie symmetrically
+    # on either side of the root: the secant through them is then a flat line
+    # with zero slope. A large root is used so this holds consistently.
+    dx = np.finfo(float).eps ** 0.33
+    # 100 - p0 = p1 - 100 = p0 * (1 + dx) + dx - 100
+    # -> 200 = p0 * (2 + dx) + dx
+    p0 = (200.0 - dx) / (2.0 + dx)
+    with suppress_warnings() as sup:
+        sup.filter(RuntimeWarning, "RMS of")
+        x = zeros.newton(lambda y: (y - 100.0)**2, x0=[p0] * 10)
+    assert_allclose(x, [100] * 10)
+    # test scalar cases too
+    p0 = (2.0 - 1e-4) / (2.0 + 1e-4)
+    with suppress_warnings() as sup:
+        sup.filter(RuntimeWarning, "Tolerance of")
+        x = zeros.newton(lambda y: (y - 1.0) ** 2, x0=p0, disp=False)
+    assert_allclose(x, 1)
+    with pytest.raises(RuntimeError, match='Tolerance of'):
+        x = zeros.newton(lambda y: (y - 1.0) ** 2, x0=p0, disp=True)
+    p0 = (-2.0 + 1e-4) / (2.0 + 1e-4)
+    with suppress_warnings() as sup:
+        sup.filter(RuntimeWarning, "Tolerance of")
+        x = zeros.newton(lambda y: (y + 1.0) ** 2, x0=p0, disp=False)
+    assert_allclose(x, -1)
+    with pytest.raises(RuntimeError, match='Tolerance of'):
+        x = zeros.newton(lambda y: (y + 1.0) ** 2, x0=p0, disp=True)
+
+
+def test_array_newton_failures():
+    """Test that array newton fails as expected"""
+    # p = 0.68  # [MPa]
+    # dp = -0.068 * 1e6  # [Pa]
+    # T = 323  # [K]
+    diameter = 0.10  # [m]
+    # L = 100  # [m]
+    roughness = 0.00015  # [m]
+    rho = 988.1  # [kg/m**3]
+    mu = 5.4790e-04  # [Pa*s]
+    u = 2.488  # [m/s]
+    reynolds_number = rho * u * diameter / mu  # Reynolds number
+
+    def colebrook_eqn(darcy_friction, re, dia):
+        return (1 / np.sqrt(darcy_friction) +
+                2 * np.log10(roughness / 3.7 / dia +
+                             2.51 / re / np.sqrt(darcy_friction)))
+
+    # only some failures
+    with pytest.warns(RuntimeWarning):
+        result = zeros.newton(
+            colebrook_eqn, x0=[0.01, 0.2, 0.02223, 0.3], maxiter=2,
+            args=[reynolds_number, diameter], full_output=True
+        )
+        assert not result.converged.all()
+    # they all fail
+    with pytest.raises(RuntimeError):
+        result = zeros.newton(
+            colebrook_eqn, x0=[0.01] * 2, maxiter=2,
+            args=[reynolds_number, diameter], full_output=True
+        )
+
+
+# this test should **not** raise a RuntimeWarning
+def test_gh8904_zeroder_at_root_fails():
+    """Test that Newton and Halley do not warn on a zero derivative at the root"""
+
+    # a function that has a zero derivative at its root
+    def f_zeroder_root(x):
+        return x**3 - x**2
+
+    # should work with secant
+    r = zeros.newton(f_zeroder_root, x0=0)
+    assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
+    # test again with array
+    r = zeros.newton(f_zeroder_root, x0=[0]*10)
+    assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
+
+    # 1st derivative
+    def fder(x):
+        return 3 * x**2 - 2 * x
+
+    # 2nd derivative
+    def fder2(x):
+        return 6*x - 2
+
+    # should work with newton and halley
+    r = zeros.newton(f_zeroder_root, x0=0, fprime=fder)
+    assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
+    r = zeros.newton(f_zeroder_root, x0=0, fprime=fder,
+                     fprime2=fder2)
+    assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
+    # test again with array
+    r = zeros.newton(f_zeroder_root, x0=[0]*10, fprime=fder)
+    assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
+    r = zeros.newton(f_zeroder_root, x0=[0]*10, fprime=fder,
+                     fprime2=fder2)
+    assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
+
+    # also test that if a root is found we do not raise RuntimeWarning even if
+    # the derivative is zero there, e.g. at x = 0.5, fval = -0.125 and
+    # fder = -0.25, so the next guess is 0.5 - (-0.125/-0.25) = 0, which is
+    # the root; if the solver continued from that guess it would calculate a
+    # zero derivative, so it should return the root without a RuntimeWarning
+    r = zeros.newton(f_zeroder_root, x0=0.5, fprime=fder)
+    assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
+    # test again with array
+    r = 
zeros.newton(f_zeroder_root, x0=[0.5]*10, fprime=fder) + assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol) + # doesn't apply to halley + + +def test_gh_8881(): + r"""Test that Halley's method realizes that the 2nd order adjustment + is too big and drops off to the 1st order adjustment.""" + n = 9 + + def f(x): + return power(x, 1.0/n) - power(n, 1.0/n) + + def fp(x): + return power(x, (1.0-n)/n)/n + + def fpp(x): + return power(x, (1.0-2*n)/n) * (1.0/n) * (1.0-n)/n + + x0 = 0.1 + # The root is at x=9. + # The function has positive slope, x0 < root. + # Newton succeeds in 8 iterations + rt, r = newton(f, x0, fprime=fp, full_output=True) + assert r.converged + # Before the Issue 8881/PR 8882, halley would send x in the wrong direction. + # Check that it now succeeds. + rt, r = newton(f, x0, fprime=fp, fprime2=fpp, full_output=True) + assert r.converged + + +def test_gh_9608_preserve_array_shape(): + """ + Test that shape is preserved for array inputs even if fprime or fprime2 is + scalar + """ + def f(x): + return x**2 + + def fp(x): + return 2 * x + + def fpp(x): + return 2 + + x0 = np.array([-2], dtype=np.float32) + rt, r = newton(f, x0, fprime=fp, fprime2=fpp, full_output=True) + assert r.converged + + x0_array = np.array([-2, -3], dtype=np.float32) + # This next invocation should fail + with pytest.raises(IndexError): + result = zeros.newton( + f, x0_array, fprime=fp, fprime2=fpp, full_output=True + ) + + def fpp_array(x): + return np.full(np.shape(x), 2, dtype=np.float32) + + result = zeros.newton( + f, x0_array, fprime=fp, fprime2=fpp_array, full_output=True + ) + assert result.converged.all() + + +@pytest.mark.parametrize( + "maximum_iterations,flag_expected", + [(10, zeros.CONVERR), (100, zeros.CONVERGED)]) +def test_gh9254_flag_if_maxiter_exceeded(maximum_iterations, flag_expected): + """ + Test that if the maximum iterations is exceeded that the flag is not + converged. + """ + result = zeros.brentq( + lambda x: ((1.2*x - 2.3)*x + 3.4)*x - 4.5, + -30, 30, (), 1e-6, 1e-6, maximum_iterations, + full_output=True, disp=False) + assert result[1].flag == flag_expected + if flag_expected == zeros.CONVERR: + # didn't converge because exceeded maximum iterations + assert result[1].iterations == maximum_iterations + elif flag_expected == zeros.CONVERGED: + # converged before maximum iterations + assert result[1].iterations < maximum_iterations + + +def test_gh9551_raise_error_if_disp_true(): + """Test that if disp is true then zero derivative raises RuntimeError""" + + def f(x): + return x*x + 1 + + def f_p(x): + return 2*x + + assert_warns(RuntimeWarning, zeros.newton, f, 1.0, f_p, disp=False) + with pytest.raises( + RuntimeError, + match=r'^Derivative was zero\. Failed to converge after \d+ iterations, ' + r'value is [+-]?\d*\.\d+\.$'): + zeros.newton(f, 1.0, f_p) + root = zeros.newton(f, complex(10.0, 10.0), f_p) + assert_allclose(root, complex(0.0, 1.0)) + + +@pytest.mark.parametrize('solver_name', + ['brentq', 'brenth', 'bisect', 'ridder', 'toms748']) +def test_gh3089_8394(solver_name): + # gh-3089 and gh-8394 reported that bracketing solvers returned incorrect + # results when they encountered NaNs. Check that this is resolved. 
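+    # Every comparison with NaN is False, so sign tests such as
+    # f(a) * f(m) < 0 fail for both halves of the interval; rather than
+    # iterate on a meaningless bracket, the solvers now raise, as checked
+    # below.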
+ def f(x): + return np.nan + + solver = getattr(zeros, solver_name) + with pytest.raises(ValueError, match="The function value at x..."): + solver(f, 0, 1) + + +@pytest.mark.parametrize('method', + ['brentq', 'brenth', 'bisect', 'ridder', 'toms748']) +def test_gh18171(method): + # gh-3089 and gh-8394 reported that bracketing solvers returned incorrect + # results when they encountered NaNs. Check that `root_scalar` returns + # normally but indicates that convergence was unsuccessful. See gh-18171. + def f(x): + f._count += 1 + return np.nan + f._count = 0 + + res = root_scalar(f, bracket=(0, 1), method=method) + assert res.converged is False + assert res.flag.startswith("The function value at x") + assert res.function_calls == f._count + assert str(res.root) in res.flag + + +@pytest.mark.parametrize('solver_name', + ['brentq', 'brenth', 'bisect', 'ridder', 'toms748']) +@pytest.mark.parametrize('rs_interface', [True, False]) +def test_function_calls(solver_name, rs_interface): + # There do not appear to be checks that the bracketing solvers report the + # correct number of function evaluations. Check that this is the case. + solver = ((lambda f, a, b, **kwargs: root_scalar(f, bracket=(a, b))) + if rs_interface else getattr(zeros, solver_name)) + + def f(x): + f.calls += 1 + return x**2 - 1 + f.calls = 0 + + res = solver(f, 0, 10, full_output=True) + + if rs_interface: + assert res.function_calls == f.calls + else: + assert res[1].function_calls == f.calls + + +def test_gh_14486_converged_false(): + """Test that zero slope with secant method results in a converged=False""" + def lhs(x): + return x * np.exp(-x*x) - 0.07 + + with pytest.warns(RuntimeWarning, match='Tolerance of'): + res = root_scalar(lhs, method='secant', x0=-0.15, x1=1.0) + assert not res.converged + assert res.flag == 'convergence error' + + with pytest.warns(RuntimeWarning, match='Tolerance of'): + res = newton(lhs, x0=-0.15, x1=1.0, disp=False, full_output=True)[1] + assert not res.converged + assert res.flag == 'convergence error' + + +@pytest.mark.parametrize('solver_name', + ['brentq', 'brenth', 'bisect', 'ridder', 'toms748']) +@pytest.mark.parametrize('rs_interface', [True, False]) +def test_gh5584(solver_name, rs_interface): + # gh-5584 reported that an underflow can cause sign checks in the algorithm + # to fail. Check that this is resolved. + solver = ((lambda f, a, b, **kwargs: root_scalar(f, bracket=(a, b))) + if rs_interface else getattr(zeros, solver_name)) + + def f(x): + return 1e-200*x + + # Report failure when signs are the same + with pytest.raises(ValueError, match='...must have different signs'): + solver(f, -0.5, -0.4, full_output=True) + + # Solve successfully when signs are different + res = solver(f, -0.5, 0.4, full_output=True) + res = res if rs_interface else res[1] + assert res.converged + assert_allclose(res.root, 0, atol=1e-8) + + # Solve successfully when one side is negative zero + res = solver(f, -0.5, float('-0.0'), full_output=True) + res = res if rs_interface else res[1] + assert res.converged + assert_allclose(res.root, 0, atol=1e-8) + + +def test_gh13407(): + # gh-13407 reported that the message produced by `scipy.optimize.toms748` + # when `rtol < eps` is incorrect, and also that toms748 is unusual in + # accepting `rtol` as low as eps while other solvers raise at 4*eps. Check + # that the error message has been corrected and that `rtol=eps` can produce + # a lower function value than `rtol=4*eps`. 
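+    # The spacing of adjacent floats near a root r is about eps * |r|, so a
+    # relative tolerance below machine eps is unachievable; that is why
+    # rtol < eps must raise, while rtol=eps is still meaningful for toms748
+    # and can produce a smaller function value than rtol=4*eps, as asserted
+    # below.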
+    def f(x):
+        return x**3 - 2*x - 5
+
+    xtol = 1e-300
+    eps = np.finfo(float).eps
+    x1 = zeros.toms748(f, 1e-10, 1e10, xtol=xtol, rtol=1*eps)
+    f1 = f(x1)
+    x4 = zeros.toms748(f, 1e-10, 1e10, xtol=xtol, rtol=4*eps)
+    f4 = f(x4)
+    assert f1 < f4
+
+    # using old-style syntax to get exactly the same message
+    message = fr"rtol too small \({eps/2:g} < {eps:g}\)"
+    with pytest.raises(ValueError, match=message):
+        zeros.toms748(f, 1e-10, 1e10, xtol=xtol, rtol=eps/2)
+
+
+def test_newton_complex_gh10103():
+    # gh-10103 reported a problem when `newton` is passed a Python complex x0,
+    # no `fprime` (secant method), and no `x1` (`x1` must be constructed).
+    # Check that this is resolved.
+    def f(z):
+        return z - 1
+    res = newton(f, 1+1j)
+    assert_allclose(res, 1, atol=1e-12)
+
+    res = root_scalar(f, x0=1+1j, x1=2+1.5j, method='secant')
+    assert_allclose(res.root, 1, atol=1e-12)
+
+
+@pytest.mark.parametrize('method', all_methods)
+def test_maxiter_int_check_gh10236(method):
+    # gh-10236 reported that the error message when `maxiter` is not an
+    # integer was difficult to interpret. Check that this was resolved
+    # (by gh-10907).
+    message = "'float' object cannot be interpreted as an integer"
+    with pytest.raises(TypeError, match=message):
+        method(f1, 0.0, 1.0, maxiter=72.45)
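+
+# Illustrative sketch (added for exposition; not part of the upstream test
+# suite): a minimal, self-contained demonstration of the two `root_scalar`
+# calling conventions exercised throughout this file -- bracketing (brentq)
+# versus derivative-based (newton) -- on the arbitrary example f(x) = x**2 - 2.
+# The leading underscore keeps pytest from collecting it as a test.
+def _demo_root_scalar_conventions():
+    def f(x):
+        return x**2 - 2
+
+    def fprime(x):
+        return 2*x
+
+    # Bracketing: needs sign(f(a)) != sign(f(b)); no derivative required.
+    res_bracket = root_scalar(f, bracket=(1, 2), method='brentq')
+    # Derivative-based: needs x0, plus fprime for Newton proper (otherwise
+    # a secant step is constructed internally).
+    res_newton = root_scalar(f, x0=1.5, fprime=fprime, method='newton')
+
+    assert res_bracket.converged and res_newton.converged
+    assert_allclose(res_bracket.root, sqrt(2))
+    assert_allclose(res_newton.root, sqrt(2))
+    return res_bracket, res_newton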