Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- ckpts/universal/global_step40/zero/12.input_layernorm.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step40/zero/12.input_layernorm.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step40/zero/7.attention.query_key_value.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step40/zero/7.attention.query_key_value.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step40/zero/7.attention.query_key_value.weight/fp32.pt +3 -0
- venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__basinhopping.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__differential_evolution.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__dual_annealing.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__linprog_clean_inputs.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__remove_redundancy.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__root.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__shgo.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__spectral.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_bracket.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_constraint_conversion.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_constraints.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_differentiate.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_hessian_update_strategy.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_lbfgsb_hessinv.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_lbfgsb_setulb.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_least_squares.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_linesearch.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_linprog.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_lsq_linear.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_milp.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_minpack.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_nnls.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_nonlin.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_quadratic_assignment.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_regression.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_slsqp.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_tnc.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_trustregion.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_trustregion_exact.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_trustregion_krylov.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_zeros.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/tests/test__basinhopping.py +525 -0
- venv/lib/python3.10/site-packages/scipy/optimize/tests/test__differential_evolution.py +1677 -0
- venv/lib/python3.10/site-packages/scipy/optimize/tests/test__root.py +123 -0
- venv/lib/python3.10/site-packages/scipy/optimize/tests/test_isotonic_regression.py +165 -0
- venv/lib/python3.10/site-packages/scipy/optimize/tests/test_linesearch.py +314 -0
- venv/lib/python3.10/site-packages/scipy/optimize/tests/test_nnls.py +318 -0
- venv/lib/python3.10/site-packages/scipy/optimize/tests/test_nonlin.py +534 -0
- venv/lib/python3.10/site-packages/scipy/optimize/tests/test_trustregion.py +112 -0
- venv/lib/python3.10/site-packages/scipy/optimize/tests/test_trustregion_exact.py +352 -0
- venv/lib/python3.10/site-packages/scipy/signal/__pycache__/_arraytools.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/signal/__pycache__/_bsplines.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/signal/__pycache__/_czt.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/signal/__pycache__/_savitzky_golay.cpython-310.pyc +0 -0
ckpts/universal/global_step40/zero/12.input_layernorm.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:8751af51674dbfeb8035179d67316d256d211127827891159db9e37dab519bb0
|
3 |
+
size 9372
|
ckpts/universal/global_step40/zero/12.input_layernorm.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:5a00e6f8f4fd35a6dfa737e1d0a2eeb3fd5be714af0611850eb955008a0fb2e2
|
3 |
+
size 9387
|
ckpts/universal/global_step40/zero/7.attention.query_key_value.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:245c8c27fcc0860f19363ad10ceedede28cddc564168e2af51eb18de51e17faf
|
3 |
+
size 50332828
|
ckpts/universal/global_step40/zero/7.attention.query_key_value.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:becb1ae1a8572ca35ea3af814a8ea2a28660d6f019edfef1e53a9193c4d58b99
|
3 |
+
size 50332843
|
ckpts/universal/global_step40/zero/7.attention.query_key_value.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:fe7ac15c4f0049914aedce9d377301c9f9205d950aea818a1986f9af94322fc9
|
3 |
+
size 50332749
|
venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (188 Bytes). View file
|
|
venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__basinhopping.cpython-310.pyc
ADDED
Binary file (17.4 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__differential_evolution.cpython-310.pyc
ADDED
Binary file (49.6 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__dual_annealing.cpython-310.pyc
ADDED
Binary file (13.2 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__linprog_clean_inputs.cpython-310.pyc
ADDED
Binary file (8.04 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__remove_redundancy.cpython-310.pyc
ADDED
Binary file (7.6 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__root.cpython-310.pyc
ADDED
Binary file (4.98 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__shgo.cpython-310.pyc
ADDED
Binary file (40.9 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__spectral.cpython-310.pyc
ADDED
Binary file (6.99 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_bracket.cpython-310.pyc
ADDED
Binary file (23.3 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_constraint_conversion.cpython-310.pyc
ADDED
Binary file (11.7 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_constraints.cpython-310.pyc
ADDED
Binary file (9.06 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_differentiate.cpython-310.pyc
ADDED
Binary file (15.7 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_hessian_update_strategy.cpython-310.pyc
ADDED
Binary file (8.25 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_lbfgsb_hessinv.cpython-310.pyc
ADDED
Binary file (1.53 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_lbfgsb_setulb.cpython-310.pyc
ADDED
Binary file (2.79 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_least_squares.cpython-310.pyc
ADDED
Binary file (28.5 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_linesearch.cpython-310.pyc
ADDED
Binary file (11.6 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_linprog.cpython-310.pyc
ADDED
Binary file (79.7 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_lsq_linear.cpython-310.pyc
ADDED
Binary file (9.53 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_milp.cpython-310.pyc
ADDED
Binary file (11.2 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_minpack.cpython-310.pyc
ADDED
Binary file (43.3 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_nnls.cpython-310.pyc
ADDED
Binary file (10.2 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_nonlin.cpython-310.pyc
ADDED
Binary file (17.9 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_quadratic_assignment.cpython-310.pyc
ADDED
Binary file (12.1 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_regression.cpython-310.pyc
ADDED
Binary file (2.08 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_slsqp.cpython-310.pyc
ADDED
Binary file (23.7 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_tnc.cpython-310.pyc
ADDED
Binary file (12.6 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_trustregion.cpython-310.pyc
ADDED
Binary file (4.44 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_trustregion_exact.cpython-310.pyc
ADDED
Binary file (9.41 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_trustregion_krylov.cpython-310.pyc
ADDED
Binary file (6.08 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_zeros.cpython-310.pyc
ADDED
Binary file (33.6 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/optimize/tests/test__basinhopping.py
ADDED
@@ -0,0 +1,525 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Unit tests for the basin hopping global minimization algorithm.
|
3 |
+
"""
|
4 |
+
import copy
|
5 |
+
|
6 |
+
from numpy.testing import (assert_almost_equal, assert_equal, assert_,
|
7 |
+
assert_allclose)
|
8 |
+
import pytest
|
9 |
+
from pytest import raises as assert_raises
|
10 |
+
import numpy as np
|
11 |
+
from numpy import cos, sin
|
12 |
+
|
13 |
+
from scipy.optimize import basinhopping, OptimizeResult
|
14 |
+
from scipy.optimize._basinhopping import (
|
15 |
+
Storage, RandomDisplacement, Metropolis, AdaptiveStepsize)
|
16 |
+
|
17 |
+
|
18 |
+
def func1d(x):
|
19 |
+
f = cos(14.5 * x - 0.3) + (x + 0.2) * x
|
20 |
+
df = np.array(-14.5 * sin(14.5 * x - 0.3) + 2. * x + 0.2)
|
21 |
+
return f, df
|
22 |
+
|
23 |
+
|
24 |
+
def func2d_nograd(x):
|
25 |
+
f = cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] + (x[0] + 0.2) * x[0]
|
26 |
+
return f
|
27 |
+
|
28 |
+
|
29 |
+
def func2d(x):
|
30 |
+
f = cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] + (x[0] + 0.2) * x[0]
|
31 |
+
df = np.zeros(2)
|
32 |
+
df[0] = -14.5 * sin(14.5 * x[0] - 0.3) + 2. * x[0] + 0.2
|
33 |
+
df[1] = 2. * x[1] + 0.2
|
34 |
+
return f, df
|
35 |
+
|
36 |
+
|
37 |
+
def func2d_easyderiv(x):
|
38 |
+
f = 2.0*x[0]**2 + 2.0*x[0]*x[1] + 2.0*x[1]**2 - 6.0*x[0]
|
39 |
+
df = np.zeros(2)
|
40 |
+
df[0] = 4.0*x[0] + 2.0*x[1] - 6.0
|
41 |
+
df[1] = 2.0*x[0] + 4.0*x[1]
|
42 |
+
|
43 |
+
return f, df
|
44 |
+
|
45 |
+
|
46 |
+
class MyTakeStep1(RandomDisplacement):
|
47 |
+
"""use a copy of displace, but have it set a special parameter to
|
48 |
+
make sure it's actually being used."""
|
49 |
+
def __init__(self):
|
50 |
+
self.been_called = False
|
51 |
+
super().__init__()
|
52 |
+
|
53 |
+
def __call__(self, x):
|
54 |
+
self.been_called = True
|
55 |
+
return super().__call__(x)
|
56 |
+
|
57 |
+
|
58 |
+
def myTakeStep2(x):
|
59 |
+
"""redo RandomDisplacement in function form without the attribute stepsize
|
60 |
+
to make sure everything still works ok
|
61 |
+
"""
|
62 |
+
s = 0.5
|
63 |
+
x += np.random.uniform(-s, s, np.shape(x))
|
64 |
+
return x
|
65 |
+
|
66 |
+
|
67 |
+
class MyAcceptTest:
|
68 |
+
"""pass a custom accept test
|
69 |
+
|
70 |
+
This does nothing but make sure it's being used and ensure all the
|
71 |
+
possible return values are accepted
|
72 |
+
"""
|
73 |
+
def __init__(self):
|
74 |
+
self.been_called = False
|
75 |
+
self.ncalls = 0
|
76 |
+
self.testres = [False, 'force accept', True, np.bool_(True),
|
77 |
+
np.bool_(False), [], {}, 0, 1]
|
78 |
+
|
79 |
+
def __call__(self, **kwargs):
|
80 |
+
self.been_called = True
|
81 |
+
self.ncalls += 1
|
82 |
+
if self.ncalls - 1 < len(self.testres):
|
83 |
+
return self.testres[self.ncalls - 1]
|
84 |
+
else:
|
85 |
+
return True
|
86 |
+
|
87 |
+
|
88 |
+
class MyCallBack:
|
89 |
+
"""pass a custom callback function
|
90 |
+
|
91 |
+
This makes sure it's being used. It also returns True after 10
|
92 |
+
steps to ensure that it's stopping early.
|
93 |
+
|
94 |
+
"""
|
95 |
+
def __init__(self):
|
96 |
+
self.been_called = False
|
97 |
+
self.ncalls = 0
|
98 |
+
|
99 |
+
def __call__(self, x, f, accepted):
|
100 |
+
self.been_called = True
|
101 |
+
self.ncalls += 1
|
102 |
+
if self.ncalls == 10:
|
103 |
+
return True
|
104 |
+
|
105 |
+
|
106 |
+
class TestBasinHopping:
|
107 |
+
|
108 |
+
def setup_method(self):
|
109 |
+
""" Tests setup.
|
110 |
+
|
111 |
+
Run tests based on the 1-D and 2-D functions described above.
|
112 |
+
"""
|
113 |
+
self.x0 = (1.0, [1.0, 1.0])
|
114 |
+
self.sol = (-0.195, np.array([-0.195, -0.1]))
|
115 |
+
|
116 |
+
self.tol = 3 # number of decimal places
|
117 |
+
|
118 |
+
self.niter = 100
|
119 |
+
self.disp = False
|
120 |
+
|
121 |
+
# fix random seed
|
122 |
+
np.random.seed(1234)
|
123 |
+
|
124 |
+
self.kwargs = {"method": "L-BFGS-B", "jac": True}
|
125 |
+
self.kwargs_nograd = {"method": "L-BFGS-B"}
|
126 |
+
|
127 |
+
def test_TypeError(self):
|
128 |
+
# test the TypeErrors are raised on bad input
|
129 |
+
i = 1
|
130 |
+
# if take_step is passed, it must be callable
|
131 |
+
assert_raises(TypeError, basinhopping, func2d, self.x0[i],
|
132 |
+
take_step=1)
|
133 |
+
# if accept_test is passed, it must be callable
|
134 |
+
assert_raises(TypeError, basinhopping, func2d, self.x0[i],
|
135 |
+
accept_test=1)
|
136 |
+
|
137 |
+
def test_input_validation(self):
|
138 |
+
msg = 'target_accept_rate has to be in range \\(0, 1\\)'
|
139 |
+
with assert_raises(ValueError, match=msg):
|
140 |
+
basinhopping(func1d, self.x0[0], target_accept_rate=0.)
|
141 |
+
with assert_raises(ValueError, match=msg):
|
142 |
+
basinhopping(func1d, self.x0[0], target_accept_rate=1.)
|
143 |
+
|
144 |
+
msg = 'stepwise_factor has to be in range \\(0, 1\\)'
|
145 |
+
with assert_raises(ValueError, match=msg):
|
146 |
+
basinhopping(func1d, self.x0[0], stepwise_factor=0.)
|
147 |
+
with assert_raises(ValueError, match=msg):
|
148 |
+
basinhopping(func1d, self.x0[0], stepwise_factor=1.)
|
149 |
+
|
150 |
+
def test_1d_grad(self):
|
151 |
+
# test 1-D minimizations with gradient
|
152 |
+
i = 0
|
153 |
+
res = basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs,
|
154 |
+
niter=self.niter, disp=self.disp)
|
155 |
+
assert_almost_equal(res.x, self.sol[i], self.tol)
|
156 |
+
|
157 |
+
def test_2d(self):
|
158 |
+
# test 2d minimizations with gradient
|
159 |
+
i = 1
|
160 |
+
res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
|
161 |
+
niter=self.niter, disp=self.disp)
|
162 |
+
assert_almost_equal(res.x, self.sol[i], self.tol)
|
163 |
+
assert_(res.nfev > 0)
|
164 |
+
|
165 |
+
def test_njev(self):
|
166 |
+
# test njev is returned correctly
|
167 |
+
i = 1
|
168 |
+
minimizer_kwargs = self.kwargs.copy()
|
169 |
+
# L-BFGS-B doesn't use njev, but BFGS does
|
170 |
+
minimizer_kwargs["method"] = "BFGS"
|
171 |
+
res = basinhopping(func2d, self.x0[i],
|
172 |
+
minimizer_kwargs=minimizer_kwargs, niter=self.niter,
|
173 |
+
disp=self.disp)
|
174 |
+
assert_(res.nfev > 0)
|
175 |
+
assert_equal(res.nfev, res.njev)
|
176 |
+
|
177 |
+
def test_jac(self):
|
178 |
+
# test Jacobian returned
|
179 |
+
minimizer_kwargs = self.kwargs.copy()
|
180 |
+
# BFGS returns a Jacobian
|
181 |
+
minimizer_kwargs["method"] = "BFGS"
|
182 |
+
|
183 |
+
res = basinhopping(func2d_easyderiv, [0.0, 0.0],
|
184 |
+
minimizer_kwargs=minimizer_kwargs, niter=self.niter,
|
185 |
+
disp=self.disp)
|
186 |
+
|
187 |
+
assert_(hasattr(res.lowest_optimization_result, "jac"))
|
188 |
+
|
189 |
+
# in this case, the Jacobian is just [df/dx, df/dy]
|
190 |
+
_, jacobian = func2d_easyderiv(res.x)
|
191 |
+
assert_almost_equal(res.lowest_optimization_result.jac, jacobian,
|
192 |
+
self.tol)
|
193 |
+
|
194 |
+
def test_2d_nograd(self):
|
195 |
+
# test 2-D minimizations without gradient
|
196 |
+
i = 1
|
197 |
+
res = basinhopping(func2d_nograd, self.x0[i],
|
198 |
+
minimizer_kwargs=self.kwargs_nograd,
|
199 |
+
niter=self.niter, disp=self.disp)
|
200 |
+
assert_almost_equal(res.x, self.sol[i], self.tol)
|
201 |
+
|
202 |
+
def test_all_minimizers(self):
|
203 |
+
# Test 2-D minimizations with gradient. Nelder-Mead, Powell, and COBYLA
|
204 |
+
# don't accept jac=True, so aren't included here.
|
205 |
+
i = 1
|
206 |
+
methods = ['CG', 'BFGS', 'Newton-CG', 'L-BFGS-B', 'TNC', 'SLSQP']
|
207 |
+
minimizer_kwargs = copy.copy(self.kwargs)
|
208 |
+
for method in methods:
|
209 |
+
minimizer_kwargs["method"] = method
|
210 |
+
res = basinhopping(func2d, self.x0[i],
|
211 |
+
minimizer_kwargs=minimizer_kwargs,
|
212 |
+
niter=self.niter, disp=self.disp)
|
213 |
+
assert_almost_equal(res.x, self.sol[i], self.tol)
|
214 |
+
|
215 |
+
def test_all_nograd_minimizers(self):
|
216 |
+
# Test 2-D minimizations without gradient. Newton-CG requires jac=True,
|
217 |
+
# so not included here.
|
218 |
+
i = 1
|
219 |
+
methods = ['CG', 'BFGS', 'L-BFGS-B', 'TNC', 'SLSQP',
|
220 |
+
'Nelder-Mead', 'Powell', 'COBYLA']
|
221 |
+
minimizer_kwargs = copy.copy(self.kwargs_nograd)
|
222 |
+
for method in methods:
|
223 |
+
minimizer_kwargs["method"] = method
|
224 |
+
res = basinhopping(func2d_nograd, self.x0[i],
|
225 |
+
minimizer_kwargs=minimizer_kwargs,
|
226 |
+
niter=self.niter, disp=self.disp)
|
227 |
+
tol = self.tol
|
228 |
+
if method == 'COBYLA':
|
229 |
+
tol = 2
|
230 |
+
assert_almost_equal(res.x, self.sol[i], decimal=tol)
|
231 |
+
|
232 |
+
def test_pass_takestep(self):
|
233 |
+
# test that passing a custom takestep works
|
234 |
+
# also test that the stepsize is being adjusted
|
235 |
+
takestep = MyTakeStep1()
|
236 |
+
initial_step_size = takestep.stepsize
|
237 |
+
i = 1
|
238 |
+
res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
|
239 |
+
niter=self.niter, disp=self.disp,
|
240 |
+
take_step=takestep)
|
241 |
+
assert_almost_equal(res.x, self.sol[i], self.tol)
|
242 |
+
assert_(takestep.been_called)
|
243 |
+
# make sure that the build in adaptive step size has been used
|
244 |
+
assert_(initial_step_size != takestep.stepsize)
|
245 |
+
|
246 |
+
def test_pass_simple_takestep(self):
|
247 |
+
# test that passing a custom takestep without attribute stepsize
|
248 |
+
takestep = myTakeStep2
|
249 |
+
i = 1
|
250 |
+
res = basinhopping(func2d_nograd, self.x0[i],
|
251 |
+
minimizer_kwargs=self.kwargs_nograd,
|
252 |
+
niter=self.niter, disp=self.disp,
|
253 |
+
take_step=takestep)
|
254 |
+
assert_almost_equal(res.x, self.sol[i], self.tol)
|
255 |
+
|
256 |
+
def test_pass_accept_test(self):
|
257 |
+
# test passing a custom accept test
|
258 |
+
# makes sure it's being used and ensures all the possible return values
|
259 |
+
# are accepted.
|
260 |
+
accept_test = MyAcceptTest()
|
261 |
+
i = 1
|
262 |
+
# there's no point in running it more than a few steps.
|
263 |
+
basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
|
264 |
+
niter=10, disp=self.disp, accept_test=accept_test)
|
265 |
+
assert_(accept_test.been_called)
|
266 |
+
|
267 |
+
def test_pass_callback(self):
|
268 |
+
# test passing a custom callback function
|
269 |
+
# This makes sure it's being used. It also returns True after 10 steps
|
270 |
+
# to ensure that it's stopping early.
|
271 |
+
callback = MyCallBack()
|
272 |
+
i = 1
|
273 |
+
# there's no point in running it more than a few steps.
|
274 |
+
res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
|
275 |
+
niter=30, disp=self.disp, callback=callback)
|
276 |
+
assert_(callback.been_called)
|
277 |
+
assert_("callback" in res.message[0])
|
278 |
+
# One of the calls of MyCallBack is during BasinHoppingRunner
|
279 |
+
# construction, so there are only 9 remaining before MyCallBack stops
|
280 |
+
# the minimization.
|
281 |
+
assert_equal(res.nit, 9)
|
282 |
+
|
283 |
+
def test_minimizer_fail(self):
|
284 |
+
# test if a minimizer fails
|
285 |
+
i = 1
|
286 |
+
self.kwargs["options"] = dict(maxiter=0)
|
287 |
+
self.niter = 10
|
288 |
+
res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
|
289 |
+
niter=self.niter, disp=self.disp)
|
290 |
+
# the number of failed minimizations should be the number of
|
291 |
+
# iterations + 1
|
292 |
+
assert_equal(res.nit + 1, res.minimization_failures)
|
293 |
+
|
294 |
+
def test_niter_zero(self):
|
295 |
+
# gh5915, what happens if you call basinhopping with niter=0
|
296 |
+
i = 0
|
297 |
+
basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs,
|
298 |
+
niter=0, disp=self.disp)
|
299 |
+
|
300 |
+
def test_seed_reproducibility(self):
|
301 |
+
# seed should ensure reproducibility between runs
|
302 |
+
minimizer_kwargs = {"method": "L-BFGS-B", "jac": True}
|
303 |
+
|
304 |
+
f_1 = []
|
305 |
+
|
306 |
+
def callback(x, f, accepted):
|
307 |
+
f_1.append(f)
|
308 |
+
|
309 |
+
basinhopping(func2d, [1.0, 1.0], minimizer_kwargs=minimizer_kwargs,
|
310 |
+
niter=10, callback=callback, seed=10)
|
311 |
+
|
312 |
+
f_2 = []
|
313 |
+
|
314 |
+
def callback2(x, f, accepted):
|
315 |
+
f_2.append(f)
|
316 |
+
|
317 |
+
basinhopping(func2d, [1.0, 1.0], minimizer_kwargs=minimizer_kwargs,
|
318 |
+
niter=10, callback=callback2, seed=10)
|
319 |
+
assert_equal(np.array(f_1), np.array(f_2))
|
320 |
+
|
321 |
+
def test_random_gen(self):
|
322 |
+
# check that np.random.Generator can be used (numpy >= 1.17)
|
323 |
+
rng = np.random.default_rng(1)
|
324 |
+
|
325 |
+
minimizer_kwargs = {"method": "L-BFGS-B", "jac": True}
|
326 |
+
|
327 |
+
res1 = basinhopping(func2d, [1.0, 1.0],
|
328 |
+
minimizer_kwargs=minimizer_kwargs,
|
329 |
+
niter=10, seed=rng)
|
330 |
+
|
331 |
+
rng = np.random.default_rng(1)
|
332 |
+
res2 = basinhopping(func2d, [1.0, 1.0],
|
333 |
+
minimizer_kwargs=minimizer_kwargs,
|
334 |
+
niter=10, seed=rng)
|
335 |
+
assert_equal(res1.x, res2.x)
|
336 |
+
|
337 |
+
def test_monotonic_basin_hopping(self):
|
338 |
+
# test 1-D minimizations with gradient and T=0
|
339 |
+
i = 0
|
340 |
+
res = basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs,
|
341 |
+
niter=self.niter, disp=self.disp, T=0)
|
342 |
+
assert_almost_equal(res.x, self.sol[i], self.tol)
|
343 |
+
|
344 |
+
|
345 |
+
class Test_Storage:
|
346 |
+
def setup_method(self):
|
347 |
+
self.x0 = np.array(1)
|
348 |
+
self.f0 = 0
|
349 |
+
|
350 |
+
minres = OptimizeResult(success=True)
|
351 |
+
minres.x = self.x0
|
352 |
+
minres.fun = self.f0
|
353 |
+
|
354 |
+
self.storage = Storage(minres)
|
355 |
+
|
356 |
+
def test_higher_f_rejected(self):
|
357 |
+
new_minres = OptimizeResult(success=True)
|
358 |
+
new_minres.x = self.x0 + 1
|
359 |
+
new_minres.fun = self.f0 + 1
|
360 |
+
|
361 |
+
ret = self.storage.update(new_minres)
|
362 |
+
minres = self.storage.get_lowest()
|
363 |
+
assert_equal(self.x0, minres.x)
|
364 |
+
assert_equal(self.f0, minres.fun)
|
365 |
+
assert_(not ret)
|
366 |
+
|
367 |
+
@pytest.mark.parametrize('success', [True, False])
|
368 |
+
def test_lower_f_accepted(self, success):
|
369 |
+
new_minres = OptimizeResult(success=success)
|
370 |
+
new_minres.x = self.x0 + 1
|
371 |
+
new_minres.fun = self.f0 - 1
|
372 |
+
|
373 |
+
ret = self.storage.update(new_minres)
|
374 |
+
minres = self.storage.get_lowest()
|
375 |
+
assert (self.x0 != minres.x) == success # can't use `is`
|
376 |
+
assert (self.f0 != minres.fun) == success # left side is NumPy bool
|
377 |
+
assert ret is success
|
378 |
+
|
379 |
+
|
380 |
+
class Test_RandomDisplacement:
|
381 |
+
def setup_method(self):
|
382 |
+
self.stepsize = 1.0
|
383 |
+
self.displace = RandomDisplacement(stepsize=self.stepsize)
|
384 |
+
self.N = 300000
|
385 |
+
self.x0 = np.zeros([self.N])
|
386 |
+
|
387 |
+
def test_random(self):
|
388 |
+
# the mean should be 0
|
389 |
+
# the variance should be (2*stepsize)**2 / 12
|
390 |
+
# note these tests are random, they will fail from time to time
|
391 |
+
x = self.displace(self.x0)
|
392 |
+
v = (2. * self.stepsize) ** 2 / 12
|
393 |
+
assert_almost_equal(np.mean(x), 0., 1)
|
394 |
+
assert_almost_equal(np.var(x), v, 1)
|
395 |
+
|
396 |
+
|
397 |
+
class Test_Metropolis:
|
398 |
+
def setup_method(self):
|
399 |
+
self.T = 2.
|
400 |
+
self.met = Metropolis(self.T)
|
401 |
+
self.res_new = OptimizeResult(success=True, fun=0.)
|
402 |
+
self.res_old = OptimizeResult(success=True, fun=1.)
|
403 |
+
|
404 |
+
def test_boolean_return(self):
|
405 |
+
# the return must be a bool, else an error will be raised in
|
406 |
+
# basinhopping
|
407 |
+
ret = self.met(res_new=self.res_new, res_old=self.res_old)
|
408 |
+
assert isinstance(ret, bool)
|
409 |
+
|
410 |
+
def test_lower_f_accepted(self):
|
411 |
+
assert_(self.met(res_new=self.res_new, res_old=self.res_old))
|
412 |
+
|
413 |
+
def test_accept(self):
|
414 |
+
# test that steps are randomly accepted for f_new > f_old
|
415 |
+
one_accept = False
|
416 |
+
one_reject = False
|
417 |
+
for i in range(1000):
|
418 |
+
if one_accept and one_reject:
|
419 |
+
break
|
420 |
+
res_new = OptimizeResult(success=True, fun=1.)
|
421 |
+
res_old = OptimizeResult(success=True, fun=0.5)
|
422 |
+
ret = self.met(res_new=res_new, res_old=res_old)
|
423 |
+
if ret:
|
424 |
+
one_accept = True
|
425 |
+
else:
|
426 |
+
one_reject = True
|
427 |
+
assert_(one_accept)
|
428 |
+
assert_(one_reject)
|
429 |
+
|
430 |
+
def test_GH7495(self):
|
431 |
+
# an overflow in exp was producing a RuntimeWarning
|
432 |
+
# create own object here in case someone changes self.T
|
433 |
+
met = Metropolis(2)
|
434 |
+
res_new = OptimizeResult(success=True, fun=0.)
|
435 |
+
res_old = OptimizeResult(success=True, fun=2000)
|
436 |
+
with np.errstate(over='raise'):
|
437 |
+
met.accept_reject(res_new=res_new, res_old=res_old)
|
438 |
+
|
439 |
+
def test_gh7799(self):
|
440 |
+
# gh-7799 reported a problem in which local search was successful but
|
441 |
+
# basinhopping returned an invalid solution. Show that this is fixed.
|
442 |
+
def func(x):
|
443 |
+
return (x**2-8)**2+(x+2)**2
|
444 |
+
|
445 |
+
x0 = -4
|
446 |
+
limit = 50 # Constrain to func value >= 50
|
447 |
+
con = {'type': 'ineq', 'fun': lambda x: func(x) - limit},
|
448 |
+
res = basinhopping(func, x0, 30, minimizer_kwargs={'constraints': con})
|
449 |
+
assert res.success
|
450 |
+
assert_allclose(res.fun, limit, rtol=1e-6)
|
451 |
+
|
452 |
+
def test_accept_gh7799(self):
|
453 |
+
# Metropolis should not accept the result of an unsuccessful new local
|
454 |
+
# search if the old local search was successful
|
455 |
+
|
456 |
+
met = Metropolis(0) # monotonic basin hopping
|
457 |
+
res_new = OptimizeResult(success=True, fun=0.)
|
458 |
+
res_old = OptimizeResult(success=True, fun=1.)
|
459 |
+
|
460 |
+
# if new local search was successful and energy is lower, accept
|
461 |
+
assert met(res_new=res_new, res_old=res_old)
|
462 |
+
# if new res is unsuccessful, don't accept - even if energy is lower
|
463 |
+
res_new.success = False
|
464 |
+
assert not met(res_new=res_new, res_old=res_old)
|
465 |
+
# ...unless the old res was unsuccessful, too. In that case, why not?
|
466 |
+
res_old.success = False
|
467 |
+
assert met(res_new=res_new, res_old=res_old)
|
468 |
+
|
469 |
+
def test_reject_all_gh7799(self):
|
470 |
+
# Test the behavior when there is no feasible solution
|
471 |
+
def fun(x):
|
472 |
+
return x@x
|
473 |
+
|
474 |
+
def constraint(x):
|
475 |
+
return x + 1
|
476 |
+
|
477 |
+
kwargs = {'constraints': {'type': 'eq', 'fun': constraint},
|
478 |
+
'bounds': [(0, 1), (0, 1)], 'method': 'slsqp'}
|
479 |
+
res = basinhopping(fun, x0=[2, 3], niter=10, minimizer_kwargs=kwargs)
|
480 |
+
assert not res.success
|
481 |
+
|
482 |
+
|
483 |
+
class Test_AdaptiveStepsize:
    """Tests for AdaptiveStepsize, which tunes the underlying takestep's
    stepsize so the observed acceptance rate approaches a target rate."""

    def setup_method(self):
        self.stepsize = 1.
        self.ts = RandomDisplacement(stepsize=self.stepsize)
        self.target_accept_rate = 0.5
        self.takestep = AdaptiveStepsize(takestep=self.ts, verbose=False,
                                         accept_rate=self.target_accept_rate)

    def test_adaptive_increase(self):
        # if few steps are rejected, the stepsize should increase
        x = 0.
        self.takestep(x)
        self.takestep.report(False)
        for i in range(self.takestep.interval):
            self.takestep(x)
            self.takestep.report(True)
        assert_(self.ts.stepsize > self.stepsize)

    def test_adaptive_decrease(self):
        # if most steps are rejected, the stepsize should decrease
        # (comment previously said "increase" - copy-paste error)
        x = 0.
        self.takestep(x)
        self.takestep.report(True)
        for i in range(self.takestep.interval):
            self.takestep(x)
            self.takestep.report(False)
        assert_(self.ts.stepsize < self.stepsize)

    def test_all_accepted(self):
        # test that everything works OK if all steps were accepted
        x = 0.
        for i in range(self.takestep.interval + 1):
            self.takestep(x)
            self.takestep.report(True)
        assert_(self.ts.stepsize > self.stepsize)

    def test_all_rejected(self):
        # test that everything works OK if all steps were rejected
        x = 0.
        for i in range(self.takestep.interval + 1):
            self.takestep(x)
            self.takestep.report(False)
        assert_(self.ts.stepsize < self.stepsize)
|
venv/lib/python3.10/site-packages/scipy/optimize/tests/test__differential_evolution.py
ADDED
@@ -0,0 +1,1677 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Unit tests for the differential global minimization algorithm.
|
3 |
+
"""
|
4 |
+
import multiprocessing
|
5 |
+
import platform
|
6 |
+
|
7 |
+
from scipy.optimize._differentialevolution import (DifferentialEvolutionSolver,
|
8 |
+
_ConstraintWrapper)
|
9 |
+
from scipy.optimize import differential_evolution, OptimizeResult
|
10 |
+
from scipy.optimize._constraints import (Bounds, NonlinearConstraint,
|
11 |
+
LinearConstraint)
|
12 |
+
from scipy.optimize import rosen, minimize
|
13 |
+
from scipy.sparse import csr_matrix
|
14 |
+
from scipy import stats
|
15 |
+
|
16 |
+
import numpy as np
|
17 |
+
from numpy.testing import (assert_equal, assert_allclose, assert_almost_equal,
|
18 |
+
assert_string_equal, assert_, suppress_warnings)
|
19 |
+
from pytest import raises as assert_raises, warns
|
20 |
+
import pytest
|
21 |
+
|
22 |
+
|
23 |
+
class TestDifferentialEvolutionSolver:
|
24 |
+
|
25 |
+
def setup_method(self):
    """Build the solvers and fixtures shared by the tests."""
    self.old_seterr = np.seterr(invalid='raise')
    self.limits = np.array([[0., 0.], [2., 2.]])
    self.bounds = [(0., 2.), (0., 2.)]

    self.dummy_solver = DifferentialEvolutionSolver(self.quadratic,
                                                    [(0, 100)])

    # dummy_solver2 exercises the mutation strategies with a hand-built
    # seven-member population: [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]
    self.dummy_solver2 = DifferentialEvolutionSolver(self.quadratic,
                                                     [(0, 1)],
                                                     popsize=7,
                                                     mutation=0.5)
    self.dummy_solver2.population = np.arange(0.1, 0.8, 0.1).reshape(-1, 1)
|
43 |
+
|
44 |
+
def teardown_method(self):
    """Restore the floating point error handling saved by setup_method."""
    saved_state = self.old_seterr
    np.seterr(**saved_state)
|
46 |
+
|
47 |
+
def quadratic(self, x):
    """Simple objective used throughout: square of the first coordinate."""
    first = x[0]
    return first ** 2
|
49 |
+
|
50 |
+
def test__strategy_resolves(self):
|
51 |
+
# test that the correct mutation function is resolved by
|
52 |
+
# different requested strategy arguments
|
53 |
+
solver = DifferentialEvolutionSolver(rosen,
|
54 |
+
self.bounds,
|
55 |
+
strategy='best1exp')
|
56 |
+
assert_equal(solver.strategy, 'best1exp')
|
57 |
+
assert_equal(solver.mutation_func.__name__, '_best1')
|
58 |
+
|
59 |
+
solver = DifferentialEvolutionSolver(rosen,
|
60 |
+
self.bounds,
|
61 |
+
strategy='best1bin')
|
62 |
+
assert_equal(solver.strategy, 'best1bin')
|
63 |
+
assert_equal(solver.mutation_func.__name__, '_best1')
|
64 |
+
|
65 |
+
solver = DifferentialEvolutionSolver(rosen,
|
66 |
+
self.bounds,
|
67 |
+
strategy='rand1bin')
|
68 |
+
assert_equal(solver.strategy, 'rand1bin')
|
69 |
+
assert_equal(solver.mutation_func.__name__, '_rand1')
|
70 |
+
|
71 |
+
solver = DifferentialEvolutionSolver(rosen,
|
72 |
+
self.bounds,
|
73 |
+
strategy='rand1exp')
|
74 |
+
assert_equal(solver.strategy, 'rand1exp')
|
75 |
+
assert_equal(solver.mutation_func.__name__, '_rand1')
|
76 |
+
|
77 |
+
solver = DifferentialEvolutionSolver(rosen,
|
78 |
+
self.bounds,
|
79 |
+
strategy='rand2exp')
|
80 |
+
assert_equal(solver.strategy, 'rand2exp')
|
81 |
+
assert_equal(solver.mutation_func.__name__, '_rand2')
|
82 |
+
|
83 |
+
solver = DifferentialEvolutionSolver(rosen,
|
84 |
+
self.bounds,
|
85 |
+
strategy='best2bin')
|
86 |
+
assert_equal(solver.strategy, 'best2bin')
|
87 |
+
assert_equal(solver.mutation_func.__name__, '_best2')
|
88 |
+
|
89 |
+
solver = DifferentialEvolutionSolver(rosen,
|
90 |
+
self.bounds,
|
91 |
+
strategy='rand2bin')
|
92 |
+
assert_equal(solver.strategy, 'rand2bin')
|
93 |
+
assert_equal(solver.mutation_func.__name__, '_rand2')
|
94 |
+
|
95 |
+
solver = DifferentialEvolutionSolver(rosen,
|
96 |
+
self.bounds,
|
97 |
+
strategy='rand2exp')
|
98 |
+
assert_equal(solver.strategy, 'rand2exp')
|
99 |
+
assert_equal(solver.mutation_func.__name__, '_rand2')
|
100 |
+
|
101 |
+
solver = DifferentialEvolutionSolver(rosen,
|
102 |
+
self.bounds,
|
103 |
+
strategy='randtobest1bin')
|
104 |
+
assert_equal(solver.strategy, 'randtobest1bin')
|
105 |
+
assert_equal(solver.mutation_func.__name__, '_randtobest1')
|
106 |
+
|
107 |
+
solver = DifferentialEvolutionSolver(rosen,
|
108 |
+
self.bounds,
|
109 |
+
strategy='randtobest1exp')
|
110 |
+
assert_equal(solver.strategy, 'randtobest1exp')
|
111 |
+
assert_equal(solver.mutation_func.__name__, '_randtobest1')
|
112 |
+
|
113 |
+
solver = DifferentialEvolutionSolver(rosen,
|
114 |
+
self.bounds,
|
115 |
+
strategy='currenttobest1bin')
|
116 |
+
assert_equal(solver.strategy, 'currenttobest1bin')
|
117 |
+
assert_equal(solver.mutation_func.__name__, '_currenttobest1')
|
118 |
+
|
119 |
+
solver = DifferentialEvolutionSolver(rosen,
|
120 |
+
self.bounds,
|
121 |
+
strategy='currenttobest1exp')
|
122 |
+
assert_equal(solver.strategy, 'currenttobest1exp')
|
123 |
+
assert_equal(solver.mutation_func.__name__, '_currenttobest1')
|
124 |
+
|
125 |
+
def test__mutate1(self):
|
126 |
+
# strategies */1/*, i.e. rand/1/bin, best/1/exp, etc.
|
127 |
+
result = np.array([0.05])
|
128 |
+
trial = self.dummy_solver2._best1((2, 3, 4, 5, 6))
|
129 |
+
assert_allclose(trial, result)
|
130 |
+
|
131 |
+
result = np.array([0.25])
|
132 |
+
trial = self.dummy_solver2._rand1((2, 3, 4, 5, 6))
|
133 |
+
assert_allclose(trial, result)
|
134 |
+
|
135 |
+
def test__mutate2(self):
|
136 |
+
# strategies */2/*, i.e. rand/2/bin, best/2/exp, etc.
|
137 |
+
# [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]
|
138 |
+
|
139 |
+
result = np.array([-0.1])
|
140 |
+
trial = self.dummy_solver2._best2((2, 3, 4, 5, 6))
|
141 |
+
assert_allclose(trial, result)
|
142 |
+
|
143 |
+
result = np.array([0.1])
|
144 |
+
trial = self.dummy_solver2._rand2((2, 3, 4, 5, 6))
|
145 |
+
assert_allclose(trial, result)
|
146 |
+
|
147 |
+
def test__randtobest1(self):
|
148 |
+
# strategies randtobest/1/*
|
149 |
+
result = np.array([0.15])
|
150 |
+
trial = self.dummy_solver2._randtobest1((2, 3, 4, 5, 6))
|
151 |
+
assert_allclose(trial, result)
|
152 |
+
|
153 |
+
def test__currenttobest1(self):
|
154 |
+
# strategies currenttobest/1/*
|
155 |
+
result = np.array([0.1])
|
156 |
+
trial = self.dummy_solver2._currenttobest1(1, (2, 3, 4, 5, 6))
|
157 |
+
assert_allclose(trial, result)
|
158 |
+
|
159 |
+
def test_can_init_with_dithering(self):
|
160 |
+
mutation = (0.5, 1)
|
161 |
+
solver = DifferentialEvolutionSolver(self.quadratic,
|
162 |
+
self.bounds,
|
163 |
+
mutation=mutation)
|
164 |
+
|
165 |
+
assert_equal(solver.dither, list(mutation))
|
166 |
+
|
167 |
+
def test_invalid_mutation_values_arent_accepted(self):
    """Out-of-range or non-finite mutation values must raise ValueError;
    a plain scalar disables dithering."""
    func = rosen
    # too large, negative, and NaN entries are all invalid
    for bad_mutation in [(0.5, 3), (-1, 1), (0.1, np.nan)]:
        assert_raises(ValueError,
                      DifferentialEvolutionSolver,
                      func,
                      self.bounds,
                      mutation=bad_mutation)

    # a scalar is stored as the scale, with dithering disabled
    solver = DifferentialEvolutionSolver(func, self.bounds, mutation=0.5)
    assert_equal(0.5, solver.scale)
    assert_equal(None, solver.dither)
|
196 |
+
|
197 |
+
def test_invalid_functional(self):
    """Objectives that return a vector must raise a RuntimeError."""
    def vector_valued(x):
        return np.array([np.sum(x ** 2), np.sum(x)])

    with assert_raises(
            RuntimeError,
            match=r"func\(x, \*args\) must return a scalar value"):
        differential_evolution(vector_valued, [(-2, 2), (-2, 2)])
|
205 |
+
|
206 |
+
def test__scale_parameters(self):
|
207 |
+
trial = np.array([0.3])
|
208 |
+
assert_equal(30, self.dummy_solver._scale_parameters(trial))
|
209 |
+
|
210 |
+
# it should also work with the limits reversed
|
211 |
+
self.dummy_solver.limits = np.array([[100], [0.]])
|
212 |
+
assert_equal(30, self.dummy_solver._scale_parameters(trial))
|
213 |
+
|
214 |
+
def test__unscale_parameters(self):
|
215 |
+
trial = np.array([30])
|
216 |
+
assert_equal(0.3, self.dummy_solver._unscale_parameters(trial))
|
217 |
+
|
218 |
+
# it should also work with the limits reversed
|
219 |
+
self.dummy_solver.limits = np.array([[100], [0.]])
|
220 |
+
assert_equal(0.3, self.dummy_solver._unscale_parameters(trial))
|
221 |
+
|
222 |
+
def test_equal_bounds(self):
|
223 |
+
with np.errstate(invalid='raise'):
|
224 |
+
solver = DifferentialEvolutionSolver(
|
225 |
+
self.quadratic,
|
226 |
+
bounds=[(2.0, 2.0), (1.0, 3.0)]
|
227 |
+
)
|
228 |
+
v = solver._unscale_parameters([2.0, 2.0])
|
229 |
+
assert_allclose(v, 0.5)
|
230 |
+
|
231 |
+
res = differential_evolution(self.quadratic, [(2.0, 2.0), (3.0, 3.0)])
|
232 |
+
assert_equal(res.x, [2.0, 3.0])
|
233 |
+
|
234 |
+
def test__ensure_constraint(self):
|
235 |
+
trial = np.array([1.1, -100, 0.9, 2., 300., -0.00001])
|
236 |
+
self.dummy_solver._ensure_constraint(trial)
|
237 |
+
|
238 |
+
assert_equal(trial[2], 0.9)
|
239 |
+
assert_(np.logical_and(trial >= 0, trial <= 1).all())
|
240 |
+
|
241 |
+
def test_differential_evolution(self):
|
242 |
+
# test that the Jmin of DifferentialEvolutionSolver
|
243 |
+
# is the same as the function evaluation
|
244 |
+
solver = DifferentialEvolutionSolver(
|
245 |
+
self.quadratic, [(-2, 2)], maxiter=1, polish=False
|
246 |
+
)
|
247 |
+
result = solver.solve()
|
248 |
+
assert_equal(result.fun, self.quadratic(result.x))
|
249 |
+
|
250 |
+
solver = DifferentialEvolutionSolver(
|
251 |
+
self.quadratic, [(-2, 2)], maxiter=1, polish=True
|
252 |
+
)
|
253 |
+
result = solver.solve()
|
254 |
+
assert_equal(result.fun, self.quadratic(result.x))
|
255 |
+
|
256 |
+
def test_best_solution_retrieval(self):
|
257 |
+
# test that the getter property method for the best solution works.
|
258 |
+
solver = DifferentialEvolutionSolver(self.quadratic, [(-2, 2)])
|
259 |
+
result = solver.solve()
|
260 |
+
assert_equal(result.x, solver.x)
|
261 |
+
|
262 |
+
def test_intermediate_result(self):
|
263 |
+
# Check that intermediate result object passed into the callback
|
264 |
+
# function contains the expected information and that raising
|
265 |
+
# `StopIteration` causes the expected behavior.
|
266 |
+
maxiter = 10
|
267 |
+
|
268 |
+
def func(x):
|
269 |
+
val = rosen(x)
|
270 |
+
if val < func.val:
|
271 |
+
func.x = x
|
272 |
+
func.val = val
|
273 |
+
return val
|
274 |
+
func.x = None
|
275 |
+
func.val = np.inf
|
276 |
+
|
277 |
+
def callback(intermediate_result):
|
278 |
+
callback.nit += 1
|
279 |
+
callback.intermediate_result = intermediate_result
|
280 |
+
assert intermediate_result.population.ndim == 2
|
281 |
+
assert intermediate_result.population.shape[1] == 2
|
282 |
+
assert intermediate_result.nit == callback.nit
|
283 |
+
|
284 |
+
# Check that `x` and `fun` attributes are the best found so far
|
285 |
+
assert_equal(intermediate_result.x, callback.func.x)
|
286 |
+
assert_equal(intermediate_result.fun, callback.func.val)
|
287 |
+
|
288 |
+
# Check for consistency between `fun`, `population_energies`,
|
289 |
+
# `x`, and `population`
|
290 |
+
assert_equal(intermediate_result.fun, rosen(intermediate_result.x))
|
291 |
+
for i in range(len(intermediate_result.population_energies)):
|
292 |
+
res = intermediate_result.population_energies[i]
|
293 |
+
ref = rosen(intermediate_result.population[i])
|
294 |
+
assert_equal(res, ref)
|
295 |
+
assert_equal(intermediate_result.x,
|
296 |
+
intermediate_result.population[0])
|
297 |
+
assert_equal(intermediate_result.fun,
|
298 |
+
intermediate_result.population_energies[0])
|
299 |
+
|
300 |
+
assert intermediate_result.message == 'in progress'
|
301 |
+
assert intermediate_result.success is True
|
302 |
+
assert isinstance(intermediate_result, OptimizeResult)
|
303 |
+
if callback.nit == maxiter:
|
304 |
+
raise StopIteration
|
305 |
+
callback.nit = 0
|
306 |
+
callback.intermediate_result = None
|
307 |
+
callback.func = func
|
308 |
+
|
309 |
+
bounds = [(0, 2), (0, 2)]
|
310 |
+
kwargs = dict(func=func, bounds=bounds, seed=838245, polish=False)
|
311 |
+
res = differential_evolution(**kwargs, callback=callback)
|
312 |
+
ref = differential_evolution(**kwargs, maxiter=maxiter)
|
313 |
+
|
314 |
+
# Check that final `intermediate_result` is equivalent to returned
|
315 |
+
# result object and that terminating with callback `StopIteration`
|
316 |
+
# after `maxiter` iterations is equivalent to terminating with
|
317 |
+
# `maxiter` parameter.
|
318 |
+
assert res.success is ref.success is False
|
319 |
+
assert callback.nit == res.nit == maxiter
|
320 |
+
assert res.message == 'callback function requested stop early'
|
321 |
+
assert ref.message == 'Maximum number of iterations has been exceeded.'
|
322 |
+
for field, val in ref.items():
|
323 |
+
if field in {'message', 'success'}: # checked separately
|
324 |
+
continue
|
325 |
+
assert_equal(callback.intermediate_result[field], val)
|
326 |
+
assert_equal(res[field], val)
|
327 |
+
|
328 |
+
# Check that polish occurs after `StopIteration` as advertised
|
329 |
+
callback.nit = 0
|
330 |
+
func.val = np.inf
|
331 |
+
kwargs['polish'] = True
|
332 |
+
res = differential_evolution(**kwargs, callback=callback)
|
333 |
+
assert res.fun < ref.fun
|
334 |
+
|
335 |
+
def test_callback_terminates(self):
|
336 |
+
# test that if the callback returns true, then the minimization halts
|
337 |
+
bounds = [(0, 2), (0, 2)]
|
338 |
+
expected_msg = 'callback function requested stop early'
|
339 |
+
def callback_python_true(param, convergence=0.):
|
340 |
+
return True
|
341 |
+
|
342 |
+
result = differential_evolution(
|
343 |
+
rosen, bounds, callback=callback_python_true
|
344 |
+
)
|
345 |
+
assert_string_equal(result.message, expected_msg)
|
346 |
+
|
347 |
+
# if callback raises StopIteration then solve should be interrupted
|
348 |
+
def callback_stop(intermediate_result):
|
349 |
+
raise StopIteration
|
350 |
+
|
351 |
+
result = differential_evolution(rosen, bounds, callback=callback_stop)
|
352 |
+
assert not result.success
|
353 |
+
|
354 |
+
def callback_evaluates_true(param, convergence=0.):
|
355 |
+
# DE should stop if bool(self.callback) is True
|
356 |
+
return [10]
|
357 |
+
|
358 |
+
result = differential_evolution(rosen, bounds, callback=callback_evaluates_true)
|
359 |
+
assert_string_equal(result.message, expected_msg)
|
360 |
+
assert not result.success
|
361 |
+
|
362 |
+
def callback_evaluates_false(param, convergence=0.):
|
363 |
+
return []
|
364 |
+
|
365 |
+
result = differential_evolution(rosen, bounds,
|
366 |
+
callback=callback_evaluates_false)
|
367 |
+
assert result.success
|
368 |
+
|
369 |
+
def test_args_tuple_is_passed(self):
|
370 |
+
# test that the args tuple is passed to the cost function properly.
|
371 |
+
bounds = [(-10, 10)]
|
372 |
+
args = (1., 2., 3.)
|
373 |
+
|
374 |
+
def quadratic(x, *args):
|
375 |
+
if type(args) != tuple:
|
376 |
+
raise ValueError('args should be a tuple')
|
377 |
+
return args[0] + args[1] * x + args[2] * x**2.
|
378 |
+
|
379 |
+
result = differential_evolution(quadratic,
|
380 |
+
bounds,
|
381 |
+
args=args,
|
382 |
+
polish=True)
|
383 |
+
assert_almost_equal(result.fun, 2 / 3.)
|
384 |
+
|
385 |
+
def test_init_with_invalid_strategy(self):
    """An unknown strategy string must raise ValueError."""
    assert_raises(ValueError,
                  differential_evolution,
                  rosen,
                  [(-3, 3)],
                  strategy='abc')
|
394 |
+
|
395 |
+
def test_bounds_checking(self):
    """Malformed bounds must raise; new-style Bounds objects work."""
    func = rosen
    # not a sequence of (lb, ub) pairs
    assert_raises(ValueError,
                  differential_evolution,
                  func,
                  [(-3)])
    # a 3-tuple is not a valid (lb, ub) pair
    assert_raises(ValueError,
                  differential_evolution,
                  func,
                  [(-3, 3), (3, 4, 5)])

    # the new-style Bounds object is accepted
    result = differential_evolution(rosen, Bounds([0, 0], [2, 2]))
    assert_almost_equal(result.x, (1., 1.))
|
412 |
+
|
413 |
+
def test_select_samples(self):
|
414 |
+
# select_samples should return 5 separate random numbers.
|
415 |
+
limits = np.arange(12., dtype='float64').reshape(2, 6)
|
416 |
+
bounds = list(zip(limits[0, :], limits[1, :]))
|
417 |
+
solver = DifferentialEvolutionSolver(None, bounds, popsize=1)
|
418 |
+
candidate = 0
|
419 |
+
r1, r2, r3, r4, r5 = solver._select_samples(candidate, 5)
|
420 |
+
assert_equal(
|
421 |
+
len(np.unique(np.array([candidate, r1, r2, r3, r4, r5]))), 6)
|
422 |
+
|
423 |
+
def test_maxiter_stops_solve(self):
|
424 |
+
# test that if the maximum number of iterations is exceeded
|
425 |
+
# the solver stops.
|
426 |
+
solver = DifferentialEvolutionSolver(rosen, self.bounds, maxiter=1)
|
427 |
+
result = solver.solve()
|
428 |
+
assert_equal(result.success, False)
|
429 |
+
assert_equal(result.message,
|
430 |
+
'Maximum number of iterations has been exceeded.')
|
431 |
+
|
432 |
+
def test_maxfun_stops_solve(self):
|
433 |
+
# test that if the maximum number of function evaluations is exceeded
|
434 |
+
# during initialisation the solver stops
|
435 |
+
solver = DifferentialEvolutionSolver(rosen, self.bounds, maxfun=1,
|
436 |
+
polish=False)
|
437 |
+
result = solver.solve()
|
438 |
+
|
439 |
+
assert_equal(result.nfev, 2)
|
440 |
+
assert_equal(result.success, False)
|
441 |
+
assert_equal(result.message,
|
442 |
+
'Maximum number of function evaluations has '
|
443 |
+
'been exceeded.')
|
444 |
+
|
445 |
+
# test that if the maximum number of function evaluations is exceeded
|
446 |
+
# during the actual minimisation, then the solver stops.
|
447 |
+
# Have to turn polishing off, as this will still occur even if maxfun
|
448 |
+
# is reached. For popsize=5 and len(bounds)=2, then there are only 10
|
449 |
+
# function evaluations during initialisation.
|
450 |
+
solver = DifferentialEvolutionSolver(rosen,
|
451 |
+
self.bounds,
|
452 |
+
popsize=5,
|
453 |
+
polish=False,
|
454 |
+
maxfun=40)
|
455 |
+
result = solver.solve()
|
456 |
+
|
457 |
+
assert_equal(result.nfev, 41)
|
458 |
+
assert_equal(result.success, False)
|
459 |
+
assert_equal(result.message,
|
460 |
+
'Maximum number of function evaluations has '
|
461 |
+
'been exceeded.')
|
462 |
+
|
463 |
+
# now repeat for updating='deferred version
|
464 |
+
# 47 function evaluations is not a multiple of the population size,
|
465 |
+
# so maxfun is reached partway through a population evaluation.
|
466 |
+
solver = DifferentialEvolutionSolver(rosen,
|
467 |
+
self.bounds,
|
468 |
+
popsize=5,
|
469 |
+
polish=False,
|
470 |
+
maxfun=47,
|
471 |
+
updating='deferred')
|
472 |
+
result = solver.solve()
|
473 |
+
|
474 |
+
assert_equal(result.nfev, 47)
|
475 |
+
assert_equal(result.success, False)
|
476 |
+
assert_equal(result.message,
|
477 |
+
'Maximum number of function evaluations has '
|
478 |
+
'been reached.')
|
479 |
+
|
480 |
+
def test_quadratic(self):
|
481 |
+
# test the quadratic function from object
|
482 |
+
solver = DifferentialEvolutionSolver(self.quadratic,
|
483 |
+
[(-100, 100)],
|
484 |
+
tol=0.02)
|
485 |
+
solver.solve()
|
486 |
+
assert_equal(np.argmin(solver.population_energies), 0)
|
487 |
+
|
488 |
+
def test_quadratic_from_diff_ev(self):
|
489 |
+
# test the quadratic function from differential_evolution function
|
490 |
+
differential_evolution(self.quadratic,
|
491 |
+
[(-100, 100)],
|
492 |
+
tol=0.02)
|
493 |
+
|
494 |
+
def test_seed_gives_repeatability(self):
|
495 |
+
result = differential_evolution(self.quadratic,
|
496 |
+
[(-100, 100)],
|
497 |
+
polish=False,
|
498 |
+
seed=1,
|
499 |
+
tol=0.5)
|
500 |
+
result2 = differential_evolution(self.quadratic,
|
501 |
+
[(-100, 100)],
|
502 |
+
polish=False,
|
503 |
+
seed=1,
|
504 |
+
tol=0.5)
|
505 |
+
assert_equal(result.x, result2.x)
|
506 |
+
assert_equal(result.nfev, result2.nfev)
|
507 |
+
|
508 |
+
def test_random_generator(self):
|
509 |
+
# check that np.random.Generator can be used (numpy >= 1.17)
|
510 |
+
# obtain a np.random.Generator object
|
511 |
+
rng = np.random.default_rng()
|
512 |
+
|
513 |
+
inits = ['random', 'latinhypercube', 'sobol', 'halton']
|
514 |
+
for init in inits:
|
515 |
+
differential_evolution(self.quadratic,
|
516 |
+
[(-100, 100)],
|
517 |
+
polish=False,
|
518 |
+
seed=rng,
|
519 |
+
tol=0.5,
|
520 |
+
init=init)
|
521 |
+
|
522 |
+
def test_exp_runs(self):
|
523 |
+
# test whether exponential mutation loop runs
|
524 |
+
solver = DifferentialEvolutionSolver(rosen,
|
525 |
+
self.bounds,
|
526 |
+
strategy='best1exp',
|
527 |
+
maxiter=1)
|
528 |
+
|
529 |
+
solver.solve()
|
530 |
+
|
531 |
+
def test_gh_4511_regression(self):
|
532 |
+
# This modification of the differential evolution docstring example
|
533 |
+
# uses a custom popsize that had triggered an off-by-one error.
|
534 |
+
# Because we do not care about solving the optimization problem in
|
535 |
+
# this test, we use maxiter=1 to reduce the testing time.
|
536 |
+
bounds = [(-5, 5), (-5, 5)]
|
537 |
+
# result = differential_evolution(rosen, bounds, popsize=1815,
|
538 |
+
# maxiter=1)
|
539 |
+
|
540 |
+
# the original issue arose because of rounding error in arange, with
|
541 |
+
# linspace being a much better solution. 1815 is quite a large popsize
|
542 |
+
# to use and results in a long test time (~13s). I used the original
|
543 |
+
# issue to figure out the lowest number of samples that would cause
|
544 |
+
# this rounding error to occur, 49.
|
545 |
+
differential_evolution(rosen, bounds, popsize=49, maxiter=1)
|
546 |
+
|
547 |
+
def test_calculate_population_energies(self):
|
548 |
+
# if popsize is 3, then the overall generation has size (6,)
|
549 |
+
solver = DifferentialEvolutionSolver(rosen, self.bounds, popsize=3)
|
550 |
+
solver._calculate_population_energies(solver.population)
|
551 |
+
solver._promote_lowest_energy()
|
552 |
+
assert_equal(np.argmin(solver.population_energies), 0)
|
553 |
+
|
554 |
+
# initial calculation of the energies should require 6 nfev.
|
555 |
+
assert_equal(solver._nfev, 6)
|
556 |
+
|
557 |
+
def test_iteration(self):
    """The solver is iterable, yielding (x, fun) once per generation."""
    # popsize=3 -> an overall generation of 6 members for 2 parameters
    solver = DifferentialEvolutionSolver(rosen, self.bounds, popsize=3,
                                         maxfun=12)
    x, fun = next(solver)
    assert_equal(np.size(x, 0), 2)

    # 6 nfev to initialise the energies + 6 to evolve the 6 members
    assert_equal(solver._nfev, 12)

    # the next generation exceeds maxfun, which must stop the iteration
    assert_raises(StopIteration, next, solver)

    # a proper minimisation can be driven via the iterator protocol
    solver = DifferentialEvolutionSolver(rosen, self.bounds)
    _, fun_prev = next(solver)
    for i, soln in enumerate(solver):
        x_current, fun_current = soln
        assert fun_prev >= fun_current
        _, fun_prev = x_current, fun_current
        # bail out manually, otherwise the solver would never stop
        if i == 50:
            break
|
582 |
+
|
583 |
+
def test_convergence(self):
|
584 |
+
solver = DifferentialEvolutionSolver(rosen, self.bounds, tol=0.2,
|
585 |
+
polish=False)
|
586 |
+
solver.solve()
|
587 |
+
assert_(solver.convergence < 0.2)
|
588 |
+
|
589 |
+
def test_maxiter_none_GH5731(self):
|
590 |
+
# Pre 0.17 the previous default for maxiter and maxfun was None.
|
591 |
+
# the numerical defaults are now 1000 and np.inf. However, some scripts
|
592 |
+
# will still supply None for both of those, this will raise a TypeError
|
593 |
+
# in the solve method.
|
594 |
+
solver = DifferentialEvolutionSolver(rosen, self.bounds, maxiter=None,
|
595 |
+
maxfun=None)
|
596 |
+
solver.solve()
|
597 |
+
|
598 |
+
def test_population_initiation(self):
|
599 |
+
# test the different modes of population initiation
|
600 |
+
|
601 |
+
# init must be either 'latinhypercube' or 'random'
|
602 |
+
# raising ValueError is something else is passed in
|
603 |
+
assert_raises(ValueError,
|
604 |
+
DifferentialEvolutionSolver,
|
605 |
+
*(rosen, self.bounds),
|
606 |
+
**{'init': 'rubbish'})
|
607 |
+
|
608 |
+
solver = DifferentialEvolutionSolver(rosen, self.bounds)
|
609 |
+
|
610 |
+
# check that population initiation:
|
611 |
+
# 1) resets _nfev to 0
|
612 |
+
# 2) all population energies are np.inf
|
613 |
+
solver.init_population_random()
|
614 |
+
assert_equal(solver._nfev, 0)
|
615 |
+
assert_(np.all(np.isinf(solver.population_energies)))
|
616 |
+
|
617 |
+
solver.init_population_lhs()
|
618 |
+
assert_equal(solver._nfev, 0)
|
619 |
+
assert_(np.all(np.isinf(solver.population_energies)))
|
620 |
+
|
621 |
+
solver.init_population_qmc(qmc_engine='halton')
|
622 |
+
assert_equal(solver._nfev, 0)
|
623 |
+
assert_(np.all(np.isinf(solver.population_energies)))
|
624 |
+
|
625 |
+
solver = DifferentialEvolutionSolver(rosen, self.bounds, init='sobol')
|
626 |
+
solver.init_population_qmc(qmc_engine='sobol')
|
627 |
+
assert_equal(solver._nfev, 0)
|
628 |
+
assert_(np.all(np.isinf(solver.population_energies)))
|
629 |
+
|
630 |
+
# we should be able to initialize with our own array
|
631 |
+
population = np.linspace(-1, 3, 10).reshape(5, 2)
|
632 |
+
solver = DifferentialEvolutionSolver(rosen, self.bounds,
|
633 |
+
init=population,
|
634 |
+
strategy='best2bin',
|
635 |
+
atol=0.01, seed=1, popsize=5)
|
636 |
+
|
637 |
+
assert_equal(solver._nfev, 0)
|
638 |
+
assert_(np.all(np.isinf(solver.population_energies)))
|
639 |
+
assert_(solver.num_population_members == 5)
|
640 |
+
assert_(solver.population_shape == (5, 2))
|
641 |
+
|
642 |
+
# check that the population was initialized correctly
|
643 |
+
unscaled_population = np.clip(solver._unscale_parameters(population),
|
644 |
+
0, 1)
|
645 |
+
assert_almost_equal(solver.population[:5], unscaled_population)
|
646 |
+
|
647 |
+
# population values need to be clipped to bounds
|
648 |
+
assert_almost_equal(np.min(solver.population[:5]), 0)
|
649 |
+
assert_almost_equal(np.max(solver.population[:5]), 1)
|
650 |
+
|
651 |
+
# shouldn't be able to initialize with an array if it's the wrong shape
|
652 |
+
# this would have too many parameters
|
653 |
+
population = np.linspace(-1, 3, 15).reshape(5, 3)
|
654 |
+
assert_raises(ValueError,
|
655 |
+
DifferentialEvolutionSolver,
|
656 |
+
*(rosen, self.bounds),
|
657 |
+
**{'init': population})
|
658 |
+
|
659 |
+
# provide an initial solution
|
660 |
+
# bounds are [(0, 2), (0, 2)]
|
661 |
+
x0 = np.random.uniform(low=0.0, high=2.0, size=2)
|
662 |
+
solver = DifferentialEvolutionSolver(
|
663 |
+
rosen, self.bounds, x0=x0
|
664 |
+
)
|
665 |
+
# parameters are scaled to unit interval
|
666 |
+
assert_allclose(solver.population[0], x0 / 2.0)
|
667 |
+
|
668 |
+
def test_x0(self):
|
669 |
+
# smoke test that checks that x0 is usable.
|
670 |
+
res = differential_evolution(rosen, self.bounds, x0=[0.2, 0.8])
|
671 |
+
assert res.success
|
672 |
+
|
673 |
+
# check what happens if some of the x0 lay outside the bounds
|
674 |
+
with assert_raises(ValueError):
|
675 |
+
differential_evolution(rosen, self.bounds, x0=[0.2, 2.1])
|
676 |
+
|
677 |
+
def test_infinite_objective_function(self):
|
678 |
+
# Test that there are no problems if the objective function
|
679 |
+
# returns inf on some runs
|
680 |
+
def sometimes_inf(x):
|
681 |
+
if x[0] < .5:
|
682 |
+
return np.inf
|
683 |
+
return x[1]
|
684 |
+
bounds = [(0, 1), (0, 1)]
|
685 |
+
differential_evolution(sometimes_inf, bounds=bounds, disp=False)
|
686 |
+
|
687 |
+
def test_deferred_updating(self):
|
688 |
+
# check setting of deferred updating, with default workers
|
689 |
+
bounds = [(0., 2.), (0., 2.)]
|
690 |
+
solver = DifferentialEvolutionSolver(rosen, bounds, updating='deferred')
|
691 |
+
assert_(solver._updating == 'deferred')
|
692 |
+
assert_(solver._mapwrapper._mapfunc is map)
|
693 |
+
solver.solve()
|
694 |
+
|
695 |
+
def test_immediate_updating(self):
|
696 |
+
# check setting of immediate updating, with default workers
|
697 |
+
bounds = [(0., 2.), (0., 2.)]
|
698 |
+
solver = DifferentialEvolutionSolver(rosen, bounds)
|
699 |
+
assert_(solver._updating == 'immediate')
|
700 |
+
|
701 |
+
# Safely forking from a multithreaded process is
|
702 |
+
# problematic, and deprecated in Python 3.12, so
|
703 |
+
# we use a slower but portable alternative
|
704 |
+
# see gh-19848
|
705 |
+
ctx = multiprocessing.get_context("spawn")
|
706 |
+
with ctx.Pool(2) as p:
|
707 |
+
# should raise a UserWarning because the updating='immediate'
|
708 |
+
# is being overridden by the workers keyword
|
709 |
+
with warns(UserWarning):
|
710 |
+
with DifferentialEvolutionSolver(rosen, bounds, workers=p.map) as s:
|
711 |
+
pass
|
712 |
+
assert s._updating == 'deferred'
|
713 |
+
|
714 |
+
def test_parallel(self):
|
715 |
+
# smoke test for parallelization with deferred updating
|
716 |
+
bounds = [(0., 2.), (0., 2.)]
|
717 |
+
with multiprocessing.Pool(2) as p, DifferentialEvolutionSolver(
|
718 |
+
rosen, bounds, updating='deferred', workers=p.map) as solver:
|
719 |
+
assert_(solver._mapwrapper.pool is not None)
|
720 |
+
assert_(solver._updating == 'deferred')
|
721 |
+
solver.solve()
|
722 |
+
|
723 |
+
with DifferentialEvolutionSolver(rosen, bounds, updating='deferred',
|
724 |
+
workers=2) as solver:
|
725 |
+
assert_(solver._mapwrapper.pool is not None)
|
726 |
+
assert_(solver._updating == 'deferred')
|
727 |
+
solver.solve()
|
728 |
+
|
729 |
+
def test_converged(self):
|
730 |
+
solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)])
|
731 |
+
solver.solve()
|
732 |
+
assert_(solver.converged())
|
733 |
+
|
734 |
+
def test_constraint_violation_fn(self):
|
735 |
+
def constr_f(x):
|
736 |
+
return [x[0] + x[1]]
|
737 |
+
|
738 |
+
def constr_f2(x):
|
739 |
+
return np.array([x[0]**2 + x[1], x[0] - x[1]])
|
740 |
+
|
741 |
+
nlc = NonlinearConstraint(constr_f, -np.inf, 1.9)
|
742 |
+
|
743 |
+
solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
|
744 |
+
constraints=(nlc))
|
745 |
+
|
746 |
+
cv = solver._constraint_violation_fn(np.array([1.0, 1.0]))
|
747 |
+
assert_almost_equal(cv, 0.1)
|
748 |
+
|
749 |
+
nlc2 = NonlinearConstraint(constr_f2, -np.inf, 1.8)
|
750 |
+
solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
|
751 |
+
constraints=(nlc, nlc2))
|
752 |
+
|
753 |
+
# for multiple constraints the constraint violations should
|
754 |
+
# be concatenated.
|
755 |
+
xs = [(1.2, 1), (2.0, 2.0), (0.5, 0.5)]
|
756 |
+
vs = [(0.3, 0.64, 0.0), (2.1, 4.2, 0.0), (0, 0, 0)]
|
757 |
+
|
758 |
+
for x, v in zip(xs, vs):
|
759 |
+
cv = solver._constraint_violation_fn(np.array(x))
|
760 |
+
assert_allclose(cv, np.atleast_2d(v))
|
761 |
+
|
762 |
+
# vectorized calculation of a series of solutions
|
763 |
+
assert_allclose(
|
764 |
+
solver._constraint_violation_fn(np.array(xs)), np.array(vs)
|
765 |
+
)
|
766 |
+
|
767 |
+
# the following line is used in _calculate_population_feasibilities.
|
768 |
+
# _constraint_violation_fn returns an (1, M) array when
|
769 |
+
# x.shape == (N,), i.e. a single solution. Therefore this list
|
770 |
+
# comprehension should generate (S, 1, M) array.
|
771 |
+
constraint_violation = np.array([solver._constraint_violation_fn(x)
|
772 |
+
for x in np.array(xs)])
|
773 |
+
assert constraint_violation.shape == (3, 1, 3)
|
774 |
+
|
775 |
+
# we need reasonable error messages if the constraint function doesn't
|
776 |
+
# return the right thing
|
777 |
+
def constr_f3(x):
|
778 |
+
# returns (S, M), rather than (M, S)
|
779 |
+
return constr_f2(x).T
|
780 |
+
|
781 |
+
nlc2 = NonlinearConstraint(constr_f3, -np.inf, 1.8)
|
782 |
+
solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
|
783 |
+
constraints=(nlc, nlc2),
|
784 |
+
vectorized=False)
|
785 |
+
solver.vectorized = True
|
786 |
+
with pytest.raises(
|
787 |
+
RuntimeError, match="An array returned from a Constraint"
|
788 |
+
):
|
789 |
+
solver._constraint_violation_fn(np.array(xs))
|
790 |
+
|
791 |
+
def test_constraint_population_feasibilities(self):
|
792 |
+
def constr_f(x):
|
793 |
+
return [x[0] + x[1]]
|
794 |
+
|
795 |
+
def constr_f2(x):
|
796 |
+
return [x[0]**2 + x[1], x[0] - x[1]]
|
797 |
+
|
798 |
+
nlc = NonlinearConstraint(constr_f, -np.inf, 1.9)
|
799 |
+
|
800 |
+
solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
|
801 |
+
constraints=(nlc))
|
802 |
+
|
803 |
+
# are population feasibilities correct
|
804 |
+
# [0.5, 0.5] corresponds to scaled values of [1., 1.]
|
805 |
+
feas, cv = solver._calculate_population_feasibilities(
|
806 |
+
np.array([[0.5, 0.5], [1., 1.]]))
|
807 |
+
assert_equal(feas, [False, False])
|
808 |
+
assert_almost_equal(cv, np.array([[0.1], [2.1]]))
|
809 |
+
assert cv.shape == (2, 1)
|
810 |
+
|
811 |
+
nlc2 = NonlinearConstraint(constr_f2, -np.inf, 1.8)
|
812 |
+
|
813 |
+
for vectorize in [False, True]:
|
814 |
+
solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
|
815 |
+
constraints=(nlc, nlc2),
|
816 |
+
vectorized=vectorize,
|
817 |
+
updating='deferred')
|
818 |
+
|
819 |
+
feas, cv = solver._calculate_population_feasibilities(
|
820 |
+
np.array([[0.5, 0.5], [0.6, 0.5]]))
|
821 |
+
assert_equal(feas, [False, False])
|
822 |
+
assert_almost_equal(cv, np.array([[0.1, 0.2, 0], [0.3, 0.64, 0]]))
|
823 |
+
|
824 |
+
feas, cv = solver._calculate_population_feasibilities(
|
825 |
+
np.array([[0.5, 0.5], [1., 1.]]))
|
826 |
+
assert_equal(feas, [False, False])
|
827 |
+
assert_almost_equal(cv, np.array([[0.1, 0.2, 0], [2.1, 4.2, 0]]))
|
828 |
+
assert cv.shape == (2, 3)
|
829 |
+
|
830 |
+
feas, cv = solver._calculate_population_feasibilities(
|
831 |
+
np.array([[0.25, 0.25], [1., 1.]]))
|
832 |
+
assert_equal(feas, [True, False])
|
833 |
+
assert_almost_equal(cv, np.array([[0.0, 0.0, 0.], [2.1, 4.2, 0]]))
|
834 |
+
assert cv.shape == (2, 3)
|
835 |
+
|
836 |
+
def test_constraint_solve(self):
|
837 |
+
def constr_f(x):
|
838 |
+
return np.array([x[0] + x[1]])
|
839 |
+
|
840 |
+
nlc = NonlinearConstraint(constr_f, -np.inf, 1.9)
|
841 |
+
|
842 |
+
solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
|
843 |
+
constraints=(nlc))
|
844 |
+
|
845 |
+
# trust-constr warns if the constraint function is linear
|
846 |
+
with warns(UserWarning):
|
847 |
+
res = solver.solve()
|
848 |
+
|
849 |
+
assert constr_f(res.x) <= 1.9
|
850 |
+
assert res.success
|
851 |
+
|
852 |
+
def test_impossible_constraint(self):
|
853 |
+
def constr_f(x):
|
854 |
+
return np.array([x[0] + x[1]])
|
855 |
+
|
856 |
+
nlc = NonlinearConstraint(constr_f, -np.inf, -1)
|
857 |
+
|
858 |
+
solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
|
859 |
+
constraints=(nlc), popsize=3,
|
860 |
+
seed=1)
|
861 |
+
|
862 |
+
# a UserWarning is issued because the 'trust-constr' polishing is
|
863 |
+
# attempted on the least infeasible solution found.
|
864 |
+
with warns(UserWarning):
|
865 |
+
res = solver.solve()
|
866 |
+
|
867 |
+
assert res.maxcv > 0
|
868 |
+
assert not res.success
|
869 |
+
|
870 |
+
# test _promote_lowest_energy works when none of the population is
|
871 |
+
# feasible. In this case, the solution with the lowest constraint
|
872 |
+
# violation should be promoted.
|
873 |
+
solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
|
874 |
+
constraints=(nlc), polish=False)
|
875 |
+
next(solver)
|
876 |
+
assert not solver.feasible.all()
|
877 |
+
assert not np.isfinite(solver.population_energies).all()
|
878 |
+
|
879 |
+
# now swap two of the entries in the population
|
880 |
+
l = 20
|
881 |
+
cv = solver.constraint_violation[0]
|
882 |
+
|
883 |
+
solver.population_energies[[0, l]] = solver.population_energies[[l, 0]]
|
884 |
+
solver.population[[0, l], :] = solver.population[[l, 0], :]
|
885 |
+
solver.constraint_violation[[0, l], :] = (
|
886 |
+
solver.constraint_violation[[l, 0], :])
|
887 |
+
|
888 |
+
solver._promote_lowest_energy()
|
889 |
+
assert_equal(solver.constraint_violation[0], cv)
|
890 |
+
|
891 |
+
def test_accept_trial(self):
|
892 |
+
# _accept_trial(self, energy_trial, feasible_trial, cv_trial,
|
893 |
+
# energy_orig, feasible_orig, cv_orig)
|
894 |
+
def constr_f(x):
|
895 |
+
return [x[0] + x[1]]
|
896 |
+
nlc = NonlinearConstraint(constr_f, -np.inf, 1.9)
|
897 |
+
solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
|
898 |
+
constraints=(nlc))
|
899 |
+
fn = solver._accept_trial
|
900 |
+
# both solutions are feasible, select lower energy
|
901 |
+
assert fn(0.1, True, np.array([0.]), 1.0, True, np.array([0.]))
|
902 |
+
assert (fn(1.0, True, np.array([0.0]), 0.1, True, np.array([0.0])) is False)
|
903 |
+
assert fn(0.1, True, np.array([0.]), 0.1, True, np.array([0.]))
|
904 |
+
|
905 |
+
# trial is feasible, original is not
|
906 |
+
assert fn(9.9, True, np.array([0.]), 1.0, False, np.array([1.]))
|
907 |
+
|
908 |
+
# trial and original are infeasible
|
909 |
+
# cv_trial have to be <= cv_original to be better
|
910 |
+
assert (fn(0.1, False, np.array([0.5, 0.5]),
|
911 |
+
1.0, False, np.array([1., 1.0])))
|
912 |
+
assert (fn(0.1, False, np.array([0.5, 0.5]),
|
913 |
+
1.0, False, np.array([1., 0.50])))
|
914 |
+
assert not (fn(1.0, False, np.array([0.5, 0.5]),
|
915 |
+
1.0, False, np.array([1.0, 0.4])))
|
916 |
+
|
917 |
+
def test_constraint_wrapper(self):
|
918 |
+
lb = np.array([0, 20, 30])
|
919 |
+
ub = np.array([0.5, np.inf, 70])
|
920 |
+
x0 = np.array([1, 2, 3])
|
921 |
+
pc = _ConstraintWrapper(Bounds(lb, ub), x0)
|
922 |
+
assert (pc.violation(x0) > 0).any()
|
923 |
+
assert (pc.violation([0.25, 21, 31]) == 0).all()
|
924 |
+
|
925 |
+
# check vectorized Bounds constraint
|
926 |
+
xs = np.arange(1, 16).reshape(5, 3)
|
927 |
+
violations = []
|
928 |
+
for x in xs:
|
929 |
+
violations.append(pc.violation(x))
|
930 |
+
np.testing.assert_allclose(pc.violation(xs.T), np.array(violations).T)
|
931 |
+
|
932 |
+
x0 = np.array([1, 2, 3, 4])
|
933 |
+
A = np.array([[1, 2, 3, 4], [5, 0, 0, 6], [7, 0, 8, 0]])
|
934 |
+
pc = _ConstraintWrapper(LinearConstraint(A, -np.inf, 0), x0)
|
935 |
+
assert (pc.violation(x0) > 0).any()
|
936 |
+
assert (pc.violation([-10, 2, -10, 4]) == 0).all()
|
937 |
+
|
938 |
+
# check vectorized LinearConstraint, for 7 lots of parameter vectors
|
939 |
+
# with each parameter vector being 4 long, with 3 constraints
|
940 |
+
# xs is the same shape as stored in the differential evolution
|
941 |
+
# population, but it's sent to the violation function as (len(x), M)
|
942 |
+
xs = np.arange(1, 29).reshape(7, 4)
|
943 |
+
violations = []
|
944 |
+
for x in xs:
|
945 |
+
violations.append(pc.violation(x))
|
946 |
+
np.testing.assert_allclose(pc.violation(xs.T), np.array(violations).T)
|
947 |
+
|
948 |
+
pc = _ConstraintWrapper(LinearConstraint(csr_matrix(A), -np.inf, 0),
|
949 |
+
x0)
|
950 |
+
assert (pc.violation(x0) > 0).any()
|
951 |
+
assert (pc.violation([-10, 2, -10, 4]) == 0).all()
|
952 |
+
|
953 |
+
def fun(x):
|
954 |
+
return A.dot(x)
|
955 |
+
|
956 |
+
nonlinear = NonlinearConstraint(fun, -np.inf, 0)
|
957 |
+
pc = _ConstraintWrapper(nonlinear, [-10, 2, -10, 4])
|
958 |
+
assert (pc.violation(x0) > 0).any()
|
959 |
+
assert (pc.violation([-10, 2, -10, 4]) == 0).all()
|
960 |
+
|
961 |
+
def test_constraint_wrapper_violation(self):
|
962 |
+
def cons_f(x):
|
963 |
+
# written in vectorised form to accept an array of (N, S)
|
964 |
+
# returning (M, S)
|
965 |
+
# where N is the number of parameters,
|
966 |
+
# S is the number of solution vectors to be examined,
|
967 |
+
# and M is the number of constraint components
|
968 |
+
return np.array([x[0] ** 2 + x[1],
|
969 |
+
x[0] ** 2 - x[1]])
|
970 |
+
|
971 |
+
nlc = NonlinearConstraint(cons_f, [-1, -0.8500], [2, 2])
|
972 |
+
pc = _ConstraintWrapper(nlc, [0.5, 1])
|
973 |
+
assert np.size(pc.bounds[0]) == 2
|
974 |
+
|
975 |
+
xs = [(0.5, 1), (0.5, 1.2), (1.2, 1.2), (0.1, -1.2), (0.1, 2.0)]
|
976 |
+
vs = [(0, 0), (0, 0.1), (0.64, 0), (0.19, 0), (0.01, 1.14)]
|
977 |
+
|
978 |
+
for x, v in zip(xs, vs):
|
979 |
+
assert_allclose(pc.violation(x), v)
|
980 |
+
|
981 |
+
# now check that we can vectorize the constraint wrapper
|
982 |
+
assert_allclose(pc.violation(np.array(xs).T),
|
983 |
+
np.array(vs).T)
|
984 |
+
assert pc.fun(np.array(xs).T).shape == (2, len(xs))
|
985 |
+
assert pc.violation(np.array(xs).T).shape == (2, len(xs))
|
986 |
+
assert pc.num_constr == 2
|
987 |
+
assert pc.parameter_count == 2
|
988 |
+
|
989 |
+
def test_matrix_linear_constraint(self):
|
990 |
+
# gh20041 supplying an np.matrix to construct a LinearConstraint caused
|
991 |
+
# _ConstraintWrapper to start returning constraint violations of the
|
992 |
+
# wrong shape.
|
993 |
+
with suppress_warnings() as sup:
|
994 |
+
sup.filter(PendingDeprecationWarning)
|
995 |
+
matrix = np.matrix([[1, 1, 1, 1.],
|
996 |
+
[2, 2, 2, 2.]])
|
997 |
+
lc = LinearConstraint(matrix, 0, 1)
|
998 |
+
x0 = np.ones(4)
|
999 |
+
cw = _ConstraintWrapper(lc, x0)
|
1000 |
+
# the shape of the constraint violation should be the same as the number
|
1001 |
+
# of constraints applied.
|
1002 |
+
assert cw.violation(x0).shape == (2,)
|
1003 |
+
|
1004 |
+
# let's try a vectorised violation call.
|
1005 |
+
xtrial = np.arange(4 * 5).reshape(4, 5)
|
1006 |
+
assert cw.violation(xtrial).shape == (2, 5)
|
1007 |
+
|
1008 |
+
|
1009 |
+
def test_L1(self):
|
1010 |
+
# Lampinen ([5]) test problem 1
|
1011 |
+
|
1012 |
+
def f(x):
|
1013 |
+
x = np.hstack(([0], x)) # 1-indexed to match reference
|
1014 |
+
fun = np.sum(5*x[1:5]) - 5*x[1:5]@x[1:5] - np.sum(x[5:])
|
1015 |
+
return fun
|
1016 |
+
|
1017 |
+
A = np.zeros((10, 14)) # 1-indexed to match reference
|
1018 |
+
A[1, [1, 2, 10, 11]] = 2, 2, 1, 1
|
1019 |
+
A[2, [1, 10]] = -8, 1
|
1020 |
+
A[3, [4, 5, 10]] = -2, -1, 1
|
1021 |
+
A[4, [1, 3, 10, 11]] = 2, 2, 1, 1
|
1022 |
+
A[5, [2, 11]] = -8, 1
|
1023 |
+
A[6, [6, 7, 11]] = -2, -1, 1
|
1024 |
+
A[7, [2, 3, 11, 12]] = 2, 2, 1, 1
|
1025 |
+
A[8, [3, 12]] = -8, 1
|
1026 |
+
A[9, [8, 9, 12]] = -2, -1, 1
|
1027 |
+
A = A[1:, 1:]
|
1028 |
+
|
1029 |
+
b = np.array([10, 0, 0, 10, 0, 0, 10, 0, 0])
|
1030 |
+
|
1031 |
+
L = LinearConstraint(A, -np.inf, b)
|
1032 |
+
|
1033 |
+
bounds = [(0, 1)]*9 + [(0, 100)]*3 + [(0, 1)]
|
1034 |
+
|
1035 |
+
# using a lower popsize to speed the test up
|
1036 |
+
res = differential_evolution(f, bounds, strategy='best1bin', seed=1234,
|
1037 |
+
constraints=(L), popsize=2)
|
1038 |
+
|
1039 |
+
x_opt = (1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 1)
|
1040 |
+
f_opt = -15
|
1041 |
+
|
1042 |
+
assert_allclose(f(x_opt), f_opt, atol=6e-4)
|
1043 |
+
assert res.success
|
1044 |
+
assert_allclose(res.x, x_opt, atol=6e-4)
|
1045 |
+
assert_allclose(res.fun, f_opt, atol=5e-3)
|
1046 |
+
assert_(np.all([email protected] <= b))
|
1047 |
+
assert_(np.all(res.x >= np.array(bounds)[:, 0]))
|
1048 |
+
assert_(np.all(res.x <= np.array(bounds)[:, 1]))
|
1049 |
+
|
1050 |
+
# now repeat the same solve, using the same overall constraints,
|
1051 |
+
# but using a sparse matrix for the LinearConstraint instead of an
|
1052 |
+
# array
|
1053 |
+
|
1054 |
+
L = LinearConstraint(csr_matrix(A), -np.inf, b)
|
1055 |
+
|
1056 |
+
# using a lower popsize to speed the test up
|
1057 |
+
res = differential_evolution(f, bounds, strategy='best1bin', seed=1234,
|
1058 |
+
constraints=(L), popsize=2)
|
1059 |
+
|
1060 |
+
assert_allclose(f(x_opt), f_opt)
|
1061 |
+
assert res.success
|
1062 |
+
assert_allclose(res.x, x_opt, atol=5e-4)
|
1063 |
+
assert_allclose(res.fun, f_opt, atol=5e-3)
|
1064 |
+
assert_(np.all([email protected] <= b))
|
1065 |
+
assert_(np.all(res.x >= np.array(bounds)[:, 0]))
|
1066 |
+
assert_(np.all(res.x <= np.array(bounds)[:, 1]))
|
1067 |
+
|
1068 |
+
# now repeat the same solve, using the same overall constraints,
|
1069 |
+
# but specify half the constraints in terms of LinearConstraint,
|
1070 |
+
# and the other half by NonlinearConstraint
|
1071 |
+
def c1(x):
|
1072 |
+
x = np.hstack(([0], x))
|
1073 |
+
return [2*x[2] + 2*x[3] + x[11] + x[12],
|
1074 |
+
-8*x[3] + x[12]]
|
1075 |
+
|
1076 |
+
def c2(x):
|
1077 |
+
x = np.hstack(([0], x))
|
1078 |
+
return -2*x[8] - x[9] + x[12]
|
1079 |
+
|
1080 |
+
L = LinearConstraint(A[:5, :], -np.inf, b[:5])
|
1081 |
+
L2 = LinearConstraint(A[5:6, :], -np.inf, b[5:6])
|
1082 |
+
N = NonlinearConstraint(c1, -np.inf, b[6:8])
|
1083 |
+
N2 = NonlinearConstraint(c2, -np.inf, b[8:9])
|
1084 |
+
constraints = (L, N, L2, N2)
|
1085 |
+
|
1086 |
+
with suppress_warnings() as sup:
|
1087 |
+
sup.filter(UserWarning)
|
1088 |
+
res = differential_evolution(f, bounds, strategy='rand1bin',
|
1089 |
+
seed=1234, constraints=constraints,
|
1090 |
+
popsize=2)
|
1091 |
+
|
1092 |
+
assert_allclose(res.x, x_opt, atol=6e-4)
|
1093 |
+
assert_allclose(res.fun, f_opt, atol=5e-3)
|
1094 |
+
assert_(np.all([email protected] <= b))
|
1095 |
+
assert_(np.all(res.x >= np.array(bounds)[:, 0]))
|
1096 |
+
assert_(np.all(res.x <= np.array(bounds)[:, 1]))
|
1097 |
+
|
1098 |
+
def test_L2(self):
|
1099 |
+
# Lampinen ([5]) test problem 2
|
1100 |
+
|
1101 |
+
def f(x):
|
1102 |
+
x = np.hstack(([0], x)) # 1-indexed to match reference
|
1103 |
+
fun = ((x[1]-10)**2 + 5*(x[2]-12)**2 + x[3]**4 + 3*(x[4]-11)**2 +
|
1104 |
+
10*x[5]**6 + 7*x[6]**2 + x[7]**4 - 4*x[6]*x[7] - 10*x[6] -
|
1105 |
+
8*x[7])
|
1106 |
+
return fun
|
1107 |
+
|
1108 |
+
def c1(x):
|
1109 |
+
x = np.hstack(([0], x)) # 1-indexed to match reference
|
1110 |
+
return [127 - 2*x[1]**2 - 3*x[2]**4 - x[3] - 4*x[4]**2 - 5*x[5],
|
1111 |
+
196 - 23*x[1] - x[2]**2 - 6*x[6]**2 + 8*x[7],
|
1112 |
+
282 - 7*x[1] - 3*x[2] - 10*x[3]**2 - x[4] + x[5],
|
1113 |
+
-4*x[1]**2 - x[2]**2 + 3*x[1]*x[2] - 2*x[3]**2 -
|
1114 |
+
5*x[6] + 11*x[7]]
|
1115 |
+
|
1116 |
+
N = NonlinearConstraint(c1, 0, np.inf)
|
1117 |
+
bounds = [(-10, 10)]*7
|
1118 |
+
constraints = (N)
|
1119 |
+
|
1120 |
+
with suppress_warnings() as sup:
|
1121 |
+
sup.filter(UserWarning)
|
1122 |
+
res = differential_evolution(f, bounds, strategy='rand1bin',
|
1123 |
+
seed=1234, constraints=constraints)
|
1124 |
+
|
1125 |
+
f_opt = 680.6300599487869
|
1126 |
+
x_opt = (2.330499, 1.951372, -0.4775414, 4.365726,
|
1127 |
+
-0.6244870, 1.038131, 1.594227)
|
1128 |
+
|
1129 |
+
assert_allclose(f(x_opt), f_opt)
|
1130 |
+
assert_allclose(res.fun, f_opt)
|
1131 |
+
assert_allclose(res.x, x_opt, atol=1e-5)
|
1132 |
+
assert res.success
|
1133 |
+
assert_(np.all(np.array(c1(res.x)) >= 0))
|
1134 |
+
assert_(np.all(res.x >= np.array(bounds)[:, 0]))
|
1135 |
+
assert_(np.all(res.x <= np.array(bounds)[:, 1]))
|
1136 |
+
|
1137 |
+
def test_L3(self):
|
1138 |
+
# Lampinen ([5]) test problem 3
|
1139 |
+
|
1140 |
+
def f(x):
|
1141 |
+
x = np.hstack(([0], x)) # 1-indexed to match reference
|
1142 |
+
fun = (x[1]**2 + x[2]**2 + x[1]*x[2] - 14*x[1] - 16*x[2] +
|
1143 |
+
(x[3]-10)**2 + 4*(x[4]-5)**2 + (x[5]-3)**2 + 2*(x[6]-1)**2 +
|
1144 |
+
5*x[7]**2 + 7*(x[8]-11)**2 + 2*(x[9]-10)**2 +
|
1145 |
+
(x[10] - 7)**2 + 45
|
1146 |
+
)
|
1147 |
+
return fun # maximize
|
1148 |
+
|
1149 |
+
A = np.zeros((4, 11))
|
1150 |
+
A[1, [1, 2, 7, 8]] = -4, -5, 3, -9
|
1151 |
+
A[2, [1, 2, 7, 8]] = -10, 8, 17, -2
|
1152 |
+
A[3, [1, 2, 9, 10]] = 8, -2, -5, 2
|
1153 |
+
A = A[1:, 1:]
|
1154 |
+
b = np.array([-105, 0, -12])
|
1155 |
+
|
1156 |
+
def c1(x):
|
1157 |
+
x = np.hstack(([0], x)) # 1-indexed to match reference
|
1158 |
+
return [3*x[1] - 6*x[2] - 12*(x[9]-8)**2 + 7*x[10],
|
1159 |
+
-3*(x[1]-2)**2 - 4*(x[2]-3)**2 - 2*x[3]**2 + 7*x[4] + 120,
|
1160 |
+
-x[1]**2 - 2*(x[2]-2)**2 + 2*x[1]*x[2] - 14*x[5] + 6*x[6],
|
1161 |
+
-5*x[1]**2 - 8*x[2] - (x[3]-6)**2 + 2*x[4] + 40,
|
1162 |
+
-0.5*(x[1]-8)**2 - 2*(x[2]-4)**2 - 3*x[5]**2 + x[6] + 30]
|
1163 |
+
|
1164 |
+
L = LinearConstraint(A, b, np.inf)
|
1165 |
+
N = NonlinearConstraint(c1, 0, np.inf)
|
1166 |
+
bounds = [(-10, 10)]*10
|
1167 |
+
constraints = (L, N)
|
1168 |
+
|
1169 |
+
with suppress_warnings() as sup:
|
1170 |
+
sup.filter(UserWarning)
|
1171 |
+
res = differential_evolution(f, bounds, seed=1234,
|
1172 |
+
constraints=constraints, popsize=3)
|
1173 |
+
|
1174 |
+
x_opt = (2.171996, 2.363683, 8.773926, 5.095984, 0.9906548,
|
1175 |
+
1.430574, 1.321644, 9.828726, 8.280092, 8.375927)
|
1176 |
+
f_opt = 24.3062091
|
1177 |
+
|
1178 |
+
assert_allclose(f(x_opt), f_opt, atol=1e-5)
|
1179 |
+
assert_allclose(res.x, x_opt, atol=1e-6)
|
1180 |
+
assert_allclose(res.fun, f_opt, atol=1e-5)
|
1181 |
+
assert res.success
|
1182 |
+
assert_(np.all(A @ res.x >= b))
|
1183 |
+
assert_(np.all(np.array(c1(res.x)) >= 0))
|
1184 |
+
assert_(np.all(res.x >= np.array(bounds)[:, 0]))
|
1185 |
+
assert_(np.all(res.x <= np.array(bounds)[:, 1]))
|
1186 |
+
|
1187 |
+
def test_L4(self):
|
1188 |
+
# Lampinen ([5]) test problem 4
|
1189 |
+
def f(x):
|
1190 |
+
return np.sum(x[:3])
|
1191 |
+
|
1192 |
+
A = np.zeros((4, 9))
|
1193 |
+
A[1, [4, 6]] = 0.0025, 0.0025
|
1194 |
+
A[2, [5, 7, 4]] = 0.0025, 0.0025, -0.0025
|
1195 |
+
A[3, [8, 5]] = 0.01, -0.01
|
1196 |
+
A = A[1:, 1:]
|
1197 |
+
b = np.array([1, 1, 1])
|
1198 |
+
|
1199 |
+
def c1(x):
|
1200 |
+
x = np.hstack(([0], x)) # 1-indexed to match reference
|
1201 |
+
return [x[1]*x[6] - 833.33252*x[4] - 100*x[1] + 83333.333,
|
1202 |
+
x[2]*x[7] - 1250*x[5] - x[2]*x[4] + 1250*x[4],
|
1203 |
+
x[3]*x[8] - 1250000 - x[3]*x[5] + 2500*x[5]]
|
1204 |
+
|
1205 |
+
L = LinearConstraint(A, -np.inf, 1)
|
1206 |
+
N = NonlinearConstraint(c1, 0, np.inf)
|
1207 |
+
|
1208 |
+
bounds = [(100, 10000)] + [(1000, 10000)]*2 + [(10, 1000)]*5
|
1209 |
+
constraints = (L, N)
|
1210 |
+
|
1211 |
+
with suppress_warnings() as sup:
|
1212 |
+
sup.filter(UserWarning)
|
1213 |
+
res = differential_evolution(f, bounds, strategy='rand1bin',
|
1214 |
+
seed=1234, constraints=constraints,
|
1215 |
+
popsize=3)
|
1216 |
+
|
1217 |
+
f_opt = 7049.248
|
1218 |
+
|
1219 |
+
x_opt = [579.306692, 1359.97063, 5109.9707, 182.0177, 295.601172,
|
1220 |
+
217.9823, 286.416528, 395.601172]
|
1221 |
+
|
1222 |
+
assert_allclose(f(x_opt), f_opt, atol=0.001)
|
1223 |
+
assert_allclose(res.fun, f_opt, atol=0.001)
|
1224 |
+
|
1225 |
+
# use higher tol here for 32-bit Windows, see gh-11693
|
1226 |
+
if (platform.system() == 'Windows' and np.dtype(np.intp).itemsize < 8):
|
1227 |
+
assert_allclose(res.x, x_opt, rtol=2.4e-6, atol=0.0035)
|
1228 |
+
else:
|
1229 |
+
# tolerance determined from macOS + MKL failure, see gh-12701
|
1230 |
+
assert_allclose(res.x, x_opt, rtol=5e-6, atol=0.0024)
|
1231 |
+
|
1232 |
+
assert res.success
|
1233 |
+
assert_(np.all(A @ res.x <= b))
|
1234 |
+
assert_(np.all(np.array(c1(res.x)) >= 0))
|
1235 |
+
assert_(np.all(res.x >= np.array(bounds)[:, 0]))
|
1236 |
+
assert_(np.all(res.x <= np.array(bounds)[:, 1]))
|
1237 |
+
|
1238 |
+
def test_L5(self):
|
1239 |
+
# Lampinen ([5]) test problem 5
|
1240 |
+
|
1241 |
+
def f(x):
|
1242 |
+
x = np.hstack(([0], x)) # 1-indexed to match reference
|
1243 |
+
fun = (np.sin(2*np.pi*x[1])**3*np.sin(2*np.pi*x[2]) /
|
1244 |
+
(x[1]**3*(x[1]+x[2])))
|
1245 |
+
return -fun # maximize
|
1246 |
+
|
1247 |
+
def c1(x):
|
1248 |
+
x = np.hstack(([0], x)) # 1-indexed to match reference
|
1249 |
+
return [x[1]**2 - x[2] + 1,
|
1250 |
+
1 - x[1] + (x[2]-4)**2]
|
1251 |
+
|
1252 |
+
N = NonlinearConstraint(c1, -np.inf, 0)
|
1253 |
+
bounds = [(0, 10)]*2
|
1254 |
+
constraints = (N)
|
1255 |
+
|
1256 |
+
res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234,
|
1257 |
+
constraints=constraints)
|
1258 |
+
|
1259 |
+
x_opt = (1.22797135, 4.24537337)
|
1260 |
+
f_opt = -0.095825
|
1261 |
+
assert_allclose(f(x_opt), f_opt, atol=2e-5)
|
1262 |
+
assert_allclose(res.fun, f_opt, atol=1e-4)
|
1263 |
+
assert res.success
|
1264 |
+
assert_(np.all(np.array(c1(res.x)) <= 0))
|
1265 |
+
assert_(np.all(res.x >= np.array(bounds)[:, 0]))
|
1266 |
+
assert_(np.all(res.x <= np.array(bounds)[:, 1]))
|
1267 |
+
|
1268 |
+
def test_L6(self):
|
1269 |
+
# Lampinen ([5]) test problem 6
|
1270 |
+
def f(x):
|
1271 |
+
x = np.hstack(([0], x)) # 1-indexed to match reference
|
1272 |
+
fun = (x[1]-10)**3 + (x[2] - 20)**3
|
1273 |
+
return fun
|
1274 |
+
|
1275 |
+
def c1(x):
|
1276 |
+
x = np.hstack(([0], x)) # 1-indexed to match reference
|
1277 |
+
return [(x[1]-5)**2 + (x[2] - 5)**2 - 100,
|
1278 |
+
-(x[1]-6)**2 - (x[2] - 5)**2 + 82.81]
|
1279 |
+
|
1280 |
+
N = NonlinearConstraint(c1, 0, np.inf)
|
1281 |
+
bounds = [(13, 100), (0, 100)]
|
1282 |
+
constraints = (N)
|
1283 |
+
res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234,
|
1284 |
+
constraints=constraints, tol=1e-7)
|
1285 |
+
x_opt = (14.095, 0.84296)
|
1286 |
+
f_opt = -6961.814744
|
1287 |
+
|
1288 |
+
assert_allclose(f(x_opt), f_opt, atol=1e-6)
|
1289 |
+
assert_allclose(res.fun, f_opt, atol=0.001)
|
1290 |
+
assert_allclose(res.x, x_opt, atol=1e-4)
|
1291 |
+
assert res.success
|
1292 |
+
assert_(np.all(np.array(c1(res.x)) >= 0))
|
1293 |
+
assert_(np.all(res.x >= np.array(bounds)[:, 0]))
|
1294 |
+
assert_(np.all(res.x <= np.array(bounds)[:, 1]))
|
1295 |
+
|
1296 |
+
def test_L7(self):
|
1297 |
+
# Lampinen ([5]) test problem 7
|
1298 |
+
def f(x):
|
1299 |
+
x = np.hstack(([0], x)) # 1-indexed to match reference
|
1300 |
+
fun = (5.3578547*x[3]**2 + 0.8356891*x[1]*x[5] +
|
1301 |
+
37.293239*x[1] - 40792.141)
|
1302 |
+
return fun
|
1303 |
+
|
1304 |
+
def c1(x):
|
1305 |
+
x = np.hstack(([0], x)) # 1-indexed to match reference
|
1306 |
+
return [
|
1307 |
+
85.334407 + 0.0056858*x[2]*x[5] + 0.0006262*x[1]*x[4] -
|
1308 |
+
0.0022053*x[3]*x[5],
|
1309 |
+
|
1310 |
+
80.51249 + 0.0071317*x[2]*x[5] + 0.0029955*x[1]*x[2] +
|
1311 |
+
0.0021813*x[3]**2,
|
1312 |
+
|
1313 |
+
9.300961 + 0.0047026*x[3]*x[5] + 0.0012547*x[1]*x[3] +
|
1314 |
+
0.0019085*x[3]*x[4]
|
1315 |
+
]
|
1316 |
+
|
1317 |
+
N = NonlinearConstraint(c1, [0, 90, 20], [92, 110, 25])
|
1318 |
+
|
1319 |
+
bounds = [(78, 102), (33, 45)] + [(27, 45)]*3
|
1320 |
+
constraints = (N)
|
1321 |
+
|
1322 |
+
res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234,
|
1323 |
+
constraints=constraints)
|
1324 |
+
|
1325 |
+
# using our best solution, rather than Lampinen/Koziel. Koziel solution
|
1326 |
+
# doesn't satisfy constraints, Lampinen f_opt just plain wrong.
|
1327 |
+
x_opt = [78.00000686, 33.00000362, 29.99526064, 44.99999971,
|
1328 |
+
36.77579979]
|
1329 |
+
|
1330 |
+
f_opt = -30665.537578
|
1331 |
+
|
1332 |
+
assert_allclose(f(x_opt), f_opt)
|
1333 |
+
assert_allclose(res.x, x_opt, atol=1e-3)
|
1334 |
+
assert_allclose(res.fun, f_opt, atol=1e-3)
|
1335 |
+
|
1336 |
+
assert res.success
|
1337 |
+
assert_(np.all(np.array(c1(res.x)) >= np.array([0, 90, 20])))
|
1338 |
+
assert_(np.all(np.array(c1(res.x)) <= np.array([92, 110, 25])))
|
1339 |
+
assert_(np.all(res.x >= np.array(bounds)[:, 0]))
|
1340 |
+
assert_(np.all(res.x <= np.array(bounds)[:, 1]))
|
1341 |
+
|
1342 |
+
@pytest.mark.slow
|
1343 |
+
@pytest.mark.xfail(platform.machine() == 'ppc64le',
|
1344 |
+
reason="fails on ppc64le")
|
1345 |
+
def test_L8(self):
|
1346 |
+
def f(x):
|
1347 |
+
x = np.hstack(([0], x)) # 1-indexed to match reference
|
1348 |
+
fun = 3*x[1] + 0.000001*x[1]**3 + 2*x[2] + 0.000002/3*x[2]**3
|
1349 |
+
return fun
|
1350 |
+
|
1351 |
+
A = np.zeros((3, 5))
|
1352 |
+
A[1, [4, 3]] = 1, -1
|
1353 |
+
A[2, [3, 4]] = 1, -1
|
1354 |
+
A = A[1:, 1:]
|
1355 |
+
b = np.array([-.55, -.55])
|
1356 |
+
|
1357 |
+
def c1(x):
|
1358 |
+
x = np.hstack(([0], x)) # 1-indexed to match reference
|
1359 |
+
return [
|
1360 |
+
1000*np.sin(-x[3]-0.25) + 1000*np.sin(-x[4]-0.25) +
|
1361 |
+
894.8 - x[1],
|
1362 |
+
1000*np.sin(x[3]-0.25) + 1000*np.sin(x[3]-x[4]-0.25) +
|
1363 |
+
894.8 - x[2],
|
1364 |
+
1000*np.sin(x[4]-0.25) + 1000*np.sin(x[4]-x[3]-0.25) +
|
1365 |
+
1294.8
|
1366 |
+
]
|
1367 |
+
L = LinearConstraint(A, b, np.inf)
|
1368 |
+
N = NonlinearConstraint(c1, np.full(3, -0.001), np.full(3, 0.001))
|
1369 |
+
|
1370 |
+
bounds = [(0, 1200)]*2+[(-.55, .55)]*2
|
1371 |
+
constraints = (L, N)
|
1372 |
+
|
1373 |
+
with suppress_warnings() as sup:
|
1374 |
+
sup.filter(UserWarning)
|
1375 |
+
# original Lampinen test was with rand1bin, but that takes a
|
1376 |
+
# huge amount of CPU time. Changing strategy to best1bin speeds
|
1377 |
+
# things up a lot
|
1378 |
+
res = differential_evolution(f, bounds, strategy='best1bin',
|
1379 |
+
seed=1234, constraints=constraints,
|
1380 |
+
maxiter=5000)
|
1381 |
+
|
1382 |
+
x_opt = (679.9453, 1026.067, 0.1188764, -0.3962336)
|
1383 |
+
f_opt = 5126.4981
|
1384 |
+
|
1385 |
+
assert_allclose(f(x_opt), f_opt, atol=1e-3)
|
1386 |
+
assert_allclose(res.x[:2], x_opt[:2], atol=2e-3)
|
1387 |
+
assert_allclose(res.x[2:], x_opt[2:], atol=2e-3)
|
1388 |
+
assert_allclose(res.fun, f_opt, atol=2e-2)
|
1389 |
+
assert res.success
|
1390 |
+
assert_(np.all([email protected] >= b))
|
1391 |
+
assert_(np.all(np.array(c1(res.x)) >= -0.001))
|
1392 |
+
assert_(np.all(np.array(c1(res.x)) <= 0.001))
|
1393 |
+
assert_(np.all(res.x >= np.array(bounds)[:, 0]))
|
1394 |
+
assert_(np.all(res.x <= np.array(bounds)[:, 1]))
|
1395 |
+
|
1396 |
+
def test_L9(self):
|
1397 |
+
# Lampinen ([5]) test problem 9
|
1398 |
+
|
1399 |
+
def f(x):
|
1400 |
+
x = np.hstack(([0], x)) # 1-indexed to match reference
|
1401 |
+
return x[1]**2 + (x[2]-1)**2
|
1402 |
+
|
1403 |
+
def c1(x):
|
1404 |
+
x = np.hstack(([0], x)) # 1-indexed to match reference
|
1405 |
+
return [x[2] - x[1]**2]
|
1406 |
+
|
1407 |
+
N = NonlinearConstraint(c1, [-.001], [0.001])
|
1408 |
+
|
1409 |
+
bounds = [(-1, 1)]*2
|
1410 |
+
constraints = (N)
|
1411 |
+
res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234,
|
1412 |
+
constraints=constraints)
|
1413 |
+
|
1414 |
+
x_opt = [np.sqrt(2)/2, 0.5]
|
1415 |
+
f_opt = 0.75
|
1416 |
+
|
1417 |
+
assert_allclose(f(x_opt), f_opt)
|
1418 |
+
assert_allclose(np.abs(res.x), x_opt, atol=1e-3)
|
1419 |
+
assert_allclose(res.fun, f_opt, atol=1e-3)
|
1420 |
+
assert res.success
|
1421 |
+
assert_(np.all(np.array(c1(res.x)) >= -0.001))
|
1422 |
+
assert_(np.all(np.array(c1(res.x)) <= 0.001))
|
1423 |
+
assert_(np.all(res.x >= np.array(bounds)[:, 0]))
|
1424 |
+
assert_(np.all(res.x <= np.array(bounds)[:, 1]))
|
1425 |
+
|
1426 |
+
def test_integrality(self):
|
1427 |
+
# test fitting discrete distribution to data
|
1428 |
+
rng = np.random.default_rng(6519843218105)
|
1429 |
+
dist = stats.nbinom
|
1430 |
+
shapes = (5, 0.5)
|
1431 |
+
x = dist.rvs(*shapes, size=10000, random_state=rng)
|
1432 |
+
|
1433 |
+
def func(p, *args):
|
1434 |
+
dist, x = args
|
1435 |
+
# negative log-likelihood function
|
1436 |
+
ll = -np.log(dist.pmf(x, *p)).sum(axis=-1)
|
1437 |
+
if np.isnan(ll): # occurs when x is outside of support
|
1438 |
+
ll = np.inf # we don't want that
|
1439 |
+
return ll
|
1440 |
+
|
1441 |
+
integrality = [True, False]
|
1442 |
+
bounds = [(1, 18), (0, 0.95)]
|
1443 |
+
|
1444 |
+
res = differential_evolution(func, bounds, args=(dist, x),
|
1445 |
+
integrality=integrality, polish=False,
|
1446 |
+
seed=rng)
|
1447 |
+
# tolerance has to be fairly relaxed for the second parameter
|
1448 |
+
# because we're fitting a distribution to random variates.
|
1449 |
+
assert res.x[0] == 5
|
1450 |
+
assert_allclose(res.x, shapes, rtol=0.025)
|
1451 |
+
|
1452 |
+
# check that we can still use integrality constraints with polishing
|
1453 |
+
res2 = differential_evolution(func, bounds, args=(dist, x),
|
1454 |
+
integrality=integrality, polish=True,
|
1455 |
+
seed=rng)
|
1456 |
+
|
1457 |
+
def func2(p, *args):
|
1458 |
+
n, dist, x = args
|
1459 |
+
return func(np.array([n, p[0]]), dist, x)
|
1460 |
+
|
1461 |
+
# compare the DE derived solution to an LBFGSB solution (that doesn't
|
1462 |
+
# have to find the integral values). Note we're setting x0 to be the
|
1463 |
+
# output from the first DE result, thereby making the polishing step
|
1464 |
+
# and this minimisation pretty much equivalent.
|
1465 |
+
LBFGSB = minimize(func2, res2.x[1], args=(5, dist, x),
|
1466 |
+
bounds=[(0, 0.95)])
|
1467 |
+
assert_allclose(res2.x[1], LBFGSB.x)
|
1468 |
+
assert res2.fun <= res.fun
|
1469 |
+
|
1470 |
+
def test_integrality_limits(self):
|
1471 |
+
def f(x):
|
1472 |
+
return x
|
1473 |
+
|
1474 |
+
integrality = [True, False, True]
|
1475 |
+
bounds = [(0.2, 1.1), (0.9, 2.2), (3.3, 4.9)]
|
1476 |
+
|
1477 |
+
# no integrality constraints
|
1478 |
+
solver = DifferentialEvolutionSolver(f, bounds=bounds, polish=False,
|
1479 |
+
integrality=False)
|
1480 |
+
assert_allclose(solver.limits[0], [0.2, 0.9, 3.3])
|
1481 |
+
assert_allclose(solver.limits[1], [1.1, 2.2, 4.9])
|
1482 |
+
|
1483 |
+
# with integrality constraints
|
1484 |
+
solver = DifferentialEvolutionSolver(f, bounds=bounds, polish=False,
|
1485 |
+
integrality=integrality)
|
1486 |
+
assert_allclose(solver.limits[0], [0.5, 0.9, 3.5])
|
1487 |
+
assert_allclose(solver.limits[1], [1.5, 2.2, 4.5])
|
1488 |
+
assert_equal(solver.integrality, [True, False, True])
|
1489 |
+
assert solver.polish is False
|
1490 |
+
|
1491 |
+
bounds = [(-1.2, -0.9), (0.9, 2.2), (-10.3, 4.1)]
|
1492 |
+
solver = DifferentialEvolutionSolver(f, bounds=bounds, polish=False,
|
1493 |
+
integrality=integrality)
|
1494 |
+
assert_allclose(solver.limits[0], [-1.5, 0.9, -10.5])
|
1495 |
+
assert_allclose(solver.limits[1], [-0.5, 2.2, 4.5])
|
1496 |
+
|
1497 |
+
# A lower bound of -1.2 is converted to
|
1498 |
+
# np.nextafter(np.ceil(-1.2) - 0.5, np.inf)
|
1499 |
+
# with a similar process to the upper bound. Check that the
|
1500 |
+
# conversions work
|
1501 |
+
assert_allclose(np.round(solver.limits[0]), [-1.0, 1.0, -10.0])
|
1502 |
+
assert_allclose(np.round(solver.limits[1]), [-1.0, 2.0, 4.0])
|
1503 |
+
|
1504 |
+
bounds = [(-10.2, -8.1), (0.9, 2.2), (-10.9, -9.9999)]
|
1505 |
+
solver = DifferentialEvolutionSolver(f, bounds=bounds, polish=False,
|
1506 |
+
integrality=integrality)
|
1507 |
+
assert_allclose(solver.limits[0], [-10.5, 0.9, -10.5])
|
1508 |
+
assert_allclose(solver.limits[1], [-8.5, 2.2, -9.5])
|
1509 |
+
|
1510 |
+
bounds = [(-10.2, -10.1), (0.9, 2.2), (-10.9, -9.9999)]
|
1511 |
+
with pytest.raises(ValueError, match='One of the integrality'):
|
1512 |
+
DifferentialEvolutionSolver(f, bounds=bounds, polish=False,
|
1513 |
+
integrality=integrality)
|
1514 |
+
|
1515 |
+
def test_vectorized(self):
|
1516 |
+
def quadratic(x):
|
1517 |
+
return np.sum(x**2)
|
1518 |
+
|
1519 |
+
def quadratic_vec(x):
|
1520 |
+
return np.sum(x**2, axis=0)
|
1521 |
+
|
1522 |
+
# A vectorized function needs to accept (len(x), S) and return (S,)
|
1523 |
+
with pytest.raises(RuntimeError, match='The vectorized function'):
|
1524 |
+
differential_evolution(quadratic, self.bounds,
|
1525 |
+
vectorized=True, updating='deferred')
|
1526 |
+
|
1527 |
+
# vectorized overrides the updating keyword, check for warning
|
1528 |
+
with warns(UserWarning, match="differential_evolution: the 'vector"):
|
1529 |
+
differential_evolution(quadratic_vec, self.bounds,
|
1530 |
+
vectorized=True)
|
1531 |
+
|
1532 |
+
# vectorized defers to the workers keyword, check for warning
|
1533 |
+
with warns(UserWarning, match="differential_evolution: the 'workers"):
|
1534 |
+
differential_evolution(quadratic_vec, self.bounds,
|
1535 |
+
vectorized=True, workers=map,
|
1536 |
+
updating='deferred')
|
1537 |
+
|
1538 |
+
ncalls = [0]
|
1539 |
+
|
1540 |
+
def rosen_vec(x):
|
1541 |
+
ncalls[0] += 1
|
1542 |
+
return rosen(x)
|
1543 |
+
|
1544 |
+
bounds = [(0, 10), (0, 10)]
|
1545 |
+
res1 = differential_evolution(rosen, bounds, updating='deferred',
|
1546 |
+
seed=1)
|
1547 |
+
res2 = differential_evolution(rosen_vec, bounds, vectorized=True,
|
1548 |
+
updating='deferred', seed=1)
|
1549 |
+
|
1550 |
+
# the two minimisation runs should be functionally equivalent
|
1551 |
+
assert_allclose(res1.x, res2.x)
|
1552 |
+
assert ncalls[0] == res2.nfev
|
1553 |
+
assert res1.nit == res2.nit
|
1554 |
+
|
1555 |
+
def test_vectorized_constraints(self):
|
1556 |
+
def constr_f(x):
|
1557 |
+
return np.array([x[0] + x[1]])
|
1558 |
+
|
1559 |
+
def constr_f2(x):
|
1560 |
+
return np.array([x[0]**2 + x[1], x[0] - x[1]])
|
1561 |
+
|
1562 |
+
nlc1 = NonlinearConstraint(constr_f, -np.inf, 1.9)
|
1563 |
+
nlc2 = NonlinearConstraint(constr_f2, (0.9, 0.5), (2.0, 2.0))
|
1564 |
+
|
1565 |
+
def rosen_vec(x):
|
1566 |
+
# accept an (len(x0), S) array, returning a (S,) array
|
1567 |
+
v = 100 * (x[1:] - x[:-1]**2.0)**2.0
|
1568 |
+
v += (1 - x[:-1])**2.0
|
1569 |
+
return np.squeeze(v)
|
1570 |
+
|
1571 |
+
bounds = [(0, 10), (0, 10)]
|
1572 |
+
|
1573 |
+
res1 = differential_evolution(rosen, bounds, updating='deferred',
|
1574 |
+
seed=1, constraints=[nlc1, nlc2],
|
1575 |
+
polish=False)
|
1576 |
+
res2 = differential_evolution(rosen_vec, bounds, vectorized=True,
|
1577 |
+
updating='deferred', seed=1,
|
1578 |
+
constraints=[nlc1, nlc2],
|
1579 |
+
polish=False)
|
1580 |
+
# the two minimisation runs should be functionally equivalent
|
1581 |
+
assert_allclose(res1.x, res2.x)
|
1582 |
+
|
1583 |
+
def test_constraint_violation_error_message(self):
|
1584 |
+
|
1585 |
+
def func(x):
|
1586 |
+
return np.cos(x[0]) + np.sin(x[1])
|
1587 |
+
|
1588 |
+
# Intentionally infeasible constraints.
|
1589 |
+
c0 = NonlinearConstraint(lambda x: x[1] - (x[0]-1)**2, 0, np.inf)
|
1590 |
+
c1 = NonlinearConstraint(lambda x: x[1] + x[0]**2, -np.inf, 0)
|
1591 |
+
|
1592 |
+
result = differential_evolution(func,
|
1593 |
+
bounds=[(-1, 2), (-1, 1)],
|
1594 |
+
constraints=[c0, c1],
|
1595 |
+
maxiter=10,
|
1596 |
+
polish=False,
|
1597 |
+
seed=864197532)
|
1598 |
+
assert result.success is False
|
1599 |
+
# The numerical value in the error message might be sensitive to
|
1600 |
+
# changes in the implementation. It can be updated if the code is
|
1601 |
+
# changed. The essential part of the test is that there is a number
|
1602 |
+
# after the '=', so if necessary, the text could be reduced to, say,
|
1603 |
+
# "MAXCV = 0.".
|
1604 |
+
assert "MAXCV = 0.414" in result.message
|
1605 |
+
|
1606 |
+
def test_strategy_fn(self):
|
1607 |
+
# examines ability to customize strategy by mimicking one of the
|
1608 |
+
# in-built strategies and comparing to the actual in-built strategy.
|
1609 |
+
parameter_count = 4
|
1610 |
+
popsize = 10
|
1611 |
+
bounds = [(0, 10.)] * parameter_count
|
1612 |
+
total_popsize = parameter_count * popsize
|
1613 |
+
mutation = 0.8
|
1614 |
+
recombination = 0.7
|
1615 |
+
|
1616 |
+
def custom_strategy_fn(candidate, population, rng=None):
|
1617 |
+
trial = np.copy(population[candidate])
|
1618 |
+
fill_point = rng.choice(parameter_count)
|
1619 |
+
|
1620 |
+
pool = np.arange(total_popsize)
|
1621 |
+
rng.shuffle(pool)
|
1622 |
+
|
1623 |
+
idxs = []
|
1624 |
+
while len(idxs) < 2 and len(pool) > 0:
|
1625 |
+
idx = pool[0]
|
1626 |
+
pool = pool[1:]
|
1627 |
+
if idx != candidate:
|
1628 |
+
idxs.append(idx)
|
1629 |
+
|
1630 |
+
r0, r1 = idxs[:2]
|
1631 |
+
|
1632 |
+
bprime = (population[0] + mutation *
|
1633 |
+
(population[r0] - population[r1]))
|
1634 |
+
|
1635 |
+
crossovers = rng.uniform(size=parameter_count)
|
1636 |
+
crossovers = crossovers < recombination
|
1637 |
+
crossovers[fill_point] = True
|
1638 |
+
trial = np.where(crossovers, bprime, trial)
|
1639 |
+
return trial
|
1640 |
+
|
1641 |
+
solver = DifferentialEvolutionSolver(
|
1642 |
+
rosen,
|
1643 |
+
bounds,
|
1644 |
+
popsize=popsize,
|
1645 |
+
recombination=recombination,
|
1646 |
+
mutation=mutation,
|
1647 |
+
maxiter=2,
|
1648 |
+
strategy=custom_strategy_fn,
|
1649 |
+
seed=10,
|
1650 |
+
polish=False
|
1651 |
+
)
|
1652 |
+
assert solver.strategy is custom_strategy_fn
|
1653 |
+
res = solver.solve()
|
1654 |
+
|
1655 |
+
res2 = differential_evolution(
|
1656 |
+
rosen,
|
1657 |
+
bounds,
|
1658 |
+
mutation=mutation,
|
1659 |
+
popsize=popsize,
|
1660 |
+
recombination=recombination,
|
1661 |
+
maxiter=2,
|
1662 |
+
strategy='best1bin',
|
1663 |
+
polish=False,
|
1664 |
+
seed=10
|
1665 |
+
)
|
1666 |
+
assert_allclose(res.population, res2.population)
|
1667 |
+
assert_allclose(res.x, res2.x)
|
1668 |
+
|
1669 |
+
def custom_strategy_fn(candidate, population, rng=None):
|
1670 |
+
return np.array([1.0, 2.0])
|
1671 |
+
|
1672 |
+
with pytest.raises(RuntimeError, match="strategy*"):
|
1673 |
+
differential_evolution(
|
1674 |
+
rosen,
|
1675 |
+
bounds,
|
1676 |
+
strategy=custom_strategy_fn
|
1677 |
+
)
|
venv/lib/python3.10/site-packages/scipy/optimize/tests/test__root.py
ADDED
@@ -0,0 +1,123 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Unit tests for optimization routines from _root.py.
|
3 |
+
"""
|
4 |
+
from numpy.testing import assert_, assert_equal
|
5 |
+
import pytest
|
6 |
+
from pytest import raises as assert_raises, warns as assert_warns
|
7 |
+
import numpy as np
|
8 |
+
|
9 |
+
from scipy.optimize import root
|
10 |
+
|
11 |
+
|
12 |
+
class TestRoot:
|
13 |
+
def test_tol_parameter(self):
|
14 |
+
# Check that the minimize() tol= argument does something
|
15 |
+
def func(z):
|
16 |
+
x, y = z
|
17 |
+
return np.array([x**3 - 1, y**3 - 1])
|
18 |
+
|
19 |
+
def dfunc(z):
|
20 |
+
x, y = z
|
21 |
+
return np.array([[3*x**2, 0], [0, 3*y**2]])
|
22 |
+
|
23 |
+
for method in ['hybr', 'lm', 'broyden1', 'broyden2', 'anderson',
|
24 |
+
'diagbroyden', 'krylov']:
|
25 |
+
if method in ('linearmixing', 'excitingmixing'):
|
26 |
+
# doesn't converge
|
27 |
+
continue
|
28 |
+
|
29 |
+
if method in ('hybr', 'lm'):
|
30 |
+
jac = dfunc
|
31 |
+
else:
|
32 |
+
jac = None
|
33 |
+
|
34 |
+
sol1 = root(func, [1.1,1.1], jac=jac, tol=1e-4, method=method)
|
35 |
+
sol2 = root(func, [1.1,1.1], jac=jac, tol=0.5, method=method)
|
36 |
+
msg = f"{method}: {func(sol1.x)} vs. {func(sol2.x)}"
|
37 |
+
assert_(sol1.success, msg)
|
38 |
+
assert_(sol2.success, msg)
|
39 |
+
assert_(abs(func(sol1.x)).max() < abs(func(sol2.x)).max(),
|
40 |
+
msg)
|
41 |
+
|
42 |
+
def test_tol_norm(self):
|
43 |
+
|
44 |
+
def norm(x):
|
45 |
+
return abs(x[0])
|
46 |
+
|
47 |
+
for method in ['excitingmixing',
|
48 |
+
'diagbroyden',
|
49 |
+
'linearmixing',
|
50 |
+
'anderson',
|
51 |
+
'broyden1',
|
52 |
+
'broyden2',
|
53 |
+
'krylov']:
|
54 |
+
|
55 |
+
root(np.zeros_like, np.zeros(2), method=method,
|
56 |
+
options={"tol_norm": norm})
|
57 |
+
|
58 |
+
def test_minimize_scalar_coerce_args_param(self):
|
59 |
+
# github issue #3503
|
60 |
+
def func(z, f=1):
|
61 |
+
x, y = z
|
62 |
+
return np.array([x**3 - 1, y**3 - f])
|
63 |
+
root(func, [1.1, 1.1], args=1.5)
|
64 |
+
|
65 |
+
def test_f_size(self):
|
66 |
+
# gh8320
|
67 |
+
# check that decreasing the size of the returned array raises an error
|
68 |
+
# and doesn't segfault
|
69 |
+
class fun:
|
70 |
+
def __init__(self):
|
71 |
+
self.count = 0
|
72 |
+
|
73 |
+
def __call__(self, x):
|
74 |
+
self.count += 1
|
75 |
+
|
76 |
+
if not (self.count % 5):
|
77 |
+
ret = x[0] + 0.5 * (x[0] - x[1]) ** 3 - 1.0
|
78 |
+
else:
|
79 |
+
ret = ([x[0] + 0.5 * (x[0] - x[1]) ** 3 - 1.0,
|
80 |
+
0.5 * (x[1] - x[0]) ** 3 + x[1]])
|
81 |
+
|
82 |
+
return ret
|
83 |
+
|
84 |
+
F = fun()
|
85 |
+
with assert_raises(ValueError):
|
86 |
+
root(F, [0.1, 0.0], method='lm')
|
87 |
+
|
88 |
+
def test_gh_10370(self):
|
89 |
+
# gh-10370 reported that passing both `args` and `jac` to `root` with
|
90 |
+
# `method='krylov'` caused a failure. Ensure that this is fixed whether
|
91 |
+
# the gradient is passed via `jac` or as a second output of `fun`.
|
92 |
+
def fun(x, ignored):
|
93 |
+
return [3*x[0] - 0.25*x[1]**2 + 10, 0.1*x[0]**2 + 5*x[1] - 2]
|
94 |
+
|
95 |
+
def grad(x, ignored):
|
96 |
+
return [[3, 0.5 * x[1]], [0.2 * x[0], 5]]
|
97 |
+
|
98 |
+
def fun_grad(x, ignored):
|
99 |
+
return fun(x, ignored), grad(x, ignored)
|
100 |
+
|
101 |
+
x0 = np.zeros(2)
|
102 |
+
|
103 |
+
ref = root(fun, x0, args=(1,), method='krylov')
|
104 |
+
message = 'Method krylov does not use the jacobian'
|
105 |
+
with assert_warns(RuntimeWarning, match=message):
|
106 |
+
res1 = root(fun, x0, args=(1,), method='krylov', jac=grad)
|
107 |
+
with assert_warns(RuntimeWarning, match=message):
|
108 |
+
res2 = root(fun_grad, x0, args=(1,), method='krylov', jac=True)
|
109 |
+
|
110 |
+
assert_equal(res1.x, ref.x)
|
111 |
+
assert_equal(res2.x, ref.x)
|
112 |
+
assert res1.success is res2.success is ref.success is True
|
113 |
+
|
114 |
+
@pytest.mark.parametrize("method", ["hybr", "lm", "broyden1", "broyden2",
|
115 |
+
"anderson", "linearmixing",
|
116 |
+
"diagbroyden", "excitingmixing",
|
117 |
+
"krylov", "df-sane"])
|
118 |
+
def test_method_in_result(self, method):
|
119 |
+
def func(x):
|
120 |
+
return x - 1
|
121 |
+
|
122 |
+
res = root(func, x0=[1], method=method)
|
123 |
+
assert res.method == method
|
venv/lib/python3.10/site-packages/scipy/optimize/tests/test_isotonic_regression.py
ADDED
@@ -0,0 +1,165 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
from numpy.testing import assert_allclose, assert_equal
|
3 |
+
import pytest
|
4 |
+
|
5 |
+
from scipy.optimize._pava_pybind import pava
|
6 |
+
from scipy.optimize import isotonic_regression
|
7 |
+
|
8 |
+
|
9 |
+
class TestIsotonicRegression:
|
10 |
+
@pytest.mark.parametrize(
|
11 |
+
("y", "w", "msg"),
|
12 |
+
[
|
13 |
+
([[0, 1]], None,
|
14 |
+
"array has incorrect number of dimensions: 2; expected 1"),
|
15 |
+
([0, 1], [[1, 2]],
|
16 |
+
"Input arrays y and w must have one dimension of equal length"),
|
17 |
+
([0, 1], [1],
|
18 |
+
"Input arrays y and w must have one dimension of equal length"),
|
19 |
+
(1, 2,
|
20 |
+
"Input arrays y and w must have one dimension of equal length"),
|
21 |
+
([0, 1], [0, 1],
|
22 |
+
"Weights w must be strictly positive"),
|
23 |
+
]
|
24 |
+
)
|
25 |
+
def test_raise_error(self, y, w, msg):
|
26 |
+
with pytest.raises(ValueError, match=msg):
|
27 |
+
isotonic_regression(y=y, weights=w)
|
28 |
+
|
29 |
+
def test_simple_pava(self):
|
30 |
+
# Test case of Busing 2020
|
31 |
+
# https://doi.org/10.18637/jss.v102.c01
|
32 |
+
y = np.array([8, 4, 8, 2, 2, 0, 8], dtype=np.float64)
|
33 |
+
w = np.ones_like(y)
|
34 |
+
r = np.full(shape=y.shape[0] + 1, fill_value=-1, dtype=np.intp)
|
35 |
+
pava(y, w, r)
|
36 |
+
assert_allclose(y, [4, 4, 4, 4, 4, 4, 8])
|
37 |
+
# Only first 2 elements of w are changed.
|
38 |
+
assert_allclose(w, [6, 1, 1, 1, 1, 1, 1])
|
39 |
+
# Only first 3 elements of r are changed.
|
40 |
+
assert_allclose(r, [0, 6, 7, -1, -1, -1, -1, -1])
|
41 |
+
|
42 |
+
@pytest.mark.parametrize("y_dtype", [np.float64, np.float32, np.int64, np.int32])
|
43 |
+
@pytest.mark.parametrize("w_dtype", [np.float64, np.float32, np.int64, np.int32])
|
44 |
+
@pytest.mark.parametrize("w", [None, "ones"])
|
45 |
+
def test_simple_isotonic_regression(self, w, w_dtype, y_dtype):
|
46 |
+
# Test case of Busing 2020
|
47 |
+
# https://doi.org/10.18637/jss.v102.c01
|
48 |
+
y = np.array([8, 4, 8, 2, 2, 0, 8], dtype=y_dtype)
|
49 |
+
if w is not None:
|
50 |
+
w = np.ones_like(y, dtype=w_dtype)
|
51 |
+
res = isotonic_regression(y, weights=w)
|
52 |
+
assert res.x.dtype == np.float64
|
53 |
+
assert res.weights.dtype == np.float64
|
54 |
+
assert_allclose(res.x, [4, 4, 4, 4, 4, 4, 8])
|
55 |
+
assert_allclose(res.weights, [6, 1])
|
56 |
+
assert_allclose(res.blocks, [0, 6, 7])
|
57 |
+
# Assert that y was not overwritten
|
58 |
+
assert_equal(y, np.array([8, 4, 8, 2, 2, 0, 8], dtype=np.float64))
|
59 |
+
|
60 |
+
@pytest.mark.parametrize("increasing", [True, False])
|
61 |
+
def test_linspace(self, increasing):
|
62 |
+
n = 10
|
63 |
+
y = np.linspace(0, 1, n) if increasing else np.linspace(1, 0, n)
|
64 |
+
res = isotonic_regression(y, increasing=increasing)
|
65 |
+
assert_allclose(res.x, y)
|
66 |
+
assert_allclose(res.blocks, np.arange(n + 1))
|
67 |
+
|
68 |
+
def test_weights(self):
|
69 |
+
w = np.array([1, 2, 5, 0.5, 0.5, 0.5, 1, 3])
|
70 |
+
y = np.array([3, 2, 1, 10, 9, 8, 20, 10])
|
71 |
+
res = isotonic_regression(y, weights=w)
|
72 |
+
assert_allclose(res.x, [12/8, 12/8, 12/8, 9, 9, 9, 50/4, 50/4])
|
73 |
+
assert_allclose(res.weights, [8, 1.5, 4])
|
74 |
+
assert_allclose(res.blocks, [0, 3, 6, 8])
|
75 |
+
|
76 |
+
# weights are like repeated observations, we repeat the 3rd element 5
|
77 |
+
# times.
|
78 |
+
w2 = np.array([1, 2, 1, 1, 1, 1, 1, 0.5, 0.5, 0.5, 1, 3])
|
79 |
+
y2 = np.array([3, 2, 1, 1, 1, 1, 1, 10, 9, 8, 20, 10])
|
80 |
+
res2 = isotonic_regression(y2, weights=w2)
|
81 |
+
assert_allclose(np.diff(res2.x[0:7]), 0)
|
82 |
+
assert_allclose(res2.x[4:], res.x)
|
83 |
+
assert_allclose(res2.weights, res.weights)
|
84 |
+
assert_allclose(res2.blocks[1:] - 4, res.blocks[1:])
|
85 |
+
|
86 |
+
def test_against_R_monotone(self):
|
87 |
+
y = [0, 6, 8, 3, 5, 2, 1, 7, 9, 4]
|
88 |
+
res = isotonic_regression(y)
|
89 |
+
# R code
|
90 |
+
# library(monotone)
|
91 |
+
# options(digits=8)
|
92 |
+
# monotone(c(0, 6, 8, 3, 5, 2, 1, 7, 9, 4))
|
93 |
+
x_R = [
|
94 |
+
0, 4.1666667, 4.1666667, 4.1666667, 4.1666667, 4.1666667,
|
95 |
+
4.1666667, 6.6666667, 6.6666667, 6.6666667,
|
96 |
+
]
|
97 |
+
assert_allclose(res.x, x_R)
|
98 |
+
assert_equal(res.blocks, [0, 1, 7, 10])
|
99 |
+
|
100 |
+
n = 100
|
101 |
+
y = np.linspace(0, 1, num=n, endpoint=False)
|
102 |
+
y = 5 * y + np.sin(10 * y)
|
103 |
+
res = isotonic_regression(y)
|
104 |
+
# R code
|
105 |
+
# library(monotone)
|
106 |
+
# n <- 100
|
107 |
+
# y <- 5 * ((1:n)-1)/n + sin(10 * ((1:n)-1)/n)
|
108 |
+
# options(digits=8)
|
109 |
+
# monotone(y)
|
110 |
+
x_R = [
|
111 |
+
0.00000000, 0.14983342, 0.29866933, 0.44552021, 0.58941834, 0.72942554,
|
112 |
+
0.86464247, 0.99421769, 1.11735609, 1.23332691, 1.34147098, 1.44120736,
|
113 |
+
1.53203909, 1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100,
|
114 |
+
1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100,
|
115 |
+
1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100,
|
116 |
+
1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100,
|
117 |
+
1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100,
|
118 |
+
1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100,
|
119 |
+
1.57081100, 1.57081100, 1.57081100, 1.62418532, 1.71654534, 1.81773256,
|
120 |
+
1.92723551, 2.04445967, 2.16873336, 2.29931446, 2.43539782, 2.57612334,
|
121 |
+
2.72058450, 2.86783750, 3.01691060, 3.16681390, 3.31654920, 3.46511999,
|
122 |
+
3.61154136, 3.75484992, 3.89411335, 4.02843976, 4.15698660, 4.27896904,
|
123 |
+
4.39366786, 4.50043662, 4.59870810, 4.68799998, 4.76791967, 4.83816823,
|
124 |
+
4.86564130, 4.86564130, 4.86564130, 4.86564130, 4.86564130, 4.86564130,
|
125 |
+
4.86564130, 4.86564130, 4.86564130, 4.86564130, 4.86564130, 4.86564130,
|
126 |
+
4.86564130, 4.86564130, 4.86564130, 4.86564130, 4.86564130, 4.86564130,
|
127 |
+
4.86564130, 4.86564130, 4.86564130, 4.86564130,
|
128 |
+
]
|
129 |
+
assert_allclose(res.x, x_R)
|
130 |
+
|
131 |
+
# Test increasing
|
132 |
+
assert np.all(np.diff(res.x) >= 0)
|
133 |
+
|
134 |
+
# Test balance property: sum(y) == sum(x)
|
135 |
+
assert_allclose(np.sum(res.x), np.sum(y))
|
136 |
+
|
137 |
+
# Reverse order
|
138 |
+
res_inv = isotonic_regression(-y, increasing=False)
|
139 |
+
assert_allclose(-res_inv.x, res.x)
|
140 |
+
assert_equal(res_inv.blocks, res.blocks)
|
141 |
+
|
142 |
+
def test_readonly(self):
|
143 |
+
x = np.arange(3, dtype=float)
|
144 |
+
w = np.ones(3, dtype=float)
|
145 |
+
|
146 |
+
x.flags.writeable = False
|
147 |
+
w.flags.writeable = False
|
148 |
+
|
149 |
+
res = isotonic_regression(x, weights=w)
|
150 |
+
assert np.all(np.isfinite(res.x))
|
151 |
+
assert np.all(np.isfinite(res.weights))
|
152 |
+
assert np.all(np.isfinite(res.blocks))
|
153 |
+
|
154 |
+
def test_non_contiguous_arrays(self):
|
155 |
+
x = np.arange(10, dtype=float)[::3]
|
156 |
+
w = np.ones(10, dtype=float)[::3]
|
157 |
+
assert not x.flags.c_contiguous
|
158 |
+
assert not x.flags.f_contiguous
|
159 |
+
assert not w.flags.c_contiguous
|
160 |
+
assert not w.flags.f_contiguous
|
161 |
+
|
162 |
+
res = isotonic_regression(x, weights=w)
|
163 |
+
assert np.all(np.isfinite(res.x))
|
164 |
+
assert np.all(np.isfinite(res.weights))
|
165 |
+
assert np.all(np.isfinite(res.blocks))
|
venv/lib/python3.10/site-packages/scipy/optimize/tests/test_linesearch.py
ADDED
@@ -0,0 +1,314 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Tests for line search routines
|
3 |
+
"""
|
4 |
+
from numpy.testing import (assert_equal, assert_array_almost_equal,
|
5 |
+
assert_array_almost_equal_nulp, assert_warns,
|
6 |
+
suppress_warnings)
|
7 |
+
import scipy.optimize._linesearch as ls
|
8 |
+
from scipy.optimize._linesearch import LineSearchWarning
|
9 |
+
import numpy as np
|
10 |
+
|
11 |
+
|
12 |
+
def assert_wolfe(s, phi, derphi, c1=1e-4, c2=0.9, err_msg=""):
|
13 |
+
"""
|
14 |
+
Check that strong Wolfe conditions apply
|
15 |
+
"""
|
16 |
+
phi1 = phi(s)
|
17 |
+
phi0 = phi(0)
|
18 |
+
derphi0 = derphi(0)
|
19 |
+
derphi1 = derphi(s)
|
20 |
+
msg = (f"s = {s}; phi(0) = {phi0}; phi(s) = {phi1}; phi'(0) = {derphi0};"
|
21 |
+
f" phi'(s) = {derphi1}; {err_msg}")
|
22 |
+
|
23 |
+
assert phi1 <= phi0 + c1*s*derphi0, "Wolfe 1 failed: " + msg
|
24 |
+
assert abs(derphi1) <= abs(c2*derphi0), "Wolfe 2 failed: " + msg
|
25 |
+
|
26 |
+
|
27 |
+
def assert_armijo(s, phi, c1=1e-4, err_msg=""):
|
28 |
+
"""
|
29 |
+
Check that Armijo condition applies
|
30 |
+
"""
|
31 |
+
phi1 = phi(s)
|
32 |
+
phi0 = phi(0)
|
33 |
+
msg = f"s = {s}; phi(0) = {phi0}; phi(s) = {phi1}; {err_msg}"
|
34 |
+
assert phi1 <= (1 - c1*s)*phi0, msg
|
35 |
+
|
36 |
+
|
37 |
+
def assert_line_wolfe(x, p, s, f, fprime, **kw):
|
38 |
+
assert_wolfe(s, phi=lambda sp: f(x + p*sp),
|
39 |
+
derphi=lambda sp: np.dot(fprime(x + p*sp), p), **kw)
|
40 |
+
|
41 |
+
|
42 |
+
def assert_line_armijo(x, p, s, f, **kw):
|
43 |
+
assert_armijo(s, phi=lambda sp: f(x + p*sp), **kw)
|
44 |
+
|
45 |
+
|
46 |
+
def assert_fp_equal(x, y, err_msg="", nulp=50):
|
47 |
+
"""Assert two arrays are equal, up to some floating-point rounding error"""
|
48 |
+
try:
|
49 |
+
assert_array_almost_equal_nulp(x, y, nulp)
|
50 |
+
except AssertionError as e:
|
51 |
+
raise AssertionError(f"{e}\n{err_msg}") from e
|
52 |
+
|
53 |
+
|
54 |
+
class TestLineSearch:
|
55 |
+
# -- scalar functions; must have dphi(0.) < 0
|
56 |
+
def _scalar_func_1(self, s): # skip name check
|
57 |
+
self.fcount += 1
|
58 |
+
p = -s - s**3 + s**4
|
59 |
+
dp = -1 - 3*s**2 + 4*s**3
|
60 |
+
return p, dp
|
61 |
+
|
62 |
+
def _scalar_func_2(self, s): # skip name check
|
63 |
+
self.fcount += 1
|
64 |
+
p = np.exp(-4*s) + s**2
|
65 |
+
dp = -4*np.exp(-4*s) + 2*s
|
66 |
+
return p, dp
|
67 |
+
|
68 |
+
def _scalar_func_3(self, s): # skip name check
|
69 |
+
self.fcount += 1
|
70 |
+
p = -np.sin(10*s)
|
71 |
+
dp = -10*np.cos(10*s)
|
72 |
+
return p, dp
|
73 |
+
|
74 |
+
# -- n-d functions
|
75 |
+
|
76 |
+
def _line_func_1(self, x): # skip name check
|
77 |
+
self.fcount += 1
|
78 |
+
f = np.dot(x, x)
|
79 |
+
df = 2*x
|
80 |
+
return f, df
|
81 |
+
|
82 |
+
def _line_func_2(self, x): # skip name check
|
83 |
+
self.fcount += 1
|
84 |
+
f = np.dot(x, np.dot(self.A, x)) + 1
|
85 |
+
df = np.dot(self.A + self.A.T, x)
|
86 |
+
return f, df
|
87 |
+
|
88 |
+
# --
|
89 |
+
|
90 |
+
def setup_method(self):
|
91 |
+
self.scalar_funcs = []
|
92 |
+
self.line_funcs = []
|
93 |
+
self.N = 20
|
94 |
+
self.fcount = 0
|
95 |
+
|
96 |
+
def bind_index(func, idx):
|
97 |
+
# Remember Python's closure semantics!
|
98 |
+
return lambda *a, **kw: func(*a, **kw)[idx]
|
99 |
+
|
100 |
+
for name in sorted(dir(self)):
|
101 |
+
if name.startswith('_scalar_func_'):
|
102 |
+
value = getattr(self, name)
|
103 |
+
self.scalar_funcs.append(
|
104 |
+
(name, bind_index(value, 0), bind_index(value, 1)))
|
105 |
+
elif name.startswith('_line_func_'):
|
106 |
+
value = getattr(self, name)
|
107 |
+
self.line_funcs.append(
|
108 |
+
(name, bind_index(value, 0), bind_index(value, 1)))
|
109 |
+
|
110 |
+
np.random.seed(1234)
|
111 |
+
self.A = np.random.randn(self.N, self.N)
|
112 |
+
|
113 |
+
def scalar_iter(self):
|
114 |
+
for name, phi, derphi in self.scalar_funcs:
|
115 |
+
for old_phi0 in np.random.randn(3):
|
116 |
+
yield name, phi, derphi, old_phi0
|
117 |
+
|
118 |
+
def line_iter(self):
|
119 |
+
for name, f, fprime in self.line_funcs:
|
120 |
+
k = 0
|
121 |
+
while k < 9:
|
122 |
+
x = np.random.randn(self.N)
|
123 |
+
p = np.random.randn(self.N)
|
124 |
+
if np.dot(p, fprime(x)) >= 0:
|
125 |
+
# always pick a descent direction
|
126 |
+
continue
|
127 |
+
k += 1
|
128 |
+
old_fv = float(np.random.randn())
|
129 |
+
yield name, f, fprime, x, p, old_fv
|
130 |
+
|
131 |
+
# -- Generic scalar searches
|
132 |
+
|
133 |
+
def test_scalar_search_wolfe1(self):
|
134 |
+
c = 0
|
135 |
+
for name, phi, derphi, old_phi0 in self.scalar_iter():
|
136 |
+
c += 1
|
137 |
+
s, phi1, phi0 = ls.scalar_search_wolfe1(phi, derphi, phi(0),
|
138 |
+
old_phi0, derphi(0))
|
139 |
+
assert_fp_equal(phi0, phi(0), name)
|
140 |
+
assert_fp_equal(phi1, phi(s), name)
|
141 |
+
assert_wolfe(s, phi, derphi, err_msg=name)
|
142 |
+
|
143 |
+
assert c > 3 # check that the iterator really works...
|
144 |
+
|
145 |
+
def test_scalar_search_wolfe2(self):
|
146 |
+
for name, phi, derphi, old_phi0 in self.scalar_iter():
|
147 |
+
s, phi1, phi0, derphi1 = ls.scalar_search_wolfe2(
|
148 |
+
phi, derphi, phi(0), old_phi0, derphi(0))
|
149 |
+
assert_fp_equal(phi0, phi(0), name)
|
150 |
+
assert_fp_equal(phi1, phi(s), name)
|
151 |
+
if derphi1 is not None:
|
152 |
+
assert_fp_equal(derphi1, derphi(s), name)
|
153 |
+
assert_wolfe(s, phi, derphi, err_msg=f"{name} {old_phi0:g}")
|
154 |
+
|
155 |
+
def test_scalar_search_wolfe2_with_low_amax(self):
|
156 |
+
def phi(alpha):
|
157 |
+
return (alpha - 5) ** 2
|
158 |
+
|
159 |
+
def derphi(alpha):
|
160 |
+
return 2 * (alpha - 5)
|
161 |
+
|
162 |
+
alpha_star, _, _, derphi_star = ls.scalar_search_wolfe2(phi, derphi, amax=0.001)
|
163 |
+
assert alpha_star is None # Not converged
|
164 |
+
assert derphi_star is None # Not converged
|
165 |
+
|
166 |
+
def test_scalar_search_wolfe2_regression(self):
|
167 |
+
# Regression test for gh-12157
|
168 |
+
# This phi has its minimum at alpha=4/3 ~ 1.333.
|
169 |
+
def phi(alpha):
|
170 |
+
if alpha < 1:
|
171 |
+
return - 3*np.pi/2 * (alpha - 1)
|
172 |
+
else:
|
173 |
+
return np.cos(3*np.pi/2 * alpha - np.pi)
|
174 |
+
|
175 |
+
def derphi(alpha):
|
176 |
+
if alpha < 1:
|
177 |
+
return - 3*np.pi/2
|
178 |
+
else:
|
179 |
+
return - 3*np.pi/2 * np.sin(3*np.pi/2 * alpha - np.pi)
|
180 |
+
|
181 |
+
s, _, _, _ = ls.scalar_search_wolfe2(phi, derphi)
|
182 |
+
# Without the fix in gh-13073, the scalar_search_wolfe2
|
183 |
+
# returned s=2.0 instead.
|
184 |
+
assert s < 1.5
|
185 |
+
|
186 |
+
def test_scalar_search_armijo(self):
|
187 |
+
for name, phi, derphi, old_phi0 in self.scalar_iter():
|
188 |
+
s, phi1 = ls.scalar_search_armijo(phi, phi(0), derphi(0))
|
189 |
+
assert_fp_equal(phi1, phi(s), name)
|
190 |
+
assert_armijo(s, phi, err_msg=f"{name} {old_phi0:g}")
|
191 |
+
|
192 |
+
# -- Generic line searches
|
193 |
+
|
194 |
+
def test_line_search_wolfe1(self):
|
195 |
+
c = 0
|
196 |
+
smax = 100
|
197 |
+
for name, f, fprime, x, p, old_f in self.line_iter():
|
198 |
+
f0 = f(x)
|
199 |
+
g0 = fprime(x)
|
200 |
+
self.fcount = 0
|
201 |
+
s, fc, gc, fv, ofv, gv = ls.line_search_wolfe1(f, fprime, x, p,
|
202 |
+
g0, f0, old_f,
|
203 |
+
amax=smax)
|
204 |
+
assert_equal(self.fcount, fc+gc)
|
205 |
+
assert_fp_equal(ofv, f(x))
|
206 |
+
if s is None:
|
207 |
+
continue
|
208 |
+
assert_fp_equal(fv, f(x + s*p))
|
209 |
+
assert_array_almost_equal(gv, fprime(x + s*p), decimal=14)
|
210 |
+
if s < smax:
|
211 |
+
c += 1
|
212 |
+
assert_line_wolfe(x, p, s, f, fprime, err_msg=name)
|
213 |
+
|
214 |
+
assert c > 3 # check that the iterator really works...
|
215 |
+
|
216 |
+
def test_line_search_wolfe2(self):
|
217 |
+
c = 0
|
218 |
+
smax = 512
|
219 |
+
for name, f, fprime, x, p, old_f in self.line_iter():
|
220 |
+
f0 = f(x)
|
221 |
+
g0 = fprime(x)
|
222 |
+
self.fcount = 0
|
223 |
+
with suppress_warnings() as sup:
|
224 |
+
sup.filter(LineSearchWarning,
|
225 |
+
"The line search algorithm could not find a solution")
|
226 |
+
sup.filter(LineSearchWarning,
|
227 |
+
"The line search algorithm did not converge")
|
228 |
+
s, fc, gc, fv, ofv, gv = ls.line_search_wolfe2(f, fprime, x, p,
|
229 |
+
g0, f0, old_f,
|
230 |
+
amax=smax)
|
231 |
+
assert_equal(self.fcount, fc+gc)
|
232 |
+
assert_fp_equal(ofv, f(x))
|
233 |
+
assert_fp_equal(fv, f(x + s*p))
|
234 |
+
if gv is not None:
|
235 |
+
assert_array_almost_equal(gv, fprime(x + s*p), decimal=14)
|
236 |
+
if s < smax:
|
237 |
+
c += 1
|
238 |
+
assert_line_wolfe(x, p, s, f, fprime, err_msg=name)
|
239 |
+
assert c > 3 # check that the iterator really works...
|
240 |
+
|
241 |
+
def test_line_search_wolfe2_bounds(self):
|
242 |
+
# See gh-7475
|
243 |
+
|
244 |
+
# For this f and p, starting at a point on axis 0, the strong Wolfe
|
245 |
+
# condition 2 is met if and only if the step length s satisfies
|
246 |
+
# |x + s| <= c2 * |x|
|
247 |
+
def f(x):
|
248 |
+
return np.dot(x, x)
|
249 |
+
def fp(x):
|
250 |
+
return 2 * x
|
251 |
+
p = np.array([1, 0])
|
252 |
+
|
253 |
+
# Smallest s satisfying strong Wolfe conditions for these arguments is 30
|
254 |
+
x = -60 * p
|
255 |
+
c2 = 0.5
|
256 |
+
|
257 |
+
s, _, _, _, _, _ = ls.line_search_wolfe2(f, fp, x, p, amax=30, c2=c2)
|
258 |
+
assert_line_wolfe(x, p, s, f, fp)
|
259 |
+
|
260 |
+
s, _, _, _, _, _ = assert_warns(LineSearchWarning,
|
261 |
+
ls.line_search_wolfe2, f, fp, x, p,
|
262 |
+
amax=29, c2=c2)
|
263 |
+
assert s is None
|
264 |
+
|
265 |
+
# s=30 will only be tried on the 6th iteration, so this won't converge
|
266 |
+
assert_warns(LineSearchWarning, ls.line_search_wolfe2, f, fp, x, p,
|
267 |
+
c2=c2, maxiter=5)
|
268 |
+
|
269 |
+
def test_line_search_armijo(self):
|
270 |
+
c = 0
|
271 |
+
for name, f, fprime, x, p, old_f in self.line_iter():
|
272 |
+
f0 = f(x)
|
273 |
+
g0 = fprime(x)
|
274 |
+
self.fcount = 0
|
275 |
+
s, fc, fv = ls.line_search_armijo(f, x, p, g0, f0)
|
276 |
+
c += 1
|
277 |
+
assert_equal(self.fcount, fc)
|
278 |
+
assert_fp_equal(fv, f(x + s*p))
|
279 |
+
assert_line_armijo(x, p, s, f, err_msg=name)
|
280 |
+
assert c >= 9
|
281 |
+
|
282 |
+
# -- More specific tests
|
283 |
+
|
284 |
+
def test_armijo_terminate_1(self):
|
285 |
+
# Armijo should evaluate the function only once if the trial step
|
286 |
+
# is already suitable
|
287 |
+
count = [0]
|
288 |
+
|
289 |
+
def phi(s):
|
290 |
+
count[0] += 1
|
291 |
+
return -s + 0.01*s**2
|
292 |
+
s, phi1 = ls.scalar_search_armijo(phi, phi(0), -1, alpha0=1)
|
293 |
+
assert_equal(s, 1)
|
294 |
+
assert_equal(count[0], 2)
|
295 |
+
assert_armijo(s, phi)
|
296 |
+
|
297 |
+
def test_wolfe_terminate(self):
|
298 |
+
# wolfe1 and wolfe2 should also evaluate the function only a few
|
299 |
+
# times if the trial step is already suitable
|
300 |
+
|
301 |
+
def phi(s):
|
302 |
+
count[0] += 1
|
303 |
+
return -s + 0.05*s**2
|
304 |
+
|
305 |
+
def derphi(s):
|
306 |
+
count[0] += 1
|
307 |
+
return -1 + 0.05*2*s
|
308 |
+
|
309 |
+
for func in [ls.scalar_search_wolfe1, ls.scalar_search_wolfe2]:
|
310 |
+
count = [0]
|
311 |
+
r = func(phi, derphi, phi(0), None, derphi(0))
|
312 |
+
assert r[0] is not None, (r, func)
|
313 |
+
assert count[0] <= 2 + 2, (count, func)
|
314 |
+
assert_wolfe(r[0], phi, derphi, err_msg=str(func))
|
venv/lib/python3.10/site-packages/scipy/optimize/tests/test_nnls.py
ADDED
@@ -0,0 +1,318 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
from numpy.testing import assert_allclose
|
3 |
+
from pytest import raises as assert_raises
|
4 |
+
from scipy.optimize import nnls
|
5 |
+
|
6 |
+
|
7 |
+
class TestNNLS:
|
8 |
+
def setup_method(self):
|
9 |
+
self.rng = np.random.default_rng(1685225766635251)
|
10 |
+
|
11 |
+
def test_nnls(self):
|
12 |
+
a = np.arange(25.0).reshape(-1, 5)
|
13 |
+
x = np.arange(5.0)
|
14 |
+
y = a @ x
|
15 |
+
x, res = nnls(a, y)
|
16 |
+
assert res < 1e-7
|
17 |
+
assert np.linalg.norm((a @ x) - y) < 1e-7
|
18 |
+
|
19 |
+
def test_nnls_tall(self):
|
20 |
+
a = self.rng.uniform(low=-10, high=10, size=[50, 10])
|
21 |
+
x = np.abs(self.rng.uniform(low=-2, high=2, size=[10]))
|
22 |
+
x[::2] = 0
|
23 |
+
b = a @ x
|
24 |
+
xact, rnorm = nnls(a, b, atol=500*np.linalg.norm(a, 1)*np.spacing(1.))
|
25 |
+
assert_allclose(xact, x, rtol=0., atol=1e-10)
|
26 |
+
assert rnorm < 1e-12
|
27 |
+
|
28 |
+
def test_nnls_wide(self):
|
29 |
+
# If too wide then problem becomes too ill-conditioned ans starts
|
30 |
+
# emitting warnings, hence small m, n difference.
|
31 |
+
a = self.rng.uniform(low=-10, high=10, size=[100, 120])
|
32 |
+
x = np.abs(self.rng.uniform(low=-2, high=2, size=[120]))
|
33 |
+
x[::2] = 0
|
34 |
+
b = a @ x
|
35 |
+
xact, rnorm = nnls(a, b, atol=500*np.linalg.norm(a, 1)*np.spacing(1.))
|
36 |
+
assert_allclose(xact, x, rtol=0., atol=1e-10)
|
37 |
+
assert rnorm < 1e-12
|
38 |
+
|
39 |
+
def test_maxiter(self):
|
40 |
+
# test that maxiter argument does stop iterations
|
41 |
+
a = self.rng.uniform(size=(5, 10))
|
42 |
+
b = self.rng.uniform(size=5)
|
43 |
+
with assert_raises(RuntimeError):
|
44 |
+
nnls(a, b, maxiter=1)
|
45 |
+
|
46 |
+
def test_nnls_inner_loop_case1(self):
|
47 |
+
# See gh-20168
|
48 |
+
n = np.array(
|
49 |
+
[3, 2, 0, 1, 1, 1, 3, 8, 14, 16, 29, 23, 41, 47, 53, 57, 67, 76,
|
50 |
+
103, 89, 97, 94, 85, 95, 78, 78, 78, 77, 73, 50, 50, 56, 68, 98,
|
51 |
+
95, 112, 134, 145, 158, 172, 213, 234, 222, 215, 216, 216, 206,
|
52 |
+
183, 135, 156, 110, 92, 63, 60, 52, 29, 20, 16, 12, 5, 5, 5, 1, 2,
|
53 |
+
3, 0, 2])
|
54 |
+
k = np.array(
|
55 |
+
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
|
56 |
+
0., 0., 0., 0.7205812007860187, 0., 1.4411624015720375,
|
57 |
+
0.7205812007860187, 2.882324803144075, 5.76464960628815,
|
58 |
+
5.76464960628815, 12.249880413362318, 15.132205216506394,
|
59 |
+
20.176273622008523, 27.382085629868712, 48.27894045266326,
|
60 |
+
47.558359251877235, 68.45521407467177, 97.99904330689854,
|
61 |
+
108.0871801179028, 135.46926574777152, 140.51333415327366,
|
62 |
+
184.4687874012208, 171.49832578707245, 205.36564222401535,
|
63 |
+
244.27702706646033, 214.01261663344755, 228.42424064916793,
|
64 |
+
232.02714665309804, 205.36564222401535, 172.9394881886445,
|
65 |
+
191.67459940908097, 162.1307701768542, 153.48379576742198,
|
66 |
+
110.96950492104689, 103.04311171240067, 86.46974409432225,
|
67 |
+
60.528820866025576, 43.234872047161126, 23.779179625938617,
|
68 |
+
24.499760826724636, 17.29394881886445, 11.5292992125763,
|
69 |
+
5.76464960628815, 5.044068405502131, 3.6029060039300935, 0.,
|
70 |
+
2.882324803144075, 0., 0., 0.])
|
71 |
+
d = np.array(
|
72 |
+
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
|
73 |
+
0., 0., 0., 0.003889242101538, 0., 0.007606268390096, 0.,
|
74 |
+
0.025457371599973, 0.036952882091577, 0., 0.08518359183449,
|
75 |
+
0.048201126400243, 0.196234990022205, 0.144116240157247,
|
76 |
+
0.171145134062442, 0., 0., 0.269555036538714, 0., 0., 0.,
|
77 |
+
0.010893241091872, 0., 0., 0., 0., 0., 0., 0., 0.,
|
78 |
+
0.048167058272886, 0.011238724891049, 0., 0., 0.055162603456078,
|
79 |
+
0., 0., 0., 0., 0.027753339088588, 0., 0., 0., 0., 0., 0., 0., 0.,
|
80 |
+
0., 0.])
|
81 |
+
# The following code sets up a system of equations such that
|
82 |
+
# $k_i-p_i*n_i$ is minimized for $p_i$ with weights $n_i$ and
|
83 |
+
# monotonicity constraints on $p_i$. This translates to a system of
|
84 |
+
# equations of the form $k_i - (d_1 + ... + d_i) * n_i$ and
|
85 |
+
# non-negativity constraints on the $d_i$. If $n_i$ is zero the
|
86 |
+
# system is modified such that $d_i - d_{i+1}$ is then minimized.
|
87 |
+
N = len(n)
|
88 |
+
A = np.diag(n) @ np.tril(np.ones((N, N)))
|
89 |
+
w = n ** 0.5
|
90 |
+
|
91 |
+
nz = (n == 0).nonzero()[0]
|
92 |
+
A[nz, nz] = 1
|
93 |
+
A[nz, np.minimum(nz + 1, N - 1)] = -1
|
94 |
+
w[nz] = 1
|
95 |
+
k[nz] = 0
|
96 |
+
W = np.diag(w)
|
97 |
+
|
98 |
+
# Small perturbations can already make the infinite loop go away (just
|
99 |
+
# uncomment the next line)
|
100 |
+
# k = k + 1e-10 * np.random.normal(size=N)
|
101 |
+
dact, _ = nnls(W @ A, W @ k)
|
102 |
+
assert_allclose(dact, d, rtol=0., atol=1e-10)
|
103 |
+
|
104 |
+
def test_nnls_inner_loop_case2(self):
|
105 |
+
# See gh-20168
|
106 |
+
n = np.array(
|
107 |
+
[1, 0, 1, 2, 2, 2, 3, 3, 5, 4, 14, 14, 19, 26, 36, 42, 36, 64, 64,
|
108 |
+
64, 81, 85, 85, 95, 95, 95, 75, 76, 69, 81, 62, 59, 68, 64, 71, 67,
|
109 |
+
74, 78, 118, 135, 153, 159, 210, 195, 218, 243, 236, 215, 196, 175,
|
110 |
+
185, 149, 144, 103, 104, 75, 56, 40, 32, 26, 17, 9, 12, 8, 2, 1, 1,
|
111 |
+
1])
|
112 |
+
k = np.array(
|
113 |
+
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
|
114 |
+
0., 0., 0., 0., 0., 0.7064355064917867, 0., 0., 2.11930651947536,
|
115 |
+
0.7064355064917867, 0., 3.5321775324589333, 7.064355064917867,
|
116 |
+
11.302968103868587, 16.95445215580288, 20.486629688261814,
|
117 |
+
20.486629688261814, 37.44108184406469, 55.808405012851146,
|
118 |
+
78.41434122058831, 103.13958394780086, 105.965325973768,
|
119 |
+
125.74552015553803, 149.057891869767, 176.60887662294667,
|
120 |
+
197.09550631120848, 211.930651947536, 204.86629688261814,
|
121 |
+
233.8301526487814, 221.1143135319292, 195.6826352982249,
|
122 |
+
197.80194181770025, 191.4440222592742, 187.91184472681525,
|
123 |
+
144.11284332432447, 131.39700420747232, 116.5618585711448,
|
124 |
+
93.24948685691584, 89.01087381796512, 53.68909849337579,
|
125 |
+
45.211872415474346, 31.083162285638615, 24.72524272721253,
|
126 |
+
16.95445215580288, 9.890097090885014, 9.890097090885014,
|
127 |
+
2.8257420259671466, 2.8257420259671466, 1.4128710129835733,
|
128 |
+
0.7064355064917867, 1.4128710129835733])
|
129 |
+
d = np.array(
|
130 |
+
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
|
131 |
+
0., 0., 0., 0., 0., 0.0021916146355674473, 0., 0.,
|
132 |
+
0.011252740799789484, 0., 0., 0.037746623295934395,
|
133 |
+
0.03602328132946222, 0.09509167709829734, 0.10505765870204821,
|
134 |
+
0.01391037014274718, 0.0188296228752321, 0.20723559202324254,
|
135 |
+
0.3056220879462608, 0.13304643490426477, 0., 0., 0., 0., 0., 0.,
|
136 |
+
0., 0., 0., 0., 0., 0.043185876949706214, 0.0037266261379722554,
|
137 |
+
0., 0., 0., 0., 0., 0.094797899357143, 0., 0., 0., 0., 0., 0., 0.,
|
138 |
+
0., 0.23450935613672663, 0., 0., 0.07064355064917871])
|
139 |
+
# The following code sets up a system of equations such that
|
140 |
+
# $k_i-p_i*n_i$ is minimized for $p_i$ with weights $n_i$ and
|
141 |
+
# monotonicity constraints on $p_i$. This translates to a system of
|
142 |
+
# equations of the form $k_i - (d_1 + ... + d_i) * n_i$ and
|
143 |
+
# non-negativity constraints on the $d_i$. If $n_i$ is zero the
|
144 |
+
# system is modified such that $d_i - d_{i+1}$ is then minimized.
|
145 |
+
N = len(n)
|
146 |
+
A = np.diag(n) @ np.tril(np.ones((N, N)))
|
147 |
+
w = n ** 0.5
|
148 |
+
|
149 |
+
nz = (n == 0).nonzero()[0]
|
150 |
+
A[nz, nz] = 1
|
151 |
+
A[nz, np.minimum(nz + 1, N - 1)] = -1
|
152 |
+
w[nz] = 1
|
153 |
+
k[nz] = 0
|
154 |
+
W = np.diag(w)
|
155 |
+
|
156 |
+
dact, _ = nnls(W @ A, W @ k, atol=1e-7)
|
157 |
+
|
158 |
+
p = np.cumsum(dact)
|
159 |
+
assert np.all(dact >= 0)
|
160 |
+
assert np.linalg.norm(k - n * p, ord=np.inf) < 28
|
161 |
+
assert_allclose(dact, d, rtol=0., atol=1e-10)
|
162 |
+
|
163 |
+
def test_nnls_gh20302(self):
|
164 |
+
# See gh-20302
|
165 |
+
A = np.array(
|
166 |
+
[0.33408569134321575, 0.11136189711440525, 0.049140798007949286,
|
167 |
+
0.03712063237146841, 0.055680948557202625, 0.16642814595936478,
|
168 |
+
0.11095209730624318, 0.09791993030943345, 0.14793612974165757,
|
169 |
+
0.44380838922497273, 0.11099502671044059, 0.11099502671044059,
|
170 |
+
0.14693672599330593, 0.3329850801313218, 1.498432860590948,
|
171 |
+
0.0832374225132955, 0.11098323001772734, 0.19589481249472837,
|
172 |
+
0.5919105600945457, 3.5514633605672747, 0.06658716751427037,
|
173 |
+
0.11097861252378394, 0.24485832778293645, 0.9248217710315328,
|
174 |
+
6.936163282736496, 0.05547609388181014, 0.11095218776362029,
|
175 |
+
0.29376003042571264, 1.3314262531634435, 11.982836278470993,
|
176 |
+
0.047506113282944136, 0.11084759766020298, 0.3423969672933396,
|
177 |
+
1.8105107617833156, 19.010362998724812, 0.041507335004505576,
|
178 |
+
0.11068622667868154, 0.39074115283013344, 2.361306169145206,
|
179 |
+
28.335674029742474, 0.03682846280947718, 0.11048538842843154,
|
180 |
+
0.4387861797121048, 2.9831054875676517, 40.2719240821633,
|
181 |
+
0.03311278164362387, 0.11037593881207958, 0.4870572300443105,
|
182 |
+
3.6791979604026523, 55.187969406039784, 0.030079304092299915,
|
183 |
+
0.11029078167176636, 0.5353496017200152, 4.448394860761242,
|
184 |
+
73.3985152025605, 0.02545939709595835, 0.11032405408248619,
|
185 |
+
0.6328767609778363, 6.214921713313388, 121.19097340961108,
|
186 |
+
0.022080881724881523, 0.11040440862440762, 0.7307742886903428,
|
187 |
+
8.28033064683057, 186.30743955368786, 0.020715838214945492,
|
188 |
+
0.1104844704797093, 0.7800578384588346, 9.42800814760186,
|
189 |
+
226.27219554244465, 0.01843179728340054, 0.11059078370040323,
|
190 |
+
0.8784095015912599, 11.94380463964355, 322.48272527037585,
|
191 |
+
0.015812787653789077, 0.11068951357652354, 1.0257259848595766,
|
192 |
+
16.27135849574896, 512.5477926160922, 0.014438550529330062,
|
193 |
+
0.11069555405819713, 1.1234754801775881, 19.519316032262093,
|
194 |
+
673.4164031130423, 0.012760770585072577, 0.110593345070629,
|
195 |
+
1.2688431112524712, 24.920367089248398, 971.8943164806875,
|
196 |
+
0.011427556646114315, 0.11046638091243838, 1.413623342459821,
|
197 |
+
30.967408782453557, 1347.0822820367298, 0.010033330264470307,
|
198 |
+
0.11036663290917338, 1.6071533470570285, 40.063087746029936,
|
199 |
+
1983.122843428482, 0.008950061496507258, 0.11038409179025618,
|
200 |
+
1.802244865119193, 50.37194055362024, 2795.642700725923,
|
201 |
+
0.008071078821135658, 0.11030474388885401, 1.9956465761433504,
|
202 |
+
61.80742482572119, 3801.1566267818534, 0.007191031207777556,
|
203 |
+
0.11026247851925586, 2.238160187262168, 77.7718015155818,
|
204 |
+
5366.2543045751445, 0.00636834224248, 0.11038459886965334,
|
205 |
+
2.5328963107984297, 99.49331844784753, 7760.4788389321075,
|
206 |
+
0.005624259098118485, 0.11061042892966355, 2.879742607664547,
|
207 |
+
128.34496770138628, 11358.529641572684, 0.0050354270614989555,
|
208 |
+
0.11077939535297703, 3.2263279459292575, 160.85168205252265,
|
209 |
+
15924.316523199741, 0.0044997853165982555, 0.1109947044760903,
|
210 |
+
3.6244287189055613, 202.60233390369015, 22488.859063309606,
|
211 |
+
0.004023601950058174, 0.1113196539516095, 4.07713905729421,
|
212 |
+
255.6270320242126, 31825.565487014468, 0.0036024117873727094,
|
213 |
+
0.111674765408554, 4.582933773135057, 321.9583486728612,
|
214 |
+
44913.18963986413, 0.003201503089582304, 0.11205260813538065,
|
215 |
+
5.191786833370116, 411.79333489752383, 64857.45024636,
|
216 |
+
0.0028633044552448853, 0.11262330857296549, 5.864295861648949,
|
217 |
+
522.7223161899905, 92521.84996562831, 0.0025691897303891965,
|
218 |
+
0.11304434813712465, 6.584584405106342, 656.5615739804199,
|
219 |
+
129999.19164812315, 0.0022992911894424675, 0.11343169867916175,
|
220 |
+
7.4080129906658305, 828.2026426227864, 183860.98666225857,
|
221 |
+
0.0020449922071108764, 0.11383789952917212, 8.388975556433872,
|
222 |
+
1058.2750599896935, 265097.9025274183, 0.001831274615120854,
|
223 |
+
0.11414945100919989, 9.419351803810935, 1330.564050780237,
|
224 |
+
373223.2162438565, 0.0016363333454631633, 0.11454333418242145,
|
225 |
+
10.6143816579462, 1683.787012481595, 530392.9089317025,
|
226 |
+
0.0014598610433380044, 0.11484240207592301, 11.959688127956882,
|
227 |
+
2132.0874753402027, 754758.9662704318, 0.0012985240015312626,
|
228 |
+
0.11513579480243862, 13.514425358573531, 2715.5160990137824,
|
229 |
+
1083490.9235064993, 0.0011614735761289934, 0.11537304189548002,
|
230 |
+
15.171418602667567, 3415.195870828736, 1526592.554260445,
|
231 |
+
0.0010347472698811352, 0.11554677847006009, 17.080800985009617,
|
232 |
+
4322.412404600832, 2172012.2333119176, 0.0009232988811258664,
|
233 |
+
0.1157201264344419, 19.20004861829407, 5453.349531598553,
|
234 |
+
3075689.135821584, 0.0008228871862975205, 0.11602709326795038,
|
235 |
+
21.65735242414206, 6920.203923780365, 4390869.389638642,
|
236 |
+
0.00073528900066722, 0.11642075843897651, 24.40223571298994,
|
237 |
+
8755.811207598026, 6238515.485413593, 0.0006602764384729194,
|
238 |
+
0.11752920604817965, 27.694443541914293, 11171.386093291572,
|
239 |
+
8948280.260726549, 0.0005935538977939806, 0.11851292825953147,
|
240 |
+
31.325508920763063, 14174.185724149384, 12735505.873148222,
|
241 |
+
0.0005310755355633124, 0.11913794514470308, 35.381052949627765,
|
242 |
+
17987.010118815077, 18157886.71494382, 0.00047239949671590953,
|
243 |
+
0.1190446731724092, 39.71342528048061, 22679.438775422022,
|
244 |
+
25718483.571328573, 0.00041829129789387623, 0.11851586773659825,
|
245 |
+
44.45299332965028, 28542.57147989741, 36391778.63686921,
|
246 |
+
0.00037321512015419886, 0.11880681324908665, 50.0668539579632,
|
247 |
+
36118.26128449941, 51739409.29004541, 0.0003315539616702064,
|
248 |
+
0.1184752823034871, 56.04387059062639, 45383.29960621684,
|
249 |
+
72976345.76679668, 0.00029456064937920213, 0.11831519416731286,
|
250 |
+
62.91195073220101, 57265.53993693082, 103507463.43600245,
|
251 |
+
0.00026301867496859703, 0.11862142241083726, 70.8217262087034,
|
252 |
+
72383.14781936012, 146901598.49939138, 0.00023618734450420032,
|
253 |
+
0.11966825454879482, 80.26535457124461, 92160.51176984518,
|
254 |
+
210125966.835247, 0.00021165918071578316, 0.12043407382728061,
|
255 |
+
90.7169587544247, 116975.56852918258, 299515943.218972,
|
256 |
+
0.00018757727511329545, 0.11992440455576689, 101.49899864101785,
|
257 |
+
147056.26174166967, 423080865.0307836, 0.00016654469159895833,
|
258 |
+
0.11957908856805206, 113.65970431102812, 184937.67016486943,
|
259 |
+
597533612.3026931, 0.00014717439179415048, 0.11872067604728138,
|
260 |
+
126.77899683346702, 231758.58906776624, 841283678.3159915,
|
261 |
+
0.00012868496382376066, 0.1166314722122684, 139.93635237349534,
|
262 |
+
287417.30847929465, 1172231492.6328032, 0.00011225559452625302,
|
263 |
+
0.11427619522772557, 154.0034283704458, 355281.4912295324,
|
264 |
+
1627544511.322488, 9.879511142981067e-05, 0.11295574406808354,
|
265 |
+
170.96532050841535, 442971.0111288653, 2279085852.2580123,
|
266 |
+
8.71257780313587e-05, 0.11192758284428547, 190.35067416684697,
|
267 |
+
554165.2523674504, 3203629323.93623, 7.665069027765277e-05,
|
268 |
+
0.11060694607065294, 211.28835951100046, 690933.608546013,
|
269 |
+
4486577387.093535, 6.734021094824451e-05, 0.10915848194710433,
|
270 |
+
234.24338803525194, 860487.9079859136, 6276829044.8032465,
|
271 |
+
5.9191625040287665e-05, 0.10776821865668373, 259.7454711820425,
|
272 |
+
1071699.0387579766, 8780430224.544102, 5.1856803674907676e-05,
|
273 |
+
0.10606444911641115, 287.1843540288165, 1331126.3723998806,
|
274 |
+
12251687131.5685, 4.503421404759231e-05, 0.10347361247668461,
|
275 |
+
314.7338642485931, 1638796.0697522392, 16944331963.203278,
|
276 |
+
3.90470387455642e-05, 0.1007804070023012, 344.3427560918527,
|
277 |
+
2014064.4865519698, 23392351979.057854, 3.46557661636393e-05,
|
278 |
+
0.10046706610839032, 385.56603915081587, 2533036.2523656,
|
279 |
+
33044724430.235435, 3.148745865254635e-05, 0.1025441570117926,
|
280 |
+
442.09038234164746, 3262712.3882769793, 47815050050.199135,
|
281 |
+
2.9790762078715404e-05, 0.1089845379379672, 527.8068231298969,
|
282 |
+
4375751.903321453, 72035815708.42941, 2.8772639817606534e-05,
|
283 |
+
0.11823636789048445, 643.2048194503195, 5989838.001888927,
|
284 |
+
110764084330.93005, 2.7951691815106586e-05, 0.12903432664913705,
|
285 |
+
788.5500418523591, 8249371.000613411, 171368308481.2427,
|
286 |
+
2.6844392423114212e-05, 0.1392060709754626, 955.6296403631383,
|
287 |
+
11230229.319931043, 262063016295.25085, 2.499458273851386e-05,
|
288 |
+
0.14559344445184325, 1122.7022399726002, 14820229.698461473,
|
289 |
+
388475270970.9214, 2.337386729019776e-05, 0.15294300496886065,
|
290 |
+
1324.8158105672455, 19644861.137128454, 578442936182.7473,
|
291 |
+
2.0081014872174113e-05, 0.14760215298210377, 1436.2385042492353,
|
292 |
+
23923681.729276657, 791311658718.4193, 1.773374462991839e-05,
|
293 |
+
0.14642752940923615, 1600.5596278736678, 29949429.82503553,
|
294 |
+
1112815989293.9326, 1.5303115839590797e-05, 0.14194150045081785,
|
295 |
+
1742.873058605698, 36634451.931305364, 1529085389160.7544,
|
296 |
+
1.3148448731163076e-05, 0.13699368732998807, 1889.5284359054356,
|
297 |
+
44614279.74469635, 2091762812969.9607, 1.1739194407590062e-05,
|
298 |
+
0.13739553134643406, 2128.794599579694, 56462810.11822766,
|
299 |
+
2973783283306.8145, 1.0293367506254706e-05, 0.13533033372723272,
|
300 |
+
2355.372854690074, 70176508.28667311, 4151852759764.441,
|
301 |
+
9.678312586863569e-06, 0.14293577249119244, 2794.531827932675,
|
302 |
+
93528671.31952812, 6215821967224.52, -1.174086323572049e-05,
|
303 |
+
0.1429501325944908, 3139.4804810720925, 118031680.16618933,
|
304 |
+
-6466892421886.174, -2.1188265307407812e-05, 0.1477108290912869,
|
305 |
+
3644.1133424610953, 153900132.62392554, -4828013117542.036,
|
306 |
+
-8.614483025123122e-05, 0.16037100755883044, 4444.386620899393,
|
307 |
+
210846007.89660168, -1766340937974.433, 4.981445776141726e-05,
|
308 |
+
0.16053420251962536, 4997.558254401547, 266327328.4755411,
|
309 |
+
3862250287024.725, 1.8500019169456637e-05, 0.15448417164977674,
|
310 |
+
5402.289867444643, 323399508.1475582, 12152445411933.408,
|
311 |
+
-5.647882376069748e-05, 0.1406372975946189, 5524.633133597753,
|
312 |
+
371512945.9909363, -4162951345292.1514, 2.8048523486337994e-05,
|
313 |
+
0.13183417571186926, 5817.462495763679, 439447252.3728975,
|
314 |
+
9294740538175.03]).reshape(89, 5)
|
315 |
+
b = np.ones(89, dtype=np.float64)
|
316 |
+
sol, rnorm = nnls(A, b)
|
317 |
+
assert_allclose(sol, np.array([0.61124315, 8.22262829, 0., 0., 0.]))
|
318 |
+
assert_allclose(rnorm, 1.0556460808977297)
|
venv/lib/python3.10/site-packages/scipy/optimize/tests/test_nonlin.py
ADDED
@@ -0,0 +1,534 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
""" Unit tests for nonlinear solvers
|
2 |
+
Author: Ondrej Certik
|
3 |
+
May 2007
|
4 |
+
"""
|
5 |
+
from numpy.testing import assert_
|
6 |
+
import pytest
|
7 |
+
|
8 |
+
from scipy.optimize import _nonlin as nonlin, root
|
9 |
+
from scipy.sparse import csr_array
|
10 |
+
from numpy import diag, dot
|
11 |
+
from numpy.linalg import inv
|
12 |
+
import numpy as np
|
13 |
+
import scipy
|
14 |
+
|
15 |
+
from .test_minpack import pressure_network
|
16 |
+
|
17 |
+
SOLVERS = {'anderson': nonlin.anderson,
|
18 |
+
'diagbroyden': nonlin.diagbroyden,
|
19 |
+
'linearmixing': nonlin.linearmixing,
|
20 |
+
'excitingmixing': nonlin.excitingmixing,
|
21 |
+
'broyden1': nonlin.broyden1,
|
22 |
+
'broyden2': nonlin.broyden2,
|
23 |
+
'krylov': nonlin.newton_krylov}
|
24 |
+
MUST_WORK = {'anderson': nonlin.anderson, 'broyden1': nonlin.broyden1,
|
25 |
+
'broyden2': nonlin.broyden2, 'krylov': nonlin.newton_krylov}
|
26 |
+
|
27 |
+
# ----------------------------------------------------------------------------
|
28 |
+
# Test problems
|
29 |
+
# ----------------------------------------------------------------------------
|
30 |
+
|
31 |
+
|
32 |
+
def F(x):
|
33 |
+
x = np.asarray(x).T
|
34 |
+
d = diag([3, 2, 1.5, 1, 0.5])
|
35 |
+
c = 0.01
|
36 |
+
f = -d @ x - c * float(x.T @ x) * x
|
37 |
+
return f
|
38 |
+
|
39 |
+
|
40 |
+
F.xin = [1, 1, 1, 1, 1]
|
41 |
+
F.KNOWN_BAD = {}
|
42 |
+
F.JAC_KSP_BAD = {}
|
43 |
+
F.ROOT_JAC_KSP_BAD = {}
|
44 |
+
|
45 |
+
|
46 |
+
def F2(x):
|
47 |
+
return x
|
48 |
+
|
49 |
+
|
50 |
+
F2.xin = [1, 2, 3, 4, 5, 6]
|
51 |
+
F2.KNOWN_BAD = {'linearmixing': nonlin.linearmixing,
|
52 |
+
'excitingmixing': nonlin.excitingmixing}
|
53 |
+
F2.JAC_KSP_BAD = {}
|
54 |
+
F2.ROOT_JAC_KSP_BAD = {}
|
55 |
+
|
56 |
+
|
57 |
+
def F2_lucky(x):
|
58 |
+
return x
|
59 |
+
|
60 |
+
|
61 |
+
F2_lucky.xin = [0, 0, 0, 0, 0, 0]
|
62 |
+
F2_lucky.KNOWN_BAD = {}
|
63 |
+
F2_lucky.JAC_KSP_BAD = {}
|
64 |
+
F2_lucky.ROOT_JAC_KSP_BAD = {}
|
65 |
+
|
66 |
+
|
67 |
+
def F3(x):
|
68 |
+
A = np.array([[-2, 1, 0.], [1, -2, 1], [0, 1, -2]])
|
69 |
+
b = np.array([1, 2, 3.])
|
70 |
+
return A @ x - b
|
71 |
+
|
72 |
+
|
73 |
+
F3.xin = [1, 2, 3]
|
74 |
+
F3.KNOWN_BAD = {}
|
75 |
+
F3.JAC_KSP_BAD = {}
|
76 |
+
F3.ROOT_JAC_KSP_BAD = {}
|
77 |
+
|
78 |
+
|
79 |
+
def F4_powell(x):
|
80 |
+
A = 1e4
|
81 |
+
return [A*x[0]*x[1] - 1, np.exp(-x[0]) + np.exp(-x[1]) - (1 + 1/A)]
|
82 |
+
|
83 |
+
|
84 |
+
F4_powell.xin = [-1, -2]
|
85 |
+
F4_powell.KNOWN_BAD = {'linearmixing': nonlin.linearmixing,
|
86 |
+
'excitingmixing': nonlin.excitingmixing,
|
87 |
+
'diagbroyden': nonlin.diagbroyden}
|
88 |
+
# In the extreme case, it does not converge for nolinear problem solved by
|
89 |
+
# MINRES and root problem solved by GMRES/BiCGStab/CGS/MINRES/TFQMR when using
|
90 |
+
# Krylov method to approximate Jacobian
|
91 |
+
F4_powell.JAC_KSP_BAD = {'minres'}
|
92 |
+
F4_powell.ROOT_JAC_KSP_BAD = {'gmres', 'bicgstab', 'cgs', 'minres', 'tfqmr'}
|
93 |
+
|
94 |
+
|
95 |
+
def F5(x):
    """Pressure-network residual with four equal target fluxes."""
    return pressure_network(x, 4, np.array([.5, .5, .5, .5]))


F5.xin = [2., 0, 2, 0]
F5.KNOWN_BAD = {'excitingmixing': nonlin.excitingmixing,
                'linearmixing': nonlin.linearmixing,
                'diagbroyden': nonlin.diagbroyden}
# In the extreme case, the Jacobian inversion yields a zero vector for the
# nonlinear problem solved by CGS/MINRES, and the root problem solved by
# MINRES does not converge, when a Krylov method approximates the Jacobian.
F5.JAC_KSP_BAD = {'cgs', 'minres'}
F5.ROOT_JAC_KSP_BAD = {'minres'}
|
108 |
+
|
109 |
+
|
110 |
+
def F6(x):
    """Two-variable nonlinear residual, preconditioned by a fixed matrix J0."""
    x1, x2 = x
    precond = np.array([[-4.256, 14.7],
                        [0.8394989, 0.59964207]])
    resid = np.array([(x1 + 3) * (x2**5 - 7) + 3*6,
                      np.sin(x2 * np.exp(x1) - 1)])
    return -np.linalg.solve(precond, resid)


F6.xin = [-0.5, 1.4]
F6.KNOWN_BAD = {'excitingmixing': nonlin.excitingmixing,
                'linearmixing': nonlin.linearmixing,
                'diagbroyden': nonlin.diagbroyden}
F6.JAC_KSP_BAD = {}
F6.ROOT_JAC_KSP_BAD = {}
|
125 |
+
|
126 |
+
|
127 |
+
# ----------------------------------------------------------------------------
|
128 |
+
# Tests
|
129 |
+
# ----------------------------------------------------------------------------
|
130 |
+
|
131 |
+
|
132 |
+
class TestNonlin:
    """
    Check the Broyden methods for a few test problems.

    broyden1, broyden2, and newton_krylov must succeed for
    all functions. Some of the others don't -- tests in KNOWN_BAD are skipped.

    """

    def _check_nonlin_func(self, f, func, f_tol=1e-2):
        # Exercise every Krylov subspace method named in `KrylovJacobian`.
        if func == SOLVERS['krylov']:
            for ksp in ['gmres', 'bicgstab', 'cgs', 'minres', 'tfqmr']:
                if ksp in f.JAC_KSP_BAD:
                    continue

                sol = func(f, f.xin, method=ksp, line_search=None,
                           f_tol=f_tol, maxiter=200, verbose=0)
                assert_(np.absolute(f(sol)).max() < f_tol)

        # Default configuration of the solver.
        sol = func(f, f.xin, f_tol=f_tol, maxiter=200, verbose=0)
        assert_(np.absolute(f(sol)).max() < f_tol)

    def _check_root(self, f, method, f_tol=1e-2):
        # Exercise every Krylov subspace method for the jacobian approximation.
        if method == 'krylov':
            for jac_method in ['gmres', 'bicgstab', 'cgs', 'minres', 'tfqmr']:
                if jac_method in f.ROOT_JAC_KSP_BAD:
                    continue

                res = root(f, f.xin, method=method,
                           options={'ftol': f_tol, 'maxiter': 200,
                                    'disp': 0,
                                    'jac_options': {'method': jac_method}})
                assert_(np.absolute(res.fun).max() < f_tol)

        # Default jacobian options.
        res = root(f, f.xin, method=method,
                   options={'ftol': f_tol, 'maxiter': 200, 'disp': 0})
        assert_(np.absolute(res.fun).max() < f_tol)

    @pytest.mark.xfail
    def _check_func_fail(self, *a, **kw):
        # Placeholder used for combinations that are expected to fail.
        pass

    @pytest.mark.filterwarnings('ignore::DeprecationWarning')
    def test_problem_nonlin(self):
        # Every solver must handle every problem, except KNOWN_BAD pairs.
        for f in [F, F2, F2_lucky, F3, F4_powell, F5, F6]:
            for func in SOLVERS.values():
                if func in f.KNOWN_BAD.values():
                    if func in MUST_WORK.values():
                        self._check_func_fail(f, func)
                    continue
                self._check_nonlin_func(f, func)

    @pytest.mark.filterwarnings('ignore::DeprecationWarning')
    @pytest.mark.parametrize("method", ['lgmres', 'gmres', 'bicgstab', 'cgs',
                                        'minres', 'tfqmr'])
    def test_tol_norm_called(self, method):
        # Check that supplying tol_norm keyword to nonlin_solve works
        self._tol_norm_used = False

        def local_norm_func(x):
            # Side effect records that the custom norm was actually invoked.
            self._tol_norm_used = True
            return np.absolute(x).max()

        nonlin.newton_krylov(F, F.xin, method=method, f_tol=1e-2,
                             maxiter=200, verbose=0,
                             tol_norm=local_norm_func)
        assert_(self._tol_norm_used)

    @pytest.mark.filterwarnings('ignore::DeprecationWarning')
    def test_problem_root(self):
        # Same matrix of problems/solvers, driven through `root`.
        for f in [F, F2, F2_lucky, F3, F4_powell, F5, F6]:
            for meth in SOLVERS:
                if meth in f.KNOWN_BAD:
                    if meth in MUST_WORK:
                        self._check_func_fail(f, meth)
                    continue
                self._check_root(f, meth)

    def test_no_convergence(self):
        def wont_converge(x):
            # Residual bounded away from zero: no root exists.
            return 1e3 + x

        with pytest.raises(scipy.optimize.NoConvergence):
            nonlin.newton_krylov(wont_converge, xin=[0], maxiter=1)
|
218 |
+
|
219 |
+
|
220 |
+
class TestSecant:
    """Check that some Jacobian approximations satisfy the secant condition"""

    xs = [np.array([1., 2., 3., 4., 5.]),
          np.array([2., 3., 4., 5., 1.]),
          np.array([3., 4., 5., 1., 2.]),
          np.array([4., 5., 1., 2., 3.]),
          np.array([9., 1., 9., 1., 3.]),
          np.array([0., 1., 9., 1., 3.]),
          np.array([5., 5., 7., 1., 1.]),
          np.array([1., 2., 7., 5., 1.]),]
    fs = [x**2 - 1 for x in xs]

    def _check_secant(self, jac_cls, npoints=1, **kw):
        """
        Check that the given Jacobian approximation satisfies secant
        conditions for last `npoints` points.
        """
        approx = jac_cls(**kw)
        approx.setup(self.xs[0], self.fs[0], None)
        for step, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])):
            approx.update(x, f)

            # The most recent `npoints` secant conditions must hold ...
            for back in range(min(npoints, step + 1)):
                dx = self.xs[step - back + 1] - self.xs[step - back]
                df = self.fs[step - back + 1] - self.fs[step - back]
                assert_(np.allclose(dx, approx.solve(df)))

            # ... and the condition one step older must not (strict bound).
            if step >= npoints:
                dx = self.xs[step - npoints + 1] - self.xs[step - npoints]
                df = self.fs[step - npoints + 1] - self.fs[step - npoints]
                assert_(not np.allclose(dx, approx.solve(df)))

    def test_broyden1(self):
        self._check_secant(nonlin.BroydenFirst)

    def test_broyden2(self):
        self._check_secant(nonlin.BroydenSecond)

    def test_broyden1_update(self):
        # BroydenFirst must track the textbook dense rank-1 update.
        approx = nonlin.BroydenFirst(alpha=0.1)
        approx.setup(self.xs[0], self.fs[0], None)

        dense = np.identity(5) * (-1/0.1)

        for prev, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])):
            df = f - self.fs[prev]
            dx = x - self.xs[prev]
            dense += (df - dot(dense, dx))[:, None] * dx[None, :] / dot(dx, dx)
            approx.update(x, f)
            assert_(np.allclose(approx.todense(), dense,
                                rtol=1e-10, atol=1e-13))

    def test_broyden2_update(self):
        # BroydenSecond must track the dense inverse-Jacobian update.
        approx = nonlin.BroydenSecond(alpha=0.1)
        approx.setup(self.xs[0], self.fs[0], None)

        dense_inv = np.identity(5) * (-0.1)

        for prev, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])):
            df = f - self.fs[prev]
            dx = x - self.xs[prev]
            dense_inv += ((dx - dot(dense_inv, df))[:, None] * df[None, :]
                          / dot(df, df))
            approx.update(x, f)
            assert_(np.allclose(approx.todense(), inv(dense_inv),
                                rtol=1e-10, atol=1e-13))

    def test_anderson(self):
        # Anderson mixing (with w0=0) satisfies secant conditions
        # for the last M iterates, see [Ey]_
        #
        # .. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996).
        self._check_secant(nonlin.Anderson, M=3, w0=0, npoints=3)
|
294 |
+
|
295 |
+
|
296 |
+
class TestLinear:
    """Solve a linear equation;
    some methods find the exact solution in a finite number of steps"""

    def _check(self, jac, N, maxiter, complex=False, **kw):
        np.random.seed(123)

        # NOTE: the draw order below is part of the fixture -- changing it
        # changes the random system being solved.
        A = np.random.randn(N, N)
        if complex:
            A = A + 1j*np.random.randn(N, N)
        b = np.random.randn(N)
        if complex:
            b = b + 1j*np.random.randn(N)

        def residual(x):
            return dot(A, x) - b

        sol = nonlin.nonlin_solve(residual, np.zeros(N), jac, maxiter=maxiter,
                                  f_tol=1e-6, line_search=None, verbose=0)
        assert_(np.allclose(dot(A, sol), b, atol=1e-6))

    def test_broyden1(self):
        # Broyden methods solve linear systems exactly in 2*N steps
        self._check(nonlin.BroydenFirst(alpha=1.0), 20, 41, False)
        self._check(nonlin.BroydenFirst(alpha=1.0), 20, 41, True)

    def test_broyden2(self):
        # Broyden methods solve linear systems exactly in 2*N steps
        self._check(nonlin.BroydenSecond(alpha=1.0), 20, 41, False)
        self._check(nonlin.BroydenSecond(alpha=1.0), 20, 41, True)

    def test_anderson(self):
        # Anderson is rather similar to Broyden, if given enough storage space
        self._check(nonlin.Anderson(M=50, alpha=1.0), 20, 29, False)
        self._check(nonlin.Anderson(M=50, alpha=1.0), 20, 29, True)

    def test_krylov(self):
        # Krylov methods solve linear systems exactly in N inner steps
        self._check(nonlin.KrylovJacobian, 20, 2, False, inner_m=10)
        self._check(nonlin.KrylovJacobian, 20, 2, True, inner_m=10)

    def _check_autojac(self, A, b):
        def residual(x):
            return A.dot(x) - b

        def jacobian(v):
            # Constant Jacobian of the linear residual.
            return A

        sol = nonlin.nonlin_solve(residual, np.zeros(b.shape[0]), jacobian,
                                  maxiter=2, f_tol=1e-6, line_search=None,
                                  verbose=0)
        np.testing.assert_allclose(A @ sol, b, atol=1e-6)
        # test jac input as array -- not a function
        sol = nonlin.nonlin_solve(residual, np.zeros(b.shape[0]), A, maxiter=2,
                                  f_tol=1e-6, line_search=None, verbose=0)
        np.testing.assert_allclose(A @ sol, b, atol=1e-6)

    def test_jac_sparse(self):
        A = csr_array([[1, 2], [2, 1]])
        b = np.array([1, -1])
        self._check_autojac(A, b)
        self._check_autojac((1 + 2j) * A, (2 + 2j) * b)

    def test_jac_ndarray(self):
        A = np.array([[1, 2], [2, 1]])
        b = np.array([1, -1])
        self._check_autojac(A, b)
        self._check_autojac((1 + 2j) * A, (2 + 2j) * b)
|
363 |
+
|
364 |
+
|
365 |
+
class TestJacobianDotSolve:
    """
    Check that solve/dot methods in Jacobian approximations are consistent
    """

    def _func(self, x):
        return x**2 - 1 + np.dot(self.A, x)

    def _check_dot(self, jac_cls, complex=False, tol=1e-6, **kw):
        np.random.seed(123)

        N = 7

        def rand(*a):
            q = np.random.rand(*a)
            if complex:
                q = q + 1j*np.random.rand(*a)
            return q

        def assert_close(a, b, msg):
            # Relative-ish tolerance check with a readable failure message.
            d = abs(a - b).max()
            f = tol + abs(b).max()*tol
            if d > f:
                raise AssertionError(f'{msg}: err {d:g}')

        self.A = rand(N, N)

        # initialize
        x0 = np.random.rand(N)
        jac = jac_cls(**kw)
        jac.setup(x0, self._func(x0), self._func)

        # check consistency across several updates
        for trial in range(2*N):
            probe = rand(N)

            if hasattr(jac, '__array__'):
                dense = np.array(jac)
                if hasattr(jac, 'solve'):
                    got = jac.solve(probe)
                    ref = np.linalg.solve(dense, probe)
                    assert_close(got, ref, 'solve vs array')
                if hasattr(jac, 'rsolve'):
                    got = jac.rsolve(probe)
                    ref = np.linalg.solve(dense.T.conj(), probe)
                    assert_close(got, ref, 'rsolve vs array')
                if hasattr(jac, 'matvec'):
                    got = jac.matvec(probe)
                    ref = np.dot(dense, probe)
                    assert_close(got, ref, 'dot vs array')
                if hasattr(jac, 'rmatvec'):
                    got = jac.rmatvec(probe)
                    ref = np.dot(dense.T.conj(), probe)
                    assert_close(got, ref, 'rmatvec vs array')

            if hasattr(jac, 'matvec') and hasattr(jac, 'solve'):
                got = jac.matvec(probe)
                roundtrip = jac.solve(jac.matvec(got))
                assert_close(got, roundtrip, 'dot vs solve')

            if hasattr(jac, 'rmatvec') and hasattr(jac, 'rsolve'):
                got = jac.rmatvec(probe)
                roundtrip = jac.rmatvec(jac.rsolve(got))
                assert_close(got, roundtrip, 'rmatvec vs rsolve')

            x = rand(N)
            jac.update(x, self._func(x))

    def test_broyden1(self):
        self._check_dot(nonlin.BroydenFirst, complex=False)
        self._check_dot(nonlin.BroydenFirst, complex=True)

    def test_broyden2(self):
        self._check_dot(nonlin.BroydenSecond, complex=False)
        self._check_dot(nonlin.BroydenSecond, complex=True)

    def test_anderson(self):
        self._check_dot(nonlin.Anderson, complex=False)
        self._check_dot(nonlin.Anderson, complex=True)

    def test_diagbroyden(self):
        self._check_dot(nonlin.DiagBroyden, complex=False)
        self._check_dot(nonlin.DiagBroyden, complex=True)

    def test_linearmixing(self):
        self._check_dot(nonlin.LinearMixing, complex=False)
        self._check_dot(nonlin.LinearMixing, complex=True)

    def test_excitingmixing(self):
        self._check_dot(nonlin.ExcitingMixing, complex=False)
        self._check_dot(nonlin.ExcitingMixing, complex=True)

    def test_krylov(self):
        # Krylov approximation is inexact; use a looser tolerance.
        self._check_dot(nonlin.KrylovJacobian, complex=False, tol=1e-3)
        self._check_dot(nonlin.KrylovJacobian, complex=True, tol=1e-3)
|
460 |
+
|
461 |
+
|
462 |
+
class TestNonlinOldTests:
    """ Test case for a simple constrained entropy maximization problem
    (the machine translation example of Berger et al in
    Computational Linguistics, vol 22, num 1, pp 39--72, 1996.)
    """

    def test_broyden1(self):
        sol = nonlin.broyden1(F, F.xin, iter=12, alpha=1)
        assert_(nonlin.norm(sol) < 1e-9)
        assert_(nonlin.norm(F(sol)) < 1e-9)

    def test_broyden2(self):
        sol = nonlin.broyden2(F, F.xin, iter=12, alpha=1)
        assert_(nonlin.norm(sol) < 1e-9)
        assert_(nonlin.norm(F(sol)) < 1e-9)

    def test_anderson(self):
        # Anderson mixing only gets close; no residual check here.
        sol = nonlin.anderson(F, F.xin, iter=12, alpha=0.03, M=5)
        assert_(nonlin.norm(sol) < 0.33)

    def test_linearmixing(self):
        sol = nonlin.linearmixing(F, F.xin, iter=60, alpha=0.5)
        assert_(nonlin.norm(sol) < 1e-7)
        assert_(nonlin.norm(F(sol)) < 1e-7)

    def test_exciting(self):
        sol = nonlin.excitingmixing(F, F.xin, iter=20, alpha=0.5)
        assert_(nonlin.norm(sol) < 1e-5)
        assert_(nonlin.norm(F(sol)) < 1e-5)

    def test_diagbroyden(self):
        sol = nonlin.diagbroyden(F, F.xin, iter=11, alpha=1)
        assert_(nonlin.norm(sol) < 1e-8)
        assert_(nonlin.norm(F(sol)) < 1e-8)

    def test_root_broyden1(self):
        res = root(F, F.xin, method='broyden1',
                   options={'nit': 12, 'jac_options': {'alpha': 1}})
        assert_(nonlin.norm(res.x) < 1e-9)
        assert_(nonlin.norm(res.fun) < 1e-9)

    def test_root_broyden2(self):
        res = root(F, F.xin, method='broyden2',
                   options={'nit': 12, 'jac_options': {'alpha': 1}})
        assert_(nonlin.norm(res.x) < 1e-9)
        assert_(nonlin.norm(res.fun) < 1e-9)

    def test_root_anderson(self):
        res = root(F, F.xin, method='anderson',
                   options={'nit': 12,
                            'jac_options': {'alpha': 0.03, 'M': 5}})
        assert_(nonlin.norm(res.x) < 0.33)

    def test_root_linearmixing(self):
        res = root(F, F.xin, method='linearmixing',
                   options={'nit': 60,
                            'jac_options': {'alpha': 0.5}})
        assert_(nonlin.norm(res.x) < 1e-7)
        assert_(nonlin.norm(res.fun) < 1e-7)

    def test_root_excitingmixing(self):
        res = root(F, F.xin, method='excitingmixing',
                   options={'nit': 20,
                            'jac_options': {'alpha': 0.5}})
        assert_(nonlin.norm(res.x) < 1e-5)
        assert_(nonlin.norm(res.fun) < 1e-5)

    def test_root_diagbroyden(self):
        res = root(F, F.xin, method='diagbroyden',
                   options={'nit': 11,
                            'jac_options': {'alpha': 1}})
        assert_(nonlin.norm(res.x) < 1e-8)
        assert_(nonlin.norm(res.fun) < 1e-8)
|
venv/lib/python3.10/site-packages/scipy/optimize/tests/test_trustregion.py
ADDED
@@ -0,0 +1,112 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Unit tests for trust-region optimization routines.
|
3 |
+
|
4 |
+
To run it in its simplest form::
|
5 |
+
nosetests test_optimize.py
|
6 |
+
|
7 |
+
"""
|
8 |
+
import pytest
|
9 |
+
import numpy as np
|
10 |
+
from numpy.testing import assert_, assert_equal, assert_allclose
|
11 |
+
from scipy.optimize import (minimize, rosen, rosen_der, rosen_hess,
|
12 |
+
rosen_hess_prod)
|
13 |
+
|
14 |
+
|
15 |
+
class Accumulator:
    """Callback recorder: counts invocations and sums the iterates seen."""

    def __init__(self):
        self.count = 0    # number of times the callback was invoked
        self.accum = None  # elementwise running sum of all iterates

    def __call__(self, x):
        self.count += 1
        # First call seeds the accumulator; later calls add elementwise.
        self.accum = np.array(x) if self.accum is None else self.accum + x
|
27 |
+
|
28 |
+
|
29 |
+
class TestTrustRegionSolvers:
|
30 |
+
|
31 |
+
def setup_method(self):
|
32 |
+
self.x_opt = [1.0, 1.0]
|
33 |
+
self.easy_guess = [2.0, 2.0]
|
34 |
+
self.hard_guess = [-1.2, 1.0]
|
35 |
+
|
36 |
+
def test_dogleg_accuracy(self):
|
37 |
+
# test the accuracy and the return_all option
|
38 |
+
x0 = self.hard_guess
|
39 |
+
r = minimize(rosen, x0, jac=rosen_der, hess=rosen_hess, tol=1e-8,
|
40 |
+
method='dogleg', options={'return_all': True},)
|
41 |
+
assert_allclose(x0, r['allvecs'][0])
|
42 |
+
assert_allclose(r['x'], r['allvecs'][-1])
|
43 |
+
assert_allclose(r['x'], self.x_opt)
|
44 |
+
|
45 |
+
def test_dogleg_callback(self):
|
46 |
+
# test the callback mechanism and the maxiter and return_all options
|
47 |
+
accumulator = Accumulator()
|
48 |
+
maxiter = 5
|
49 |
+
r = minimize(rosen, self.hard_guess, jac=rosen_der, hess=rosen_hess,
|
50 |
+
callback=accumulator, method='dogleg',
|
51 |
+
options={'return_all': True, 'maxiter': maxiter},)
|
52 |
+
assert_equal(accumulator.count, maxiter)
|
53 |
+
assert_equal(len(r['allvecs']), maxiter+1)
|
54 |
+
assert_allclose(r['x'], r['allvecs'][-1])
|
55 |
+
assert_allclose(sum(r['allvecs'][1:]), accumulator.accum)
|
56 |
+
|
57 |
+
def test_dogleg_user_warning(self):
|
58 |
+
with pytest.warns(RuntimeWarning,
|
59 |
+
match=r'Maximum number of iterations'):
|
60 |
+
minimize(rosen, self.hard_guess, jac=rosen_der,
|
61 |
+
hess=rosen_hess, method='dogleg',
|
62 |
+
options={'disp': True, 'maxiter': 1}, )
|
63 |
+
|
64 |
+
def test_solver_concordance(self):
|
65 |
+
# Assert that dogleg uses fewer iterations than ncg on the Rosenbrock
|
66 |
+
# test function, although this does not necessarily mean
|
67 |
+
# that dogleg is faster or better than ncg even for this function
|
68 |
+
# and especially not for other test functions.
|
69 |
+
f = rosen
|
70 |
+
g = rosen_der
|
71 |
+
h = rosen_hess
|
72 |
+
for x0 in (self.easy_guess, self.hard_guess):
|
73 |
+
r_dogleg = minimize(f, x0, jac=g, hess=h, tol=1e-8,
|
74 |
+
method='dogleg', options={'return_all': True})
|
75 |
+
r_trust_ncg = minimize(f, x0, jac=g, hess=h, tol=1e-8,
|
76 |
+
method='trust-ncg',
|
77 |
+
options={'return_all': True})
|
78 |
+
r_trust_krylov = minimize(f, x0, jac=g, hess=h, tol=1e-8,
|
79 |
+
method='trust-krylov',
|
80 |
+
options={'return_all': True})
|
81 |
+
r_ncg = minimize(f, x0, jac=g, hess=h, tol=1e-8,
|
82 |
+
method='newton-cg', options={'return_all': True})
|
83 |
+
r_iterative = minimize(f, x0, jac=g, hess=h, tol=1e-8,
|
84 |
+
method='trust-exact',
|
85 |
+
options={'return_all': True})
|
86 |
+
assert_allclose(self.x_opt, r_dogleg['x'])
|
87 |
+
assert_allclose(self.x_opt, r_trust_ncg['x'])
|
88 |
+
assert_allclose(self.x_opt, r_trust_krylov['x'])
|
89 |
+
assert_allclose(self.x_opt, r_ncg['x'])
|
90 |
+
assert_allclose(self.x_opt, r_iterative['x'])
|
91 |
+
assert_(len(r_dogleg['allvecs']) < len(r_ncg['allvecs']))
|
92 |
+
|
93 |
+
def test_trust_ncg_hessp(self):
|
94 |
+
for x0 in (self.easy_guess, self.hard_guess, self.x_opt):
|
95 |
+
r = minimize(rosen, x0, jac=rosen_der, hessp=rosen_hess_prod,
|
96 |
+
tol=1e-8, method='trust-ncg')
|
97 |
+
assert_allclose(self.x_opt, r['x'])
|
98 |
+
|
99 |
+
def test_trust_ncg_start_in_optimum(self):
|
100 |
+
r = minimize(rosen, x0=self.x_opt, jac=rosen_der, hess=rosen_hess,
|
101 |
+
tol=1e-8, method='trust-ncg')
|
102 |
+
assert_allclose(self.x_opt, r['x'])
|
103 |
+
|
104 |
+
def test_trust_krylov_start_in_optimum(self):
|
105 |
+
r = minimize(rosen, x0=self.x_opt, jac=rosen_der, hess=rosen_hess,
|
106 |
+
tol=1e-8, method='trust-krylov')
|
107 |
+
assert_allclose(self.x_opt, r['x'])
|
108 |
+
|
109 |
+
def test_trust_exact_start_in_optimum(self):
|
110 |
+
r = minimize(rosen, x0=self.x_opt, jac=rosen_der, hess=rosen_hess,
|
111 |
+
tol=1e-8, method='trust-exact')
|
112 |
+
assert_allclose(self.x_opt, r['x'])
|
venv/lib/python3.10/site-packages/scipy/optimize/tests/test_trustregion_exact.py
ADDED
@@ -0,0 +1,352 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Unit tests for trust-region iterative subproblem.
|
3 |
+
|
4 |
+
To run it in its simplest form::
|
5 |
+
nosetests test_optimize.py
|
6 |
+
|
7 |
+
"""
|
8 |
+
import numpy as np
|
9 |
+
from scipy.optimize._trustregion_exact import (
|
10 |
+
estimate_smallest_singular_value,
|
11 |
+
singular_leading_submatrix,
|
12 |
+
IterativeSubproblem)
|
13 |
+
from scipy.linalg import (svd, get_lapack_funcs, det, qr, norm)
|
14 |
+
from numpy.testing import (assert_array_equal,
|
15 |
+
assert_equal, assert_array_almost_equal)
|
16 |
+
|
17 |
+
|
18 |
+
def random_entry(n, min_eig, max_eig, case):
    """Build a random symmetric test matrix and a matching gradient vector.

    Parameters
    ----------
    n : int
        Problem dimension.
    min_eig, max_eig : float
        Bounds for the uniformly drawn eigenvalues of the matrix.
    case : str
        'hard' makes the gradient orthogonal to the eigenvector of the
        smallest eigenvalue, 'jac_equal_zero' makes it identically zero,
        and any other value draws it uniformly at random.

    Returns
    -------
    A : ndarray, shape (n, n)
        Symmetric matrix with eigenvalues in [min_eig, max_eig].
    g : ndarray, shape (n,)
        Gradient vector for the requested test case.
    """
    # Random orthogonal basis from the pivoted QR of a random matrix.
    rand = np.random.uniform(-1, 1, (n, n))

    # FIX: `pivoting` is a boolean flag; the original passed the string
    # 'True', which only worked because non-empty strings are truthy.
    Q, _, _ = qr(rand, pivoting=True)

    # Random eigenvalues, sorted in decreasing order.
    eigvalues = np.random.uniform(min_eig, max_eig, n)
    eigvalues = np.sort(eigvalues)[::-1]

    # Assemble A = Q @ diag(eigvalues) @ Q.T (symmetric by construction).
    Qaux = np.multiply(eigvalues, Q)
    A = np.dot(Qaux, Q.T)

    # Generate gradient vector accordingly
    # to the case being tested.
    if case == 'hard':
        # Zero component along the eigenvector of the smallest eigenvalue
        # (the last column of Q, since eigenvalues are sorted descending).
        g = np.zeros(n)
        g[:-1] = np.random.uniform(-1, 1, n-1)
        g = np.dot(Q, g)
    elif case == 'jac_equal_zero':
        g = np.zeros(n)
    else:
        g = np.random.uniform(-1, 1, n)

    return A, g
|
46 |
+
|
47 |
+
|
48 |
+
class TestEstimateSmallestSingularValue:

    def test_for_ill_condiotioned_matrix(self):
        # Ill-conditioned triangular matrix
        C = np.array([[1, 2, 3, 4],
                      [0, 0.05, 60, 7],
                      [0, 0, 0.8, 9],
                      [0, 0, 0, 10]])

        # Reference: exact smallest singular value/vector from a full SVD.
        _, sigma, Vt = svd(C)
        smin_ref = sigma[-1]
        zmin_ref = Vt[-1, :]

        # The cheap estimator under test.
        smin, zmin = estimate_smallest_singular_value(C)

        # The estimate must match the SVD to ~8 decimals; the singular
        # vector only up to sign, hence the abs().
        assert_array_almost_equal(smin, smin_ref, decimal=8)
        assert_array_almost_equal(abs(zmin), abs(zmin_ref), decimal=8)
|
71 |
+
|
72 |
+
|
73 |
+
class TestSingularLeadingSubmatrix:

    def _check_contract(self, A):
        """Verify singular_leading_submatrix's contract on matrix `A`:
        adding `delta` to A[k-1, k-1] makes the leading k-by-k submatrix
        singular, and `v` is a null direction of the corrected matrix."""
        # Cholesky ('potrf') reports the order `k` at which it broke down.
        cholesky, = get_lapack_funcs(('potrf',), (A,))
        c, k = cholesky(A, lower=False, overwrite_a=False, clean=True)

        delta, v = singular_leading_submatrix(A, c, k)

        A[k-1, k-1] += delta

        # The corrected leading submatrix is singular ...
        assert_array_almost_equal(det(A[:k, :k]), 0)

        # ... and `v` spans its null space: v' A v == 0.
        assert_array_almost_equal(np.dot(v, np.dot(A, v)), 0)

    def test_for_already_singular_leading_submatrix(self):
        # The leading 2x2 submatrix is singular.
        A = np.array([[1, 2, 3],
                      [2, 4, 5],
                      [3, 5, 6]])
        self._check_contract(A)

    def test_for_simetric_indefinite_matrix(self):
        # The leading 5x5 submatrix is indefinite.
        A = np.asarray([[1, 2, 3, 7, 8],
                        [2, 5, 5, 9, 0],
                        [3, 5, 11, 1, 2],
                        [7, 9, 1, 7, 5],
                        [8, 0, 2, 5, 8]])
        self._check_contract(A)

    def test_for_first_element_equal_to_zero(self):
        # The leading 1x1 submatrix is zero, hence singular.
        A = np.array([[0, 3, 11],
                      [3, 12, 5],
                      [11, 5, 6]])
        self._check_contract(A)
|
151 |
+
|
152 |
+
|
153 |
+
class TestIterativeSubproblem:
|
154 |
+
|
155 |
+
def test_for_the_easy_case(self):
|
156 |
+
|
157 |
+
# `H` is chosen such that `g` is not orthogonal to the
|
158 |
+
# eigenvector associated with the smallest eigenvalue `s`.
|
159 |
+
H = [[10, 2, 3, 4],
|
160 |
+
[2, 1, 7, 1],
|
161 |
+
[3, 7, 1, 7],
|
162 |
+
[4, 1, 7, 2]]
|
163 |
+
g = [1, 1, 1, 1]
|
164 |
+
|
165 |
+
# Trust Radius
|
166 |
+
trust_radius = 1
|
167 |
+
|
168 |
+
# Solve Subproblem
|
169 |
+
subprob = IterativeSubproblem(x=0,
|
170 |
+
fun=lambda x: 0,
|
171 |
+
jac=lambda x: np.array(g),
|
172 |
+
hess=lambda x: np.array(H),
|
173 |
+
k_easy=1e-10,
|
174 |
+
k_hard=1e-10)
|
175 |
+
p, hits_boundary = subprob.solve(trust_radius)
|
176 |
+
|
177 |
+
assert_array_almost_equal(p, [0.00393332, -0.55260862,
|
178 |
+
0.67065477, -0.49480341])
|
179 |
+
assert_array_almost_equal(hits_boundary, True)
|
180 |
+
|
181 |
+
def test_for_the_hard_case(self):
|
182 |
+
|
183 |
+
# `H` is chosen such that `g` is orthogonal to the
|
184 |
+
# eigenvector associated with the smallest eigenvalue `s`.
|
185 |
+
H = [[10, 2, 3, 4],
|
186 |
+
[2, 1, 7, 1],
|
187 |
+
[3, 7, 1, 7],
|
188 |
+
[4, 1, 7, 2]]
|
189 |
+
g = [6.4852641521327437, 1, 1, 1]
|
190 |
+
s = -8.2151519874416614
|
191 |
+
|
192 |
+
# Trust Radius
|
193 |
+
trust_radius = 1
|
194 |
+
|
195 |
+
# Solve Subproblem
|
196 |
+
subprob = IterativeSubproblem(x=0,
|
197 |
+
fun=lambda x: 0,
|
198 |
+
jac=lambda x: np.array(g),
|
199 |
+
hess=lambda x: np.array(H),
|
200 |
+
k_easy=1e-10,
|
201 |
+
k_hard=1e-10)
|
202 |
+
p, hits_boundary = subprob.solve(trust_radius)
|
203 |
+
|
204 |
+
assert_array_almost_equal(-s, subprob.lambda_current)
|
205 |
+
|
206 |
+
def test_for_interior_convergence(self):
|
207 |
+
|
208 |
+
H = [[1.812159, 0.82687265, 0.21838879, -0.52487006, 0.25436988],
|
209 |
+
[0.82687265, 2.66380283, 0.31508988, -0.40144163, 0.08811588],
|
210 |
+
[0.21838879, 0.31508988, 2.38020726, -0.3166346, 0.27363867],
|
211 |
+
[-0.52487006, -0.40144163, -0.3166346, 1.61927182, -0.42140166],
|
212 |
+
[0.25436988, 0.08811588, 0.27363867, -0.42140166, 1.33243101]]
|
213 |
+
|
214 |
+
g = [0.75798952, 0.01421945, 0.33847612, 0.83725004, -0.47909534]
|
215 |
+
|
216 |
+
# Solve Subproblem
|
217 |
+
subprob = IterativeSubproblem(x=0,
|
218 |
+
fun=lambda x: 0,
|
219 |
+
jac=lambda x: np.array(g),
|
220 |
+
hess=lambda x: np.array(H))
|
221 |
+
p, hits_boundary = subprob.solve(1.1)
|
222 |
+
|
223 |
+
assert_array_almost_equal(p, [-0.68585435, 0.1222621, -0.22090999,
|
224 |
+
-0.67005053, 0.31586769])
|
225 |
+
assert_array_almost_equal(hits_boundary, False)
|
226 |
+
assert_array_almost_equal(subprob.lambda_current, 0)
|
227 |
+
assert_array_almost_equal(subprob.niter, 1)
|
228 |
+
|
229 |
+
def test_for_jac_equal_zero(self):
|
230 |
+
|
231 |
+
H = [[0.88547534, 2.90692271, 0.98440885, -0.78911503, -0.28035809],
|
232 |
+
[2.90692271, -0.04618819, 0.32867263, -0.83737945, 0.17116396],
|
233 |
+
[0.98440885, 0.32867263, -0.87355957, -0.06521957, -1.43030957],
|
234 |
+
[-0.78911503, -0.83737945, -0.06521957, -1.645709, -0.33887298],
|
235 |
+
[-0.28035809, 0.17116396, -1.43030957, -0.33887298, -1.68586978]]
|
236 |
+
|
237 |
+
g = [0, 0, 0, 0, 0]
|
238 |
+
|
239 |
+
# Solve Subproblem
|
240 |
+
subprob = IterativeSubproblem(x=0,
|
241 |
+
fun=lambda x: 0,
|
242 |
+
jac=lambda x: np.array(g),
|
243 |
+
hess=lambda x: np.array(H),
|
244 |
+
k_easy=1e-10,
|
245 |
+
k_hard=1e-10)
|
246 |
+
p, hits_boundary = subprob.solve(1.1)
|
247 |
+
|
248 |
+
assert_array_almost_equal(p, [0.06910534, -0.01432721,
|
249 |
+
-0.65311947, -0.23815972,
|
250 |
+
-0.84954934])
|
251 |
+
assert_array_almost_equal(hits_boundary, True)
|
252 |
+
|
253 |
+
def test_for_jac_very_close_to_zero(self):
|
254 |
+
|
255 |
+
H = [[0.88547534, 2.90692271, 0.98440885, -0.78911503, -0.28035809],
|
256 |
+
[2.90692271, -0.04618819, 0.32867263, -0.83737945, 0.17116396],
|
257 |
+
[0.98440885, 0.32867263, -0.87355957, -0.06521957, -1.43030957],
|
258 |
+
[-0.78911503, -0.83737945, -0.06521957, -1.645709, -0.33887298],
|
259 |
+
[-0.28035809, 0.17116396, -1.43030957, -0.33887298, -1.68586978]]
|
260 |
+
|
261 |
+
g = [0, 0, 0, 0, 1e-15]
|
262 |
+
|
263 |
+
# Solve Subproblem
|
264 |
+
subprob = IterativeSubproblem(x=0,
|
265 |
+
fun=lambda x: 0,
|
266 |
+
jac=lambda x: np.array(g),
|
267 |
+
hess=lambda x: np.array(H),
|
268 |
+
k_easy=1e-10,
|
269 |
+
k_hard=1e-10)
|
270 |
+
p, hits_boundary = subprob.solve(1.1)
|
271 |
+
|
272 |
+
assert_array_almost_equal(p, [0.06910534, -0.01432721,
|
273 |
+
-0.65311947, -0.23815972,
|
274 |
+
-0.84954934])
|
275 |
+
assert_array_almost_equal(hits_boundary, True)
|
276 |
+
|
277 |
+
def test_for_random_entries(self):
|
278 |
+
# Seed
|
279 |
+
np.random.seed(1)
|
280 |
+
|
281 |
+
# Dimension
|
282 |
+
n = 5
|
283 |
+
|
284 |
+
for case in ('easy', 'hard', 'jac_equal_zero'):
|
285 |
+
|
286 |
+
eig_limits = [(-20, -15),
|
287 |
+
(-10, -5),
|
288 |
+
(-10, 0),
|
289 |
+
(-5, 5),
|
290 |
+
(-10, 10),
|
291 |
+
(0, 10),
|
292 |
+
(5, 10),
|
293 |
+
(15, 20)]
|
294 |
+
|
295 |
+
for min_eig, max_eig in eig_limits:
|
296 |
+
# Generate random symmetric matrix H with
|
297 |
+
# eigenvalues between min_eig and max_eig.
|
298 |
+
H, g = random_entry(n, min_eig, max_eig, case)
|
299 |
+
|
300 |
+
# Trust radius
|
301 |
+
trust_radius_list = [0.1, 0.3, 0.6, 0.8, 1, 1.2, 3.3, 5.5, 10]
|
302 |
+
|
303 |
+
for trust_radius in trust_radius_list:
|
304 |
+
# Solve subproblem with very high accuracy
|
305 |
+
subprob_ac = IterativeSubproblem(0,
|
306 |
+
lambda x: 0,
|
307 |
+
lambda x: g,
|
308 |
+
lambda x: H,
|
309 |
+
k_easy=1e-10,
|
310 |
+
k_hard=1e-10)
|
311 |
+
|
312 |
+
p_ac, hits_boundary_ac = subprob_ac.solve(trust_radius)
|
313 |
+
|
314 |
+
# Compute objective function value
|
315 |
+
J_ac = 1/2*np.dot(p_ac, np.dot(H, p_ac))+np.dot(g, p_ac)
|
316 |
+
|
317 |
+
stop_criteria = [(0.1, 2),
|
318 |
+
(0.5, 1.1),
|
319 |
+
(0.9, 1.01)]
|
320 |
+
|
321 |
+
for k_opt, k_trf in stop_criteria:
|
322 |
+
|
323 |
+
# k_easy and k_hard computed in function
|
324 |
+
# of k_opt and k_trf accordingly to
|
325 |
+
# Conn, A. R., Gould, N. I., & Toint, P. L. (2000).
|
326 |
+
# "Trust region methods". Siam. p. 197.
|
327 |
+
k_easy = min(k_trf-1,
|
328 |
+
1-np.sqrt(k_opt))
|
329 |
+
k_hard = 1-k_opt
|
330 |
+
|
331 |
+
# Solve subproblem
|
332 |
+
subprob = IterativeSubproblem(0,
|
333 |
+
lambda x: 0,
|
334 |
+
lambda x: g,
|
335 |
+
lambda x: H,
|
336 |
+
k_easy=k_easy,
|
337 |
+
k_hard=k_hard)
|
338 |
+
p, hits_boundary = subprob.solve(trust_radius)
|
339 |
+
|
340 |
+
# Compute objective function value
|
341 |
+
J = 1/2*np.dot(p, np.dot(H, p))+np.dot(g, p)
|
342 |
+
|
343 |
+
# Check if it respect k_trf
|
344 |
+
if hits_boundary:
|
345 |
+
assert_array_equal(np.abs(norm(p)-trust_radius) <=
|
346 |
+
(k_trf-1)*trust_radius, True)
|
347 |
+
else:
|
348 |
+
assert_equal(norm(p) <= trust_radius, True)
|
349 |
+
|
350 |
+
# Check if it respect k_opt
|
351 |
+
assert_equal(J <= k_opt*J_ac, True)
|
352 |
+
|
venv/lib/python3.10/site-packages/scipy/signal/__pycache__/_arraytools.cpython-310.pyc
ADDED
Binary file (8 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/signal/__pycache__/_bsplines.cpython-310.pyc
ADDED
Binary file (14.4 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/signal/__pycache__/_czt.cpython-310.pyc
ADDED
Binary file (19.6 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/signal/__pycache__/_savitzky_golay.cpython-310.pyc
ADDED
Binary file (10.9 kB). View file
|
|