Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- ckpts/universal/global_step40/zero/13.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step40/zero/5.attention.query_key_value.weight/fp32.pt +3 -0
- ckpts/universal/global_step40/zero/5.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step40/zero/5.mlp.dense_4h_to_h.weight/exp_avg_sq.pt +3 -0
- venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_basinhopping.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_cobyla_py.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_dcsrch.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_lbfgsb_py.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_linprog_doc.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_linprog_rs.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_linprog_simplex.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_linprog_util.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_milp.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_minimize.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_nnls.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_numdiff.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_optimize.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_root.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_slsqp_py.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_tnc.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_trustregion_dogleg.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_trustregion_exact.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_tstutils.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_zeros_py.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/lbfgsb.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/minpack2.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/moduleTNC.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/zeros.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/_highs/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HConst.pxd +106 -0
- venv/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/Highs.pxd +56 -0
- venv/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsIO.pxd +20 -0
- venv/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsLp.pxd +46 -0
- venv/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsLpUtils.pxd +9 -0
- venv/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsModelUtils.pxd +10 -0
- venv/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/highs_c_api.pxd +7 -0
- venv/lib/python3.10/site-packages/scipy/optimize/_lsq/__init__.py +5 -0
- venv/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/bvls.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/common.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/dogbox.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/least_squares.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/lsq_linear.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/trf.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/trf_linear.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/_lsq/bvls.py +183 -0
- venv/lib/python3.10/site-packages/scipy/optimize/_lsq/common.py +733 -0
- venv/lib/python3.10/site-packages/scipy/optimize/_lsq/dogbox.py +331 -0
- venv/lib/python3.10/site-packages/scipy/optimize/_lsq/givens_elimination.cpython-310-x86_64-linux-gnu.so +0 -0
- venv/lib/python3.10/site-packages/scipy/optimize/_lsq/least_squares.py +967 -0
ckpts/universal/global_step40/zero/13.mlp.dense_4h_to_h.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4de0efaad23d85cb79c51079cf5700a051fa38ce250bc9133ba6a924d55ecf90
+size 33555612

ckpts/universal/global_step40/zero/5.attention.query_key_value.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:27bbb6e3cede6ed8b868058d86be8427f120ed3727f275337b275178fbcb5ab0
+size 50332749

ckpts/universal/global_step40/zero/5.mlp.dense_4h_to_h.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a68e0196e0a00a7c2d43e5e650f8d857813265c01cfb93a7957becaf88036b79
+size 33555612

ckpts/universal/global_step40/zero/5.mlp.dense_4h_to_h.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a99c781e0126feca843c03f4b77f53ac955b98dac7040507ffbfd595128da3ac
+size 33555627
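The four checkpoint diffs above are Git LFS pointer files: the repository stores only a three-line stub (spec version, SHA-256 object id, byte size) while the tensor data lives in LFS storage. A minimal sketch of a parser for this stub format, using a hypothetical helper name not present in this repo:

def parse_lfs_pointer(text):
    """Parse a Git LFS pointer stub into a dict of its key/value fields."""
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:4de0efaad23d85cb79c51079cf5700a051fa38ce250bc9133ba6a924d55ecf90\n"
    "size 33555612\n"
)
print(parse_lfs_pointer(pointer)["size"])  # prints: 33555612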
venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_basinhopping.cpython-310.pyc ADDED (binary, 26.9 kB)
venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_cobyla_py.cpython-310.pyc ADDED (binary, 9.96 kB)
venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_dcsrch.cpython-310.pyc ADDED (binary, 16.9 kB)
venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_lbfgsb_py.cpython-310.pyc ADDED (binary, 16.8 kB)
venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_linprog_doc.cpython-310.pyc ADDED (binary, 61.2 kB)
venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_linprog_rs.cpython-310.pyc ADDED (binary, 16.4 kB)
venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_linprog_simplex.cpython-310.pyc ADDED (binary, 22 kB)
venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_linprog_util.cpython-310.pyc ADDED (binary, 46.4 kB)
venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_milp.cpython-310.pyc ADDED (binary, 12.9 kB)
venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_minimize.cpython-310.pyc ADDED (binary, 40.2 kB)
venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_nnls.cpython-310.pyc ADDED (binary, 4.65 kB)
venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_numdiff.cpython-310.pyc ADDED (binary, 23.6 kB)
venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_optimize.cpython-310.pyc ADDED (binary, 114 kB)
venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_root.cpython-310.pyc ADDED (binary, 25.3 kB)
venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_slsqp_py.cpython-310.pyc ADDED (binary, 15 kB)
venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_tnc.cpython-310.pyc ADDED (binary, 13.4 kB)
venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_trustregion_dogleg.cpython-310.pyc ADDED (binary, 3.59 kB)
venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_trustregion_exact.cpython-310.pyc ADDED (binary, 8.92 kB)
venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_tstutils.cpython-310.pyc ADDED (binary, 29.5 kB)
venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/_zeros_py.cpython-310.pyc ADDED (binary, 42.8 kB)
venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/lbfgsb.cpython-310.pyc ADDED (binary, 729 Bytes)
venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/minpack2.cpython-310.pyc ADDED (binary, 620 Bytes)
venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/moduleTNC.cpython-310.pyc ADDED (binary, 599 Bytes)
venv/lib/python3.10/site-packages/scipy/optimize/__pycache__/zeros.cpython-310.pyc ADDED (binary, 767 Bytes)
venv/lib/python3.10/site-packages/scipy/optimize/_highs/__pycache__/__init__.cpython-310.pyc ADDED (binary, 189 Bytes)
venv/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HConst.pxd
ADDED
@@ -0,0 +1,106 @@
+# cython: language_level=3
+
+from libcpp cimport bool
+from libcpp.string cimport string
+
+cdef extern from "HConst.h" nogil:
+
+    const int HIGHS_CONST_I_INF "kHighsIInf"
+    const double HIGHS_CONST_INF "kHighsInf"
+    const double kHighsTiny
+    const double kHighsZero
+    const int kHighsThreadLimit
+
+    cdef enum HighsDebugLevel:
+        HighsDebugLevel_kHighsDebugLevelNone "kHighsDebugLevelNone" = 0
+        HighsDebugLevel_kHighsDebugLevelCheap "kHighsDebugLevelCheap"
+        HighsDebugLevel_kHighsDebugLevelCostly "kHighsDebugLevelCostly"
+        HighsDebugLevel_kHighsDebugLevelExpensive "kHighsDebugLevelExpensive"
+        HighsDebugLevel_kHighsDebugLevelMin "kHighsDebugLevelMin" = HighsDebugLevel_kHighsDebugLevelNone
+        HighsDebugLevel_kHighsDebugLevelMax "kHighsDebugLevelMax" = HighsDebugLevel_kHighsDebugLevelExpensive
+
+    ctypedef enum HighsModelStatus:
+        HighsModelStatusNOTSET "HighsModelStatus::kNotset" = 0
+        HighsModelStatusLOAD_ERROR "HighsModelStatus::kLoadError"
+        HighsModelStatusMODEL_ERROR "HighsModelStatus::kModelError"
+        HighsModelStatusPRESOLVE_ERROR "HighsModelStatus::kPresolveError"
+        HighsModelStatusSOLVE_ERROR "HighsModelStatus::kSolveError"
+        HighsModelStatusPOSTSOLVE_ERROR "HighsModelStatus::kPostsolveError"
+        HighsModelStatusMODEL_EMPTY "HighsModelStatus::kModelEmpty"
+        HighsModelStatusOPTIMAL "HighsModelStatus::kOptimal"
+        HighsModelStatusINFEASIBLE "HighsModelStatus::kInfeasible"
+        HighsModelStatus_UNBOUNDED_OR_INFEASIBLE "HighsModelStatus::kUnboundedOrInfeasible"
+        HighsModelStatusUNBOUNDED "HighsModelStatus::kUnbounded"
+        HighsModelStatusREACHED_DUAL_OBJECTIVE_VALUE_UPPER_BOUND "HighsModelStatus::kObjectiveBound"
+        HighsModelStatusREACHED_OBJECTIVE_TARGET "HighsModelStatus::kObjectiveTarget"
+        HighsModelStatusREACHED_TIME_LIMIT "HighsModelStatus::kTimeLimit"
+        HighsModelStatusREACHED_ITERATION_LIMIT "HighsModelStatus::kIterationLimit"
+        HighsModelStatusUNKNOWN "HighsModelStatus::kUnknown"
+        HighsModelStatusHIGHS_MODEL_STATUS_MIN "HighsModelStatus::kMin" = HighsModelStatusNOTSET
+        HighsModelStatusHIGHS_MODEL_STATUS_MAX "HighsModelStatus::kMax" = HighsModelStatusUNKNOWN
+
+    cdef enum HighsBasisStatus:
+        HighsBasisStatusLOWER "HighsBasisStatus::kLower" = 0,  # (slack) variable is at its lower bound [including fixed variables]
+        HighsBasisStatusBASIC "HighsBasisStatus::kBasic"  # (slack) variable is basic
+        HighsBasisStatusUPPER "HighsBasisStatus::kUpper"  # (slack) variable is at its upper bound
+        HighsBasisStatusZERO "HighsBasisStatus::kZero"  # free variable is non-basic and set to zero
+        HighsBasisStatusNONBASIC "HighsBasisStatus::kNonbasic"  # nonbasic with no specific bound information - useful for users and postsolve
+
+    cdef enum SolverOption:
+        SOLVER_OPTION_SIMPLEX "SolverOption::SOLVER_OPTION_SIMPLEX" = -1
+        SOLVER_OPTION_CHOOSE "SolverOption::SOLVER_OPTION_CHOOSE"
+        SOLVER_OPTION_IPM "SolverOption::SOLVER_OPTION_IPM"
+
+    cdef enum PrimalDualStatus:
+        PrimalDualStatusSTATUS_NOT_SET "PrimalDualStatus::STATUS_NOT_SET" = -1
+        PrimalDualStatusSTATUS_MIN "PrimalDualStatus::STATUS_MIN" = PrimalDualStatusSTATUS_NOT_SET
+        PrimalDualStatusSTATUS_NO_SOLUTION "PrimalDualStatus::STATUS_NO_SOLUTION"
+        PrimalDualStatusSTATUS_UNKNOWN "PrimalDualStatus::STATUS_UNKNOWN"
+        PrimalDualStatusSTATUS_INFEASIBLE_POINT "PrimalDualStatus::STATUS_INFEASIBLE_POINT"
+        PrimalDualStatusSTATUS_FEASIBLE_POINT "PrimalDualStatus::STATUS_FEASIBLE_POINT"
+        PrimalDualStatusSTATUS_MAX "PrimalDualStatus::STATUS_MAX" = PrimalDualStatusSTATUS_FEASIBLE_POINT
+
+    cdef enum HighsOptionType:
+        HighsOptionTypeBOOL "HighsOptionType::kBool" = 0
+        HighsOptionTypeINT "HighsOptionType::kInt"
+        HighsOptionTypeDOUBLE "HighsOptionType::kDouble"
+        HighsOptionTypeSTRING "HighsOptionType::kString"
+
+    # workaround for lack of enum class support in Cython < 3.x
+    # cdef enum class ObjSense(int):
+    #     ObjSenseMINIMIZE "ObjSense::kMinimize" = 1
+    #     ObjSenseMAXIMIZE "ObjSense::kMaximize" = -1
+
+    cdef cppclass ObjSense:
+        pass
+
+    cdef ObjSense ObjSenseMINIMIZE "ObjSense::kMinimize"
+    cdef ObjSense ObjSenseMAXIMIZE "ObjSense::kMaximize"
+
+    # cdef enum class MatrixFormat(int):
+    #     MatrixFormatkColwise "MatrixFormat::kColwise" = 1
+    #     MatrixFormatkRowwise "MatrixFormat::kRowwise"
+    #     MatrixFormatkRowwisePartitioned "MatrixFormat::kRowwisePartitioned"
+
+    cdef cppclass MatrixFormat:
+        pass
+
+    cdef MatrixFormat MatrixFormatkColwise "MatrixFormat::kColwise"
+    cdef MatrixFormat MatrixFormatkRowwise "MatrixFormat::kRowwise"
+    cdef MatrixFormat MatrixFormatkRowwisePartitioned "MatrixFormat::kRowwisePartitioned"
+
+    # cdef enum class HighsVarType(int):
+    #     kContinuous "HighsVarType::kContinuous"
+    #     kInteger "HighsVarType::kInteger"
+    #     kSemiContinuous "HighsVarType::kSemiContinuous"
+    #     kSemiInteger "HighsVarType::kSemiInteger"
+    #     kImplicitInteger "HighsVarType::kImplicitInteger"
+
+    cdef cppclass HighsVarType:
+        pass
+
+    cdef HighsVarType kContinuous "HighsVarType::kContinuous"
+    cdef HighsVarType kInteger "HighsVarType::kInteger"
+    cdef HighsVarType kSemiContinuous "HighsVarType::kSemiContinuous"
+    cdef HighsVarType kSemiInteger "HighsVarType::kSemiInteger"
+    cdef HighsVarType kImplicitInteger "HighsVarType::kImplicitInteger"
venv/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/Highs.pxd
ADDED
@@ -0,0 +1,56 @@
+# cython: language_level=3
+
+from libc.stdio cimport FILE
+
+from libcpp cimport bool
+from libcpp.string cimport string
+
+from .HighsStatus cimport HighsStatus
+from .HighsOptions cimport HighsOptions
+from .HighsInfo cimport HighsInfo
+from .HighsLp cimport (
+    HighsLp,
+    HighsSolution,
+    HighsBasis,
+    ObjSense,
+)
+from .HConst cimport HighsModelStatus
+
+cdef extern from "Highs.h":
+    # From HiGHS/src/Highs.h
+    cdef cppclass Highs:
+        HighsStatus passHighsOptions(const HighsOptions& options)
+        HighsStatus passModel(const HighsLp& lp)
+        HighsStatus run()
+        HighsStatus setHighsLogfile(FILE* logfile)
+        HighsStatus setHighsOutput(FILE* output)
+        HighsStatus writeHighsOptions(const string filename, const bool report_only_non_default_values = true)
+
+        # split up for cython below
+        #const HighsModelStatus& getModelStatus(const bool scaled_model = False) const
+        const HighsModelStatus & getModelStatus() const
+
+        const HighsInfo& getHighsInfo "getInfo" () const
+        string modelStatusToString(const HighsModelStatus model_status) const
+        #HighsStatus getHighsInfoValue(const string& info, int& value)
+        HighsStatus getHighsInfoValue(const string& info, double& value) const
+        const HighsOptions& getHighsOptions() const
+
+        const HighsLp& getLp() const
+
+        HighsStatus writeSolution(const string filename, const bool pretty) const
+
+        HighsStatus setBasis()
+        const HighsSolution& getSolution() const
+        const HighsBasis& getBasis() const
+
+        bool changeObjectiveSense(const ObjSense sense)
+
+        HighsStatus setHighsOptionValueBool "setOptionValue" (const string & option, const bool value)
+        HighsStatus setHighsOptionValueInt "setOptionValue" (const string & option, const int value)
+        HighsStatus setHighsOptionValueStr "setOptionValue" (const string & option, const string & value)
+        HighsStatus setHighsOptionValueDbl "setOptionValue" (const string & option, const double value)
+
+        string primalDualStatusToString(const int primal_dual_status)
+
+        void resetGlobalScheduler(bool blocking)
venv/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsIO.pxd
ADDED
@@ -0,0 +1,20 @@
+# cython: language_level=3
+
+
+cdef extern from "HighsIO.h" nogil:
+    # workaround for lack of enum class support in Cython < 3.x
+    # cdef enum class HighsLogType(int):
+    #     kInfo "HighsLogType::kInfo" = 1
+    #     kDetailed "HighsLogType::kDetailed"
+    #     kVerbose "HighsLogType::kVerbose"
+    #     kWarning "HighsLogType::kWarning"
+    #     kError "HighsLogType::kError"
+
+    cdef cppclass HighsLogType:
+        pass
+
+    cdef HighsLogType kInfo "HighsLogType::kInfo"
+    cdef HighsLogType kDetailed "HighsLogType::kDetailed"
+    cdef HighsLogType kVerbose "HighsLogType::kVerbose"
+    cdef HighsLogType kWarning "HighsLogType::kWarning"
+    cdef HighsLogType kError "HighsLogType::kError"
venv/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsLp.pxd
ADDED
@@ -0,0 +1,46 @@
+# cython: language_level=3
+
+from libcpp cimport bool
+from libcpp.string cimport string
+from libcpp.vector cimport vector
+
+from .HConst cimport HighsBasisStatus, ObjSense, HighsVarType
+from .HighsSparseMatrix cimport HighsSparseMatrix
+
+
+cdef extern from "HighsLp.h" nogil:
+    # From HiGHS/src/lp_data/HighsLp.h
+    cdef cppclass HighsLp:
+        int num_col_
+        int num_row_
+
+        vector[double] col_cost_
+        vector[double] col_lower_
+        vector[double] col_upper_
+        vector[double] row_lower_
+        vector[double] row_upper_
+
+        HighsSparseMatrix a_matrix_
+
+        ObjSense sense_
+        double offset_
+
+        string model_name_
+
+        vector[string] row_names_
+        vector[string] col_names_
+
+        vector[HighsVarType] integrality_
+
+        bool isMip() const
+
+    cdef cppclass HighsSolution:
+        vector[double] col_value
+        vector[double] col_dual
+        vector[double] row_value
+        vector[double] row_dual
+
+    cdef cppclass HighsBasis:
+        bool valid_
+        vector[HighsBasisStatus] col_status
+        vector[HighsBasisStatus] row_status
venv/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsLpUtils.pxd
ADDED
@@ -0,0 +1,9 @@
+# cython: language_level=3
+
+from .HighsStatus cimport HighsStatus
+from .HighsLp cimport HighsLp
+from .HighsOptions cimport HighsOptions
+
+cdef extern from "HighsLpUtils.h" nogil:
+    # From HiGHS/src/lp_data/HighsLpUtils.h
+    HighsStatus assessLp(HighsLp& lp, const HighsOptions& options)
venv/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsModelUtils.pxd
ADDED
@@ -0,0 +1,10 @@
+# cython: language_level=3
+
+from libcpp.string cimport string
+
+from .HConst cimport HighsModelStatus
+
+cdef extern from "HighsModelUtils.h" nogil:
+    # From HiGHS/src/lp_data/HighsModelUtils.h
+    string utilHighsModelStatusToString(const HighsModelStatus model_status)
+    string utilBasisStatusToString(const int primal_dual_status)
venv/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/highs_c_api.pxd
ADDED
@@ -0,0 +1,7 @@
+# cython: language_level=3
+
+cdef extern from "highs_c_api.h" nogil:
+    int Highs_passLp(void* highs, int numcol, int numrow, int numnz,
+                     double* colcost, double* collower, double* colupper,
+                     double* rowlower, double* rowupper,
+                     int* astart, int* aindex, double* avalue)
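The .pxd files above are Cython declarations for the vendored HiGHS solver; inside scipy they sit behind the "highs" methods of scipy.optimize.linprog. A minimal sketch of that public entry point (illustrative data only, not taken from this diff):

import numpy as np
from scipy.optimize import linprog

# Minimize c @ x subject to A_ub @ x <= b_ub with x >= 0.
c = np.array([-1.0, -2.0])
A_ub = np.array([[1.0, 1.0], [1.0, -1.0]])
b_ub = np.array([4.0, 2.0])

res = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=[(0, None), (0, None)],
              method="highs")
print(res.x, res.fun)  # optimal point and objective value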
venv/lib/python3.10/site-packages/scipy/optimize/_lsq/__init__.py
ADDED
@@ -0,0 +1,5 @@
+"""This module contains least-squares algorithms."""
+from .least_squares import least_squares
+from .lsq_linear import lsq_linear
+
+__all__ = ['least_squares', 'lsq_linear']
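The package re-exports its two public entry points here. A minimal sketch of least_squares on a toy exponential fit with box bounds (illustrative data only):

import numpy as np
from scipy.optimize import least_squares

t = np.linspace(0, 1, 20)
y = 2.0 * np.exp(0.5 * t)

def residuals(p):
    # Residuals of the model y = p[0] * exp(p[1] * t).
    return p[0] * np.exp(p[1] * t) - y

res = least_squares(residuals, x0=[1.0, 0.0], bounds=([0, -2], [10, 2]))
print(res.x)  # close to [2.0, 0.5]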
venv/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/__init__.cpython-310.pyc ADDED (binary, 348 Bytes)
venv/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/bvls.cpython-310.pyc ADDED (binary, 2.73 kB)
venv/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/common.cpython-310.pyc ADDED (binary, 19.5 kB)
venv/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/dogbox.cpython-310.pyc ADDED (binary, 8.2 kB)
venv/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/least_squares.cpython-310.pyc ADDED (binary, 36.6 kB)
venv/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/lsq_linear.cpython-310.pyc ADDED (binary, 14 kB)
venv/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/trf.cpython-310.pyc ADDED (binary, 12.1 kB)
venv/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/trf_linear.cpython-310.pyc ADDED (binary, 5.66 kB)
venv/lib/python3.10/site-packages/scipy/optimize/_lsq/bvls.py
ADDED
@@ -0,0 +1,183 @@
+"""Bounded-variable least-squares algorithm."""
+import numpy as np
+from numpy.linalg import norm, lstsq
+from scipy.optimize import OptimizeResult
+
+from .common import print_header_linear, print_iteration_linear
+
+
+def compute_kkt_optimality(g, on_bound):
+    """Compute the maximum violation of KKT conditions."""
+    g_kkt = g * on_bound
+    free_set = on_bound == 0
+    g_kkt[free_set] = np.abs(g[free_set])
+    return np.max(g_kkt)
+
+
+def bvls(A, b, x_lsq, lb, ub, tol, max_iter, verbose, rcond=None):
+    m, n = A.shape
+
+    x = x_lsq.copy()
+    on_bound = np.zeros(n)
+
+    mask = x <= lb
+    x[mask] = lb[mask]
+    on_bound[mask] = -1
+
+    mask = x >= ub
+    x[mask] = ub[mask]
+    on_bound[mask] = 1
+
+    free_set = on_bound == 0
+    active_set = ~free_set
+    free_set, = np.nonzero(free_set)
+
+    r = A.dot(x) - b
+    cost = 0.5 * np.dot(r, r)
+    initial_cost = cost
+    g = A.T.dot(r)
+
+    cost_change = None
+    step_norm = None
+    iteration = 0
+
+    if verbose == 2:
+        print_header_linear()
+
+    # This is the initialization loop. The requirement is that the
+    # least-squares solution on free variables is feasible before BVLS starts.
+    # One possible initialization is to set all variables to lower or upper
+    # bounds, but many iterations may be required from this state later on.
+    # The implemented ad-hoc procedure which intuitively should give a better
+    # initial state: find the least-squares solution on current free variables,
+    # if its feasible then stop, otherwise, set violating variables to
+    # corresponding bounds and continue on the reduced set of free variables.
+
+    while free_set.size > 0:
+        if verbose == 2:
+            optimality = compute_kkt_optimality(g, on_bound)
+            print_iteration_linear(iteration, cost, cost_change, step_norm,
+                                   optimality)
+
+        iteration += 1
+        x_free_old = x[free_set].copy()
+
+        A_free = A[:, free_set]
+        b_free = b - A.dot(x * active_set)
+        z = lstsq(A_free, b_free, rcond=rcond)[0]
+
+        lbv = z < lb[free_set]
+        ubv = z > ub[free_set]
+        v = lbv | ubv
+
+        if np.any(lbv):
+            ind = free_set[lbv]
+            x[ind] = lb[ind]
+            active_set[ind] = True
+            on_bound[ind] = -1
+
+        if np.any(ubv):
+            ind = free_set[ubv]
+            x[ind] = ub[ind]
+            active_set[ind] = True
+            on_bound[ind] = 1
+
+        ind = free_set[~v]
+        x[ind] = z[~v]
+
+        r = A.dot(x) - b
+        cost_new = 0.5 * np.dot(r, r)
+        cost_change = cost - cost_new
+        cost = cost_new
+        g = A.T.dot(r)
+        step_norm = norm(x[free_set] - x_free_old)
+
+        if np.any(v):
+            free_set = free_set[~v]
+        else:
+            break
+
+    if max_iter is None:
+        max_iter = n
+    max_iter += iteration
+
+    termination_status = None
+
+    # Main BVLS loop.
+
+    optimality = compute_kkt_optimality(g, on_bound)
+    for iteration in range(iteration, max_iter):  # BVLS Loop A
+        if verbose == 2:
+            print_iteration_linear(iteration, cost, cost_change,
+                                   step_norm, optimality)
+
+        if optimality < tol:
+            termination_status = 1
+
+        if termination_status is not None:
+            break
+
+        move_to_free = np.argmax(g * on_bound)
+        on_bound[move_to_free] = 0
+
+        while True:  # BVLS Loop B
+
+            free_set = on_bound == 0
+            active_set = ~free_set
+            free_set, = np.nonzero(free_set)
+
+            x_free = x[free_set]
+            x_free_old = x_free.copy()
+            lb_free = lb[free_set]
+            ub_free = ub[free_set]
+
+            A_free = A[:, free_set]
+            b_free = b - A.dot(x * active_set)
+            z = lstsq(A_free, b_free, rcond=rcond)[0]
+
+            lbv, = np.nonzero(z < lb_free)
+            ubv, = np.nonzero(z > ub_free)
+            v = np.hstack((lbv, ubv))
+
+            if v.size > 0:
+                alphas = np.hstack((
+                    lb_free[lbv] - x_free[lbv],
+                    ub_free[ubv] - x_free[ubv])) / (z[v] - x_free[v])
+
+                i = np.argmin(alphas)
+                i_free = v[i]
+                alpha = alphas[i]
+
+                x_free *= 1 - alpha
+                x_free += alpha * z
+                x[free_set] = x_free
+
+                if i < lbv.size:
+                    on_bound[free_set[i_free]] = -1
+                else:
+                    on_bound[free_set[i_free]] = 1
+            else:
+                x_free = z
+                x[free_set] = x_free
+                break
+
+        step_norm = norm(x_free - x_free_old)
+
+        r = A.dot(x) - b
+        cost_new = 0.5 * np.dot(r, r)
+        cost_change = cost - cost_new
+
+        if cost_change < tol * cost:
+            termination_status = 2
+        cost = cost_new
+
+        g = A.T.dot(r)
+        optimality = compute_kkt_optimality(g, on_bound)
+
+    if termination_status is None:
+        termination_status = 0
+
+    return OptimizeResult(
+        x=x, fun=r, cost=cost, optimality=optimality, active_mask=on_bound,
+        nit=iteration + 1, status=termination_status,
+        initial_cost=initial_cost)
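The bvls function above is not called directly in normal use; it is the dense solver reached through the public scipy.optimize.lsq_linear with method='bvls'. A minimal sketch (illustrative data only):

import numpy as np
from scipy.optimize import lsq_linear

rng = np.random.default_rng(0)
A = rng.standard_normal((20, 5))
b = rng.standard_normal(20)

# Solve min 0.5 * ||A x - b||**2 subject to 0 <= x <= 1 via BVLS.
res = lsq_linear(A, b, bounds=(0, 1), method="bvls")
print(res.x, res.status)  # solution and termination status code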
venv/lib/python3.10/site-packages/scipy/optimize/_lsq/common.py
ADDED
@@ -0,0 +1,733 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Functions used by least-squares algorithms."""
|
2 |
+
from math import copysign
|
3 |
+
|
4 |
+
import numpy as np
|
5 |
+
from numpy.linalg import norm
|
6 |
+
|
7 |
+
from scipy.linalg import cho_factor, cho_solve, LinAlgError
|
8 |
+
from scipy.sparse import issparse
|
9 |
+
from scipy.sparse.linalg import LinearOperator, aslinearoperator
|
10 |
+
|
11 |
+
|
12 |
+
EPS = np.finfo(float).eps
|
13 |
+
|
14 |
+
|
15 |
+
# Functions related to a trust-region problem.
|
16 |
+
|
17 |
+
|
18 |
+
def intersect_trust_region(x, s, Delta):
|
19 |
+
"""Find the intersection of a line with the boundary of a trust region.
|
20 |
+
|
21 |
+
This function solves the quadratic equation with respect to t
|
22 |
+
||(x + s*t)||**2 = Delta**2.
|
23 |
+
|
24 |
+
Returns
|
25 |
+
-------
|
26 |
+
t_neg, t_pos : tuple of float
|
27 |
+
Negative and positive roots.
|
28 |
+
|
29 |
+
Raises
|
30 |
+
------
|
31 |
+
ValueError
|
32 |
+
If `s` is zero or `x` is not within the trust region.
|
33 |
+
"""
|
34 |
+
a = np.dot(s, s)
|
35 |
+
if a == 0:
|
36 |
+
raise ValueError("`s` is zero.")
|
37 |
+
|
38 |
+
b = np.dot(x, s)
|
39 |
+
|
40 |
+
c = np.dot(x, x) - Delta**2
|
41 |
+
if c > 0:
|
42 |
+
raise ValueError("`x` is not within the trust region.")
|
43 |
+
|
44 |
+
d = np.sqrt(b*b - a*c) # Root from one fourth of the discriminant.
|
45 |
+
|
46 |
+
# Computations below avoid loss of significance, see "Numerical Recipes".
|
47 |
+
q = -(b + copysign(d, b))
|
48 |
+
t1 = q / a
|
49 |
+
t2 = c / q
|
50 |
+
|
51 |
+
if t1 < t2:
|
52 |
+
return t1, t2
|
53 |
+
else:
|
54 |
+
return t2, t1
|
55 |
+
|
56 |
+
|
57 |
+
def solve_lsq_trust_region(n, m, uf, s, V, Delta, initial_alpha=None,
|
58 |
+
rtol=0.01, max_iter=10):
|
59 |
+
"""Solve a trust-region problem arising in least-squares minimization.
|
60 |
+
|
61 |
+
This function implements a method described by J. J. More [1]_ and used
|
62 |
+
in MINPACK, but it relies on a single SVD of Jacobian instead of series
|
63 |
+
of Cholesky decompositions. Before running this function, compute:
|
64 |
+
``U, s, VT = svd(J, full_matrices=False)``.
|
65 |
+
|
66 |
+
Parameters
|
67 |
+
----------
|
68 |
+
n : int
|
69 |
+
Number of variables.
|
70 |
+
m : int
|
71 |
+
Number of residuals.
|
72 |
+
uf : ndarray
|
73 |
+
Computed as U.T.dot(f).
|
74 |
+
s : ndarray
|
75 |
+
Singular values of J.
|
76 |
+
V : ndarray
|
77 |
+
Transpose of VT.
|
78 |
+
Delta : float
|
79 |
+
Radius of a trust region.
|
80 |
+
initial_alpha : float, optional
|
81 |
+
Initial guess for alpha, which might be available from a previous
|
82 |
+
iteration. If None, determined automatically.
|
83 |
+
rtol : float, optional
|
84 |
+
Stopping tolerance for the root-finding procedure. Namely, the
|
85 |
+
solution ``p`` will satisfy ``abs(norm(p) - Delta) < rtol * Delta``.
|
86 |
+
max_iter : int, optional
|
87 |
+
Maximum allowed number of iterations for the root-finding procedure.
|
88 |
+
|
89 |
+
Returns
|
90 |
+
-------
|
91 |
+
p : ndarray, shape (n,)
|
92 |
+
Found solution of a trust-region problem.
|
93 |
+
alpha : float
|
94 |
+
Positive value such that (J.T*J + alpha*I)*p = -J.T*f.
|
95 |
+
Sometimes called Levenberg-Marquardt parameter.
|
96 |
+
n_iter : int
|
97 |
+
Number of iterations made by root-finding procedure. Zero means
|
98 |
+
that Gauss-Newton step was selected as the solution.
|
99 |
+
|
100 |
+
References
|
101 |
+
----------
|
102 |
+
.. [1] More, J. J., "The Levenberg-Marquardt Algorithm: Implementation
|
103 |
+
and Theory," Numerical Analysis, ed. G. A. Watson, Lecture Notes
|
104 |
+
in Mathematics 630, Springer Verlag, pp. 105-116, 1977.
|
105 |
+
"""
|
106 |
+
def phi_and_derivative(alpha, suf, s, Delta):
|
107 |
+
"""Function of which to find zero.
|
108 |
+
|
109 |
+
It is defined as "norm of regularized (by alpha) least-squares
|
110 |
+
solution minus `Delta`". Refer to [1]_.
|
111 |
+
"""
|
112 |
+
denom = s**2 + alpha
|
113 |
+
p_norm = norm(suf / denom)
|
114 |
+
phi = p_norm - Delta
|
115 |
+
phi_prime = -np.sum(suf ** 2 / denom**3) / p_norm
|
116 |
+
return phi, phi_prime
|
117 |
+
|
118 |
+
suf = s * uf
|
119 |
+
|
120 |
+
# Check if J has full rank and try Gauss-Newton step.
|
121 |
+
if m >= n:
|
122 |
+
threshold = EPS * m * s[0]
|
123 |
+
full_rank = s[-1] > threshold
|
124 |
+
else:
|
125 |
+
full_rank = False
|
126 |
+
|
127 |
+
if full_rank:
|
128 |
+
p = -V.dot(uf / s)
|
129 |
+
if norm(p) <= Delta:
|
130 |
+
return p, 0.0, 0
|
131 |
+
|
132 |
+
alpha_upper = norm(suf) / Delta
|
133 |
+
|
134 |
+
if full_rank:
|
135 |
+
phi, phi_prime = phi_and_derivative(0.0, suf, s, Delta)
|
136 |
+
alpha_lower = -phi / phi_prime
|
137 |
+
else:
|
138 |
+
alpha_lower = 0.0
|
139 |
+
|
140 |
+
if initial_alpha is None or not full_rank and initial_alpha == 0:
|
141 |
+
alpha = max(0.001 * alpha_upper, (alpha_lower * alpha_upper)**0.5)
|
142 |
+
else:
|
143 |
+
alpha = initial_alpha
|
144 |
+
|
145 |
+
for it in range(max_iter):
|
146 |
+
if alpha < alpha_lower or alpha > alpha_upper:
|
147 |
+
alpha = max(0.001 * alpha_upper, (alpha_lower * alpha_upper)**0.5)
|
148 |
+
|
149 |
+
phi, phi_prime = phi_and_derivative(alpha, suf, s, Delta)
|
150 |
+
|
151 |
+
if phi < 0:
|
152 |
+
alpha_upper = alpha
|
153 |
+
|
154 |
+
ratio = phi / phi_prime
|
155 |
+
alpha_lower = max(alpha_lower, alpha - ratio)
|
156 |
+
alpha -= (phi + Delta) * ratio / Delta
|
157 |
+
|
158 |
+
if np.abs(phi) < rtol * Delta:
|
159 |
+
break
|
160 |
+
|
161 |
+
p = -V.dot(suf / (s**2 + alpha))
|
162 |
+
|
163 |
+
# Make the norm of p equal to Delta, p is changed only slightly during
|
164 |
+
# this. It is done to prevent p lie outside the trust region (which can
|
165 |
+
# cause problems later).
|
166 |
+
p *= Delta / norm(p)
|
167 |
+
|
168 |
+
return p, alpha, it + 1
|
169 |
+
|
170 |
+
|
171 |
+
def solve_trust_region_2d(B, g, Delta):
|
172 |
+
"""Solve a general trust-region problem in 2 dimensions.
|
173 |
+
|
174 |
+
The problem is reformulated as a 4th order algebraic equation,
|
175 |
+
the solution of which is found by numpy.roots.
|
176 |
+
|
177 |
+
Parameters
|
178 |
+
----------
|
179 |
+
B : ndarray, shape (2, 2)
|
180 |
+
Symmetric matrix, defines a quadratic term of the function.
|
181 |
+
g : ndarray, shape (2,)
|
182 |
+
Defines a linear term of the function.
|
183 |
+
Delta : float
|
184 |
+
Radius of a trust region.
|
185 |
+
|
186 |
+
Returns
|
187 |
+
-------
|
188 |
+
p : ndarray, shape (2,)
|
189 |
+
Found solution.
|
190 |
+
newton_step : bool
|
191 |
+
Whether the returned solution is the Newton step which lies within
|
192 |
+
the trust region.
|
193 |
+
"""
|
194 |
+
try:
|
195 |
+
R, lower = cho_factor(B)
|
196 |
+
p = -cho_solve((R, lower), g)
|
197 |
+
if np.dot(p, p) <= Delta**2:
|
198 |
+
return p, True
|
199 |
+
except LinAlgError:
|
200 |
+
pass
|
201 |
+
|
202 |
+
a = B[0, 0] * Delta**2
|
203 |
+
b = B[0, 1] * Delta**2
|
204 |
+
c = B[1, 1] * Delta**2
|
205 |
+
|
206 |
+
d = g[0] * Delta
|
207 |
+
f = g[1] * Delta
|
208 |
+
|
209 |
+
coeffs = np.array(
|
210 |
+
[-b + d, 2 * (a - c + f), 6 * b, 2 * (-a + c + f), -b - d])
|
211 |
+
t = np.roots(coeffs) # Can handle leading zeros.
|
212 |
+
t = np.real(t[np.isreal(t)])
|
213 |
+
|
214 |
+
p = Delta * np.vstack((2 * t / (1 + t**2), (1 - t**2) / (1 + t**2)))
|
215 |
+
value = 0.5 * np.sum(p * B.dot(p), axis=0) + np.dot(g, p)
|
216 |
+
i = np.argmin(value)
|
217 |
+
p = p[:, i]
|
218 |
+
|
219 |
+
return p, False
|
220 |
+
|
221 |
+
|
222 |
+
def update_tr_radius(Delta, actual_reduction, predicted_reduction,
|
223 |
+
step_norm, bound_hit):
|
224 |
+
"""Update the radius of a trust region based on the cost reduction.
|
225 |
+
|
226 |
+
Returns
|
227 |
+
-------
|
228 |
+
Delta : float
|
229 |
+
New radius.
|
230 |
+
ratio : float
|
231 |
+
Ratio between actual and predicted reductions.
|
232 |
+
"""
|
233 |
+
if predicted_reduction > 0:
|
234 |
+
ratio = actual_reduction / predicted_reduction
|
235 |
+
elif predicted_reduction == actual_reduction == 0:
|
236 |
+
ratio = 1
|
237 |
+
else:
|
238 |
+
ratio = 0
|
239 |
+
|
240 |
+
if ratio < 0.25:
|
241 |
+
Delta = 0.25 * step_norm
|
242 |
+
elif ratio > 0.75 and bound_hit:
|
243 |
+
Delta *= 2.0
|
244 |
+
|
245 |
+
return Delta, ratio
|
246 |
+
|
247 |
+
|
248 |
+
# Construction and minimization of quadratic functions.
|
249 |
+
|
250 |
+
|
251 |
+
def build_quadratic_1d(J, g, s, diag=None, s0=None):
|
252 |
+
"""Parameterize a multivariate quadratic function along a line.
|
253 |
+
|
254 |
+
The resulting univariate quadratic function is given as follows::
|
255 |
+
|
256 |
+
f(t) = 0.5 * (s0 + s*t).T * (J.T*J + diag) * (s0 + s*t) +
|
257 |
+
g.T * (s0 + s*t)
|
258 |
+
|
259 |
+
Parameters
|
260 |
+
----------
|
261 |
+
J : ndarray, sparse matrix or LinearOperator shape (m, n)
|
262 |
+
Jacobian matrix, affects the quadratic term.
|
263 |
+
g : ndarray, shape (n,)
|
264 |
+
Gradient, defines the linear term.
|
265 |
+
s : ndarray, shape (n,)
|
266 |
+
Direction vector of a line.
|
267 |
+
diag : None or ndarray with shape (n,), optional
|
268 |
+
Addition diagonal part, affects the quadratic term.
|
269 |
+
If None, assumed to be 0.
|
270 |
+
s0 : None or ndarray with shape (n,), optional
|
271 |
+
Initial point. If None, assumed to be 0.
|
272 |
+
|
273 |
+
Returns
|
274 |
+
-------
|
275 |
+
a : float
|
276 |
+
Coefficient for t**2.
|
277 |
+
b : float
|
278 |
+
Coefficient for t.
|
279 |
+
c : float
|
280 |
+
Free term. Returned only if `s0` is provided.
|
281 |
+
"""
|
282 |
+
v = J.dot(s)
|
283 |
+
a = np.dot(v, v)
|
284 |
+
if diag is not None:
|
285 |
+
a += np.dot(s * diag, s)
|
286 |
+
a *= 0.5
|
287 |
+
|
288 |
+
b = np.dot(g, s)
|
289 |
+
|
290 |
+
if s0 is not None:
|
291 |
+
u = J.dot(s0)
|
292 |
+
b += np.dot(u, v)
|
293 |
+
c = 0.5 * np.dot(u, u) + np.dot(g, s0)
|
294 |
+
if diag is not None:
|
295 |
+
b += np.dot(s0 * diag, s)
|
296 |
+
c += 0.5 * np.dot(s0 * diag, s0)
|
297 |
+
return a, b, c
|
298 |
+
else:
|
299 |
+
return a, b
|
300 |
+
|
301 |
+
|
302 |
+
def minimize_quadratic_1d(a, b, lb, ub, c=0):
|
303 |
+
"""Minimize a 1-D quadratic function subject to bounds.
|
304 |
+
|
305 |
+
The free term `c` is 0 by default. Bounds must be finite.
|
306 |
+
|
307 |
+
Returns
|
308 |
+
-------
|
309 |
+
t : float
|
310 |
+
Minimum point.
|
311 |
+
y : float
|
312 |
+
Minimum value.
|
313 |
+
"""
|
314 |
+
t = [lb, ub]
|
315 |
+
if a != 0:
|
316 |
+
extremum = -0.5 * b / a
|
317 |
+
if lb < extremum < ub:
|
318 |
+
t.append(extremum)
|
319 |
+
t = np.asarray(t)
|
320 |
+
y = t * (a * t + b) + c
|
321 |
+
min_index = np.argmin(y)
|
322 |
+
return t[min_index], y[min_index]
|
323 |
+
|
324 |
+
|
325 |
+
def evaluate_quadratic(J, g, s, diag=None):
|
326 |
+
"""Compute values of a quadratic function arising in least squares.
|
327 |
+
|
328 |
+
The function is 0.5 * s.T * (J.T * J + diag) * s + g.T * s.
|
329 |
+
|
330 |
+
Parameters
|
331 |
+
----------
|
332 |
+
J : ndarray, sparse matrix or LinearOperator, shape (m, n)
|
333 |
+
Jacobian matrix, affects the quadratic term.
|
334 |
+
g : ndarray, shape (n,)
|
335 |
+
Gradient, defines the linear term.
|
336 |
+
s : ndarray, shape (k, n) or (n,)
|
337 |
+
Array containing steps as rows.
|
338 |
+
diag : ndarray, shape (n,), optional
|
339 |
+
Addition diagonal part, affects the quadratic term.
|
340 |
+
If None, assumed to be 0.
|
341 |
+
|
342 |
+
Returns
|
343 |
+
-------
|
344 |
+
values : ndarray with shape (k,) or float
|
345 |
+
Values of the function. If `s` was 2-D, then ndarray is
|
346 |
+
returned, otherwise, float is returned.
|
347 |
+
"""
|
348 |
+
if s.ndim == 1:
|
349 |
+
Js = J.dot(s)
|
350 |
+
q = np.dot(Js, Js)
|
351 |
+
if diag is not None:
|
352 |
+
q += np.dot(s * diag, s)
|
353 |
+
else:
|
354 |
+
Js = J.dot(s.T)
|
355 |
+
q = np.sum(Js**2, axis=0)
|
356 |
+
if diag is not None:
|
357 |
+
q += np.sum(diag * s**2, axis=1)
|
358 |
+
|
359 |
+
l = np.dot(s, g)
|
360 |
+
|
361 |
+
return 0.5 * q + l
|
362 |
+
|
363 |
+
|
364 |
+
# Utility functions to work with bound constraints.
|
365 |
+
|
366 |
+
|
367 |
+
def in_bounds(x, lb, ub):
|
368 |
+
"""Check if a point lies within bounds."""
|
369 |
+
return np.all((x >= lb) & (x <= ub))
|
370 |
+
|
371 |
+
|
372 |
+
def step_size_to_bound(x, s, lb, ub):
|
373 |
+
"""Compute a min_step size required to reach a bound.
|
374 |
+
|
375 |
+
The function computes a positive scalar t, such that x + s * t is on
|
376 |
+
the bound.
|
377 |
+
|
378 |
+
Returns
|
379 |
+
-------
|
380 |
+
step : float
|
381 |
+
Computed step. Non-negative value.
|
382 |
+
hits : ndarray of int with shape of x
|
383 |
+
Each element indicates whether a corresponding variable reaches the
|
384 |
+
bound:
|
385 |
+
|
386 |
+
* 0 - the bound was not hit.
|
387 |
+
* -1 - the lower bound was hit.
|
388 |
+
* 1 - the upper bound was hit.
|
389 |
+
"""
|
390 |
+
non_zero = np.nonzero(s)
|
391 |
+
s_non_zero = s[non_zero]
|
392 |
+
steps = np.empty_like(x)
|
393 |
+
steps.fill(np.inf)
|
394 |
+
with np.errstate(over='ignore'):
|
395 |
+
steps[non_zero] = np.maximum((lb - x)[non_zero] / s_non_zero,
|
396 |
+
(ub - x)[non_zero] / s_non_zero)
|
397 |
+
min_step = np.min(steps)
|
398 |
+
return min_step, np.equal(steps, min_step) * np.sign(s).astype(int)
|
399 |
+
|
400 |
+
|
401 |
+
def find_active_constraints(x, lb, ub, rtol=1e-10):
|
402 |
+
"""Determine which constraints are active in a given point.
|
403 |
+
|
404 |
+
The threshold is computed using `rtol` and the absolute value of the
|
405 |
+
closest bound.
|
406 |
+
|
407 |
+
Returns
|
408 |
+
-------
|
409 |
+
active : ndarray of int with shape of x
|
410 |
+
Each component shows whether the corresponding constraint is active:
|
411 |
+
|
412 |
+
* 0 - a constraint is not active.
|
413 |
+
* -1 - a lower bound is active.
|
414 |
+
* 1 - a upper bound is active.
|
415 |
+
"""
|
416 |
+
active = np.zeros_like(x, dtype=int)
|
417 |
+
|
418 |
+
if rtol == 0:
|
419 |
+
active[x <= lb] = -1
|
420 |
+
active[x >= ub] = 1
|
421 |
+
return active
|
422 |
+
|
423 |
+
lower_dist = x - lb
|
424 |
+
upper_dist = ub - x
|
425 |
+
|
426 |
+
lower_threshold = rtol * np.maximum(1, np.abs(lb))
|
427 |
+
upper_threshold = rtol * np.maximum(1, np.abs(ub))
|
428 |
+
|
429 |
+
lower_active = (np.isfinite(lb) &
|
430 |
+
(lower_dist <= np.minimum(upper_dist, lower_threshold)))
|
431 |
+
active[lower_active] = -1
|
432 |
+
|
433 |
+
upper_active = (np.isfinite(ub) &
|
434 |
+
(upper_dist <= np.minimum(lower_dist, upper_threshold)))
|
435 |
+
active[upper_active] = 1
|
436 |
+
|
437 |
+
return active
|
438 |
+
|
439 |
+
|
440 |
+
def make_strictly_feasible(x, lb, ub, rstep=1e-10):
|
441 |
+
"""Shift a point to the interior of a feasible region.
|
442 |
+
|
443 |
+
Each element of the returned vector is at least at a relative distance
|
444 |
+
`rstep` from the closest bound. If ``rstep=0`` then `np.nextafter` is used.
|
445 |
+
"""
|
446 |
+
x_new = x.copy()
|
447 |
+
|
448 |
+
active = find_active_constraints(x, lb, ub, rstep)
|
449 |
+
lower_mask = np.equal(active, -1)
|
450 |
+
upper_mask = np.equal(active, 1)
|
451 |
+
|
452 |
+
if rstep == 0:
|
453 |
+
x_new[lower_mask] = np.nextafter(lb[lower_mask], ub[lower_mask])
|
454 |
+
x_new[upper_mask] = np.nextafter(ub[upper_mask], lb[upper_mask])
|
455 |
+
else:
|
456 |
+
x_new[lower_mask] = (lb[lower_mask] +
|
457 |
+
rstep * np.maximum(1, np.abs(lb[lower_mask])))
|
458 |
+
x_new[upper_mask] = (ub[upper_mask] -
|
459 |
+
rstep * np.maximum(1, np.abs(ub[upper_mask])))
|
460 |
+
|
461 |
+
tight_bounds = (x_new < lb) | (x_new > ub)
|
462 |
+
x_new[tight_bounds] = 0.5 * (lb[tight_bounds] + ub[tight_bounds])
|
463 |
+
|
464 |
+
return x_new
|
465 |
+
|
466 |
+
|
467 |
+
def CL_scaling_vector(x, g, lb, ub):
|
468 |
+
"""Compute Coleman-Li scaling vector and its derivatives.
|
469 |
+
|
470 |
+
Components of a vector v are defined as follows::
|
471 |
+
|
472 |
+
| ub[i] - x[i], if g[i] < 0 and ub[i] < np.inf
|
473 |
+
v[i] = | x[i] - lb[i], if g[i] > 0 and lb[i] > -np.inf
|
474 |
+
| 1, otherwise
|
475 |
+
|
476 |
+
According to this definition v[i] >= 0 for all i. It differs from the
|
477 |
+
definition in paper [1]_ (eq. (2.2)), where the absolute value of v is
|
478 |
+
used. Both definitions are equivalent down the line.
|
479 |
+
Derivatives of v with respect to x take value 1, -1 or 0 depending on a
|
480 |
+
case.
|
481 |
+
|
482 |
+
Returns
|
483 |
+
-------
|
484 |
+
v : ndarray with shape of x
|
485 |
+
Scaling vector.
|
486 |
+
dv : ndarray with shape of x
|
487 |
+
Derivatives of v[i] with respect to x[i], diagonal elements of v's
|
488 |
+
Jacobian.
|
489 |
+
|
490 |
+
References
|
491 |
+
----------
|
492 |
+
.. [1] M.A. Branch, T.F. Coleman, and Y. Li, "A Subspace, Interior,
|
493 |
+
and Conjugate Gradient Method for Large-Scale Bound-Constrained
|
494 |
+
Minimization Problems," SIAM Journal on Scientific Computing,
|
495 |
+
Vol. 21, Number 1, pp 1-23, 1999.
|
496 |
+
"""
|
497 |
+
v = np.ones_like(x)
|
498 |
+
dv = np.zeros_like(x)
|
499 |
+
|
500 |
+
mask = (g < 0) & np.isfinite(ub)
|
501 |
+
v[mask] = ub[mask] - x[mask]
|
502 |
+
dv[mask] = -1
|
503 |
+
|
504 |
+
mask = (g > 0) & np.isfinite(lb)
|
505 |
+
v[mask] = x[mask] - lb[mask]
|
506 |
+
dv[mask] = 1
|
507 |
+
|
508 |
+
return v, dv
|
509 |
+
|
510 |
+
|
511 |
+
def reflective_transformation(y, lb, ub):
|
512 |
+
"""Compute reflective transformation and its gradient."""
|
513 |
+
if in_bounds(y, lb, ub):
|
514 |
+
return y, np.ones_like(y)
|
515 |
+
|
516 |
+
lb_finite = np.isfinite(lb)
|
517 |
+
ub_finite = np.isfinite(ub)
|
518 |
+
|
519 |
+
x = y.copy()
|
520 |
+
g_negative = np.zeros_like(y, dtype=bool)
|
521 |
+
|
522 |
+
mask = lb_finite & ~ub_finite
|
523 |
+
x[mask] = np.maximum(y[mask], 2 * lb[mask] - y[mask])
|
524 |
+
g_negative[mask] = y[mask] < lb[mask]
|
525 |
+
|
526 |
+
mask = ~lb_finite & ub_finite
|
527 |
+
x[mask] = np.minimum(y[mask], 2 * ub[mask] - y[mask])
|
528 |
+
g_negative[mask] = y[mask] > ub[mask]
|
529 |
+
|
530 |
+
mask = lb_finite & ub_finite
|
531 |
+
d = ub - lb
|
532 |
+
t = np.remainder(y[mask] - lb[mask], 2 * d[mask])
|
533 |
+
x[mask] = lb[mask] + np.minimum(t, 2 * d[mask] - t)
|
534 |
+
g_negative[mask] = t > d[mask]
|
535 |
+
|
536 |
+
g = np.ones_like(y)
|
537 |
+
g[g_negative] = -1
|
538 |
+
|
539 |
+
return x, g
|
540 |
+
|
541 |
+
|
542 |
+
# Functions to display algorithm's progress.
|
543 |
+
|
544 |
+
|
545 |
+
def print_header_nonlinear():
|
546 |
+
print("{:^15}{:^15}{:^15}{:^15}{:^15}{:^15}"
|
547 |
+
.format("Iteration", "Total nfev", "Cost", "Cost reduction",
|
548 |
+
"Step norm", "Optimality"))
|
549 |
+
|
550 |
+
|
551 |
+
def print_iteration_nonlinear(iteration, nfev, cost, cost_reduction,
|
552 |
+
step_norm, optimality):
|
553 |
+
if cost_reduction is None:
|
554 |
+
cost_reduction = " " * 15
|
555 |
+
else:
|
556 |
+
cost_reduction = f"{cost_reduction:^15.2e}"
|
557 |
+
|
558 |
+
if step_norm is None:
|
559 |
+
step_norm = " " * 15
|
560 |
+
else:
|
561 |
+
step_norm = f"{step_norm:^15.2e}"
|
562 |
+
|
563 |
+
print("{:^15}{:^15}{:^15.4e}{}{}{:^15.2e}"
|
564 |
+
.format(iteration, nfev, cost, cost_reduction,
|
565 |
+
step_norm, optimality))
|
566 |
+
|
567 |
+
|
568 |
+
def print_header_linear():
|
569 |
+
print("{:^15}{:^15}{:^15}{:^15}{:^15}"
|
570 |
+
.format("Iteration", "Cost", "Cost reduction", "Step norm",
|
571 |
+
"Optimality"))
|
572 |
+
|
573 |
+
|
574 |
+
def print_iteration_linear(iteration, cost, cost_reduction, step_norm,
|
575 |
+
optimality):
|
576 |
+
if cost_reduction is None:
|
577 |
+
cost_reduction = " " * 15
|
578 |
+
else:
|
579 |
+
cost_reduction = f"{cost_reduction:^15.2e}"
|
580 |
+
|
581 |
+
if step_norm is None:
|
582 |
+
step_norm = " " * 15
|
583 |
+
else:
|
584 |
+
step_norm = f"{step_norm:^15.2e}"
|
585 |
+
|
586 |
+
print(f"{iteration:^15}{cost:^15.4e}{cost_reduction}{step_norm}{optimality:^15.2e}")
|
587 |
+
|
588 |
+
|
589 |
+
# Simple helper functions.
|
590 |
+
|
591 |
+
|
592 |
+
def compute_grad(J, f):
|
593 |
+
"""Compute gradient of the least-squares cost function."""
|
594 |
+
if isinstance(J, LinearOperator):
|
595 |
+
return J.rmatvec(f)
|
596 |
+
else:
|
597 |
+
return J.T.dot(f)
|
598 |
+
|
599 |
+
|
600 |
+
def compute_jac_scale(J, scale_inv_old=None):
|
601 |
+
"""Compute variables scale based on the Jacobian matrix."""
|
602 |
+
if issparse(J):
|
603 |
+
scale_inv = np.asarray(J.power(2).sum(axis=0)).ravel()**0.5
|
604 |
+
else:
|
605 |
+
scale_inv = np.sum(J**2, axis=0)**0.5
|
606 |
+
|
607 |
+
if scale_inv_old is None:
|
608 |
+
scale_inv[scale_inv == 0] = 1
|
609 |
+
else:
|
610 |
+
scale_inv = np.maximum(scale_inv, scale_inv_old)
|
611 |
+
|
612 |
+
return 1 / scale_inv, scale_inv
|
613 |
+
|
614 |
+
|
615 |
+
def left_multiplied_operator(J, d):
|
616 |
+
"""Return diag(d) J as LinearOperator."""
|
617 |
+
J = aslinearoperator(J)
|
618 |
+
|
619 |
+
def matvec(x):
|
620 |
+
return d * J.matvec(x)
|
621 |
+
|
622 |
+
def matmat(X):
|
623 |
+
return d[:, np.newaxis] * J.matmat(X)
|
624 |
+
|
625 |
+
def rmatvec(x):
|
626 |
+
return J.rmatvec(x.ravel() * d)
|
627 |
+
|
628 |
+
return LinearOperator(J.shape, matvec=matvec, matmat=matmat,
|
629 |
+
rmatvec=rmatvec)
|
630 |
+
|
631 |
+
|
632 |
+
def right_multiplied_operator(J, d):
|
633 |
+
"""Return J diag(d) as LinearOperator."""
|
634 |
+
J = aslinearoperator(J)
|
635 |
+
|
636 |
+
def matvec(x):
|
637 |
+
return J.matvec(np.ravel(x) * d)
|
638 |
+
|
639 |
+
def matmat(X):
|
640 |
+
return J.matmat(X * d[:, np.newaxis])
|
641 |
+
|
642 |
+
def rmatvec(x):
|
643 |
+
return d * J.rmatvec(x)
|
644 |
+
|
645 |
+
return LinearOperator(J.shape, matvec=matvec, matmat=matmat,
|
646 |
+
rmatvec=rmatvec)
|
647 |
+
|
648 |
+
|
649 |
+
def regularized_lsq_operator(J, diag):
|
650 |
+
"""Return a matrix arising in regularized least squares as LinearOperator.
|
651 |
+
|
652 |
+
The matrix is
|
653 |
+
[ J ]
|
654 |
+
[ D ]
|
655 |
+
where D is diagonal matrix with elements from `diag`.
|
656 |
+
"""
|
657 |
+
J = aslinearoperator(J)
|
658 |
+
m, n = J.shape
|
659 |
+
|
660 |
+
def matvec(x):
|
661 |
+
return np.hstack((J.matvec(x), diag * x))
|
662 |
+
|
663 |
+
def rmatvec(x):
|
664 |
+
x1 = x[:m]
|
665 |
+
x2 = x[m:]
|
666 |
+
return J.rmatvec(x1) + diag * x2
|
667 |
+
|
668 |
+
return LinearOperator((m + n, n), matvec=matvec, rmatvec=rmatvec)
|
669 |
+
|
670 |
+
|
671 |
+


def right_multiply(J, d, copy=True):
    """Compute J diag(d).

    If `copy` is False, `J` is modified in place (unless it is a
    LinearOperator).
    """
    if copy and not isinstance(J, LinearOperator):
        J = J.copy()

    if issparse(J):
        J.data *= d.take(J.indices, mode='clip')  # scikit-learn recipe.
    elif isinstance(J, LinearOperator):
        J = right_multiplied_operator(J, d)
    else:
        J *= d

    return J


def left_multiply(J, d, copy=True):
    """Compute diag(d) J.

    If `copy` is False, `J` is modified in place (unless it is a
    LinearOperator).
    """
    if copy and not isinstance(J, LinearOperator):
        J = J.copy()

    if issparse(J):
        J.data *= np.repeat(d, np.diff(J.indptr))  # scikit-learn recipe.
    elif isinstance(J, LinearOperator):
        J = left_multiplied_operator(J, d)
    else:
        J *= d[:, np.newaxis]

    return J


def check_termination(dF, F, dx_norm, x_norm, ratio, ftol, xtol):
    """Check termination condition for nonlinear least squares."""
    ftol_satisfied = dF < ftol * F and ratio > 0.25
    xtol_satisfied = dx_norm < xtol * (xtol + x_norm)

    if ftol_satisfied and xtol_satisfied:
        return 4
    elif ftol_satisfied:
        return 2
    elif xtol_satisfied:
        return 3
    else:
        return None
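

# Editor's sketch (hypothetical helper): the returned codes match the
# `status` values reported by `least_squares` (2 - ftol, 3 - xtol, 4 - both,
# None - keep iterating).
def _demo_check_termination():
    ftol = xtol = 1e-8
    # Large relative cost reduction with a trustworthy model -> status 2.
    assert check_termination(1e-10, 1.0, 1.0, 1.0, 0.5, ftol, xtol) == 2
    # Tiny step relative to norm(x) -> status 3.
    assert check_termination(1.0, 1.0, 1e-12, 1.0, 0.1, ftol, xtol) == 3
    # Neither condition met -> None, iterations continue.
    assert check_termination(1.0, 1.0, 1.0, 1.0, 0.1, ftol, xtol) is None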


def scale_for_robust_loss_function(J, f, rho):
    """Scale Jacobian and residuals for a robust loss function.

    Arrays are modified in place.
    """
    J_scale = rho[1] + 2 * rho[2] * f**2
    J_scale[J_scale < EPS] = EPS
    J_scale **= 0.5

    f *= rho[1] / J_scale

    return left_multiply(J, J_scale, copy=False), f
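

# Editor's sketch (hypothetical helper): after scaling, the plain
# least-squares gradient of the scaled pair reproduces the true gradient
# J^T (rho'(f**2) * f) of the robust cost 0.5 * sum(rho(f**2)).
def _demo_scale_for_robust_loss_function():
    f = np.array([0.5, 2.0])
    z = f**2
    # rho rows for the 'soft_l1' loss: rho(z) = 2 * ((1 + z)**0.5 - 1).
    rho = np.vstack((2 * ((1 + z)**0.5 - 1),
                     (1 + z)**-0.5,
                     -0.5 * (1 + z)**-1.5))
    J = np.array([[1.0, 0.0], [1.0, 1.0]])
    f_orig = f.copy()
    J_scaled, f_scaled = scale_for_robust_loss_function(J.copy(), f, rho)
    assert np.allclose(J_scaled.T.dot(f_scaled), J.T.dot(rho[1] * f_orig))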
venv/lib/python3.10/site-packages/scipy/optimize/_lsq/dogbox.py
ADDED
@@ -0,0 +1,331 @@
"""
Dogleg algorithm with rectangular trust regions for least-squares minimization.

The description of the algorithm can be found in [Voglis]_. The algorithm does
trust-region iterations, but the shape of trust regions is rectangular as
opposed to conventional elliptical. The intersection of a trust region and
an initial feasible region is again some rectangle. Thus, on each iteration a
bound-constrained quadratic optimization problem is solved.

A quadratic problem is solved by the well-known dogleg approach, where the
function is minimized along a piecewise-linear "dogleg" path [NumOpt]_,
Chapter 4. If the Jacobian is not rank-deficient then the function is
decreasing along this path, and optimization amounts to simply following along
this path as long as a point stays within the bounds. A constrained Cauchy
step (along the anti-gradient) is considered for safety in rank-deficient
cases, in which the convergence might be slow.

If during iterations some variable hits its initial bound and the component
of the anti-gradient points outside the feasible region, then the next dogleg
step won't make any progress. At this state such variables satisfy first-order
optimality conditions and they are excluded before computing the next dogleg
step.

The Gauss-Newton step can be computed exactly by `numpy.linalg.lstsq` (for
dense Jacobian matrices) or by the iterative procedure
`scipy.sparse.linalg.lsmr` (for dense and sparse matrices, or a Jacobian given
as a LinearOperator). The second option allows solving very large problems (up
to a couple of million residuals on a regular PC), provided the Jacobian
matrix is sufficiently sparse. But note that dogbox is not very good at
solving problems with a large number of constraints, because of the variable
exclusion-inclusion on each iteration (the required number of function
evaluations might be high, or the accuracy of the solution poor), thus its
large-scale usage is probably limited to unconstrained problems.

References
----------
.. [Voglis] C. Voglis and I. E. Lagaris, "A Rectangular Trust Region Dogleg
            Approach for Unconstrained and Bound Constrained Nonlinear
            Optimization", WSEAS International Conference on Applied
            Mathematics, Corfu, Greece, 2004.
.. [NumOpt] J. Nocedal and S. J. Wright, "Numerical optimization, 2nd edition".
"""
import numpy as np
from numpy.linalg import lstsq, norm

from scipy.sparse.linalg import LinearOperator, aslinearoperator, lsmr
from scipy.optimize import OptimizeResult

from .common import (
    step_size_to_bound, in_bounds, update_tr_radius, evaluate_quadratic,
    build_quadratic_1d, minimize_quadratic_1d, compute_grad,
    compute_jac_scale, check_termination, scale_for_robust_loss_function,
    print_header_nonlinear, print_iteration_nonlinear)
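

# Editor's usage sketch (hypothetical helper): `dogbox` is not meant to be
# called directly; it is driven by `scipy.optimize.least_squares` with
# ``method='dogbox'``. The import is deferred to call time because
# `least_squares` itself imports this module.
def _demo_dogbox_via_least_squares():
    from scipy.optimize import least_squares

    def fun(x):
        return np.array([10 * (x[1] - x[0]**2), 1 - x[0]])

    res = least_squares(fun, np.array([2.0, 2.0]), method='dogbox',
                        bounds=([-np.inf, 1.5], np.inf))
    assert res.success and res.x[1] >= 1.5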


def lsmr_operator(Jop, d, active_set):
    """Compute LinearOperator to use in LSMR by dogbox algorithm.

    `active_set` mask is used to exclude active variables from computations
    of matrix-vector products.
    """
    m, n = Jop.shape

    def matvec(x):
        x_free = x.ravel().copy()
        x_free[active_set] = 0
        # Use the masked vector so that active components are excluded,
        # consistently with `rmatvec` below.
        return Jop.matvec(x_free * d)

    def rmatvec(x):
        r = d * Jop.rmatvec(x)
        r[active_set] = 0
        return r

    return LinearOperator((m, n), matvec=matvec, rmatvec=rmatvec, dtype=float)


def find_intersection(x, tr_bounds, lb, ub):
    """Find intersection of trust-region bounds and initial bounds.

    Returns
    -------
    lb_total, ub_total : ndarray with shape of x
        Lower and upper bounds of the intersection region.
    orig_l, orig_u : ndarray of bool with shape of x
        True means that an original bound is taken as a corresponding bound
        in the intersection region.
    tr_l, tr_u : ndarray of bool with shape of x
        True means that a trust-region bound is taken as a corresponding bound
        in the intersection region.
    """
    lb_centered = lb - x
    ub_centered = ub - x

    lb_total = np.maximum(lb_centered, -tr_bounds)
    ub_total = np.minimum(ub_centered, tr_bounds)

    orig_l = np.equal(lb_total, lb_centered)
    orig_u = np.equal(ub_total, ub_centered)

    tr_l = np.equal(lb_total, -tr_bounds)
    tr_u = np.equal(ub_total, tr_bounds)

    return lb_total, ub_total, orig_l, orig_u, tr_l, tr_u
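

# Editor's sketch (hypothetical helper): everything is computed in variables
# centered at x, so a trust region of half-width 1 around x = 0.5 is clipped
# from below by the trust region and from above by the original bound.
def _demo_find_intersection():
    x = np.array([0.5])
    tr_bounds = np.array([1.0])
    lb, ub = np.array([-10.0]), np.array([1.0])
    lb_total, ub_total, orig_l, orig_u, tr_l, tr_u = find_intersection(
        x, tr_bounds, lb, ub)
    assert np.allclose(lb_total, [-1.0]) and np.allclose(ub_total, [0.5])
    assert tr_l[0] and not orig_l[0]      # lower side comes from the TR.
    assert orig_u[0] and not tr_u[0]      # upper side comes from `ub`.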


def dogleg_step(x, newton_step, g, a, b, tr_bounds, lb, ub):
    """Find dogleg step in a rectangular region.

    Returns
    -------
    step : ndarray, shape (n,)
        Computed dogleg step.
    bound_hits : ndarray of int, shape (n,)
        Each component shows whether a corresponding variable hits the
        initial bound after the step is taken:
            * 0 - a variable doesn't hit the bound.
            * -1 - lower bound is hit.
            * 1 - upper bound is hit.
    tr_hit : bool
        Whether the step hit the boundary of the trust-region.
    """
    lb_total, ub_total, orig_l, orig_u, tr_l, tr_u = find_intersection(
        x, tr_bounds, lb, ub
    )
    bound_hits = np.zeros_like(x, dtype=int)

    if in_bounds(newton_step, lb_total, ub_total):
        return newton_step, bound_hits, False

    to_bounds, _ = step_size_to_bound(np.zeros_like(x), -g, lb_total, ub_total)

    # The classical dogleg algorithm would check if the Cauchy step fits into
    # the bounds, and just return its constrained version if not. But in a
    # rectangular trust region it makes sense to try to improve the
    # constrained Cauchy step too. Thus, we don't distinguish these two cases.

    cauchy_step = -minimize_quadratic_1d(a, b, 0, to_bounds)[0] * g

    step_diff = newton_step - cauchy_step
    step_size, hits = step_size_to_bound(cauchy_step, step_diff,
                                         lb_total, ub_total)
    bound_hits[(hits < 0) & orig_l] = -1
    bound_hits[(hits > 0) & orig_u] = 1
    tr_hit = np.any((hits < 0) & tr_l | (hits > 0) & tr_u)

    return cauchy_step + step_size * step_diff, bound_hits, tr_hit


def dogbox(fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale,
           loss_function, tr_solver, tr_options, verbose):
    f = f0
    f_true = f.copy()
    nfev = 1

    J = J0
    njev = 1

    if loss_function is not None:
        rho = loss_function(f)
        cost = 0.5 * np.sum(rho[0])
        J, f = scale_for_robust_loss_function(J, f, rho)
    else:
        cost = 0.5 * np.dot(f, f)

    g = compute_grad(J, f)

    jac_scale = isinstance(x_scale, str) and x_scale == 'jac'
    if jac_scale:
        scale, scale_inv = compute_jac_scale(J)
    else:
        scale, scale_inv = x_scale, 1 / x_scale

    Delta = norm(x0 * scale_inv, ord=np.inf)
    if Delta == 0:
        Delta = 1.0

    on_bound = np.zeros_like(x0, dtype=int)
    on_bound[np.equal(x0, lb)] = -1
    on_bound[np.equal(x0, ub)] = 1

    x = x0
    step = np.empty_like(x0)

    if max_nfev is None:
        max_nfev = x0.size * 100

    termination_status = None
    iteration = 0
    step_norm = None
    actual_reduction = None

    if verbose == 2:
        print_header_nonlinear()

    while True:
        active_set = on_bound * g < 0
        free_set = ~active_set

        g_free = g[free_set]
        g_full = g.copy()
        g[active_set] = 0

        g_norm = norm(g, ord=np.inf)
        if g_norm < gtol:
            termination_status = 1

        if verbose == 2:
            print_iteration_nonlinear(iteration, nfev, cost, actual_reduction,
                                      step_norm, g_norm)

        if termination_status is not None or nfev == max_nfev:
            break

        x_free = x[free_set]
        lb_free = lb[free_set]
        ub_free = ub[free_set]
        scale_free = scale[free_set]

        # Compute (Gauss-)Newton step and build quadratic model for Cauchy
        # step.
        if tr_solver == 'exact':
            J_free = J[:, free_set]
            newton_step = lstsq(J_free, -f, rcond=-1)[0]

            # Coefficients for the quadratic model along the anti-gradient.
            a, b = build_quadratic_1d(J_free, g_free, -g_free)
        elif tr_solver == 'lsmr':
            Jop = aslinearoperator(J)

            # We compute the lsmr step in scaled variables and then transform
            # it back to the original variables. If lsmr gave the exact lsq
            # solution, this would be equivalent to not doing any
            # transformations, but from experience it's better this way.

            # We pass active_set to make computations as if we selected
            # the free subset of J columns, but without actually doing any
            # slicing, which is expensive for sparse matrices and impossible
            # for LinearOperator.

            lsmr_op = lsmr_operator(Jop, scale, active_set)
            newton_step = -lsmr(lsmr_op, f, **tr_options)[0][free_set]
            newton_step *= scale_free

            # Components of g for active variables were zeroed, so this call
            # is correct and equivalent to using J_free and g_free.
            a, b = build_quadratic_1d(Jop, g, -g)

        actual_reduction = -1.0
        while actual_reduction <= 0 and nfev < max_nfev:
            tr_bounds = Delta * scale_free

            step_free, on_bound_free, tr_hit = dogleg_step(
                x_free, newton_step, g_free, a, b, tr_bounds, lb_free, ub_free)

            step.fill(0.0)
            step[free_set] = step_free

            if tr_solver == 'exact':
                predicted_reduction = -evaluate_quadratic(J_free, g_free,
                                                          step_free)
            elif tr_solver == 'lsmr':
                predicted_reduction = -evaluate_quadratic(Jop, g, step)

            # gh11403 ensure that solution is fully within bounds.
            x_new = np.clip(x + step, lb, ub)

            f_new = fun(x_new)
            nfev += 1

            step_h_norm = norm(step * scale_inv, ord=np.inf)

            if not np.all(np.isfinite(f_new)):
                Delta = 0.25 * step_h_norm
                continue

            # Usual trust-region step quality estimation.
            if loss_function is not None:
                cost_new = loss_function(f_new, cost_only=True)
            else:
                cost_new = 0.5 * np.dot(f_new, f_new)
            actual_reduction = cost - cost_new

            Delta, ratio = update_tr_radius(
                Delta, actual_reduction, predicted_reduction,
                step_h_norm, tr_hit
            )

            step_norm = norm(step)
            termination_status = check_termination(
                actual_reduction, cost, step_norm, norm(x), ratio, ftol, xtol)

            if termination_status is not None:
                break

        if actual_reduction > 0:
            on_bound[free_set] = on_bound_free

            x = x_new
            # Set variables exactly at the boundary.
            mask = on_bound == -1
            x[mask] = lb[mask]
            mask = on_bound == 1
            x[mask] = ub[mask]

            f = f_new
            f_true = f.copy()

            cost = cost_new

            J = jac(x, f)
            njev += 1

            if loss_function is not None:
                rho = loss_function(f)
                J, f = scale_for_robust_loss_function(J, f, rho)

            g = compute_grad(J, f)

            if jac_scale:
                scale, scale_inv = compute_jac_scale(J, scale_inv)
        else:
            step_norm = 0
            actual_reduction = 0

        iteration += 1

    if termination_status is None:
        termination_status = 0

    return OptimizeResult(
        x=x, cost=cost, fun=f_true, jac=J, grad=g_full, optimality=g_norm,
        active_mask=on_bound, nfev=nfev, njev=njev, status=termination_status)
venv/lib/python3.10/site-packages/scipy/optimize/_lsq/givens_elimination.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (236 kB). View file
venv/lib/python3.10/site-packages/scipy/optimize/_lsq/least_squares.py
ADDED
@@ -0,0 +1,967 @@
"""Generic interface for least-squares minimization."""
from warnings import warn

import numpy as np
from numpy.linalg import norm

from scipy.sparse import issparse
from scipy.sparse.linalg import LinearOperator
from scipy.optimize import _minpack, OptimizeResult
from scipy.optimize._numdiff import approx_derivative, group_columns
from scipy.optimize._minimize import Bounds

from .trf import trf
from .dogbox import dogbox
from .common import EPS, in_bounds, make_strictly_feasible


TERMINATION_MESSAGES = {
    -1: "Improper input parameters status returned from `leastsq`",
    0: "The maximum number of function evaluations is exceeded.",
    1: "`gtol` termination condition is satisfied.",
    2: "`ftol` termination condition is satisfied.",
    3: "`xtol` termination condition is satisfied.",
    4: "Both `ftol` and `xtol` termination conditions are satisfied."
}


FROM_MINPACK_TO_COMMON = {
    0: -1,  # Improper input parameters from MINPACK.
    1: 2,
    2: 3,
    3: 4,
    4: 1,
    5: 0
    # There are 6, 7, 8 for too small tolerance parameters,
    # but we guard against it by checking ftol, xtol, gtol beforehand.
}


def call_minpack(fun, x0, jac, ftol, xtol, gtol, max_nfev, x_scale, diff_step):
    n = x0.size

    if diff_step is None:
        epsfcn = EPS
    else:
        epsfcn = diff_step**2

    # Compute MINPACK's `diag`, which is the inverse of our `x_scale`;
    # ``x_scale='jac'`` corresponds to ``diag=None``.
    if isinstance(x_scale, str) and x_scale == 'jac':
        diag = None
    else:
        diag = 1 / x_scale

    full_output = True
    col_deriv = False
    factor = 100.0

    if jac is None:
        if max_nfev is None:
            # n squared to account for Jacobian evaluations.
            max_nfev = 100 * n * (n + 1)
        x, info, status = _minpack._lmdif(
            fun, x0, (), full_output, ftol, xtol, gtol,
            max_nfev, epsfcn, factor, diag)
    else:
        if max_nfev is None:
            max_nfev = 100 * n
        x, info, status = _minpack._lmder(
            fun, jac, x0, (), full_output, col_deriv,
            ftol, xtol, gtol, max_nfev, factor, diag)

    f = info['fvec']

    if callable(jac):
        J = jac(x)
    else:
        J = np.atleast_2d(approx_derivative(fun, x))

    cost = 0.5 * np.dot(f, f)
    g = J.T.dot(f)
    g_norm = norm(g, ord=np.inf)

    nfev = info['nfev']
    njev = info.get('njev', None)

    status = FROM_MINPACK_TO_COMMON[status]
    active_mask = np.zeros_like(x0, dtype=int)

    return OptimizeResult(
        x=x, cost=cost, fun=f, jac=J, grad=g, optimality=g_norm,
        active_mask=active_mask, nfev=nfev, njev=njev, status=status)


def prepare_bounds(bounds, n):
    lb, ub = (np.asarray(b, dtype=float) for b in bounds)
    if lb.ndim == 0:
        lb = np.resize(lb, n)

    if ub.ndim == 0:
        ub = np.resize(ub, n)

    return lb, ub


def check_tolerance(ftol, xtol, gtol, method):
    def check(tol, name):
        if tol is None:
            tol = 0
        elif tol < EPS:
            warn(f"Setting `{name}` below the machine epsilon ({EPS:.2e}) "
                 f"effectively disables the corresponding termination "
                 f"condition.", stacklevel=3)
        return tol

    ftol = check(ftol, "ftol")
    xtol = check(xtol, "xtol")
    gtol = check(gtol, "gtol")

    if method == "lm" and (ftol < EPS or xtol < EPS or gtol < EPS):
        raise ValueError("All tolerances must be higher than machine epsilon "
                         f"({EPS:.2e}) for method 'lm'.")
    elif ftol < EPS and xtol < EPS and gtol < EPS:
        raise ValueError("At least one of the tolerances must be higher than "
                         f"machine epsilon ({EPS:.2e}).")

    return ftol, xtol, gtol
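

# Editor's sketch (hypothetical helper): a None tolerance is mapped to 0,
# which disables that test for 'trf'/'dogbox', while 'lm' rejects
# sub-epsilon tolerances outright.
def _demo_check_tolerance():
    ftol, xtol, gtol = check_tolerance(None, 1e-8, 1e-8, method='trf')
    assert ftol == 0 and xtol == 1e-8 and gtol == 1e-8
    try:
        check_tolerance(None, 1e-8, 1e-8, method='lm')
    except ValueError:
        pass  # 'lm' requires all tolerances above machine epsilon.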


def check_x_scale(x_scale, x0):
    if isinstance(x_scale, str) and x_scale == 'jac':
        return x_scale

    try:
        x_scale = np.asarray(x_scale, dtype=float)
        valid = np.all(np.isfinite(x_scale)) and np.all(x_scale > 0)
    except (ValueError, TypeError):
        valid = False

    if not valid:
        raise ValueError("`x_scale` must be 'jac' or array_like with "
                         "positive numbers.")

    if x_scale.ndim == 0:
        x_scale = np.resize(x_scale, x0.shape)

    if x_scale.shape != x0.shape:
        raise ValueError("Inconsistent shapes between `x_scale` and `x0`.")

    return x_scale


def check_jac_sparsity(jac_sparsity, m, n):
    if jac_sparsity is None:
        return None

    if not issparse(jac_sparsity):
        jac_sparsity = np.atleast_2d(jac_sparsity)

    if jac_sparsity.shape != (m, n):
        raise ValueError("`jac_sparsity` has wrong shape.")

    return jac_sparsity, group_columns(jac_sparsity)


# Loss functions.


def huber(z, rho, cost_only):
    mask = z <= 1
    rho[0, mask] = z[mask]
    rho[0, ~mask] = 2 * z[~mask]**0.5 - 1
    if cost_only:
        return
    rho[1, mask] = 1
    rho[1, ~mask] = z[~mask]**-0.5
    rho[2, mask] = 0
    rho[2, ~mask] = -0.5 * z[~mask]**-1.5


def soft_l1(z, rho, cost_only):
    t = 1 + z
    rho[0] = 2 * (t**0.5 - 1)
    if cost_only:
        return
    rho[1] = t**-0.5
    rho[2] = -0.5 * t**-1.5


def cauchy(z, rho, cost_only):
    rho[0] = np.log1p(z)
    if cost_only:
        return
    t = 1 + z
    rho[1] = 1 / t
    rho[2] = -1 / t**2


def arctan(z, rho, cost_only):
    rho[0] = np.arctan(z)
    if cost_only:
        return
    t = 1 + z**2
    rho[1] = 1 / t
    rho[2] = -2 * z / t**2


IMPLEMENTED_LOSSES = dict(linear=None, huber=huber, soft_l1=soft_l1,
                          cauchy=cauchy, arctan=arctan)
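

# Editor's sketch (hypothetical helper): each loss fills the preallocated
# (3, m) array with rho(z), rho'(z) and rho''(z). Huber is the identity for
# z <= 1 and switches to 2*sqrt(z) - 1 beyond that.
def _demo_huber():
    z = np.array([0.25, 4.0])
    rho = np.empty((3, 2))
    huber(z, rho, cost_only=False)
    assert np.allclose(rho[0], [0.25, 3.0])  # [z, 2*sqrt(z) - 1]
    assert np.allclose(rho[1], [1.0, 0.5])   # [1, z**-0.5]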


def construct_loss_function(m, loss, f_scale):
    if loss == 'linear':
        return None

    if not callable(loss):
        loss = IMPLEMENTED_LOSSES[loss]
        rho = np.empty((3, m))

        def loss_function(f, cost_only=False):
            z = (f / f_scale) ** 2
            loss(z, rho, cost_only=cost_only)
            if cost_only:
                return 0.5 * f_scale ** 2 * np.sum(rho[0])
            rho[0] *= f_scale ** 2
            rho[2] /= f_scale ** 2
            return rho
    else:
        def loss_function(f, cost_only=False):
            z = (f / f_scale) ** 2
            rho = loss(z)
            if cost_only:
                return 0.5 * f_scale ** 2 * np.sum(rho[0])
            rho[0] *= f_scale ** 2
            rho[2] /= f_scale ** 2
            return rho

    return loss_function
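

# Editor's sketch (hypothetical helper): `f_scale` rescales residuals before
# the loss is applied, and the ``cost_only`` path returns the robust cost
# 0.5 * f_scale**2 * sum(rho(z)) directly.
def _demo_construct_loss_function():
    f = np.array([0.3, 3.0])
    loss_function = construct_loss_function(m=2, loss='soft_l1', f_scale=1.0)
    assert loss_function(f).shape == (3, 2)
    cost = loss_function(f, cost_only=True)
    assert np.isclose(cost, 0.5 * np.sum(2 * ((1 + f**2)**0.5 - 1)))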
def least_squares(
|
242 |
+
fun, x0, jac='2-point', bounds=(-np.inf, np.inf), method='trf',
|
243 |
+
ftol=1e-8, xtol=1e-8, gtol=1e-8, x_scale=1.0, loss='linear',
|
244 |
+
f_scale=1.0, diff_step=None, tr_solver=None, tr_options={},
|
245 |
+
jac_sparsity=None, max_nfev=None, verbose=0, args=(), kwargs={}):
|
246 |
+
"""Solve a nonlinear least-squares problem with bounds on the variables.
|
247 |
+
|
248 |
+
Given the residuals f(x) (an m-D real function of n real
|
249 |
+
variables) and the loss function rho(s) (a scalar function), `least_squares`
|
250 |
+
finds a local minimum of the cost function F(x)::
|
251 |
+
|
252 |
+
minimize F(x) = 0.5 * sum(rho(f_i(x)**2), i = 0, ..., m - 1)
|
253 |
+
subject to lb <= x <= ub
|
254 |
+
|
255 |
+
The purpose of the loss function rho(s) is to reduce the influence of
|
256 |
+
outliers on the solution.
|
257 |
+
|
258 |
+
Parameters
|
259 |
+
----------
|
260 |
+
fun : callable
|
261 |
+
Function which computes the vector of residuals, with the signature
|
262 |
+
``fun(x, *args, **kwargs)``, i.e., the minimization proceeds with
|
263 |
+
respect to its first argument. The argument ``x`` passed to this
|
264 |
+
function is an ndarray of shape (n,) (never a scalar, even for n=1).
|
265 |
+
It must allocate and return a 1-D array_like of shape (m,) or a scalar.
|
266 |
+
If the argument ``x`` is complex or the function ``fun`` returns
|
267 |
+
complex residuals, it must be wrapped in a real function of real
|
268 |
+
arguments, as shown at the end of the Examples section.
|
269 |
+
x0 : array_like with shape (n,) or float
|
270 |
+
Initial guess on independent variables. If float, it will be treated
|
271 |
+
as a 1-D array with one element. When `method` is 'trf', the initial
|
272 |
+
guess might be slightly adjusted to lie sufficiently within the given
|
273 |
+
`bounds`.
|
274 |
+
jac : {'2-point', '3-point', 'cs', callable}, optional
|
275 |
+
Method of computing the Jacobian matrix (an m-by-n matrix, where
|
276 |
+
element (i, j) is the partial derivative of f[i] with respect to
|
277 |
+
x[j]). The keywords select a finite difference scheme for numerical
|
278 |
+
estimation. The scheme '3-point' is more accurate, but requires
|
279 |
+
twice as many operations as '2-point' (default). The scheme 'cs'
|
280 |
+
uses complex steps, and while potentially the most accurate, it is
|
281 |
+
applicable only when `fun` correctly handles complex inputs and
|
282 |
+
can be analytically continued to the complex plane. Method 'lm'
|
283 |
+
always uses the '2-point' scheme. If callable, it is used as
|
284 |
+
``jac(x, *args, **kwargs)`` and should return a good approximation
|
285 |
+
(or the exact value) for the Jacobian as an array_like (np.atleast_2d
|
286 |
+
is applied), a sparse matrix (csr_matrix preferred for performance) or
|
287 |
+
a `scipy.sparse.linalg.LinearOperator`.
|
288 |
+
bounds : 2-tuple of array_like or `Bounds`, optional
|
289 |
+
There are two ways to specify bounds:
|
290 |
+
|
291 |
+
1. Instance of `Bounds` class
|
292 |
+
2. Lower and upper bounds on independent variables. Defaults to no
|
293 |
+
bounds. Each array must match the size of `x0` or be a scalar,
|
294 |
+
in the latter case a bound will be the same for all variables.
|
295 |
+
Use ``np.inf`` with an appropriate sign to disable bounds on all
|
296 |
+
or some variables.
|
297 |
+
method : {'trf', 'dogbox', 'lm'}, optional
|
298 |
+
Algorithm to perform minimization.
|
299 |
+
|
300 |
+
* 'trf' : Trust Region Reflective algorithm, particularly suitable
|
301 |
+
for large sparse problems with bounds. Generally robust method.
|
302 |
+
* 'dogbox' : dogleg algorithm with rectangular trust regions,
|
303 |
+
typical use case is small problems with bounds. Not recommended
|
304 |
+
for problems with rank-deficient Jacobian.
|
305 |
+
* 'lm' : Levenberg-Marquardt algorithm as implemented in MINPACK.
|
306 |
+
Doesn't handle bounds and sparse Jacobians. Usually the most
|
307 |
+
efficient method for small unconstrained problems.
|
308 |
+
|
309 |
+
Default is 'trf'. See Notes for more information.
|
310 |
+
ftol : float or None, optional
|
311 |
+
Tolerance for termination by the change of the cost function. Default
|
312 |
+
is 1e-8. The optimization process is stopped when ``dF < ftol * F``,
|
313 |
+
and there was an adequate agreement between a local quadratic model and
|
314 |
+
the true model in the last step.
|
315 |
+
|
316 |
+
If None and 'method' is not 'lm', the termination by this condition is
|
317 |
+
disabled. If 'method' is 'lm', this tolerance must be higher than
|
318 |
+
machine epsilon.
|
319 |
+
xtol : float or None, optional
|
320 |
+
Tolerance for termination by the change of the independent variables.
|
321 |
+
Default is 1e-8. The exact condition depends on the `method` used:
|
322 |
+
|
323 |
+
* For 'trf' and 'dogbox' : ``norm(dx) < xtol * (xtol + norm(x))``.
|
324 |
+
* For 'lm' : ``Delta < xtol * norm(xs)``, where ``Delta`` is
|
325 |
+
a trust-region radius and ``xs`` is the value of ``x``
|
326 |
+
scaled according to `x_scale` parameter (see below).
|
327 |
+
|
328 |
+
If None and 'method' is not 'lm', the termination by this condition is
|
329 |
+
disabled. If 'method' is 'lm', this tolerance must be higher than
|
330 |
+
machine epsilon.
|
331 |
+
gtol : float or None, optional
|
332 |
+
Tolerance for termination by the norm of the gradient. Default is 1e-8.
|
333 |
+
The exact condition depends on a `method` used:
|
334 |
+
|
335 |
+
* For 'trf' : ``norm(g_scaled, ord=np.inf) < gtol``, where
|
336 |
+
``g_scaled`` is the value of the gradient scaled to account for
|
337 |
+
the presence of the bounds [STIR]_.
|
338 |
+
* For 'dogbox' : ``norm(g_free, ord=np.inf) < gtol``, where
|
339 |
+
``g_free`` is the gradient with respect to the variables which
|
340 |
+
are not in the optimal state on the boundary.
|
341 |
+
* For 'lm' : the maximum absolute value of the cosine of angles
|
342 |
+
between columns of the Jacobian and the residual vector is less
|
343 |
+
than `gtol`, or the residual vector is zero.
|
344 |
+
|
345 |
+
If None and 'method' is not 'lm', the termination by this condition is
|
346 |
+
disabled. If 'method' is 'lm', this tolerance must be higher than
|
347 |
+
machine epsilon.
|
348 |
+
x_scale : array_like or 'jac', optional
|
349 |
+
Characteristic scale of each variable. Setting `x_scale` is equivalent
|
350 |
+
to reformulating the problem in scaled variables ``xs = x / x_scale``.
|
351 |
+
An alternative view is that the size of a trust region along jth
|
352 |
+
dimension is proportional to ``x_scale[j]``. Improved convergence may
|
353 |
+
be achieved by setting `x_scale` such that a step of a given size
|
354 |
+
along any of the scaled variables has a similar effect on the cost
|
355 |
+
function. If set to 'jac', the scale is iteratively updated using the
|
356 |
+
inverse norms of the columns of the Jacobian matrix (as described in
|
357 |
+
[JJMore]_).
|
358 |
+
loss : str or callable, optional
|
359 |
+
Determines the loss function. The following keyword values are allowed:
|
360 |
+
|
361 |
+
* 'linear' (default) : ``rho(z) = z``. Gives a standard
|
362 |
+
least-squares problem.
|
363 |
+
* 'soft_l1' : ``rho(z) = 2 * ((1 + z)**0.5 - 1)``. The smooth
|
364 |
+
approximation of l1 (absolute value) loss. Usually a good
|
365 |
+
choice for robust least squares.
|
366 |
+
* 'huber' : ``rho(z) = z if z <= 1 else 2*z**0.5 - 1``. Works
|
367 |
+
similarly to 'soft_l1'.
|
368 |
+
* 'cauchy' : ``rho(z) = ln(1 + z)``. Severely weakens outliers
|
369 |
+
influence, but may cause difficulties in optimization process.
|
370 |
+
* 'arctan' : ``rho(z) = arctan(z)``. Limits a maximum loss on
|
371 |
+
a single residual, has properties similar to 'cauchy'.
|
372 |
+
|
373 |
+
If callable, it must take a 1-D ndarray ``z=f**2`` and return an
|
374 |
+
array_like with shape (3, m) where row 0 contains function values,
|
375 |
+
row 1 contains first derivatives and row 2 contains second
|
376 |
+
derivatives. Method 'lm' supports only 'linear' loss.
|
377 |
+
f_scale : float, optional
|
378 |
+
Value of soft margin between inlier and outlier residuals, default
|
379 |
+
is 1.0. The loss function is evaluated as follows
|
380 |
+
``rho_(f**2) = C**2 * rho(f**2 / C**2)``, where ``C`` is `f_scale`,
|
381 |
+
and ``rho`` is determined by `loss` parameter. This parameter has
|
382 |
+
no effect with ``loss='linear'``, but for other `loss` values it is
|
383 |
+
of crucial importance.
|
384 |
+
max_nfev : None or int, optional
|
385 |
+
Maximum number of function evaluations before the termination.
|
386 |
+
If None (default), the value is chosen automatically:
|
387 |
+
|
388 |
+
* For 'trf' and 'dogbox' : 100 * n.
|
389 |
+
* For 'lm' : 100 * n if `jac` is callable and 100 * n * (n + 1)
|
390 |
+
otherwise (because 'lm' counts function calls in Jacobian
|
391 |
+
estimation).
|
392 |
+
|
393 |
+
diff_step : None or array_like, optional
|
394 |
+
Determines the relative step size for the finite difference
|
395 |
+
approximation of the Jacobian. The actual step is computed as
|
396 |
+
``x * diff_step``. If None (default), then `diff_step` is taken to be
|
397 |
+
a conventional "optimal" power of machine epsilon for the finite
|
398 |
+
difference scheme used [NR]_.
|
399 |
+
tr_solver : {None, 'exact', 'lsmr'}, optional
|
400 |
+
Method for solving trust-region subproblems, relevant only for 'trf'
|
401 |
+
and 'dogbox' methods.
|
402 |
+
|
403 |
+
* 'exact' is suitable for not very large problems with dense
|
404 |
+
Jacobian matrices. The computational complexity per iteration is
|
405 |
+
comparable to a singular value decomposition of the Jacobian
|
406 |
+
matrix.
|
407 |
+
* 'lsmr' is suitable for problems with sparse and large Jacobian
|
408 |
+
matrices. It uses the iterative procedure
|
409 |
+
`scipy.sparse.linalg.lsmr` for finding a solution of a linear
|
410 |
+
least-squares problem and only requires matrix-vector product
|
411 |
+
evaluations.
|
412 |
+
|
413 |
+
If None (default), the solver is chosen based on the type of Jacobian
|
414 |
+
returned on the first iteration.
|
415 |
+
tr_options : dict, optional
|
416 |
+
Keyword options passed to trust-region solver.
|
417 |
+
|
418 |
+
* ``tr_solver='exact'``: `tr_options` are ignored.
|
419 |
+
* ``tr_solver='lsmr'``: options for `scipy.sparse.linalg.lsmr`.
|
420 |
+
Additionally, ``method='trf'`` supports 'regularize' option
|
421 |
+
(bool, default is True), which adds a regularization term to the
|
422 |
+
normal equation, which improves convergence if the Jacobian is
|
423 |
+
rank-deficient [Byrd]_ (eq. 3.4).
|
424 |
+
|
425 |
+
jac_sparsity : {None, array_like, sparse matrix}, optional
|
426 |
+
Defines the sparsity structure of the Jacobian matrix for finite
|
427 |
+
difference estimation, its shape must be (m, n). If the Jacobian has
|
428 |
+
only few non-zero elements in *each* row, providing the sparsity
|
429 |
+
structure will greatly speed up the computations [Curtis]_. A zero
|
430 |
+
entry means that a corresponding element in the Jacobian is identically
|
431 |
+
zero. If provided, forces the use of 'lsmr' trust-region solver.
|
432 |
+
If None (default), then dense differencing will be used. Has no effect
|
433 |
+
for 'lm' method.
|
434 |
+
verbose : {0, 1, 2}, optional
|
435 |
+
Level of algorithm's verbosity:
|
436 |
+
|
437 |
+
* 0 (default) : work silently.
|
438 |
+
* 1 : display a termination report.
|
439 |
+
* 2 : display progress during iterations (not supported by 'lm'
|
440 |
+
method).
|
441 |
+
|
442 |
+
args, kwargs : tuple and dict, optional
|
443 |
+
Additional arguments passed to `fun` and `jac`. Both empty by default.
|
444 |
+
The calling signature is ``fun(x, *args, **kwargs)`` and the same for
|
445 |
+
`jac`.
|
446 |
+
|
447 |
+
Returns
|
448 |
+
-------
|
449 |
+
result : OptimizeResult
|
450 |
+
`OptimizeResult` with the following fields defined:
|
451 |
+
|
452 |
+
x : ndarray, shape (n,)
|
453 |
+
Solution found.
|
454 |
+
cost : float
|
455 |
+
Value of the cost function at the solution.
|
456 |
+
fun : ndarray, shape (m,)
|
457 |
+
Vector of residuals at the solution.
|
458 |
+
jac : ndarray, sparse matrix or LinearOperator, shape (m, n)
|
459 |
+
Modified Jacobian matrix at the solution, in the sense that J^T J
|
460 |
+
is a Gauss-Newton approximation of the Hessian of the cost function.
|
461 |
+
The type is the same as the one used by the algorithm.
|
462 |
+
grad : ndarray, shape (m,)
|
463 |
+
Gradient of the cost function at the solution.
|
464 |
+
optimality : float
|
465 |
+
First-order optimality measure. In unconstrained problems, it is
|
466 |
+
always the uniform norm of the gradient. In constrained problems,
|
467 |
+
it is the quantity which was compared with `gtol` during iterations.
|
468 |
+
active_mask : ndarray of int, shape (n,)
|
469 |
+
Each component shows whether a corresponding constraint is active
|
470 |
+
(that is, whether a variable is at the bound):
|
471 |
+
|
472 |
+
* 0 : a constraint is not active.
|
473 |
+
* -1 : a lower bound is active.
|
474 |
+
* 1 : an upper bound is active.
|
475 |
+
|
476 |
+
Might be somewhat arbitrary for 'trf' method as it generates a
|
477 |
+
sequence of strictly feasible iterates and `active_mask` is
|
478 |
+
determined within a tolerance threshold.
|
479 |
+
nfev : int
|
480 |
+
Number of function evaluations done. Methods 'trf' and 'dogbox' do
|
481 |
+
not count function calls for numerical Jacobian approximation, as
|
482 |
+
opposed to 'lm' method.
|
483 |
+
njev : int or None
|
484 |
+
Number of Jacobian evaluations done. If numerical Jacobian
|
485 |
+
approximation is used in 'lm' method, it is set to None.
|
486 |
+
status : int
|
487 |
+
The reason for algorithm termination:
|
488 |
+
|
489 |
+
* -1 : improper input parameters status returned from MINPACK.
|
490 |
+
* 0 : the maximum number of function evaluations is exceeded.
|
491 |
+
* 1 : `gtol` termination condition is satisfied.
|
492 |
+
* 2 : `ftol` termination condition is satisfied.
|
493 |
+
* 3 : `xtol` termination condition is satisfied.
|
494 |
+
* 4 : Both `ftol` and `xtol` termination conditions are satisfied.
|
495 |
+
|
496 |
+
message : str
|
497 |
+
Verbal description of the termination reason.
|
498 |
+
success : bool
|
499 |
+
True if one of the convergence criteria is satisfied (`status` > 0).
|
500 |
+
|
501 |
+
See Also
|
502 |
+
--------
|
503 |
+
leastsq : A legacy wrapper for the MINPACK implementation of the
|
504 |
+
Levenberg-Marquadt algorithm.
|
505 |
+
curve_fit : Least-squares minimization applied to a curve-fitting problem.
|
506 |
+
|
507 |
+
Notes
|
508 |
+
-----
|
509 |
+
Method 'lm' (Levenberg-Marquardt) calls a wrapper over least-squares
|
510 |
+
algorithms implemented in MINPACK (lmder, lmdif). It runs the
|
511 |
+
Levenberg-Marquardt algorithm formulated as a trust-region type algorithm.
|
512 |
+
The implementation is based on paper [JJMore]_, it is very robust and
|
513 |
+
efficient with a lot of smart tricks. It should be your first choice
|
514 |
+
for unconstrained problems. Note that it doesn't support bounds. Also,
|
515 |
+
it doesn't work when m < n.
|
516 |
+
|
517 |
+
Method 'trf' (Trust Region Reflective) is motivated by the process of
|
518 |
+
solving a system of equations, which constitute the first-order optimality
|
519 |
+
condition for a bound-constrained minimization problem as formulated in
|
520 |
+
[STIR]_. The algorithm iteratively solves trust-region subproblems
|
521 |
+
augmented by a special diagonal quadratic term and with trust-region shape
|
522 |
+
determined by the distance from the bounds and the direction of the
|
523 |
+
gradient. This enhancements help to avoid making steps directly into bounds
|
524 |
+
and efficiently explore the whole space of variables. To further improve
|
525 |
+
convergence, the algorithm considers search directions reflected from the
|
526 |
+
bounds. To obey theoretical requirements, the algorithm keeps iterates
|
527 |
+
strictly feasible. With dense Jacobians trust-region subproblems are
|
528 |
+
solved by an exact method very similar to the one described in [JJMore]_
|
529 |
+
(and implemented in MINPACK). The difference from the MINPACK
|
530 |
+
implementation is that a singular value decomposition of a Jacobian
|
531 |
+
matrix is done once per iteration, instead of a QR decomposition and series
|
532 |
+
of Givens rotation eliminations. For large sparse Jacobians a 2-D subspace
|
533 |
+
approach of solving trust-region subproblems is used [STIR]_, [Byrd]_.
|
534 |
+
The subspace is spanned by a scaled gradient and an approximate
|
535 |
+
Gauss-Newton solution delivered by `scipy.sparse.linalg.lsmr`. When no
|
536 |
+
constraints are imposed the algorithm is very similar to MINPACK and has
|
537 |
+
generally comparable performance. The algorithm works quite robust in
|
538 |
+
unbounded and bounded problems, thus it is chosen as a default algorithm.
|
539 |
+
|
540 |
+
Method 'dogbox' operates in a trust-region framework, but considers
|
541 |
+
rectangular trust regions as opposed to conventional ellipsoids [Voglis]_.
|
542 |
+
The intersection of a current trust region and initial bounds is again
|
543 |
+
rectangular, so on each iteration a quadratic minimization problem subject
|
544 |
+
to bound constraints is solved approximately by Powell's dogleg method
|
545 |
+
[NumOpt]_. The required Gauss-Newton step can be computed exactly for
|
546 |
+
dense Jacobians or approximately by `scipy.sparse.linalg.lsmr` for large
|
547 |
+
sparse Jacobians. The algorithm is likely to exhibit slow convergence when
|
548 |
+
the rank of Jacobian is less than the number of variables. The algorithm
|
549 |
+
often outperforms 'trf' in bounded problems with a small number of
|
550 |
+
variables.
|
551 |
+
|
552 |
+
Robust loss functions are implemented as described in [BA]_. The idea
|
553 |
+
is to modify a residual vector and a Jacobian matrix on each iteration
|
554 |
+
such that computed gradient and Gauss-Newton Hessian approximation match
|
555 |
+
the true gradient and Hessian approximation of the cost function. Then
|
556 |
+
the algorithm proceeds in a normal way, i.e., robust loss functions are
|
557 |
+
implemented as a simple wrapper over standard least-squares algorithms.
|
558 |
+
|
559 |
+
.. versionadded:: 0.17.0
|
560 |
+
|
561 |
+
References
|
562 |
+
----------
|
563 |
+
.. [STIR] M. A. Branch, T. F. Coleman, and Y. Li, "A Subspace, Interior,
|
564 |
+
and Conjugate Gradient Method for Large-Scale Bound-Constrained
|
565 |
+
Minimization Problems," SIAM Journal on Scientific Computing,
|
566 |
+
Vol. 21, Number 1, pp 1-23, 1999.
|
567 |
+
.. [NR] William H. Press et. al., "Numerical Recipes. The Art of Scientific
|
568 |
+
Computing. 3rd edition", Sec. 5.7.
|
569 |
+
.. [Byrd] R. H. Byrd, R. B. Schnabel and G. A. Shultz, "Approximate
|
570 |
+
solution of the trust region problem by minimization over
|
571 |
+
two-dimensional subspaces", Math. Programming, 40, pp. 247-263,
|
572 |
+
1988.
|
573 |
+
.. [Curtis] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
|
574 |
+
sparse Jacobian matrices", Journal of the Institute of
|
575 |
+
Mathematics and its Applications, 13, pp. 117-120, 1974.
|
576 |
+
.. [JJMore] J. J. More, "The Levenberg-Marquardt Algorithm: Implementation
|
577 |
+
and Theory," Numerical Analysis, ed. G. A. Watson, Lecture
|
578 |
+
Notes in Mathematics 630, Springer Verlag, pp. 105-116, 1977.
|
579 |
+
.. [Voglis] C. Voglis and I. E. Lagaris, "A Rectangular Trust Region
|
580 |
+
Dogleg Approach for Unconstrained and Bound Constrained
|
581 |
+
Nonlinear Optimization", WSEAS International Conference on
|
582 |
+
Applied Mathematics, Corfu, Greece, 2004.
|
583 |
+
.. [NumOpt] J. Nocedal and S. J. Wright, "Numerical optimization,
|
584 |
+
2nd edition", Chapter 4.
|
585 |
+
.. [BA] B. Triggs et. al., "Bundle Adjustment - A Modern Synthesis",
|
586 |
+
Proceedings of the International Workshop on Vision Algorithms:
|
587 |
+
Theory and Practice, pp. 298-372, 1999.
|
588 |
+
|
589 |
+
Examples
|
590 |
+
--------
|
591 |
+
In this example we find a minimum of the Rosenbrock function without bounds
|
592 |
+
on independent variables.
|
593 |
+
|
594 |
+
>>> import numpy as np
|
595 |
+
>>> def fun_rosenbrock(x):
|
596 |
+
... return np.array([10 * (x[1] - x[0]**2), (1 - x[0])])
|
597 |
+
|
598 |
+
Notice that we only provide the vector of the residuals. The algorithm
|
599 |
+
constructs the cost function as a sum of squares of the residuals, which
|
600 |
+
gives the Rosenbrock function. The exact minimum is at ``x = [1.0, 1.0]``.
|
601 |
+
|
602 |
+
>>> from scipy.optimize import least_squares
|
603 |
+
>>> x0_rosenbrock = np.array([2, 2])
|
604 |
+
>>> res_1 = least_squares(fun_rosenbrock, x0_rosenbrock)
|
605 |
+
>>> res_1.x
|
606 |
+
array([ 1., 1.])
|
607 |
+
>>> res_1.cost
|
608 |
+
9.8669242910846867e-30
|
609 |
+
>>> res_1.optimality
|
610 |
+
8.8928864934219529e-14
|
611 |
+
|
612 |
+
We now constrain the variables, in such a way that the previous solution
|
613 |
+
becomes infeasible. Specifically, we require that ``x[1] >= 1.5``, and
|
614 |
+
``x[0]`` left unconstrained. To this end, we specify the `bounds` parameter
|
615 |
+
to `least_squares` in the form ``bounds=([-np.inf, 1.5], np.inf)``.
|
616 |
+
|
617 |
+
We also provide the analytic Jacobian:
|
618 |
+
|
619 |
+
>>> def jac_rosenbrock(x):
|
620 |
+
... return np.array([
|
621 |
+
... [-20 * x[0], 10],
|
622 |
+
... [-1, 0]])
|
623 |
+
|
624 |
+
Putting this all together, we see that the new solution lies on the bound:
|
625 |
+
|
626 |
+
>>> res_2 = least_squares(fun_rosenbrock, x0_rosenbrock, jac_rosenbrock,
|
627 |
+
... bounds=([-np.inf, 1.5], np.inf))
|
628 |
+
>>> res_2.x
|
629 |
+
array([ 1.22437075, 1.5 ])
|
630 |
+
>>> res_2.cost
|
631 |
+
0.025213093946805685
|
632 |
+
>>> res_2.optimality
|
633 |
+
1.5885401433157753e-07
|
634 |
+
|
635 |
+
Now we solve a system of equations (i.e., the cost function should be zero
|
636 |
+
at a minimum) for a Broyden tridiagonal vector-valued function of 100000
|
637 |
+
variables:
|
638 |
+
|
639 |
+
>>> def fun_broyden(x):
|
640 |
+
... f = (3 - x) * x + 1
|
641 |
+
... f[1:] -= x[:-1]
|
642 |
+
... f[:-1] -= 2 * x[1:]
|
643 |
+
... return f
|
644 |
+
|
645 |
+
The corresponding Jacobian matrix is sparse. We tell the algorithm to
|
646 |
+
estimate it by finite differences and provide the sparsity structure of
|
647 |
+
Jacobian to significantly speed up this process.
|
648 |
+
|
649 |
+
>>> from scipy.sparse import lil_matrix
|
650 |
+
>>> def sparsity_broyden(n):
|
651 |
+
... sparsity = lil_matrix((n, n), dtype=int)
|
652 |
+
... i = np.arange(n)
|
653 |
+
... sparsity[i, i] = 1
|
654 |
+
... i = np.arange(1, n)
|
655 |
+
... sparsity[i, i - 1] = 1
|
656 |
+
... i = np.arange(n - 1)
|
657 |
+
... sparsity[i, i + 1] = 1
|
658 |
+
... return sparsity
|
659 |
+
...
|
660 |
+
>>> n = 100000
|
661 |
+
>>> x0_broyden = -np.ones(n)
|
662 |
+
...
|
663 |
+
>>> res_3 = least_squares(fun_broyden, x0_broyden,
|
664 |
+
... jac_sparsity=sparsity_broyden(n))
|
665 |
+
>>> res_3.cost
|
666 |
+
4.5687069299604613e-23
|
667 |
+
>>> res_3.optimality
|
668 |
+
1.1650454296851518e-11
|
669 |
+
|
670 |
+
Let's also solve a curve fitting problem using robust loss function to
|
671 |
+
take care of outliers in the data. Define the model function as
|
672 |
+
``y = a + b * exp(c * t)``, where t is a predictor variable, y is an
|
673 |
+
observation and a, b, c are parameters to estimate.
|
674 |
+
|
675 |
+
First, define the function which generates the data with noise and
|
676 |
+
outliers, define the model parameters, and generate data:
|
677 |
+
|
678 |
+
>>> from numpy.random import default_rng
|
679 |
+
>>> rng = default_rng()
|
680 |
+
>>> def gen_data(t, a, b, c, noise=0., n_outliers=0, seed=None):
|
681 |
+
... rng = default_rng(seed)
|
682 |
+
...
|
683 |
+
... y = a + b * np.exp(t * c)
|
684 |
+
...
|
685 |
+
... error = noise * rng.standard_normal(t.size)
|
686 |
+
... outliers = rng.integers(0, t.size, n_outliers)
|
687 |
+
... error[outliers] *= 10
|
688 |
+
...
|
689 |
+
... return y + error
|
690 |
+
...
|
691 |
+
>>> a = 0.5
|
692 |
+
>>> b = 2.0
|
693 |
+
>>> c = -1
|
694 |
+
>>> t_min = 0
|
695 |
+
>>> t_max = 10
|
696 |
+
>>> n_points = 15
|
697 |
+
...
|
698 |
+
>>> t_train = np.linspace(t_min, t_max, n_points)
|
699 |
+
>>> y_train = gen_data(t_train, a, b, c, noise=0.1, n_outliers=3)
|
700 |
+
|
701 |
+
Define function for computing residuals and initial estimate of
|
702 |
+
parameters.
|
703 |
+
|
704 |
+
>>> def fun(x, t, y):
|
705 |
+
... return x[0] + x[1] * np.exp(x[2] * t) - y
|
706 |
+
...
|
707 |
+
>>> x0 = np.array([1.0, 1.0, 0.0])
|
708 |
+
|
709 |
+
Compute a standard least-squares solution:
|
710 |
+
|
711 |
+
>>> res_lsq = least_squares(fun, x0, args=(t_train, y_train))
|
712 |
+
|
713 |
+
Now compute two solutions with two different robust loss functions. The
|
714 |
+
parameter `f_scale` is set to 0.1, meaning that inlier residuals should
|
715 |
+
not significantly exceed 0.1 (the noise level used).
|
716 |
+
|
717 |
+
>>> res_soft_l1 = least_squares(fun, x0, loss='soft_l1', f_scale=0.1,
|
718 |
+
... args=(t_train, y_train))
|
719 |
+
>>> res_log = least_squares(fun, x0, loss='cauchy', f_scale=0.1,
|
720 |
+
... args=(t_train, y_train))
|
721 |
+
|
722 |
+
And, finally, plot all the curves. We see that by selecting an appropriate
|
723 |
+
`loss` we can get estimates close to optimal even in the presence of
|
724 |
+
strong outliers. But keep in mind that generally it is recommended to try
|
725 |
+
'soft_l1' or 'huber' losses first (if at all necessary) as the other two
|
726 |
+
options may cause difficulties in optimization process.
|
727 |
+
|
728 |
+
>>> t_test = np.linspace(t_min, t_max, n_points * 10)
|
729 |
+
>>> y_true = gen_data(t_test, a, b, c)
|
730 |
+
>>> y_lsq = gen_data(t_test, *res_lsq.x)
|
731 |
+
>>> y_soft_l1 = gen_data(t_test, *res_soft_l1.x)
|
732 |
+
>>> y_log = gen_data(t_test, *res_log.x)
|
733 |
+
...
|
734 |
+
>>> import matplotlib.pyplot as plt
|
735 |
+
>>> plt.plot(t_train, y_train, 'o')
|
736 |
+
>>> plt.plot(t_test, y_true, 'k', linewidth=2, label='true')
|
737 |
+
>>> plt.plot(t_test, y_lsq, label='linear loss')
|
738 |
+
>>> plt.plot(t_test, y_soft_l1, label='soft_l1 loss')
|
739 |
+
>>> plt.plot(t_test, y_log, label='cauchy loss')
|
740 |
+
>>> plt.xlabel("t")
|
741 |
+
>>> plt.ylabel("y")
|
742 |
+
>>> plt.legend()
|
743 |
+
>>> plt.show()
|
744 |
+
|
745 |
+
In the next example, we show how complex-valued residual functions of
|
746 |
+
complex variables can be optimized with ``least_squares()``. Consider the
|
747 |
+
following function:
|
748 |
+
|
749 |
+
>>> def f(z):
|
750 |
+
... return z - (0.5 + 0.5j)
|
751 |
+
|
752 |
+
We wrap it into a function of real variables that returns real residuals
|
753 |
+
by simply handling the real and imaginary parts as independent variables:
|
754 |
+
|
755 |
+
>>> def f_wrap(x):
|
756 |
+
... fx = f(x[0] + 1j*x[1])
|
757 |
+
... return np.array([fx.real, fx.imag])
|
758 |
+
|
759 |
+
Thus, instead of the original m-D complex function of n complex
|
760 |
+
variables we optimize a 2m-D real function of 2n real variables:
|
761 |
+
|
762 |
+
>>> from scipy.optimize import least_squares
|
763 |
+
>>> res_wrapped = least_squares(f_wrap, (0.1, 0.1), bounds=([0, 0], [1, 1]))
|
764 |
+
>>> z = res_wrapped.x[0] + res_wrapped.x[1]*1j
|
765 |
+
>>> z
|
766 |
+
(0.49999999999925893+0.49999999999925893j)
|
767 |
+
|
768 |
+
"""
|
769 |
+
if method not in ['trf', 'dogbox', 'lm']:
|
770 |
+
raise ValueError("`method` must be 'trf', 'dogbox' or 'lm'.")
|
771 |
+
|
772 |
+
if jac not in ['2-point', '3-point', 'cs'] and not callable(jac):
|
773 |
+
raise ValueError("`jac` must be '2-point', '3-point', 'cs' or "
|
774 |
+
"callable.")
|
775 |
+
|
776 |
+
if tr_solver not in [None, 'exact', 'lsmr']:
|
777 |
+
raise ValueError("`tr_solver` must be None, 'exact' or 'lsmr'.")
|
778 |
+
|
779 |
+
if loss not in IMPLEMENTED_LOSSES and not callable(loss):
|
780 |
+
raise ValueError("`loss` must be one of {} or a callable."
|
781 |
+
.format(IMPLEMENTED_LOSSES.keys()))
|
782 |
+
|
783 |
+
if method == 'lm' and loss != 'linear':
|
784 |
+
raise ValueError("method='lm' supports only 'linear' loss function.")
|
785 |
+
|
786 |
+
if verbose not in [0, 1, 2]:
|
787 |
+
raise ValueError("`verbose` must be in [0, 1, 2].")
|
788 |
+
|
789 |
+
if max_nfev is not None and max_nfev <= 0:
|
790 |
+
raise ValueError("`max_nfev` must be None or positive integer.")
|
791 |
+
|
792 |
+
if np.iscomplexobj(x0):
|
793 |
+
raise ValueError("`x0` must be real.")
|
794 |
+
|
795 |
+
x0 = np.atleast_1d(x0).astype(float)
|
796 |
+
|
797 |
+
if x0.ndim > 1:
|
798 |
+
raise ValueError("`x0` must have at most 1 dimension.")
|
799 |
+
|
800 |
+
if isinstance(bounds, Bounds):
|
801 |
+
lb, ub = bounds.lb, bounds.ub
|
802 |
+
bounds = (lb, ub)
|
803 |
+
else:
|
804 |
+
if len(bounds) == 2:
|
805 |
+
lb, ub = prepare_bounds(bounds, x0.shape[0])
|
806 |
+
else:
|
807 |
+
raise ValueError("`bounds` must contain 2 elements.")
|
808 |
+
|
809 |
+
if method == 'lm' and not np.all((lb == -np.inf) & (ub == np.inf)):
|
810 |
+
raise ValueError("Method 'lm' doesn't support bounds.")
|
811 |
+
|
812 |
+
if lb.shape != x0.shape or ub.shape != x0.shape:
|
813 |
+
raise ValueError("Inconsistent shapes between bounds and `x0`.")
|
814 |
+
|
815 |
+
if np.any(lb >= ub):
|
816 |
+
raise ValueError("Each lower bound must be strictly less than each "
|
817 |
+
"upper bound.")
|
818 |
+
|
819 |
+
if not in_bounds(x0, lb, ub):
|
820 |
+
raise ValueError("`x0` is infeasible.")
|
821 |
+
|
822 |
+
x_scale = check_x_scale(x_scale, x0)
|
823 |
+
|
824 |
+
ftol, xtol, gtol = check_tolerance(ftol, xtol, gtol, method)
|
825 |
+
|
826 |
+
if method == 'trf':
|
827 |
+
x0 = make_strictly_feasible(x0, lb, ub)
    def fun_wrapped(x):
        return np.atleast_1d(fun(x, *args, **kwargs))

    f0 = fun_wrapped(x0)

    if f0.ndim != 1:
        raise ValueError("`fun` must return at most 1-d array_like. "
                         f"f0.shape: {f0.shape}")

    if not np.all(np.isfinite(f0)):
        raise ValueError("Residuals are not finite in the initial point.")

    n = x0.size
    m = f0.size

    if method == 'lm' and m < n:
        raise ValueError("Method 'lm' doesn't work when the number of "
                         "residuals is less than the number of variables.")
    loss_function = construct_loss_function(m, loss, f_scale)
    if callable(loss):
        rho = loss_function(f0)
        if rho.shape != (3, m):
            raise ValueError("The return value of `loss` callable has wrong "
                             "shape.")
        initial_cost = 0.5 * np.sum(rho[0])
    elif loss_function is not None:
        initial_cost = loss_function(f0, cost_only=True)
    else:
        initial_cost = 0.5 * np.dot(f0, f0)
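
    # Normalize the Jacobian interface: a user callable may return a dense
    # array, a sparse matrix, or a LinearOperator, and each case gets its own
    # wrapper with the common signature jac_wrapped(x, f).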
    if callable(jac):
        J0 = jac(x0, *args, **kwargs)

        if issparse(J0):
            J0 = J0.tocsr()

            def jac_wrapped(x, _=None):
                return jac(x, *args, **kwargs).tocsr()

        elif isinstance(J0, LinearOperator):
            def jac_wrapped(x, _=None):
                return jac(x, *args, **kwargs)

        else:
            J0 = np.atleast_2d(J0)

            def jac_wrapped(x, _=None):
                return np.atleast_2d(jac(x, *args, **kwargs))

    else:  # Estimate Jacobian by finite differences.
        if method == 'lm':
            if jac_sparsity is not None:
                raise ValueError("method='lm' does not support "
                                 "`jac_sparsity`.")

            if jac != '2-point':
                warn(f"jac='{jac}' works equivalently to '2-point' for method='lm'.",
                     stacklevel=2)

            J0 = jac_wrapped = None
        else:
            if jac_sparsity is not None and tr_solver == 'exact':
                raise ValueError("tr_solver='exact' is incompatible "
                                 "with `jac_sparsity`.")

            jac_sparsity = check_jac_sparsity(jac_sparsity, m, n)

            def jac_wrapped(x, f):
                J = approx_derivative(fun, x, rel_step=diff_step, method=jac,
                                      f0=f, bounds=bounds, args=args,
                                      kwargs=kwargs, sparsity=jac_sparsity)
                if J.ndim != 2:  # J is guaranteed not sparse.
                    J = np.atleast_2d(J)

                return J

            J0 = jac_wrapped(x0, f0)
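
    # Sanity-check the initial Jacobian: its shape must be (m, n), and sparse
    # or LinearOperator Jacobians are incompatible with 'lm' and with
    # tr_solver='exact'.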
    if J0 is not None:
        if J0.shape != (m, n):
            raise ValueError(
                f"The return value of `jac` has wrong shape: expected {(m, n)}, "
                f"actual {J0.shape}."
            )

        if not isinstance(J0, np.ndarray):
            if method == 'lm':
                raise ValueError("method='lm' works only with dense "
                                 "Jacobian matrices.")

            if tr_solver == 'exact':
                raise ValueError(
                    "tr_solver='exact' works only with dense "
                    "Jacobian matrices.")

        jac_scale = isinstance(x_scale, str) and x_scale == 'jac'
        if isinstance(J0, LinearOperator) and jac_scale:
            raise ValueError("x_scale='jac' can't be used when `jac` "
                             "returns LinearOperator.")
    if tr_solver is None:
        if isinstance(J0, np.ndarray):
            tr_solver = 'exact'
        else:
            tr_solver = 'lsmr'
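
    # Dispatch to the selected algorithm: 'lm' delegates to MINPACK, while
    # 'trf' and 'dogbox' are the bound-constrained solvers from this package.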
    if method == 'lm':
        result = call_minpack(fun_wrapped, x0, jac_wrapped, ftol, xtol, gtol,
                              max_nfev, x_scale, diff_step)

    elif method == 'trf':
        result = trf(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol, xtol,
                     gtol, max_nfev, x_scale, loss_function, tr_solver,
                     tr_options.copy(), verbose)

    elif method == 'dogbox':
        if tr_solver == 'lsmr' and 'regularize' in tr_options:
            warn("The keyword 'regularize' in `tr_options` is not relevant "
                 "for 'dogbox' method.",
                 stacklevel=2)
            tr_options = tr_options.copy()
            del tr_options['regularize']

        result = dogbox(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol,
                        xtol, gtol, max_nfev, x_scale, loss_function,
                        tr_solver, tr_options, verbose)
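
    # Attach a human-readable termination message; positive status codes
    # indicate that a convergence criterion was satisfied.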
    result.message = TERMINATION_MESSAGES[result.status]
    result.success = result.status > 0

    if verbose >= 1:
        print(result.message)
        print("Function evaluations {}, initial cost {:.4e}, final cost "
              "{:.4e}, first-order optimality {:.2e}."
              .format(result.nfev, initial_cost, result.cost,
                      result.optimality))

    return result