applied-ai-018 committed on
Commit
d76c488
·
verified ·
1 Parent(s): 6fc8360

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/bvls.py +183 -0
  2. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/givens_elimination.cpython-310-x86_64-linux-gnu.so +0 -0
  3. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__init__.py +0 -0
  4. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/__init__.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/_complex.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/_vertex.cpython-310.pyc +0 -0
  7. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/_complex.py +1225 -0
  8. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/_vertex.py +460 -0
  9. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__init__.py +6 -0
  10. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/__init__.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/canonical_constraint.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/equality_constrained_sqp.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/minimize_trustregion_constr.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/projections.cpython-310.pyc +0 -0
  15. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/qp_subproblem.cpython-310.pyc +0 -0
  16. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/report.cpython-310.pyc +0 -0
  17. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/tr_interior_point.cpython-310.pyc +0 -0
  18. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/canonical_constraint.py +390 -0
  19. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/equality_constrained_sqp.py +217 -0
  20. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/minimize_trustregion_constr.py +564 -0
  21. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/projections.py +407 -0
  22. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/qp_subproblem.py +637 -0
  23. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/report.py +51 -0
  24. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__init__.py +0 -0
  25. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  26. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__pycache__/test_canonical_constraint.cpython-310.pyc +0 -0
  27. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__pycache__/test_projections.cpython-310.pyc +0 -0
  28. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__pycache__/test_report.cpython-310.pyc +0 -0
  29. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_canonical_constraint.py +296 -0
  30. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_projections.py +214 -0
  31. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_qp_subproblem.py +645 -0
  32. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_report.py +32 -0
  33. env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tr_interior_point.py +346 -0
  34. env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  35. env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__differential_evolution.cpython-310.pyc +0 -0
  36. env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__dual_annealing.cpython-310.pyc +0 -0
  37. env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__linprog_clean_inputs.cpython-310.pyc +0 -0
  38. env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__numdiff.cpython-310.pyc +0 -0
  39. env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__remove_redundancy.cpython-310.pyc +0 -0
  40. env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__root.cpython-310.pyc +0 -0
  41. env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__shgo.cpython-310.pyc +0 -0
  42. env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__spectral.cpython-310.pyc +0 -0
  43. env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_bracket.cpython-310.pyc +0 -0
  44. env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_chandrupatla.cpython-310.pyc +0 -0
  45. env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_cobyla.cpython-310.pyc +0 -0
  46. env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_constraint_conversion.cpython-310.pyc +0 -0
  47. env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_constraints.cpython-310.pyc +0 -0
  48. env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_cython_optimize.cpython-310.pyc +0 -0
  49. env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_differentiable_functions.cpython-310.pyc +0 -0
  50. env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_differentiate.cpython-310.pyc +0 -0
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/bvls.py ADDED
@@ -0,0 +1,183 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Bounded-variable least-squares algorithm."""
2
+ import numpy as np
3
+ from numpy.linalg import norm, lstsq
4
+ from scipy.optimize import OptimizeResult
5
+
6
+ from .common import print_header_linear, print_iteration_linear
7
+
8
+
9
def compute_kkt_optimality(g, on_bound):
    """Return the largest violation of the KKT optimality conditions.

    For a variable held on a bound (``on_bound`` is -1 or +1) the violation
    is ``g * on_bound``; for a free variable (``on_bound == 0``) it is the
    magnitude of the gradient component, ``|g|``.
    """
    is_free = on_bound == 0
    violation = np.where(is_free, np.abs(g), g * on_bound)
    return np.max(violation)
15
+
16
+
17
def bvls(A, b, x_lsq, lb, ub, tol, max_iter, verbose, rcond=None):
    """Bounded-variable least-squares solver.

    Minimizes ``0.5 * ||A x - b||**2`` subject to ``lb <= x <= ub``,
    starting from the unconstrained solution `x_lsq` (clipped to the box).

    Parameters
    ----------
    A : ndarray, shape (m, n)
        Design matrix.
    b : ndarray, shape (m,)
        Right-hand side vector.
    x_lsq : ndarray, shape (n,)
        Unconstrained least-squares solution used as the starting point;
        it is clipped to the bounds before iterations start.
    lb, ub : ndarray, shape (n,)
        Lower and upper bounds on the variables.
    tol : float
        Tolerance used for both the KKT optimality test and the relative
        cost-decrease termination test.
    max_iter : int or None
        Iteration budget for the main loop; ``None`` allows ``n`` iterations
        in addition to those consumed by the initialization phase.
    verbose : int
        If 2, print a per-iteration progress table.
    rcond : float, optional
        Cut-off ratio for small singular values, forwarded to
        `numpy.linalg.lstsq`.

    Returns
    -------
    OptimizeResult
        With fields ``x``, ``fun`` (residual ``A x - b``), ``cost``,
        ``optimality`` (max KKT violation), ``active_mask`` (-1 on lower
        bound, +1 on upper bound, 0 free), ``nit``, ``status``
        (0: iteration limit reached, 1: first-order optimality satisfied,
        2: relative cost decrease below `tol`) and ``initial_cost``.
    """
    m, n = A.shape

    # Clip the start point into the box and record, per variable, which
    # bound it sits on: -1 lower, +1 upper, 0 strictly inside (free).
    x = x_lsq.copy()
    on_bound = np.zeros(n)

    mask = x <= lb
    x[mask] = lb[mask]
    on_bound[mask] = -1

    mask = x >= ub
    x[mask] = ub[mask]
    on_bound[mask] = 1

    free_set = on_bound == 0
    active_set = ~free_set
    free_set, = np.nonzero(free_set)

    # Residual, cost and gradient of the quadratic at the start point.
    r = A.dot(x) - b
    cost = 0.5 * np.dot(r, r)
    initial_cost = cost
    g = A.T.dot(r)

    cost_change = None
    step_norm = None
    iteration = 0

    if verbose == 2:
        print_header_linear()

    # This is the initialization loop. The requirement is that the
    # least-squares solution on free variables is feasible before BVLS starts.
    # One possible initialization is to set all variables to lower or upper
    # bounds, but many iterations may be required from this state later on.
    # The implemented ad-hoc procedure which intuitively should give a better
    # initial state: find the least-squares solution on current free variables,
    # if its feasible then stop, otherwise, set violating variables to
    # corresponding bounds and continue on the reduced set of free variables.

    while free_set.size > 0:
        if verbose == 2:
            optimality = compute_kkt_optimality(g, on_bound)
            print_iteration_linear(iteration, cost, cost_change, step_norm,
                                   optimality)

        iteration += 1
        x_free_old = x[free_set].copy()

        # Unconstrained LS solve on the free variables only, with the
        # contribution of the active (bounded) variables moved to the RHS.
        A_free = A[:, free_set]
        b_free = b - A.dot(x * active_set)
        z = lstsq(A_free, b_free, rcond=rcond)[0]

        lbv = z < lb[free_set]
        ubv = z > ub[free_set]
        v = lbv | ubv

        if np.any(lbv):
            ind = free_set[lbv]
            x[ind] = lb[ind]
            active_set[ind] = True
            on_bound[ind] = -1

        if np.any(ubv):
            ind = free_set[ubv]
            x[ind] = ub[ind]
            active_set[ind] = True
            on_bound[ind] = 1

        # Feasible components of z are accepted as-is.
        ind = free_set[~v]
        x[ind] = z[~v]

        r = A.dot(x) - b
        cost_new = 0.5 * np.dot(r, r)
        cost_change = cost - cost_new
        cost = cost_new
        g = A.T.dot(r)
        step_norm = norm(x[free_set] - x_free_old)

        if np.any(v):
            free_set = free_set[~v]
        else:
            # No bound violations: the free-variable LS solution is feasible.
            break

    if max_iter is None:
        max_iter = n
    max_iter += iteration

    termination_status = None

    # Main BVLS loop.

    optimality = compute_kkt_optimality(g, on_bound)
    for iteration in range(iteration, max_iter):  # BVLS Loop A
        if verbose == 2:
            print_iteration_linear(iteration, cost, cost_change,
                                   step_norm, optimality)

        if optimality < tol:
            termination_status = 1

        if termination_status is not None:
            break

        # Release the active variable with the worst KKT violation
        # (largest g * on_bound) into the free set.
        move_to_free = np.argmax(g * on_bound)
        on_bound[move_to_free] = 0

        while True:  # BVLS Loop B

            free_set = on_bound == 0
            active_set = ~free_set
            free_set, = np.nonzero(free_set)

            x_free = x[free_set]
            x_free_old = x_free.copy()
            lb_free = lb[free_set]
            ub_free = ub[free_set]

            A_free = A[:, free_set]
            b_free = b - A.dot(x * active_set)
            z = lstsq(A_free, b_free, rcond=rcond)[0]

            lbv, = np.nonzero(z < lb_free)
            ubv, = np.nonzero(z > ub_free)
            v = np.hstack((lbv, ubv))

            if v.size > 0:
                # Move from x_free toward z, stopping at the first bound
                # crossed; alpha is the largest feasible step fraction.
                alphas = np.hstack((
                    lb_free[lbv] - x_free[lbv],
                    ub_free[ubv] - x_free[ubv])) / (z[v] - x_free[v])

                i = np.argmin(alphas)
                i_free = v[i]
                alpha = alphas[i]

                x_free *= 1 - alpha
                x_free += alpha * z
                x[free_set] = x_free

                # Fix the blocking variable on the bound it hit.
                if i < lbv.size:
                    on_bound[free_set[i_free]] = -1
                else:
                    on_bound[free_set[i_free]] = 1
            else:
                # z is feasible: accept it and leave Loop B.
                x_free = z
                x[free_set] = x_free
                break

        step_norm = norm(x_free - x_free_old)

        r = A.dot(x) - b
        cost_new = 0.5 * np.dot(r, r)
        cost_change = cost - cost_new

        if cost_change < tol * cost:
            termination_status = 2
        cost = cost_new

        g = A.T.dot(r)
        optimality = compute_kkt_optimality(g, on_bound)

    if termination_status is None:
        termination_status = 0

    return OptimizeResult(
        x=x, fun=r, cost=cost, optimality=optimality, active_mask=on_bound,
        nit=iteration + 1, status=termination_status,
        initial_cost=initial_cost)
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/givens_elimination.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (236 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (189 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/_complex.cpython-310.pyc ADDED
Binary file (23 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/_vertex.cpython-310.pyc ADDED
Binary file (14.5 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/_complex.py ADDED
@@ -0,0 +1,1225 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Base classes for low memory simplicial complex structures."""
2
+ import copy
3
+ import logging
4
+ import itertools
5
+ import decimal
6
+ from functools import cache
7
+
8
+ import numpy
9
+
10
+ from ._vertex import (VertexCacheField, VertexCacheIndex)
11
+
12
+
13
+ class Complex:
14
+ """
15
+ Base class for a simplicial complex described as a cache of vertices
16
+ together with their connections.
17
+
18
+ Important methods:
19
+ Domain triangulation:
20
+ Complex.triangulate, Complex.split_generation
21
+ Triangulating arbitrary points (must be traingulable,
22
+ may exist outside domain):
23
+ Complex.triangulate(sample_set)
24
+ Converting another simplicial complex structure data type to the
25
+ structure used in Complex (ex. OBJ wavefront)
26
+ Complex.convert(datatype, data)
27
+
28
+ Important objects:
29
+ HC.V: The cache of vertices and their connection
30
+ HC.H: Storage structure of all vertex groups
31
+
32
+ Parameters
33
+ ----------
34
+ dim : int
35
+ Spatial dimensionality of the complex R^dim
36
+ domain : list of tuples, optional
37
+ The bounds [x_l, x_u]^dim of the hyperrectangle space
38
+ ex. The default domain is the hyperrectangle [0, 1]^dim
39
+ Note: The domain must be convex, non-convex spaces can be cut
40
+ away from this domain using the non-linear
41
+ g_cons functions to define any arbitrary domain
42
+ (these domains may also be disconnected from each other)
43
+ sfield :
44
+ A scalar function defined in the associated domain f: R^dim --> R
45
+ sfield_args : tuple
46
+ Additional arguments to be passed to `sfield`
47
+ vfield :
48
+ A scalar function defined in the associated domain
49
+ f: R^dim --> R^m
50
+ (for example a gradient function of the scalar field)
51
+ vfield_args : tuple
52
+ Additional arguments to be passed to vfield
53
+ symmetry : None or list
54
+ Specify if the objective function contains symmetric variables.
55
+ The search space (and therefore performance) is decreased by up to
56
+ O(n!) times in the fully symmetric case.
57
+
58
+ E.g. f(x) = (x_1 + x_2 + x_3) + (x_4)**2 + (x_5)**2 + (x_6)**2
59
+
60
+ In this equation x_2 and x_3 are symmetric to x_1, while x_5 and
61
+ x_6 are symmetric to x_4, this can be specified to the solver as:
62
+
63
+ symmetry = [0, # Variable 1
64
+ 0, # symmetric to variable 1
65
+ 0, # symmetric to variable 1
66
+ 3, # Variable 4
67
+ 3, # symmetric to variable 4
68
+ 3, # symmetric to variable 4
69
+ ]
70
+
71
+ constraints : dict or sequence of dict, optional
72
+ Constraints definition.
73
+ Function(s) ``R**n`` in the form::
74
+
75
+ g(x) <= 0 applied as g : R^n -> R^m
76
+ h(x) == 0 applied as h : R^n -> R^p
77
+
78
+ Each constraint is defined in a dictionary with fields:
79
+
80
+ type : str
81
+ Constraint type: 'eq' for equality, 'ineq' for inequality.
82
+ fun : callable
83
+ The function defining the constraint.
84
+ jac : callable, optional
85
+ The Jacobian of `fun` (only for SLSQP).
86
+ args : sequence, optional
87
+ Extra arguments to be passed to the function and Jacobian.
88
+
89
+ Equality constraint means that the constraint function result is to
90
+ be zero whereas inequality means that it is to be
91
+ non-negative.constraints : dict or sequence of dict, optional
92
+ Constraints definition.
93
+ Function(s) ``R**n`` in the form::
94
+
95
+ g(x) <= 0 applied as g : R^n -> R^m
96
+ h(x) == 0 applied as h : R^n -> R^p
97
+
98
+ Each constraint is defined in a dictionary with fields:
99
+
100
+ type : str
101
+ Constraint type: 'eq' for equality, 'ineq' for inequality.
102
+ fun : callable
103
+ The function defining the constraint.
104
+ jac : callable, optional
105
+ The Jacobian of `fun` (unused).
106
+ args : sequence, optional
107
+ Extra arguments to be passed to the function and Jacobian.
108
+
109
+ Equality constraint means that the constraint function result is to
110
+ be zero whereas inequality means that it is to be non-negative.
111
+
112
+ workers : int optional
113
+ Uses `multiprocessing.Pool <multiprocessing>`) to compute the field
114
+ functions in parallel.
115
+ """
116
    def __init__(self, dim, domain=None, sfield=None, sfield_args=(),
                 symmetry=None, constraints=None, workers=1):
        """Initialize the complex; see the class docstring for parameter
        descriptions."""
        self.dim = dim

        # Domains
        self.domain = domain
        if domain is None:
            # Default domain is the unit hypercube [0, 1]^dim.
            self.bounds = [(0.0, 1.0), ] * dim
        else:
            self.bounds = domain
        self.symmetry = symmetry
        # here in init to avoid if checks

        # Field functions
        self.sfield = sfield
        self.sfield_args = sfield_args

        # Process constraints
        # Constraints
        # Process constraint dict sequence:
        if constraints is not None:
            self.min_cons = constraints
            self.g_cons = []
            self.g_args = []
            if not isinstance(constraints, (tuple, list)):
                constraints = (constraints,)

            for cons in constraints:
                # NOTE(review): `('ineq')` is just the string 'ineq', so this
                # is a substring test — 'eq' in 'ineq' is also True, meaning
                # equality constraints are registered here too. If only
                # inequality constraints were intended, this should probably
                # be `in ('ineq',)`; confirm against callers before changing.
                if cons['type'] in ('ineq'):
                    self.g_cons.append(cons['fun'])
                    # EAFP: 'args' is optional in a constraint dict.
                    try:
                        self.g_args.append(cons['args'])
                    except KeyError:
                        self.g_args.append(())
            self.g_cons = tuple(self.g_cons)
            self.g_args = tuple(self.g_args)
        else:
            self.g_cons = None
            self.g_args = None

        # Homology properties
        self.gen = 0
        self.perm_cycle = 0

        # Every cell is stored in a list of its generation,
        # ex. the initial cell is stored in self.H[0]
        # 1st get new cells are stored in self.H[1] etc.
        # When a cell is sub-generated it is removed from this list

        self.H = []  # Storage structure of vertex groups

        # Cache of all vertices
        if (sfield is not None) or (self.g_cons is not None):
            # Initiate a vertex cache and an associated field cache, note that
            # the field case is always initiated inside the vertex cache if an
            # associated field scalar field is defined:
            # NOTE(review): both branches below build the identical
            # VertexCacheField call — presumably kept separate for clarity.
            if sfield is not None:
                self.V = VertexCacheField(field=sfield, field_args=sfield_args,
                                          g_cons=self.g_cons,
                                          g_cons_args=self.g_args,
                                          workers=workers)
            elif self.g_cons is not None:
                self.V = VertexCacheField(field=sfield, field_args=sfield_args,
                                          g_cons=self.g_cons,
                                          g_cons_args=self.g_args,
                                          workers=workers)
        else:
            # No field and no constraints: a plain index-only vertex cache.
            self.V = VertexCacheIndex()

        self.V_non_symm = []  # List of non-symmetric vertices
186
+
187
    def __call__(self):
        """Return ``self.H``, the per-generation storage of vertex groups."""
        return self.H
189
+
190
+ # %% Triangulation methods
191
    def cyclic_product(self, bounds, origin, supremum, centroid=True):
        """Generate the initial triangulation using a cyclic (C2) product.

        A generator that builds the hyperrectangle spanned by `origin` and
        `supremum` one dimension at a time, yielding each newly created
        vertex's coordinate tuple. If `centroid` is true, a centroid vertex
        is created last and its coordinates are also the generator's return
        value; otherwise the supremum tuple is returned.
        """
        # Define current hyperrectangle
        vot = tuple(origin)
        vut = tuple(supremum)  # Hyperrectangle supremum
        # NOTE(review): bare cache access — presumably creates/caches the
        # vertex as a side effect, but the next line already does that, so
        # this line looks redundant; confirm against the cache semantics.
        self.V[vot]
        vo = self.V[vot]
        yield vo.x
        self.V[vut].connect(self.V[vot])
        yield vut
        # Cyclic group approach with second x_l --- x_u operation.

        # These containers store the "lower" and "upper" vertices
        # corresponding to the origin or supremum of every C2 group.
        # It has the structure of `dim` times embedded lists each containing
        # these vertices as the entire complex grows. Bounds[0] has to be done
        # outside the loops before we have symmetric containers.
        # NOTE: This means that bounds[0][1] must always exist
        C0x = [[self.V[vot]]]
        a_vo = copy.copy(list(origin))
        a_vo[0] = vut[0]  # Update aN Origin
        a_vo = self.V[tuple(a_vo)]
        # self.V[vot].connect(self.V[tuple(a_vo)])
        self.V[vot].connect(a_vo)
        yield a_vo.x
        C1x = [[a_vo]]
        # C1x = [[self.V[tuple(a_vo)]]]
        ab_C = []  # Container for a + b operations

        # Loop over remaining bounds
        for i, x in enumerate(bounds[1:]):
            # Update lower and upper containers
            C0x.append([])
            C1x.append([])
            # try to access a second bound (if not, C1 is symmetric)
            try:
                # Early try so that we don't have to copy the cache before
                # moving on to next C1/C2: Try to add the operation of a new
                # C2 product by accessing the upper bound
                x[1]
                # Copy lists for iteration
                cC0x = [x[:] for x in C0x[:i + 1]]
                cC1x = [x[:] for x in C1x[:i + 1]]
                for j, (VL, VU) in enumerate(zip(cC0x, cC1x)):
                    for k, (vl, vu) in enumerate(zip(VL, VU)):
                        # Build aN vertices for each lower-upper pair in N:
                        a_vl = list(vl.x)
                        a_vu = list(vu.x)
                        a_vl[i + 1] = vut[i + 1]
                        a_vu[i + 1] = vut[i + 1]
                        a_vl = self.V[tuple(a_vl)]

                        # Connect vertices in N to corresponding vertices
                        # in aN:
                        vl.connect(a_vl)

                        yield a_vl.x

                        a_vu = self.V[tuple(a_vu)]
                        # Connect vertices in N to corresponding vertices
                        # in aN:
                        vu.connect(a_vu)

                        # Connect new vertex pair in aN:
                        a_vl.connect(a_vu)

                        # Connect lower pair to upper (triangulation
                        # operation of a + b (two arbitrary operations):
                        vl.connect(a_vu)
                        ab_C.append((vl, a_vu))

                        # Update the containers
                        C0x[i + 1].append(vl)
                        C0x[i + 1].append(vu)
                        C1x[i + 1].append(a_vl)
                        C1x[i + 1].append(a_vu)

                        # Update old containers
                        C0x[j].append(a_vl)
                        C1x[j].append(a_vu)

                        # Yield new points
                        yield a_vu.x

                # Try to connect aN lower source of previous a + b
                # operation with a aN vertex
                ab_Cc = copy.copy(ab_C)

                for vp in ab_Cc:
                    b_v = list(vp[0].x)
                    ab_v = list(vp[1].x)
                    b_v[i + 1] = vut[i + 1]
                    ab_v[i + 1] = vut[i + 1]
                    b_v = self.V[tuple(b_v)]  # b + vl
                    ab_v = self.V[tuple(ab_v)]  # b + a_vl
                    # Note o---o is already connected
                    vp[0].connect(ab_v)  # o-s
                    b_v.connect(ab_v)  # s-s

                    # Add new list of cross pairs
                    ab_C.append((vp[0], ab_v))
                    ab_C.append((b_v, ab_v))

            except IndexError:
                # Symmetric (single-bound) dimension: only the upper copies
                # of the existing vertices are created and connected.
                cC0x = C0x[i]
                cC1x = C1x[i]
                VL, VU = cC0x, cC1x
                for k, (vl, vu) in enumerate(zip(VL, VU)):
                    # Build aN vertices for each lower-upper pair in N:
                    a_vu = list(vu.x)
                    a_vu[i + 1] = vut[i + 1]
                    # Connect vertices in N to corresponding vertices
                    # in aN:
                    a_vu = self.V[tuple(a_vu)]
                    # Connect vertices in N to corresponding vertices
                    # in aN:
                    vu.connect(a_vu)
                    # Connect new vertex pair in aN:
                    # a_vl.connect(a_vu)
                    # Connect lower pair to upper (triangulation
                    # operation of a + b (two arbitrary operations):
                    vl.connect(a_vu)
                    ab_C.append((vl, a_vu))
                    C0x[i + 1].append(vu)
                    C1x[i + 1].append(a_vu)
                    # Yield new points
                    a_vu.connect(self.V[vut])
                    yield a_vu.x
                ab_Cc = copy.copy(ab_C)
                for vp in ab_Cc:
                    if vp[1].x[i] == vut[i]:
                        ab_v = list(vp[1].x)
                        ab_v[i + 1] = vut[i + 1]
                        ab_v = self.V[tuple(ab_v)]  # b + a_vl
                        # Note o---o is already connected
                        vp[0].connect(ab_v)  # o-s

                        # Add new list of cross pairs
                        ab_C.append((vp[0], ab_v))

        # Clean class trash
        try:
            del C0x
            del cC0x
            del C1x
            del cC1x
            del ab_C
            del ab_Cc
        except UnboundLocalError:
            pass

        # Extra yield to ensure that the triangulation is completed
        if centroid:
            vo = self.V[vot]
            vs = self.V[vut]
            # Disconnect the origin and supremum
            vo.disconnect(vs)
            # Build centroid
            vc = self.split_edge(vot, vut)
            for v in vo.nn:
                v.connect(vc)
            yield vc.x
            return vc.x
        else:
            yield vut
            return vut
357
+
358
    def triangulate(self, n=None, symmetry=None, centroid=True,
                    printout=False):
        """
        Triangulate the initial domain, if n is not None then a limited number
        of points will be generated

        Parameters
        ----------
        n : int, Number of points to be sampled.
        symmetry :

            Ex. Dictionary/hashtable
            f(x) = (x_1 + x_2 + x_3) + (x_4)**2 + (x_5)**2 + (x_6)**2

            symmetry = symmetry[0]: 0,  # Variable 1
                       symmetry[1]: 0,  # symmetric to variable 1
                       symmetry[2]: 0,  # symmetric to variable 1
                       symmetry[3]: 3,  # Variable 4
                       symmetry[4]: 3,  # symmetric to variable 4
                       symmetry[5]: 3,  # symmetric to variable 4
                       }
        centroid : bool, if True add a central point to the hypercube
        printout : bool, if True print out results

        NOTES:
        ------
        Rather than using the combinatorial algorithm to connect vertices we
        make the following observation:

        The bound pairs are similar a C2 cyclic group and the structure is
        formed using the cartesian product:

        H = C2 x C2 x C2 ... x C2 (dim times)

        So construct any normal subgroup N and consider H/N first, we connect
        all vertices within N (ex. N is C2 (the first dimension), then we move
        to a left coset aN (an operation moving around the defined H/N group by
        for example moving from the lower bound in C2 (dimension 2) to the
        higher bound in C2. During this operation connection all the vertices.
        Now repeat the N connections. Note that these elements can be connected
        in parallel.
        """
        # Inherit class arguments
        if symmetry is None:
            symmetry = self.symmetry
        # Build origin and supremum vectors
        origin = [i[0] for i in self.bounds]
        self.origin = origin
        supremum = [i[1] for i in self.bounds]

        self.supremum = supremum

        if symmetry is None:
            cbounds = self.bounds
        else:
            cbounds = copy.copy(self.bounds)
            for i, j in enumerate(symmetry):
                # NOTE(review): identity comparison between ints — works in
                # CPython for small interned ints but `!=` would be the
                # robust spelling (left unchanged in this doc-only pass).
                if i is not j:
                    # pop second entry on second symmetry vars
                    cbounds[i] = [self.bounds[symmetry[i]][0]]
                    # Sole (first) entry is the sup value and there is no
                    # origin:
                    # NOTE(review): the assignment above is immediately
                    # overwritten here, so only the sup value survives —
                    # confirm the previous line is intentionally dead.
                    cbounds[i] = [self.bounds[symmetry[i]][1]]
                    if (self.bounds[symmetry[i]] is not
                            self.bounds[symmetry[j]]):
                        logging.warning(f"Variable {i} was specified as "
                                        f"symmetetric to variable {j}, however"
                                        f", the bounds {i} ="
                                        f" {self.bounds[symmetry[i]]} and {j}"
                                        f" ="
                                        f" {self.bounds[symmetry[j]]} do not "
                                        f"match, the mismatch was ignored in "
                                        f"the initial triangulation.")
                        cbounds[i] = self.bounds[symmetry[j]]

        if n is None:
            # Build generator
            self.cp = self.cyclic_product(cbounds, origin, supremum, centroid)
            # Exhaust the generator purely for its vertex-building side
            # effects; the yielded values are discarded.
            for i in self.cp:
                i

            try:
                self.triangulated_vectors.append((tuple(self.origin),
                                                  tuple(self.supremum)))
            except (AttributeError, KeyError):
                # First triangulation: the attribute does not exist yet.
                self.triangulated_vectors = [(tuple(self.origin),
                                              tuple(self.supremum))]

        else:
            # Check if generator already exists
            try:
                self.cp
            except (AttributeError, KeyError):
                self.cp = self.cyclic_product(cbounds, origin, supremum,
                                              centroid)

            # Advance the generator until the vertex cache holds n points
            # or the triangulation is complete.
            try:
                while len(self.V.cache) < n:
                    next(self.cp)
            except StopIteration:
                try:
                    self.triangulated_vectors.append((tuple(self.origin),
                                                      tuple(self.supremum)))
                except (AttributeError, KeyError):
                    self.triangulated_vectors = [(tuple(self.origin),
                                                  tuple(self.supremum))]

        if printout:
            # for v in self.C0():
            #     v.print_out()
            for v in self.V.cache:
                self.V[v].print_out()

        return
472
+
473
    def refine(self, n=1):
        """Add approximately `n` new vertices to the complex.

        With ``n=None`` the entire domain is refined (or the initial
        triangulation is performed if it has not happened yet).
        """
        if n is None:
            try:
                self.triangulated_vectors
                self.refine_all()
                return
            except AttributeError as ae:
                # NOTE(review): matching on the exception message string is
                # fragile — it breaks silently if the attribute is renamed;
                # confirm before relying on this elsewhere.
                if str(ae) == "'Complex' object has no attribute " \
                              "'triangulated_vectors'":
                    self.triangulate(symmetry=self.symmetry)
                    return
                else:
                    raise

        nt = len(self.V.cache) + n  # Target number of total vertices
        # In the outer while loop we iterate until we have added an extra `n`
        # vertices to the complex:
        while len(self.V.cache) < nt:  # while loop 1
            try:  # try 1
                # Try to access triangulated_vectors, this should only be
                # defined if an initial triangulation has already been
                # performed:
                self.triangulated_vectors
                # Try a usual iteration of the current generator, if it
                # does not exist or is exhausted then produce a new generator
                try:  # try 2
                    next(self.rls)
                except (AttributeError, StopIteration, KeyError):
                    vp = self.triangulated_vectors[0]
                    self.rls = self.refine_local_space(*vp, bounds=self.bounds)
                    next(self.rls)

            except (AttributeError, KeyError):
                # If an initial triangulation has not been completed, then
                # we start/continue the initial triangulation targeting `nt`
                # vertices, if nt is greater than the initial number of
                # vertices then the `refine` routine will move back to try 1.
                self.triangulate(nt, self.symmetry)
        return
512
+
513
    def refine_all(self, centroids=True):
        """Refine the entire domain of the current complex."""
        try:
            self.triangulated_vectors
            # Iterate over a copy since refinement appends new sub-domains
            # to self.triangulated_vectors while we walk it.
            tvs = copy.copy(self.triangulated_vectors)
            for i, vp in enumerate(tvs):
                self.rls = self.refine_local_space(*vp, bounds=self.bounds)
                # Exhaust the generator for its side effects only.
                for i in self.rls:
                    i
        except AttributeError as ae:
            # NOTE(review): message-string matching is fragile (see refine()).
            if str(ae) == "'Complex' object has no attribute " \
                          "'triangulated_vectors'":
                self.triangulate(symmetry=self.symmetry, centroid=centroids)
            else:
                raise

        # This adds a centroid to every new sub-domain generated and defined
        # by self.triangulated_vectors, in addition the vertices ! to complete
        # the triangulation
        return
533
+
534
    def refine_local_space(self, origin, supremum, bounds, centroid=1):
        """Refine the hyperrectangular sub-domain spanned by ``origin`` and
        ``supremum``.

        Generator: new vertex coordinates are yielded one at a time as they
        are created, so callers can stop refinement early (see ``refine``).
        As a side effect the (origin, supremum) pair is removed from
        ``self.triangulated_vectors`` and the newly created sub-domains are
        appended to it.

        Parameters
        ----------
        origin, supremum : tuple
            Opposite corners of the hyperrectangle to refine.
        bounds : sequence
            Per-dimension bounds of the problem domain.
        centroid : int (bool-like), optional
            If truthy, also construct centroid vertices of the new
            sub-domains at the end of the pass.
        """
        # Copy for later removal
        origin_c = copy.copy(origin)
        supremum_c = copy.copy(supremum)

        # Initiate local variables redefined in later inner `for` loop:
        vl, vu, a_vu = None, None, None

        # Change the vector orientation so that it is only increasing
        s_ov = list(origin)
        s_origin = list(origin)
        s_sv = list(supremum)
        s_supremum = list(supremum)
        for i, vi in enumerate(s_origin):
            if s_ov[i] > s_sv[i]:
                s_origin[i] = s_sv[i]
                s_supremum[i] = s_ov[i]

        vot = tuple(s_origin)
        vut = tuple(s_supremum)  # Hyperrectangle supremum

        vo = self.V[vot]  # initiate if doesn't exist yet
        vs = self.V[vut]
        # Start by finding the old centroid of the new space:
        vco = self.split_edge(vo.x, vs.x)  # Split in case not centroid arg

        # Find set of extreme vertices in current local space
        sup_set = copy.copy(vco.nn)
        # Cyclic group approach with second x_l --- x_u operation.

        # These containers store the "lower" and "upper" vertices
        # corresponding to the origin or supremum of every C2 group.
        # It has the structure of `dim` times embedded lists each containing
        # these vertices as the entire complex grows. Bounds[0] has to be done
        # outside the loops before we have symmetric containers.
        # NOTE: This means that bounds[0][1] must always exist

        a_vl = copy.copy(list(vot))
        a_vl[0] = vut[0]  # Update aN Origin
        if tuple(a_vl) not in self.V.cache:
            vo = self.V[vot]  # initiate if doesn't exist yet
            vs = self.V[vut]
            # Start by finding the old centroid of the new space:
            vco = self.split_edge(vo.x, vs.x)  # Split in case not centroid arg

            # Find set of extreme vertices in current local space
            sup_set = copy.copy(vco.nn)
            a_vl = copy.copy(list(vot))
            a_vl[0] = vut[0]  # Update aN Origin
            a_vl = self.V[tuple(a_vl)]
        else:
            a_vl = self.V[tuple(a_vl)]

        c_v = self.split_edge(vo.x, a_vl.x)
        c_v.connect(vco)
        yield c_v.x
        Cox = [[vo]]
        Ccx = [[c_v]]
        Cux = [[a_vl]]
        ab_C = []  # Container for a + b operations
        s_ab_C = []  # Container for symmetric a + b operations

        # Loop over remaining bounds
        for i, x in enumerate(bounds[1:]):
            # Update lower and upper containers
            Cox.append([])
            Ccx.append([])
            Cux.append([])
            # try to access a second bound (if not, C1 is symmetric)
            try:
                t_a_vl = list(vot)
                t_a_vl[i + 1] = vut[i + 1]

                # New: lists are used anyway, so copy all
                # %%
                # Copy lists for iteration
                # NOTE(review): the comprehension variable `x` shadows the
                # outer loop's `x` (which is otherwise unused) — confirm
                # this is intentional.
                cCox = [x[:] for x in Cox[:i + 1]]
                cCcx = [x[:] for x in Ccx[:i + 1]]
                cCux = [x[:] for x in Cux[:i + 1]]
                # Try to connect aN lower source of previous a + b
                # operation with a aN vertex
                ab_Cc = copy.copy(ab_C)  # NOTE: We append ab_C in the
                # (VL, VC, VU) for-loop, but we use the copy of the list in the
                # ab_Cc for-loop.
                s_ab_Cc = copy.copy(s_ab_C)

                # Early try so that we don't have to copy the cache before
                # moving on to next C1/C2: Try to add the operation of a new
                # C2 product by accessing the upper bound
                if tuple(t_a_vl) not in self.V.cache:
                    # Raise error to continue symmetric refine
                    raise IndexError
                t_a_vu = list(vut)
                t_a_vu[i + 1] = vut[i + 1]
                if tuple(t_a_vu) not in self.V.cache:
                    # Raise error to continue symmetric refine:
                    raise IndexError

                for vectors in s_ab_Cc:
                    # s_ab_C.append([c_vc, vl, vu, a_vu])
                    bc_vc = list(vectors[0].x)
                    b_vl = list(vectors[1].x)
                    b_vu = list(vectors[2].x)
                    ba_vu = list(vectors[3].x)

                    bc_vc[i + 1] = vut[i + 1]
                    b_vl[i + 1] = vut[i + 1]
                    b_vu[i + 1] = vut[i + 1]
                    ba_vu[i + 1] = vut[i + 1]

                    bc_vc = self.V[tuple(bc_vc)]
                    bc_vc.connect(vco)  # NOTE: Unneeded?
                    yield bc_vc

                    # Split to centre, call this centre group "d = 0.5*a"
                    d_bc_vc = self.split_edge(vectors[0].x, bc_vc.x)
                    d_bc_vc.connect(bc_vc)
                    d_bc_vc.connect(vectors[1])  # Connect all to centroid
                    d_bc_vc.connect(vectors[2])  # Connect all to centroid
                    d_bc_vc.connect(vectors[3])  # Connect all to centroid
                    yield d_bc_vc.x
                    b_vl = self.V[tuple(b_vl)]
                    bc_vc.connect(b_vl)  # Connect aN cross pairs
                    d_bc_vc.connect(b_vl)  # Connect all to centroid

                    yield b_vl
                    b_vu = self.V[tuple(b_vu)]
                    bc_vc.connect(b_vu)  # Connect aN cross pairs
                    d_bc_vc.connect(b_vu)  # Connect all to centroid

                    b_vl_c = self.split_edge(b_vu.x, b_vl.x)
                    bc_vc.connect(b_vl_c)

                    yield b_vu
                    ba_vu = self.V[tuple(ba_vu)]
                    bc_vc.connect(ba_vu)  # Connect aN cross pairs
                    d_bc_vc.connect(ba_vu)  # Connect all to centroid

                    # Split the a + b edge of the initial triangulation:
                    os_v = self.split_edge(vectors[1].x, ba_vu.x)  # o-s
                    ss_v = self.split_edge(b_vl.x, ba_vu.x)  # s-s
                    b_vu_c = self.split_edge(b_vu.x, ba_vu.x)
                    bc_vc.connect(b_vu_c)
                    yield os_v.x  # often equal to vco, but not always
                    yield ss_v.x  # often equal to bc_vu, but not always
                    yield ba_vu
                    # Split remaining to centre, call this centre group
                    # "d = 0.5*a"
                    d_bc_vc = self.split_edge(vectors[0].x, bc_vc.x)
                    d_bc_vc.connect(vco)  # NOTE: Unneeded?
                    yield d_bc_vc.x
                    d_b_vl = self.split_edge(vectors[1].x, b_vl.x)
                    d_bc_vc.connect(vco)  # NOTE: Unneeded?
                    d_bc_vc.connect(d_b_vl)  # Connect dN cross pairs
                    yield d_b_vl.x
                    d_b_vu = self.split_edge(vectors[2].x, b_vu.x)
                    d_bc_vc.connect(vco)  # NOTE: Unneeded?
                    d_bc_vc.connect(d_b_vu)  # Connect dN cross pairs
                    yield d_b_vu.x
                    d_ba_vu = self.split_edge(vectors[3].x, ba_vu.x)
                    d_bc_vc.connect(vco)  # NOTE: Unneeded?
                    d_bc_vc.connect(d_ba_vu)  # Connect dN cross pairs
                    yield d_ba_vu

                    # comb = [c_vc, vl, vu, a_vl, a_vu,
                    #        bc_vc, b_vl, b_vu, ba_vl, ba_vu]
                    # NOTE(review): vl, vu, a_vu below are the values left
                    # over from a previous iteration (initialised to None at
                    # the top of the function) — confirm intended.
                    comb = [vl, vu, a_vu,
                            b_vl, b_vu, ba_vu]
                    comb_iter = itertools.combinations(comb, 2)
                    for vecs in comb_iter:
                        self.split_edge(vecs[0].x, vecs[1].x)
                    # Add new list of cross pairs
                    ab_C.append((d_bc_vc, vectors[1], b_vl, a_vu, ba_vu))
                    ab_C.append((d_bc_vc, vl, b_vl, a_vu, ba_vu))  # = prev

                for vectors in ab_Cc:
                    bc_vc = list(vectors[0].x)
                    b_vl = list(vectors[1].x)
                    b_vu = list(vectors[2].x)
                    ba_vl = list(vectors[3].x)
                    ba_vu = list(vectors[4].x)
                    bc_vc[i + 1] = vut[i + 1]
                    b_vl[i + 1] = vut[i + 1]
                    b_vu[i + 1] = vut[i + 1]
                    ba_vl[i + 1] = vut[i + 1]
                    ba_vu[i + 1] = vut[i + 1]
                    bc_vc = self.V[tuple(bc_vc)]
                    bc_vc.connect(vco)  # NOTE: Unneeded?
                    yield bc_vc

                    # Split to centre, call this centre group "d = 0.5*a"
                    d_bc_vc = self.split_edge(vectors[0].x, bc_vc.x)
                    d_bc_vc.connect(bc_vc)
                    d_bc_vc.connect(vectors[1])  # Connect all to centroid
                    d_bc_vc.connect(vectors[2])  # Connect all to centroid
                    d_bc_vc.connect(vectors[3])  # Connect all to centroid
                    d_bc_vc.connect(vectors[4])  # Connect all to centroid
                    yield d_bc_vc.x
                    b_vl = self.V[tuple(b_vl)]
                    bc_vc.connect(b_vl)  # Connect aN cross pairs
                    d_bc_vc.connect(b_vl)  # Connect all to centroid
                    yield b_vl
                    b_vu = self.V[tuple(b_vu)]
                    bc_vc.connect(b_vu)  # Connect aN cross pairs
                    d_bc_vc.connect(b_vu)  # Connect all to centroid
                    yield b_vu
                    ba_vl = self.V[tuple(ba_vl)]
                    bc_vc.connect(ba_vl)  # Connect aN cross pairs
                    d_bc_vc.connect(ba_vl)  # Connect all to centroid
                    self.split_edge(b_vu.x, ba_vl.x)
                    yield ba_vl
                    ba_vu = self.V[tuple(ba_vu)]
                    bc_vc.connect(ba_vu)  # Connect aN cross pairs
                    d_bc_vc.connect(ba_vu)  # Connect all to centroid
                    # Split the a + b edge of the initial triangulation:
                    os_v = self.split_edge(vectors[1].x, ba_vu.x)  # o-s
                    ss_v = self.split_edge(b_vl.x, ba_vu.x)  # s-s
                    yield os_v.x  # often equal to vco, but not always
                    yield ss_v.x  # often equal to bc_vu, but not always
                    yield ba_vu
                    # Split remaining to centre, call this centre group
                    # "d = 0.5*a"
                    d_bc_vc = self.split_edge(vectors[0].x, bc_vc.x)
                    d_bc_vc.connect(vco)  # NOTE: Unneeded?
                    yield d_bc_vc.x
                    d_b_vl = self.split_edge(vectors[1].x, b_vl.x)
                    d_bc_vc.connect(vco)  # NOTE: Unneeded?
                    d_bc_vc.connect(d_b_vl)  # Connect dN cross pairs
                    yield d_b_vl.x
                    d_b_vu = self.split_edge(vectors[2].x, b_vu.x)
                    d_bc_vc.connect(vco)  # NOTE: Unneeded?
                    d_bc_vc.connect(d_b_vu)  # Connect dN cross pairs
                    yield d_b_vu.x
                    d_ba_vl = self.split_edge(vectors[3].x, ba_vl.x)
                    d_bc_vc.connect(vco)  # NOTE: Unneeded?
                    d_bc_vc.connect(d_ba_vl)  # Connect dN cross pairs
                    yield d_ba_vl
                    d_ba_vu = self.split_edge(vectors[4].x, ba_vu.x)
                    d_bc_vc.connect(vco)  # NOTE: Unneeded?
                    d_bc_vc.connect(d_ba_vu)  # Connect dN cross pairs
                    yield d_ba_vu
                    # Unpack the current cross-pair tuple into named locals:
                    c_vc, vl, vu, a_vl, a_vu = vectors

                    comb = [vl, vu, a_vl, a_vu,
                            b_vl, b_vu, ba_vl, ba_vu]
                    comb_iter = itertools.combinations(comb, 2)
                    for vecs in comb_iter:
                        self.split_edge(vecs[0].x, vecs[1].x)

                    # Add new list of cross pairs
                    ab_C.append((bc_vc, b_vl, b_vu, ba_vl, ba_vu))
                    ab_C.append((d_bc_vc, d_b_vl, d_b_vu, d_ba_vl, d_ba_vu))
                    ab_C.append((d_bc_vc, vectors[1], b_vl, a_vu, ba_vu))
                    ab_C.append((d_bc_vc, vu, b_vu, a_vl, ba_vl))

                for j, (VL, VC, VU) in enumerate(zip(cCox, cCcx, cCux)):
                    for k, (vl, vc, vu) in enumerate(zip(VL, VC, VU)):
                        # Build aN vertices for each lower-upper C3 group in N:
                        a_vl = list(vl.x)
                        a_vu = list(vu.x)
                        a_vl[i + 1] = vut[i + 1]
                        a_vu[i + 1] = vut[i + 1]
                        a_vl = self.V[tuple(a_vl)]
                        a_vu = self.V[tuple(a_vu)]
                        # Note, build (a + vc) later for consistent yields
                        # Split the a + b edge of the initial triangulation:
                        c_vc = self.split_edge(vl.x, a_vu.x)
                        self.split_edge(vl.x, vu.x)  # Equal to vc
                        # Build cN vertices for each lower-upper C3 group in N:
                        c_vc.connect(vco)
                        c_vc.connect(vc)
                        c_vc.connect(vl)  # Connect c + ac operations
                        c_vc.connect(vu)  # Connect c + ac operations
                        c_vc.connect(a_vl)  # Connect c + ac operations
                        c_vc.connect(a_vu)  # Connect c + ac operations
                        yield c_vc.x
                        c_vl = self.split_edge(vl.x, a_vl.x)
                        c_vl.connect(vco)
                        c_vc.connect(c_vl)  # Connect cN group vertices
                        yield c_vl.x
                        # yield at end of loop:
                        c_vu = self.split_edge(vu.x, a_vu.x)
                        c_vu.connect(vco)
                        # Connect remaining cN group vertices
                        c_vc.connect(c_vu)  # Connect cN group vertices
                        yield c_vu.x

                        a_vc = self.split_edge(a_vl.x, a_vu.x)  # is (a + vc) ?
                        a_vc.connect(vco)
                        a_vc.connect(c_vc)

                        # Storage for connecting c + ac operations:
                        ab_C.append((c_vc, vl, vu, a_vl, a_vu))

                        # Update the containers
                        Cox[i + 1].append(vl)
                        Cox[i + 1].append(vc)
                        Cox[i + 1].append(vu)
                        Ccx[i + 1].append(c_vl)
                        Ccx[i + 1].append(c_vc)
                        Ccx[i + 1].append(c_vu)
                        Cux[i + 1].append(a_vl)
                        Cux[i + 1].append(a_vc)
                        Cux[i + 1].append(a_vu)

                        # Update old containers
                        Cox[j].append(c_vl)  # !
                        Cox[j].append(a_vl)
                        Ccx[j].append(c_vc)  # !
                        Ccx[j].append(a_vc)  # !
                        Cux[j].append(c_vu)  # !
                        Cux[j].append(a_vu)

                        # Yield new points
                        yield a_vc.x

            except IndexError:
                # Symmetric refinement path: the upper-bound vertex for this
                # dimension is not in the cache yet.
                for vectors in ab_Cc:
                    ba_vl = list(vectors[3].x)
                    ba_vu = list(vectors[4].x)
                    ba_vl[i + 1] = vut[i + 1]
                    ba_vu[i + 1] = vut[i + 1]
                    ba_vu = self.V[tuple(ba_vu)]
                    yield ba_vu
                    d_bc_vc = self.split_edge(vectors[1].x, ba_vu.x)  # o-s
                    yield ba_vu
                    d_bc_vc.connect(vectors[1])  # Connect all to centroid
                    d_bc_vc.connect(vectors[2])  # Connect all to centroid
                    d_bc_vc.connect(vectors[3])  # Connect all to centroid
                    d_bc_vc.connect(vectors[4])  # Connect all to centroid
                    yield d_bc_vc.x
                    ba_vl = self.V[tuple(ba_vl)]
                    yield ba_vl
                    d_ba_vl = self.split_edge(vectors[3].x, ba_vl.x)
                    d_ba_vu = self.split_edge(vectors[4].x, ba_vu.x)
                    d_ba_vc = self.split_edge(d_ba_vl.x, d_ba_vu.x)
                    yield d_ba_vl
                    yield d_ba_vu
                    yield d_ba_vc
                    c_vc, vl, vu, a_vl, a_vu = vectors
                    comb = [vl, vu, a_vl, a_vu,
                            ba_vl,
                            ba_vu]
                    comb_iter = itertools.combinations(comb, 2)
                    for vecs in comb_iter:
                        self.split_edge(vecs[0].x, vecs[1].x)

                # Copy lists for iteration
                cCox = Cox[i]
                cCcx = Ccx[i]
                cCux = Cux[i]
                VL, VC, VU = cCox, cCcx, cCux
                for k, (vl, vc, vu) in enumerate(zip(VL, VC, VU)):
                    # Build aN vertices for each lower-upper pair in N:
                    a_vu = list(vu.x)
                    a_vu[i + 1] = vut[i + 1]

                    # Connect vertices in N to corresponding vertices
                    # in aN:
                    a_vu = self.V[tuple(a_vu)]
                    # NOTE(review): `a_vl` here is left over from an earlier
                    # iteration — confirm this yield is intended.
                    yield a_vl.x
                    # Split the a + b edge of the initial triangulation:
                    c_vc = self.split_edge(vl.x, a_vu.x)
                    self.split_edge(vl.x, vu.x)  # Equal to vc
                    c_vc.connect(vco)
                    c_vc.connect(vc)
                    c_vc.connect(vl)  # Connect c + ac operations
                    c_vc.connect(vu)  # Connect c + ac operations
                    c_vc.connect(a_vu)  # Connect c + ac operations
                    yield (c_vc.x)
                    c_vu = self.split_edge(vu.x,
                                           a_vu.x)  # yield at end of loop
                    c_vu.connect(vco)
                    # Connect remaining cN group vertices
                    c_vc.connect(c_vu)  # Connect cN group vertices
                    yield (c_vu.x)

                    # Update the containers
                    Cox[i + 1].append(vu)
                    Ccx[i + 1].append(c_vu)
                    Cux[i + 1].append(a_vu)

                    # Update old containers
                    s_ab_C.append([c_vc, vl, vu, a_vu])

                    yield a_vu.x

        # Clean class trash
        try:
            del Cox
            del Ccx
            del Cux
            del ab_C
            del ab_Cc
        except UnboundLocalError:
            pass

        try:
            self.triangulated_vectors.remove((tuple(origin_c),
                                              tuple(supremum_c)))
        except ValueError:
            # Turn this into a logging warning?
            pass
        # Add newly triangulated vectors:
        for vs in sup_set:
            self.triangulated_vectors.append((tuple(vco.x), tuple(vs.x)))

        # Extra yield to ensure that the triangulation is completed
        if centroid:
            vcn_set = set()
            c_nn_lists = []
            for vs in sup_set:
                # Build centroid
                c_nn = self.vpool(vco.x, vs.x)
                try:
                    c_nn.remove(vcn_set)
                except KeyError:
                    pass
                c_nn_lists.append(c_nn)

            for c_nn in c_nn_lists:
                try:
                    c_nn.remove(vcn_set)
                except KeyError:
                    pass

            for vs, c_nn in zip(sup_set, c_nn_lists):
                # Build centroid
                vcn = self.split_edge(vco.x, vs.x)
                vcn_set.add(vcn)
                try:  # Shouldn't be needed?
                    c_nn.remove(vcn_set)
                except KeyError:
                    pass
                for vnn in c_nn:
                    vcn.connect(vnn)
                yield vcn.x
        else:
            pass

        yield vut
        return
976
+
977
+ def refine_star(self, v):
978
+ """Refine the star domain of a vertex `v`."""
979
+ # Copy lists before iteration
980
+ vnn = copy.copy(v.nn)
981
+ v1nn = []
982
+ d_v0v1_set = set()
983
+ for v1 in vnn:
984
+ v1nn.append(copy.copy(v1.nn))
985
+
986
+ for v1, v1nn in zip(vnn, v1nn):
987
+ vnnu = v1nn.intersection(vnn)
988
+
989
+ d_v0v1 = self.split_edge(v.x, v1.x)
990
+ for o_d_v0v1 in d_v0v1_set:
991
+ d_v0v1.connect(o_d_v0v1)
992
+ d_v0v1_set.add(d_v0v1)
993
+ for v2 in vnnu:
994
+ d_v1v2 = self.split_edge(v1.x, v2.x)
995
+ d_v0v1.connect(d_v1v2)
996
+ return
997
+
998
    @cache
    def split_edge(self, v1, v2):
        """Split the edge between the vertices at coordinates ``v1`` and
        ``v2`` and return the midpoint vertex.

        The original edge (if any) is removed and both endpoints are
        connected to the new midpoint.  Results are memoised on the
        coordinate pair, so repeated splits of the same edge return the
        existing midpoint vertex.

        # NOTE(review): caching on an instance method keeps `self` alive in
        # the cache for its lifetime — confirm this is acceptable here.

        Parameters
        ----------
        v1, v2 : tuple
            Coordinate tuples of the edge's endpoints.

        Returns
        -------
        vc :
            The vertex object at the midpoint of the edge.
        """
        v1 = self.V[v1]
        v2 = self.V[v2]
        # Destroy original edge, if it exists:
        v1.disconnect(v2)
        # Compute vertex on centre of edge:
        try:
            vct = (v2.x_a - v1.x_a) / 2.0 + v1.x_a
        except TypeError:  # Allow for decimal operations
            vct = (v2.x_a - v1.x_a) / decimal.Decimal(2.0) + v1.x_a

        vc = self.V[tuple(vct)]
        # Connect to original 2 vertices to the new centre vertex
        vc.connect(v1)
        vc.connect(v2)
        return vc
1015
+
1016
    def vpool(self, origin, supremum):
        """Return the pool of cached neighbour vertices that lie inside the
        hyperrectangle spanned by ``origin`` and ``supremum``.

        Parameters
        ----------
        origin, supremum : tuple
            Opposite corners of the hyperrectangle (in any orientation; the
            per-dimension bounds are normalised below).

        Returns
        -------
        vn_pool : set
            Neighbours of the origin/supremum vertices whose coordinates all
            fall within the hyperrectangle bounds.
        """
        vot = tuple(origin)
        vst = tuple(supremum)
        # Initiate vertices in case they don't exist
        vo = self.V[vot]
        vs = self.V[vst]

        # Remove origin - supremum disconnect

        # Find the lower/upper bounds of the refinement hyperrectangle
        bl = list(vot)
        bu = list(vst)
        for i, (voi, vsi) in enumerate(zip(vot, vst)):
            if bl[i] > vsi:
                bl[i] = vsi
            if bu[i] < voi:
                bu[i] = voi

        # NOTE: This is mostly done with sets/lists because we aren't sure
        #       how well the numpy arrays will scale to thousands of
        #       dimensions.
        vn_pool = set()
        vn_pool.update(vo.nn)
        vn_pool.update(vs.nn)
        cvn_pool = copy.copy(vn_pool)
        for vn in cvn_pool:
            for i, xi in enumerate(vn.x):
                if bl[i] <= xi <= bu[i]:
                    pass
                else:
                    try:
                        vn_pool.remove(vn)
                    except KeyError:
                        pass  # NOTE: Not all neighbours are in initial pool
        return vn_pool
1051
+
1052
    def vf_to_vv(self, vertices, simplices):
        """
        Convert a vertex-face mesh to a vertex-vertex mesh used by this class

        Parameters
        ----------
        vertices : list
            Vertices
        simplices : list
            Simplices
        """
        if self.dim > 1:
            for s in simplices:
                # NOTE(review): only the first two indices of each
                # `dim`-sized combination are connected below — confirm this
                # produces the intended edge set.
                edges = itertools.combinations(s, self.dim)
                for e in edges:
                    self.V[tuple(vertices[e[0]])].connect(
                        self.V[tuple(vertices[e[1]])])
        else:
            # 1-D: each "simplex" is already an edge (pair of indices).
            for e in simplices:
                self.V[tuple(vertices[e[0]])].connect(
                    self.V[tuple(vertices[e[1]])])
        return
1074
+
1075
    def connect_vertex_non_symm(self, v_x, near=None):
        """
        Adds a vertex at coords v_x to the complex that is not symmetric to the
        initial triangulation and sub-triangulation.

        If near is specified (for example; a star domain or collections of
        cells known to contain v) then only those simplices containd in near
        will be searched, this greatly speeds up the process.

        If near is not specified this method will search the entire simplicial
        complex structure.

        Parameters
        ----------
        v_x : tuple
            Coordinates of non-symmetric vertex
        near : set or list
            List of vertices, these are points near v to check for

        Returns
        -------
        found_nn : bool or None
            True if a containing simplex was found and the vertex connected;
            None when the vertex already exists and is not in
            ``self.V_non_symm``.
        """
        if near is None:
            star = self.V
        else:
            star = near
        # Create the vertex origin
        if tuple(v_x) in self.V.cache:
            if self.V[v_x] in self.V_non_symm:
                pass
            else:
                # Already a symmetric member of the complex; nothing to do.
                return

        self.V[v_x]  # Initiate the vertex in the cache (side effect only)
        found_nn = False
        S_rows = []
        for v in star:
            S_rows.append(v.x)

        S_rows = numpy.array(S_rows)
        # A holds the candidate-relative coordinates, precomputed once:
        A = numpy.array(S_rows) - numpy.array(v_x)
        # Iterate through all the possible simplices of S_rows
        for s_i in itertools.combinations(range(S_rows.shape[0]),
                                          r=self.dim + 1):
            # Check if connected, else s_i is not a simplex
            valid_simplex = True
            for i in itertools.combinations(s_i, r=2):
                # Every combination of vertices must be connected, we check of
                # the current iteration of all combinations of s_i are
                # connected we break the loop if it is not.
                if ((self.V[tuple(S_rows[i[1]])] not in
                        self.V[tuple(S_rows[i[0]])].nn)
                        and (self.V[tuple(S_rows[i[0]])] not in
                             self.V[tuple(S_rows[i[1]])].nn)):
                    valid_simplex = False
                    break

            S = S_rows[tuple([s_i])]
            if valid_simplex:
                # A degenerate simplex cannot contain the point:
                if self.deg_simplex(S, proj=None):
                    valid_simplex = False

            # If s_i is a valid simplex we can test if v_x is inside si
            if valid_simplex:
                # Find the A_j0 value from the precalculated values
                A_j0 = A[tuple([s_i])]
                if self.in_simplex(S, v_x, A_j0):
                    found_nn = True
                    # breaks the main for loop, s_i is the target simplex:
                    break

        # Connect the simplex to point
        if found_nn:
            for i in s_i:
                self.V[v_x].connect(self.V[tuple(S_rows[i])])
        # Attached the simplex to storage for all non-symmetric vertices
        self.V_non_symm.append(self.V[v_x])
        # this bool value indicates a successful connection if True:
        return found_nn
1151
+
1152
    def in_simplex(self, S, v_x, A_j0=None):
        """Check if a vector v_x is in simplex `S`.

        Uses the sign of the barycentric-style facet determinants: the point
        is inside the simplex iff every facet determinant has the expected
        alternating sign relative to the simplex's own orientation.

        Parameters
        ----------
        S : array_like
            Array containing simplex entries of vertices as rows
        v_x :
            A candidate vertex
        A_j0 : array, optional,
            Allows for A_j0 to be pre-calculated

        Returns
        -------
        res : boolean
            True if `v_x` is in `S`
        """
        # Orientation of the simplex itself (edge vectors from vertex 0):
        A_11 = numpy.delete(S, 0, 0) - S[0]

        sign_det_A_11 = numpy.sign(numpy.linalg.det(A_11))
        if sign_det_A_11 == 0:
            # NOTE: We keep the variable A_11, but we loop through A_jj
            # ind=
            # while sign_det_A_11 == 0:
            #    A_11 = numpy.delete(S, ind, 0) - S[ind]
            #    sign_det_A_11 = numpy.sign(numpy.linalg.det(A_11))

            sign_det_A_11 = -1  # TODO: Choose another det of j instead?
            # TODO: Unlikely to work in many cases

        if A_j0 is None:
            A_j0 = S - v_x

        for d in range(self.dim + 1):
            # Expected sign for facet d alternates with d:
            det_A_jj = (-1)**d * sign_det_A_11
            # TODO: Note that scipy might be faster to add as an optional
            #       dependency
            sign_det_A_j0 = numpy.sign(numpy.linalg.det(numpy.delete(A_j0, d,
                                                                     0)))
            # TODO: Note if sign_det_A_j0 == then the point is coplanar to the
            #       current simplex facet, so perhaps return True and attach?
            if det_A_jj == sign_det_A_j0:
                continue
            else:
                # One facet test failed: the point lies outside.
                return False

        return True
1199
+
1200
+ def deg_simplex(self, S, proj=None):
1201
+ """Test a simplex S for degeneracy (linear dependence in R^dim).
1202
+
1203
+ Parameters
1204
+ ----------
1205
+ S : np.array
1206
+ Simplex with rows as vertex vectors
1207
+ proj : array, optional,
1208
+ If the projection S[1:] - S[0] is already
1209
+ computed it can be added as an optional argument.
1210
+ """
1211
+ # Strategy: we test all combination of faces, if any of the
1212
+ # determinants are zero then the vectors lie on the same face and is
1213
+ # therefore linearly dependent in the space of R^dim
1214
+ if proj is None:
1215
+ proj = S[1:] - S[0]
1216
+
1217
+ # TODO: Is checking the projection of one vertex against faces of other
1218
+ # vertices sufficient? Or do we need to check more vertices in
1219
+ # dimensions higher than 2?
1220
+ # TODO: Literature seems to suggest using proj.T, but why is this
1221
+ # needed?
1222
+ if numpy.linalg.det(proj) == 0.0: # TODO: Repalace with tolerance?
1223
+ return True # Simplex is degenerate
1224
+ else:
1225
+ return False # Simplex is not degenerate
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/_vertex.py ADDED
@@ -0,0 +1,460 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import collections
2
+ from abc import ABC, abstractmethod
3
+
4
+ import numpy as np
5
+
6
+ from scipy._lib._util import MapWrapper
7
+
8
+
9
class VertexBase(ABC):
    """
    Base class for a vertex.
    """
    def __init__(self, x, nn=None, index=None):
        """
        Initiation of a vertex object.

        Parameters
        ----------
        x : tuple or vector
            The geometric location (domain).
        nn : list, optional
            Nearest neighbour list.
        index : int, optional
            Index of vertex.
        """
        self.x = x
        self.hash = hash(self.x)  # Save precomputed hash

        if nn is not None:
            self.nn = set(nn)  # can use .indexupdate to add a new list
        else:
            self.nn = set()

        self.index = index

    def __hash__(self):
        # Hash is precomputed from the coordinate tuple at construction time.
        return self.hash

    def __getattr__(self, item):
        # Lazily build (and cache) the numpy-array view of the coordinates
        # the first time `x_a` is accessed; every other missing attribute
        # raises AttributeError as usual.
        if item not in ['x_a']:
            raise AttributeError(f"{type(self)} object has no attribute "
                                 f"'{item}'")
        if item == 'x_a':
            self.x_a = np.array(self.x)
            return self.x_a

    @abstractmethod
    def connect(self, v):
        raise NotImplementedError("This method is only implemented with an "
                                  "associated child of the base class.")

    @abstractmethod
    def disconnect(self, v):
        raise NotImplementedError("This method is only implemented with an "
                                  "associated child of the base class.")

    def star(self):
        """Returns the star domain ``st(v)`` of the vertex.

        Parameters
        ----------
        v :
            The vertex ``v`` in ``st(v)``

        Returns
        -------
        st : set
            A set containing all the vertices in ``st(v)``

        Notes
        -----
        # NOTE(review): ``self.st`` aliases ``self.nn`` (no copy), so adding
        # ``self`` here also inserts the vertex into its own neighbour set —
        # confirm this mutation is intended.
        """
        self.st = self.nn
        self.st.add(self)
        return self.st
73
+
74
+
75
class VertexScalarField(VertexBase):
    """
    Add homology properties of a scalar field f: R^n --> R associated with
    the geometry built from the VertexBase class
    """

    def __init__(self, x, field=None, nn=None, index=None, field_args=(),
                 g_cons=None, g_cons_args=()):
        """
        Parameters
        ----------
        x : tuple,
            vector of vertex coordinates
        field : callable, optional
            a scalar field f: R^n --> R associated with the geometry
        nn : list, optional
            list of nearest neighbours
        index : int, optional
            index of the vertex
        field_args : tuple, optional
            additional arguments to be passed to field
        g_cons : callable, optional
            constraints on the vertex
        g_cons_args : tuple, optional
            additional arguments to be passed to g_cons
        """
        super().__init__(x, nn=nn, index=index)

        # Note Vertex is only initiated once for all x so only
        # evaluated once
        # self.feasible = None

        # self.f is externally defined by the cache to allow parallel
        # processing
        # None type that will break arithmetic operations unless defined
        # self.f = None

        # Dirty flags: when True, the cached minimiser/maximiser results
        # must be recomputed on next access.
        self.check_min = True
        self.check_max = True

    def connect(self, v):
        """Connects self to another vertex object v.

        Parameters
        ----------
        v : VertexBase or VertexScalarField object
        """
        if v is self or v in self.nn:
            return  # self-loops and duplicate edges are ignored
        self.nn.add(v)
        v.nn.add(self)

        # The adjacency changed, so extremum caches on both ends are stale:
        self.check_min = True
        self.check_max = True
        v.check_min = True
        v.check_max = True

    def disconnect(self, v):
        """Remove the edge between self and v, if it exists."""
        if v not in self.nn:
            return
        self.nn.remove(v)
        v.nn.remove(self)

        # The adjacency changed, so extremum caches on both ends are stale:
        self.check_min = True
        self.check_max = True
        v.check_min = True
        v.check_max = True

    def minimiser(self):
        """Check whether this vertex is strictly less than all its
        neighbours"""
        if self.check_min:
            self._min = all(self.f < neighbour.f for neighbour in self.nn)
            self.check_min = False
        return self._min

    def maximiser(self):
        """
        Check whether this vertex is strictly greater than all its
        neighbours.
        """
        if self.check_max:
            self._max = all(self.f > neighbour.f for neighbour in self.nn)
            self.check_max = False
        return self._max
163
+
164
+
165
class VertexVectorField(VertexBase):
    """
    Add homology properties of a scalar field f: R^n --> R^m associated with
    the geometry built from the VertexBase class.
    """

    # Unfinished stub: construction always fails (see __init__ below).
    def __init__(self, x, sfield=None, vfield=None, field_args=(),
                 vfield_args=(), g_cons=None,
                 g_cons_args=(), nn=None, index=None):
        super().__init__(x, nn=nn, index=index)

        raise NotImplementedError("This class is still a work in progress")
177
+
178
+
179
class VertexCacheBase:
    """Base class for a vertex cache for a simplicial complex."""

    def __init__(self):
        # Coordinate tuple -> vertex object; insertion order doubles as the
        # vertex index order.
        self.cache = collections.OrderedDict()
        self.nfev = 0  # Feasible points
        self.index = -1  # Index of the most recently added vertex

    def __iter__(self):
        """Iterate over the cached vertex objects in insertion order."""
        yield from self.cache.values()

    def size(self):
        """Returns the size of the vertex cache."""
        return self.index + 1

    def print_out(self):
        """Print a header followed by every cached vertex's print_out()."""
        header = f"Vertex cache of size: {len(self.cache)}:"
        bar = '=' * len(header)
        print(bar)
        print(header)
        print(bar)
        for vertex in self:
            vertex.print_out()
203
+
204
+
205
class VertexCube(VertexBase):
    """Vertex class to be used for a pure simplicial complex with no associated
    differential geometry (single level domain that exists in R^n)"""

    def __init__(self, x, nn=None, index=None):
        super().__init__(x, nn=nn, index=index)

    def connect(self, v):
        """Add an undirected edge between this vertex and ``v``."""
        # Ignore self-loops and edges that already exist:
        if v is self or v in self.nn:
            return
        self.nn.add(v)
        v.nn.add(self)

    def disconnect(self, v):
        """Remove the edge between this vertex and ``v``, if present."""
        if v in self.nn:
            self.nn.remove(v)
            v.nn.remove(self)
220
+
221
+
222
class VertexCacheIndex(VertexCacheBase):
    def __init__(self):
        """
        Class for a vertex cache for a simplicial complex without an associated
        field. Useful only for building and visualising a domain complex.

        Parameters
        ----------
        """
        super().__init__()
        # Vertex factory used by __getitem__:
        self.Vertex = VertexCube

    def __getitem__(self, x, nn=None):
        """Return the vertex at coordinates ``x``, creating it on first use."""
        try:
            return self.cache[x]
        except KeyError:
            # First access of these coordinates: create and index the vertex.
            self.index += 1
            vertex = self.Vertex(x, index=self.index)
            # logging.info("New generated vertex at x = {}".format(x))
            # NOTE: Surprisingly high performance increase if logging
            # is commented out
            self.cache[x] = vertex
            return vertex
245
+
246
+
247
+ class VertexCacheField(VertexCacheBase):
248
    def __init__(self, field=None, field_args=(), g_cons=None, g_cons_args=(),
                 workers=1):
        """
        Class for a vertex cache for a simplicial complex with an associated
        field.

        Parameters
        ----------
        field : callable
            Scalar or vector field callable.
        field_args : tuple, optional
            Any additional fixed parameters needed to completely specify the
            field function
        g_cons : dict or sequence of dict, optional
            Constraints definition.
            Function(s) ``R**n`` in the form::
        g_cons_args : tuple, optional
            Any additional fixed parameters needed to completely specify the
            constraint functions
        workers : int, optional
            Uses `multiprocessing.Pool <multiprocessing>`) to compute the field
            functions in parallel.

        """
        super().__init__()
        self.index = -1
        self.Vertex = VertexScalarField  # Vertex factory used by __getitem__
        self.field = field
        self.field_args = field_args
        self.wfield = FieldWrapper(field, field_args)  # if workers is not 1

        self.g_cons = g_cons
        self.g_cons_args = g_cons_args
        self.wgcons = ConstraintWrapper(g_cons, g_cons_args)
        self.gpool = set()  # A set of tuples to process for feasibility

        # Field processing objects
        self.fpool = set()  # A set of tuples to process for scalar function
        self.sfc_lock = False  # True if self.fpool is non-Empty

        self.workers = workers
        self._mapwrapper = MapWrapper(workers)

        # Bind serial or parallel pool processors once, so the hot paths do
        # not have to branch on `workers`/`g_cons` at every call.
        if workers == 1:
            self.process_gpool = self.proc_gpool
            if g_cons is None:
                self.process_fpool = self.proc_fpool_nog
            else:
                self.process_fpool = self.proc_fpool_g
        else:
            self.process_gpool = self.pproc_gpool
            if g_cons is None:
                self.process_fpool = self.pproc_fpool_nog
            else:
                self.process_fpool = self.pproc_fpool_g
303
+
304
+ def __getitem__(self, x, nn=None):
305
+ try:
306
+ return self.cache[x]
307
+ except KeyError:
308
+ self.index += 1
309
+ xval = self.Vertex(x, field=self.field, nn=nn, index=self.index,
310
+ field_args=self.field_args,
311
+ g_cons=self.g_cons,
312
+ g_cons_args=self.g_cons_args)
313
+
314
+ self.cache[x] = xval # Define in cache
315
+ self.gpool.add(xval) # Add to pool for processing feasibility
316
+ self.fpool.add(xval) # Add to pool for processing field values
317
+ return self.cache[x]
318
+
319
+ def __getstate__(self):
320
+ self_dict = self.__dict__.copy()
321
+ del self_dict['pool']
322
+ return self_dict
323
+
324
+ def process_pools(self):
325
+ if self.g_cons is not None:
326
+ self.process_gpool()
327
+ self.process_fpool()
328
+ self.proc_minimisers()
329
+
330
+ def feasibility_check(self, v):
331
+ v.feasible = True
332
+ for g, args in zip(self.g_cons, self.g_cons_args):
333
+ # constraint may return more than 1 value.
334
+ if np.any(g(v.x_a, *args) < 0.0):
335
+ v.f = np.inf
336
+ v.feasible = False
337
+ break
338
+
339
+ def compute_sfield(self, v):
340
+ """Compute the scalar field values of a vertex object `v`.
341
+
342
+ Parameters
343
+ ----------
344
+ v : VertexBase or VertexScalarField object
345
+ """
346
+ try:
347
+ v.f = self.field(v.x_a, *self.field_args)
348
+ self.nfev += 1
349
+ except AttributeError:
350
+ v.f = np.inf
351
+ # logging.warning(f"Field function not found at x = {self.x_a}")
352
+ if np.isnan(v.f):
353
+ v.f = np.inf
354
+
355
+ def proc_gpool(self):
356
+ """Process all constraints."""
357
+ if self.g_cons is not None:
358
+ for v in self.gpool:
359
+ self.feasibility_check(v)
360
+ # Clean the pool
361
+ self.gpool = set()
362
+
363
+ def pproc_gpool(self):
364
+ """Process all constraints in parallel."""
365
+ gpool_l = []
366
+ for v in self.gpool:
367
+ gpool_l.append(v.x_a)
368
+
369
+ G = self._mapwrapper(self.wgcons.gcons, gpool_l)
370
+ for v, g in zip(self.gpool, G):
371
+ v.feasible = g # set vertex object attribute v.feasible = g (bool)
372
+
373
+ def proc_fpool_g(self):
374
+ """Process all field functions with constraints supplied."""
375
+ for v in self.fpool:
376
+ if v.feasible:
377
+ self.compute_sfield(v)
378
+ # Clean the pool
379
+ self.fpool = set()
380
+
381
+ def proc_fpool_nog(self):
382
+ """Process all field functions with no constraints supplied."""
383
+ for v in self.fpool:
384
+ self.compute_sfield(v)
385
+ # Clean the pool
386
+ self.fpool = set()
387
+
388
+ def pproc_fpool_g(self):
389
+ """
390
+ Process all field functions with constraints supplied in parallel.
391
+ """
392
+ self.wfield.func
393
+ fpool_l = []
394
+ for v in self.fpool:
395
+ if v.feasible:
396
+ fpool_l.append(v.x_a)
397
+ else:
398
+ v.f = np.inf
399
+ F = self._mapwrapper(self.wfield.func, fpool_l)
400
+ for va, f in zip(fpool_l, F):
401
+ vt = tuple(va)
402
+ self[vt].f = f # set vertex object attribute v.f = f
403
+ self.nfev += 1
404
+ # Clean the pool
405
+ self.fpool = set()
406
+
407
+ def pproc_fpool_nog(self):
408
+ """
409
+ Process all field functions with no constraints supplied in parallel.
410
+ """
411
+ self.wfield.func
412
+ fpool_l = []
413
+ for v in self.fpool:
414
+ fpool_l.append(v.x_a)
415
+ F = self._mapwrapper(self.wfield.func, fpool_l)
416
+ for va, f in zip(fpool_l, F):
417
+ vt = tuple(va)
418
+ self[vt].f = f # set vertex object attribute v.f = f
419
+ self.nfev += 1
420
+ # Clean the pool
421
+ self.fpool = set()
422
+
423
+ def proc_minimisers(self):
424
+ """Check for minimisers."""
425
+ for v in self:
426
+ v.minimiser()
427
+ v.maximiser()
428
+
429
+
430
class ConstraintWrapper:
    """Object to wrap constraints to pass to `multiprocessing.Pool`.

    Bundles the constraint callables with their fixed extra arguments so a
    single picklable bound method (`gcons`) can be mapped over points.
    """
    def __init__(self, g_cons, g_cons_args):
        self.g_cons = g_cons
        self.g_cons_args = g_cons_args

    def gcons(self, v_x_a):
        """Return True iff the point `v_x_a` satisfies every constraint."""
        for g, args in zip(self.g_cons, self.g_cons_args):
            # A constraint callable may return a vector of values; the
            # point is infeasible as soon as any component is negative.
            if np.any(g(v_x_a, *args) < 0.0):
                return False
        return True
444
+
445
+
446
class FieldWrapper:
    """Object to wrap field to pass to `multiprocessing.Pool`.

    Bundles the field callable with its fixed extra arguments so a single
    picklable bound method (`func`) can be mapped over points.
    """
    def __init__(self, field, field_args):
        self.field = field
        self.field_args = field_args

    def func(self, v_x_a):
        """Evaluate the field at `v_x_a`; failures and NaNs map to +inf."""
        try:
            value = self.field(v_x_a, *self.field_args)
        except Exception:
            # Any evaluation failure is treated as an infinitely bad value.
            return np.inf
        return np.inf if np.isnan(value) else value
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__init__.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ """This module contains the equality constrained SQP solver."""
2
+
3
+
4
+ from .minimize_trustregion_constr import _minimize_trustregion_constr
5
+
6
+ __all__ = ['_minimize_trustregion_constr']
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (375 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/canonical_constraint.cpython-310.pyc ADDED
Binary file (12.7 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/equality_constrained_sqp.cpython-310.pyc ADDED
Binary file (4.18 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/minimize_trustregion_constr.cpython-310.pyc ADDED
Binary file (20.3 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/projections.cpython-310.pyc ADDED
Binary file (10.3 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/qp_subproblem.cpython-310.pyc ADDED
Binary file (15.7 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/report.cpython-310.pyc ADDED
Binary file (2.53 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/tr_interior_point.cpython-310.pyc ADDED
Binary file (9.65 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/canonical_constraint.py ADDED
@@ -0,0 +1,390 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import scipy.sparse as sps
3
+
4
+
5
class CanonicalConstraint:
    """Canonical constraint to use with trust-constr algorithm.

    It represents the set of constraints of the form::

        f_eq(x) = 0
        f_ineq(x) <= 0

    where ``f_eq`` and ``f_ineq`` are evaluated by a single function, see
    below.

    The class is supposed to be instantiated by factory methods, which
    should prepare the parameters listed below.

    Parameters
    ----------
    n_eq, n_ineq : int
        Number of equality and inequality constraints respectively.
    fun : callable
        Function defining the constraints. The signature is
        ``fun(x) -> c_eq, c_ineq``, where ``c_eq`` is ndarray with `n_eq`
        components and ``c_ineq`` is ndarray with `n_ineq` components.
    jac : callable
        Function to evaluate the Jacobian of the constraint. The signature
        is ``jac(x) -> J_eq, J_ineq``, where ``J_eq`` and ``J_ineq`` are
        either ndarray of csr_matrix of shapes (n_eq, n) and (n_ineq, n),
        respectively.
    hess : callable
        Function to evaluate the Hessian of the constraints multiplied
        by Lagrange multipliers, that is
        ``dot(f_eq, v_eq) + dot(f_ineq, v_ineq)``. The signature is
        ``hess(x, v_eq, v_ineq) -> H``, where ``H`` has an implied
        shape (n, n) and provide a matrix-vector product operation
        ``H.dot(p)``.
    keep_feasible : ndarray, shape (n_ineq,)
        Mask indicating which inequality constraints should be kept feasible.
    """
    def __init__(self, n_eq, n_ineq, fun, jac, hess, keep_feasible):
        self.n_eq = n_eq
        self.n_ineq = n_ineq
        self.fun = fun
        self.jac = jac
        self.hess = hess
        self.keep_feasible = keep_feasible

    @classmethod
    def from_PreparedConstraint(cls, constraint):
        """Create an instance from `PreparedConstrained` object."""
        lb, ub = constraint.bounds
        cfun = constraint.fun
        keep_feasible = constraint.keep_feasible

        # Dispatch on the bound structure.  (A duplicated copy of the
        # fully-unbounded check was removed; each case is tested once.)
        if np.all(lb == -np.inf) and np.all(ub == np.inf):
            return cls.empty(cfun.n)
        elif np.all(lb == ub):
            return cls._equal_to_canonical(cfun, lb)
        elif np.all(lb == -np.inf):
            return cls._less_to_canonical(cfun, ub, keep_feasible)
        elif np.all(ub == np.inf):
            return cls._greater_to_canonical(cfun, lb, keep_feasible)
        else:
            return cls._interval_to_canonical(cfun, lb, ub, keep_feasible)

    @classmethod
    def empty(cls, n):
        """Create an "empty" instance.

        This "empty" instance is required to allow working with unconstrained
        problems as if they have some constraints.
        """
        empty_fun = np.empty(0)
        empty_jac = np.empty((0, n))
        empty_hess = sps.csr_matrix((n, n))

        def fun(x):
            return empty_fun, empty_fun

        def jac(x):
            return empty_jac, empty_jac

        def hess(x, v_eq, v_ineq):
            return empty_hess

        return cls(0, 0, fun, jac, hess, np.empty(0, dtype=np.bool_))

    @classmethod
    def concatenate(cls, canonical_constraints, sparse_jacobian):
        """Concatenate multiple `CanonicalConstraint` into one.

        `sparse_jacobian` (bool) determines the Jacobian format of the
        concatenated constraint. Note that items in `canonical_constraints`
        must have their Jacobians in the same format.
        """
        def fun(x):
            if canonical_constraints:
                eq_all, ineq_all = zip(
                    *[c.fun(x) for c in canonical_constraints])
            else:
                eq_all, ineq_all = [], []

            return np.hstack(eq_all), np.hstack(ineq_all)

        if sparse_jacobian:
            vstack = sps.vstack
        else:
            vstack = np.vstack

        def jac(x):
            if canonical_constraints:
                eq_all, ineq_all = zip(
                    *[c.jac(x) for c in canonical_constraints])
            else:
                eq_all, ineq_all = [], []

            return vstack(eq_all), vstack(ineq_all)

        def hess(x, v_eq, v_ineq):
            # Split the multiplier vectors back into per-constraint chunks
            # and sum the individual Hessian actions lazily.
            hess_all = []
            index_eq = 0
            index_ineq = 0
            for c in canonical_constraints:
                vc_eq = v_eq[index_eq:index_eq + c.n_eq]
                vc_ineq = v_ineq[index_ineq:index_ineq + c.n_ineq]
                hess_all.append(c.hess(x, vc_eq, vc_ineq))
                index_eq += c.n_eq
                index_ineq += c.n_ineq

            def matvec(p):
                result = np.zeros_like(p)
                for h in hess_all:
                    result += h.dot(p)
                return result

            n = x.shape[0]
            return sps.linalg.LinearOperator((n, n), matvec, dtype=float)

        n_eq = sum(c.n_eq for c in canonical_constraints)
        n_ineq = sum(c.n_ineq for c in canonical_constraints)
        keep_feasible = np.hstack([c.keep_feasible for c in
                                   canonical_constraints])

        return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible)

    @classmethod
    def _equal_to_canonical(cls, cfun, value):
        """Convert ``fun(x) == value`` to canonical form (equalities only)."""
        empty_fun = np.empty(0)
        n = cfun.n

        n_eq = value.shape[0]
        n_ineq = 0
        keep_feasible = np.empty(0, dtype=bool)

        if cfun.sparse_jacobian:
            empty_jac = sps.csr_matrix((0, n))
        else:
            empty_jac = np.empty((0, n))

        def fun(x):
            return cfun.fun(x) - value, empty_fun

        def jac(x):
            return cfun.jac(x), empty_jac

        def hess(x, v_eq, v_ineq):
            return cfun.hess(x, v_eq)

        # (A dead duplicate recomputation of `empty_fun`/`n`/`empty_jac`
        # that followed the closures was removed.)
        return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible)

    @classmethod
    def _less_to_canonical(cls, cfun, ub, keep_feasible):
        """Convert ``fun(x) <= ub`` to canonical form (inequalities only)."""
        empty_fun = np.empty(0)
        n = cfun.n
        if cfun.sparse_jacobian:
            empty_jac = sps.csr_matrix((0, n))
        else:
            empty_jac = np.empty((0, n))

        finite_ub = ub < np.inf
        n_eq = 0
        n_ineq = np.sum(finite_ub)

        if np.all(finite_ub):
            def fun(x):
                return empty_fun, cfun.fun(x) - ub

            def jac(x):
                return empty_jac, cfun.jac(x)

            def hess(x, v_eq, v_ineq):
                return cfun.hess(x, v_ineq)
        else:
            # Components with ub == inf are vacuous; keep only finite ones.
            finite_ub = np.nonzero(finite_ub)[0]
            keep_feasible = keep_feasible[finite_ub]
            ub = ub[finite_ub]

            def fun(x):
                return empty_fun, cfun.fun(x)[finite_ub] - ub

            def jac(x):
                return empty_jac, cfun.jac(x)[finite_ub]

            def hess(x, v_eq, v_ineq):
                # Scatter the reduced multipliers back to full length.
                v = np.zeros(cfun.m)
                v[finite_ub] = v_ineq
                return cfun.hess(x, v)

        return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible)

    @classmethod
    def _greater_to_canonical(cls, cfun, lb, keep_feasible):
        """Convert ``fun(x) >= lb`` to canonical form (inequalities only)."""
        empty_fun = np.empty(0)
        n = cfun.n
        if cfun.sparse_jacobian:
            empty_jac = sps.csr_matrix((0, n))
        else:
            empty_jac = np.empty((0, n))

        finite_lb = lb > -np.inf
        n_eq = 0
        n_ineq = np.sum(finite_lb)

        if np.all(finite_lb):
            def fun(x):
                return empty_fun, lb - cfun.fun(x)

            def jac(x):
                return empty_jac, -cfun.jac(x)

            def hess(x, v_eq, v_ineq):
                return cfun.hess(x, -v_ineq)
        else:
            # Components with lb == -inf are vacuous; keep only finite ones.
            finite_lb = np.nonzero(finite_lb)[0]
            keep_feasible = keep_feasible[finite_lb]
            lb = lb[finite_lb]

            def fun(x):
                return empty_fun, lb - cfun.fun(x)[finite_lb]

            def jac(x):
                return empty_jac, -cfun.jac(x)[finite_lb]

            def hess(x, v_eq, v_ineq):
                # Scatter the (negated) reduced multipliers to full length.
                v = np.zeros(cfun.m)
                v[finite_lb] = -v_ineq
                return cfun.hess(x, v)

        return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible)

    @classmethod
    def _interval_to_canonical(cls, cfun, lb, ub, keep_feasible):
        """Convert mixed ``lb <= fun(x) <= ub`` to canonical form.

        Each component is classified as equality, one-sided, or a true
        interval; an interval component contributes two inequalities.
        """
        lb_inf = lb == -np.inf
        ub_inf = ub == np.inf
        equal = lb == ub
        less = lb_inf & ~ub_inf
        greater = ub_inf & ~lb_inf
        interval = ~equal & ~lb_inf & ~ub_inf

        equal = np.nonzero(equal)[0]
        less = np.nonzero(less)[0]
        greater = np.nonzero(greater)[0]
        interval = np.nonzero(interval)[0]
        n_less = less.shape[0]
        n_greater = greater.shape[0]
        n_interval = interval.shape[0]
        n_ineq = n_less + n_greater + 2 * n_interval
        n_eq = equal.shape[0]

        keep_feasible = np.hstack((keep_feasible[less],
                                   keep_feasible[greater],
                                   keep_feasible[interval],
                                   keep_feasible[interval]))

        def fun(x):
            f = cfun.fun(x)
            eq = f[equal] - lb[equal]
            le = f[less] - ub[less]
            ge = lb[greater] - f[greater]
            il = f[interval] - ub[interval]
            ig = lb[interval] - f[interval]
            return eq, np.hstack((le, ge, il, ig))

        def jac(x):
            J = cfun.jac(x)
            eq = J[equal]
            le = J[less]
            ge = -J[greater]
            il = J[interval]
            ig = -il
            if sps.issparse(J):
                ineq = sps.vstack((le, ge, il, ig))
            else:
                ineq = np.vstack((le, ge, il, ig))
            return eq, ineq

        def hess(x, v_eq, v_ineq):
            # Undo the (less, greater, interval-upper, interval-lower)
            # stacking of the multipliers used by `fun`/`jac`.
            n_start = 0
            v_l = v_ineq[n_start:n_start + n_less]
            n_start += n_less
            v_g = v_ineq[n_start:n_start + n_greater]
            n_start += n_greater
            v_il = v_ineq[n_start:n_start + n_interval]
            n_start += n_interval
            v_ig = v_ineq[n_start:n_start + n_interval]

            v = np.zeros_like(lb)
            v[equal] = v_eq
            v[less] = v_l
            v[greater] = -v_g
            v[interval] = v_il - v_ig

            return cfun.hess(x, v)

        return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible)
328
+
329
+
330
def initial_constraints_as_canonical(n, prepared_constraints, sparse_jacobian):
    """Convert initial values of the constraints to the canonical format.

    The purpose is to avoid one additional call to the constraints at the
    initial point: the values already saved in each `PreparedConstraint`
    are modified and concatenated into the canonical constraint format.
    """
    c_eq, c_ineq, J_eq, J_ineq = [], [], [], []

    for prepared in prepared_constraints:
        f = prepared.fun.f
        J = prepared.fun.J
        lb, ub = prepared.bounds
        if np.all(lb == ub):
            # Pure equality constraint: f(x) == lb.
            c_eq.append(f - lb)
            J_eq.append(J)
        elif np.all(lb == -np.inf):
            # Upper bounds only; infinite bounds are vacuous and dropped.
            finite_ub = ub < np.inf
            c_ineq.append(f[finite_ub] - ub[finite_ub])
            J_ineq.append(J[finite_ub])
        elif np.all(ub == np.inf):
            # Lower bounds only; infinite bounds are vacuous and dropped.
            finite_lb = lb > -np.inf
            c_ineq.append(lb[finite_lb] - f[finite_lb])
            J_ineq.append(-J[finite_lb])
        else:
            # Mixed bounds: classify each component individually.
            lb_inf = lb == -np.inf
            ub_inf = ub == np.inf
            equal = lb == ub
            less = lb_inf & ~ub_inf
            greater = ub_inf & ~lb_inf
            interval = ~equal & ~lb_inf & ~ub_inf

            c_eq.append(f[equal] - lb[equal])
            c_ineq.append(f[less] - ub[less])
            c_ineq.append(lb[greater] - f[greater])
            # A true interval component contributes both inequalities.
            c_ineq.append(f[interval] - ub[interval])
            c_ineq.append(lb[interval] - f[interval])

            J_eq.append(J[equal])
            J_ineq.append(J[less])
            J_ineq.append(-J[greater])
            J_ineq.append(J[interval])
            J_ineq.append(-J[interval])

    c_eq = np.hstack(c_eq) if c_eq else np.empty(0)
    c_ineq = np.hstack(c_ineq) if c_ineq else np.empty(0)

    if sparse_jacobian:
        vstack, empty = sps.vstack, sps.csr_matrix((0, n))
    else:
        vstack, empty = np.vstack, np.empty((0, n))

    J_eq = vstack(J_eq) if J_eq else empty
    J_ineq = vstack(J_ineq) if J_ineq else empty

    return c_eq, c_ineq, J_eq, J_ineq
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/equality_constrained_sqp.py ADDED
@@ -0,0 +1,217 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Byrd-Omojokun Trust-Region SQP method."""
2
+
3
+ from scipy.sparse import eye as speye
4
+ from .projections import projections
5
+ from .qp_subproblem import modified_dogleg, projected_cg, box_intersections
6
+ import numpy as np
7
+ from numpy.linalg import norm
8
+
9
+ __all__ = ['equality_constrained_sqp']
10
+
11
+
12
def default_scaling(x):
    """Return the identity matrix as the default scaling S(x).

    `x` must be one-dimensional: the tuple-unpacking of its shape below
    raises for any other rank, exactly as before.
    """
    (n,) = np.shape(x)
    return speye(n)
15
+
16
+
17
def equality_constrained_sqp(fun_and_constr, grad_and_jac, lagr_hess,
                             x0, fun0, grad0, constr0,
                             jac0, stop_criteria,
                             state,
                             initial_penalty,
                             initial_trust_radius,
                             factorization_method,
                             trust_lb=None,
                             trust_ub=None,
                             scaling=default_scaling):
    """Solve nonlinear equality-constrained problem using trust-region SQP.

    Solve optimization problem:

        minimize fun(x)
        subject to: constr(x) = 0

    using Byrd-Omojokun Trust-Region SQP method described in [1]_. Several
    implementation details are based on [2]_ and [3]_, p. 549.

    Parameters
    ----------
    fun_and_constr : callable
        ``fun_and_constr(x) -> (f, b)``: objective value and constraint
        vector at ``x``.
    grad_and_jac : callable
        ``grad_and_jac(x) -> (c, A)``: objective gradient and constraint
        Jacobian at ``x``.
    lagr_hess : callable
        ``lagr_hess(x, v) -> H``: Lagrangian Hessian; ``H`` must support
        ``H.dot(p)``.
    x0 : ndarray
        Starting point.
    fun0, grad0, constr0, jac0 : array_like
        Pre-computed values of the above functions at ``x0`` (avoids one
        extra evaluation).
    stop_criteria : callable
        Termination test; the main loop runs while it returns False.
    state : object
        Mutable state object passed through to `stop_criteria` and
        returned alongside the solution.
    initial_penalty : float
        Starting merit-function penalty parameter.
    initial_trust_radius : float
        Starting trust-region radius.
    factorization_method : str or None
        Factorization method forwarded to `projections`.
    trust_lb, trust_ub : ndarray or None, optional
        Optional box bounds imposed on the trust-region steps
        (default: unbounded).
    scaling : callable, optional
        ``scaling(x)`` returns the step-scaling matrix S; defaults to the
        identity via `default_scaling`.

    Returns
    -------
    x : ndarray
        Final iterate.
    state : object
        The (updated) state object.

    References
    ----------
    .. [1] Lalee, Marucha, Jorge Nocedal, and Todd Plantenga. "On the
           implementation of an algorithm for large-scale equality
           constrained optimization." SIAM Journal on
           Optimization 8.3 (1998): 682-706.
    .. [2] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal.
           "An interior point algorithm for large-scale nonlinear
           programming." SIAM Journal on Optimization 9.4 (1999): 877-900.
    .. [3] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization"
           Second Edition (2006).
    """
    PENALTY_FACTOR = 0.3  # Rho from formula (3.51), reference [2]_, p.891.
    LARGE_REDUCTION_RATIO = 0.9
    INTERMEDIARY_REDUCTION_RATIO = 0.3
    SUFFICIENT_REDUCTION_RATIO = 1e-8  # Eta from reference [2]_, p.892.
    TRUST_ENLARGEMENT_FACTOR_L = 7.0
    TRUST_ENLARGEMENT_FACTOR_S = 2.0
    MAX_TRUST_REDUCTION = 0.5
    MIN_TRUST_REDUCTION = 0.1
    SOC_THRESHOLD = 0.1
    TR_FACTOR = 0.8  # Zeta from formula (3.21), reference [2]_, p.885.
    BOX_FACTOR = 0.5

    n, = np.shape(x0)  # Number of parameters

    # Set default lower and upper bounds.
    if trust_lb is None:
        trust_lb = np.full(n, -np.inf)
    if trust_ub is None:
        trust_ub = np.full(n, np.inf)

    # Initial values
    x = np.copy(x0)
    trust_radius = initial_trust_radius
    penalty = initial_penalty
    # Compute Values
    f = fun0
    c = grad0
    b = constr0
    A = jac0
    S = scaling(x)
    # Get projections
    Z, LS, Y = projections(A, factorization_method)
    # Compute least-square lagrange multipliers
    v = -LS.dot(c)
    # Compute Hessian
    H = lagr_hess(x, v)

    # Update state parameters
    optimality = norm(c + A.T.dot(v), np.inf)
    constr_violation = norm(b, np.inf) if len(b) > 0 else 0
    cg_info = {'niter': 0, 'stop_cond': 0,
               'hits_boundary': False}

    last_iteration_failed = False
    while not stop_criteria(state, x, last_iteration_failed,
                            optimality, constr_violation,
                            trust_radius, penalty, cg_info):
        # Normal Step - `dn`
        # minimize 1/2*||A dn + b||^2
        # subject to:
        # ||dn|| <= TR_FACTOR * trust_radius
        # BOX_FACTOR * lb <= dn <= BOX_FACTOR * ub.
        dn = modified_dogleg(A, Y, b,
                             TR_FACTOR*trust_radius,
                             BOX_FACTOR*trust_lb,
                             BOX_FACTOR*trust_ub)

        # Tangential Step - `dt`
        # Solve the QP problem:
        # minimize 1/2 dt.T H dt + dt.T (H dn + c)
        # subject to:
        # A dt = 0
        # ||dt|| <= sqrt(trust_radius**2 - ||dn||**2)
        # lb - dn <= dt <= ub - dn
        c_t = H.dot(dn) + c
        b_t = np.zeros_like(b)
        trust_radius_t = np.sqrt(trust_radius**2 - np.linalg.norm(dn)**2)
        lb_t = trust_lb - dn
        ub_t = trust_ub - dn
        dt, cg_info = projected_cg(H, c_t, Z, Y, b_t,
                                   trust_radius_t,
                                   lb_t, ub_t)

        # Compute update (normal + tangential steps).
        d = dn + dt

        # Compute second order model: 1/2 d H d + c.T d + f.
        quadratic_model = 1/2*(H.dot(d)).dot(d) + c.T.dot(d)
        # Compute linearized constraint: l = A d + b.
        linearized_constr = A.dot(d)+b
        # Compute new penalty parameter according to formula (3.52),
        # reference [2]_, p.891.
        vpred = norm(b) - norm(linearized_constr)
        # Guarantee `vpred` always positive,
        # regardless of roundoff errors.
        vpred = max(1e-16, vpred)
        previous_penalty = penalty
        if quadratic_model > 0:
            new_penalty = quadratic_model / ((1-PENALTY_FACTOR)*vpred)
            penalty = max(penalty, new_penalty)
        # Compute predicted reduction according to formula (3.52),
        # reference [2]_, p.891.
        predicted_reduction = -quadratic_model + penalty*vpred

        # Compute merit function at current point
        merit_function = f + penalty*norm(b)
        # Evaluate function and constraints at trial point
        x_next = x + S.dot(d)
        f_next, b_next = fun_and_constr(x_next)
        # Compute merit function at trial point
        merit_function_next = f_next + penalty*norm(b_next)
        # Compute actual reduction according to formula (3.54),
        # reference [2]_, p.892.
        actual_reduction = merit_function - merit_function_next
        # Compute reduction ratio
        reduction_ratio = actual_reduction / predicted_reduction

        # Second order correction (SOC), reference [2]_, p.892.
        if reduction_ratio < SUFFICIENT_REDUCTION_RATIO and \
                norm(dn) <= SOC_THRESHOLD * norm(dt):
            # Compute second order correction
            y = -Y.dot(b_next)
            # Make sure increment is inside box constraints
            _, t, intersect = box_intersections(d, y, trust_lb, trust_ub)
            # Compute tentative point
            x_soc = x + S.dot(d + t*y)
            f_soc, b_soc = fun_and_constr(x_soc)
            # Recompute actual reduction
            merit_function_soc = f_soc + penalty*norm(b_soc)
            actual_reduction_soc = merit_function - merit_function_soc
            # Recompute reduction ratio
            reduction_ratio_soc = actual_reduction_soc / predicted_reduction
            if intersect and reduction_ratio_soc >= SUFFICIENT_REDUCTION_RATIO:
                x_next = x_soc
                f_next = f_soc
                b_next = b_soc
                reduction_ratio = reduction_ratio_soc

        # Readjust trust region step, formula (3.55), reference [2]_, p.892.
        if reduction_ratio >= LARGE_REDUCTION_RATIO:
            trust_radius = max(TRUST_ENLARGEMENT_FACTOR_L * norm(d),
                               trust_radius)
        elif reduction_ratio >= INTERMEDIARY_REDUCTION_RATIO:
            trust_radius = max(TRUST_ENLARGEMENT_FACTOR_S * norm(d),
                               trust_radius)
        # Reduce trust region step, according to reference [3]_, p.696.
        elif reduction_ratio < SUFFICIENT_REDUCTION_RATIO:
            trust_reduction = ((1-SUFFICIENT_REDUCTION_RATIO) /
                               (1-reduction_ratio))
            new_trust_radius = trust_reduction * norm(d)
            if new_trust_radius >= MAX_TRUST_REDUCTION * trust_radius:
                trust_radius *= MAX_TRUST_REDUCTION
            elif new_trust_radius >= MIN_TRUST_REDUCTION * trust_radius:
                trust_radius = new_trust_radius
            else:
                trust_radius *= MIN_TRUST_REDUCTION

        # Update iteration
        if reduction_ratio >= SUFFICIENT_REDUCTION_RATIO:
            # Step accepted: move to the trial point and refresh all
            # derivative information there.
            x = x_next
            f, b = f_next, b_next
            c, A = grad_and_jac(x)
            S = scaling(x)
            # Get projections
            Z, LS, Y = projections(A, factorization_method)
            # Compute least-square lagrange multipliers
            v = -LS.dot(c)
            # Compute Hessian
            H = lagr_hess(x, v)
            # Set Flag
            last_iteration_failed = False
            # Optimality values
            optimality = norm(c + A.T.dot(v), np.inf)
            constr_violation = norm(b, np.inf) if len(b) > 0 else 0
        else:
            # Step rejected: restore the penalty and retry with the
            # (shrunken) trust region.
            penalty = previous_penalty
            last_iteration_failed = True

    return x, state
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/minimize_trustregion_constr.py ADDED
@@ -0,0 +1,564 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import time
2
+ import numpy as np
3
+ from scipy.sparse.linalg import LinearOperator
4
+ from .._differentiable_functions import VectorFunction
5
+ from .._constraints import (
6
+ NonlinearConstraint, LinearConstraint, PreparedConstraint, Bounds, strict_bounds)
7
+ from .._hessian_update_strategy import BFGS
8
+ from .._optimize import OptimizeResult
9
+ from .._differentiable_functions import ScalarFunction
10
+ from .equality_constrained_sqp import equality_constrained_sqp
11
+ from .canonical_constraint import (CanonicalConstraint,
12
+ initial_constraints_as_canonical)
13
+ from .tr_interior_point import tr_interior_point
14
+ from .report import BasicReport, SQPReport, IPReport
15
+
16
+
17
# Human-readable reasons for termination, keyed by the solver's status code.
TERMINATION_MESSAGES = {
    0: "The maximum number of function evaluations is exceeded.",
    1: "`gtol` termination condition is satisfied.",
    2: "`xtol` termination condition is satisfied.",
    3: "`callback` function requested termination."
}
23
+
24
+
25
class HessianLinearOperator:
    """Build LinearOperator from hessp.

    Wraps a Hessian-vector-product callable ``hessp`` as a factory: calling
    the instance at a point ``x`` yields an (n, n) `LinearOperator` whose
    matvec computes ``hessp(x, p, *args)``.
    """
    def __init__(self, hessp, n):
        self.hessp = hessp
        self.n = n

    def __call__(self, x, *args):
        shape = (self.n, self.n)
        hessp = self.hessp

        def matvec(p):
            return hessp(x, p, *args)

        return LinearOperator(shape, matvec=matvec)
+
37
+
38
class LagrangianHessian:
    """The Hessian of the Lagrangian as LinearOperator.

    The Lagrangian is the objective function plus the constraints weighted
    by the Lagrange multipliers, so its Hessian action is the sum of the
    objective-Hessian and constraint-Hessian matrix-vector products.
    """
    def __init__(self, n, objective_hess, constraints_hess):
        self.n = n
        self.objective_hess = objective_hess
        self.constraints_hess = constraints_hess

    def __call__(self, x, v_eq=np.empty(0), v_ineq=np.empty(0)):
        H_f = self.objective_hess(x)
        H_c = self.constraints_hess(x, v_eq, v_ineq)

        def matvec(p):
            # H_L @ p = H_objective @ p + H_constraints @ p
            return H_f.dot(p) + H_c.dot(p)

        return LinearOperator((self.n, self.n), matvec)
57
+
58
+
59
def update_state_sqp(state, x, last_iteration_failed, objective, prepared_constraints,
                     start_time, tr_radius, constr_penalty, cg_info):
    """Update the mutable `state` after one SQP iteration and return it.

    Evaluation counters and timing are always refreshed; the
    solution-dependent fields (point, gradients, constraint values,
    optimality/violation measures) are refreshed only when the iteration
    produced an accepted point (``last_iteration_failed`` is False).
    """
    state.nit += 1
    state.nfev = objective.nfev
    state.njev = objective.ngev
    state.nhev = objective.nhev
    # Per-constraint evaluation counters; constraint wrappers that are not
    # `VectorFunction` instances report 0.
    state.constr_nfev = [c.fun.nfev if isinstance(c.fun, VectorFunction) else 0
                         for c in prepared_constraints]
    state.constr_njev = [c.fun.njev if isinstance(c.fun, VectorFunction) else 0
                         for c in prepared_constraints]
    state.constr_nhev = [c.fun.nhev if isinstance(c.fun, VectorFunction) else 0
                         for c in prepared_constraints]

    if not last_iteration_failed:
        state.x = x
        state.fun = objective.f
        state.grad = objective.g
        state.v = [c.fun.v for c in prepared_constraints]
        state.constr = [c.fun.f for c in prepared_constraints]
        state.jac = [c.fun.J for c in prepared_constraints]
        # Compute Lagrangian Gradient: grad(f) + sum_i J_i^T v_i
        state.lagrangian_grad = np.copy(state.grad)
        for c in prepared_constraints:
            state.lagrangian_grad += c.fun.J.T.dot(c.fun.v)
        state.optimality = np.linalg.norm(state.lagrangian_grad, np.inf)
        # Compute maximum constraint violation over all constraints and
        # both bound sides.
        state.constr_violation = 0
        for i in range(len(prepared_constraints)):
            lb, ub = prepared_constraints[i].bounds
            c = state.constr[i]
            state.constr_violation = np.max([state.constr_violation,
                                             np.max(lb - c),
                                             np.max(c - ub)])

    state.execution_time = time.time() - start_time
    state.tr_radius = tr_radius
    state.constr_penalty = constr_penalty
    # CG iterations accumulate across the run; the stop condition is the
    # latest one only.
    state.cg_niter += cg_info["niter"]
    state.cg_stop_cond = cg_info["stop_cond"]

    return state
+
101
+
102
def update_state_ip(state, x, last_iteration_failed, objective,
                    prepared_constraints, start_time,
                    tr_radius, constr_penalty, cg_info,
                    barrier_parameter, barrier_tolerance):
    """Refresh the interior-point state: all SQP fields plus barrier data."""
    # Reuse the SQP update for every shared field...
    updated = update_state_sqp(state, x, last_iteration_failed, objective,
                               prepared_constraints, start_time, tr_radius,
                               constr_penalty, cg_info)
    # ...then record the barrier-specific quantities on top.
    updated.barrier_parameter = barrier_parameter
    updated.barrier_tolerance = barrier_tolerance
    return updated
112
+
113
+
114
def _minimize_trustregion_constr(fun, x0, args, grad,
                                 hess, hessp, bounds, constraints,
                                 xtol=1e-8, gtol=1e-8,
                                 barrier_tol=1e-8,
                                 sparse_jacobian=None,
                                 callback=None, maxiter=1000,
                                 verbose=0, finite_diff_rel_step=None,
                                 initial_constr_penalty=1.0, initial_tr_radius=1.0,
                                 initial_barrier_parameter=0.1,
                                 initial_barrier_tolerance=0.1,
                                 factorization_method=None,
                                 disp=False):
    """Minimize a scalar function subject to constraints.

    Parameters
    ----------
    gtol : float, optional
        Tolerance for termination by the norm of the Lagrangian gradient.
        The algorithm will terminate when both the infinity norm (i.e., max
        abs value) of the Lagrangian gradient and the constraint violation
        are smaller than ``gtol``. Default is 1e-8.
    xtol : float, optional
        Tolerance for termination by the change of the independent variable.
        The algorithm will terminate when ``tr_radius < xtol``, where
        ``tr_radius`` is the radius of the trust region used in the algorithm.
        Default is 1e-8.
    barrier_tol : float, optional
        Threshold on the barrier parameter for the algorithm termination.
        When inequality constraints are present, the algorithm will terminate
        only when the barrier parameter is less than `barrier_tol`.
        Default is 1e-8.
    sparse_jacobian : {bool, None}, optional
        Determines how to represent Jacobians of the constraints. If bool,
        then Jacobians of all the constraints will be converted to the
        corresponding format. If None (default), then Jacobians won't be
        converted, but the algorithm can proceed only if they all have the
        same format.
    initial_tr_radius: float, optional
        Initial trust radius. The trust radius gives the maximum distance
        between solution points in consecutive iterations. It reflects the
        trust the algorithm puts in the local approximation of the optimization
        problem. For an accurate local approximation the trust-region should be
        large and for an approximation valid only close to the current point it
        should be a small one. The trust radius is automatically updated throughout
        the optimization process, with ``initial_tr_radius`` being its initial value.
        Default is 1 (recommended in [1]_, p. 19).
    initial_constr_penalty : float, optional
        Initial constraints penalty parameter. The penalty parameter is used for
        balancing the requirements of decreasing the objective function
        and satisfying the constraints. It is used for defining the merit function:
        ``merit_function(x) = fun(x) + constr_penalty * constr_norm_l2(x)``,
        where ``constr_norm_l2(x)`` is the l2 norm of a vector containing all
        the constraints. The merit function is used for accepting or rejecting
        trial points and ``constr_penalty`` weights the two conflicting goals
        of reducing objective function and constraints. The penalty is automatically
        updated throughout the optimization process, with
        ``initial_constr_penalty`` being its initial value. Default is 1
        (recommended in [1]_, p 19).
    initial_barrier_parameter, initial_barrier_tolerance: float, optional
        Initial barrier parameter and initial tolerance for the barrier subproblem.
        Both are used only when inequality constraints are present. For dealing with
        optimization problems ``min_x f(x)`` subject to inequality constraints
        ``c(x) <= 0`` the algorithm introduces slack variables, solving the problem
        ``min_(x,s) f(x) + barrier_parameter*sum(ln(s))`` subject to the equality
        constraints ``c(x) + s = 0`` instead of the original problem. This subproblem
        is solved for decreasing values of ``barrier_parameter`` and with decreasing
        tolerances for the termination, starting with ``initial_barrier_parameter``
        for the barrier parameter and ``initial_barrier_tolerance`` for the
        barrier tolerance. Default is 0.1 for both values (recommended in [1]_ p. 19).
        Also note that ``barrier_parameter`` and ``barrier_tolerance`` are updated
        with the same prefactor.
    factorization_method : string or None, optional
        Method to factorize the Jacobian of the constraints. Use None (default)
        for the auto selection or one of:

        - 'NormalEquation' (requires scikit-sparse)
        - 'AugmentedSystem'
        - 'QRFactorization'
        - 'SVDFactorization'

        The methods 'NormalEquation' and 'AugmentedSystem' can be used only
        with sparse constraints. The projections required by the algorithm
        will be computed using, respectively, the normal equation and the
        augmented system approaches explained in [1]_. 'NormalEquation'
        computes the Cholesky factorization of ``A A.T`` and 'AugmentedSystem'
        performs the LU factorization of an augmented system. They usually
        provide similar results. 'AugmentedSystem' is used by default for
        sparse matrices.

        The methods 'QRFactorization' and 'SVDFactorization' can be used
        only with dense constraints. They compute the required projections
        using, respectively, QR and SVD factorizations. The 'SVDFactorization'
        method can cope with Jacobian matrices with deficient row rank and will
        be used whenever other factorization methods fail (which may imply the
        conversion of sparse matrices to a dense format when required).
        By default, 'QRFactorization' is used for dense matrices.
    finite_diff_rel_step : None or array_like, optional
        Relative step size for the finite difference approximation.
    maxiter : int, optional
        Maximum number of algorithm iterations. Default is 1000.
    verbose : {0, 1, 2}, optional
        Level of algorithm's verbosity:

        * 0 (default) : work silently.
        * 1 : display a termination report.
        * 2 : display progress during iterations.
        * 3 : display progress during iterations (more complete report).

    disp : bool, optional
        If True, then `verbose` will be set to 1 if it was 0.
        Default is False.

    Returns
    -------
    `OptimizeResult` with the fields documented below. Note the following:

    1. All values corresponding to the constraints are ordered as they
       were passed to the solver. And values corresponding to `bounds`
       constraints are put *after* other constraints.
    2. All numbers of function, Jacobian or Hessian evaluations correspond
       to numbers of actual Python function calls. It means, for example,
       that if a Jacobian is estimated by finite differences, then the
       number of Jacobian evaluations will be zero and the number of
       function evaluations will be incremented by all calls during the
       finite difference estimation.

    x : ndarray, shape (n,)
        Solution found.
    optimality : float
        Infinity norm of the Lagrangian gradient at the solution.
    constr_violation : float
        Maximum constraint violation at the solution.
    fun : float
        Objective function at the solution.
    grad : ndarray, shape (n,)
        Gradient of the objective function at the solution.
    lagrangian_grad : ndarray, shape (n,)
        Gradient of the Lagrangian function at the solution.
    nit : int
        Total number of iterations.
    nfev : integer
        Number of the objective function evaluations.
    njev : integer
        Number of the objective function gradient evaluations.
    nhev : integer
        Number of the objective function Hessian evaluations.
    cg_niter : int
        Total number of the conjugate gradient method iterations.
    method : {'equality_constrained_sqp', 'tr_interior_point'}
        Optimization method used.
    constr : list of ndarray
        List of constraint values at the solution.
    jac : list of {ndarray, sparse matrix}
        List of the Jacobian matrices of the constraints at the solution.
    v : list of ndarray
        List of the Lagrange multipliers for the constraints at the solution.
        For an inequality constraint a positive multiplier means that the upper
        bound is active, a negative multiplier means that the lower bound is
        active and if a multiplier is zero it means the constraint is not
        active.
    constr_nfev : list of int
        Number of constraint evaluations for each of the constraints.
    constr_njev : list of int
        Number of Jacobian matrix evaluations for each of the constraints.
    constr_nhev : list of int
        Number of Hessian evaluations for each of the constraints.
    tr_radius : float
        Radius of the trust region at the last iteration.
    constr_penalty : float
        Penalty parameter at the last iteration, see `initial_constr_penalty`.
    barrier_tolerance : float
        Tolerance for the barrier subproblem at the last iteration.
        Only for problems with inequality constraints.
    barrier_parameter : float
        Barrier parameter at the last iteration. Only for problems
        with inequality constraints.
    execution_time : float
        Total execution time.
    message : str
        Termination message.
    status : {0, 1, 2, 3}
        Termination status:

        * 0 : The maximum number of function evaluations is exceeded.
        * 1 : `gtol` termination condition is satisfied.
        * 2 : `xtol` termination condition is satisfied.
        * 3 : `callback` function requested termination.

    cg_stop_cond : int
        Reason for CG subproblem termination at the last iteration:

        * 0 : CG subproblem not evaluated.
        * 1 : Iteration limit was reached.
        * 2 : Reached the trust-region boundary.
        * 3 : Negative curvature detected.
        * 4 : Tolerance was satisfied.

    References
    ----------
    .. [1] Conn, A. R., Gould, N. I., & Toint, P. L.
           Trust region methods. 2000. Siam. pp. 19.
    """
    x0 = np.atleast_1d(x0).astype(float)
    n_vars = np.size(x0)
    # Fall back to a Hessian-vector-product wrapper, or to a BFGS
    # quasi-Newton approximation, when no explicit Hessian is given.
    if hess is None:
        if callable(hessp):
            hess = HessianLinearOperator(hessp, n_vars)
        else:
            hess = BFGS()
    if disp and verbose == 0:
        verbose = 1

    if bounds is not None:
        # Widen each *finite* bound by one ulp via np.nextafter; infinite
        # bounds are left unchanged by the np.where filter below.
        # NOTE(review): presumably this keeps points exactly on a bound
        # strictly feasible for the interior-point machinery — confirm.
        modified_lb = np.nextafter(bounds.lb, -np.inf, where=bounds.lb > -np.inf)
        modified_ub = np.nextafter(bounds.ub, np.inf, where=bounds.ub < np.inf)
        modified_lb = np.where(np.isfinite(bounds.lb), modified_lb, bounds.lb)
        modified_ub = np.where(np.isfinite(bounds.ub), modified_ub, bounds.ub)
        bounds = Bounds(modified_lb, modified_ub, keep_feasible=bounds.keep_feasible)
        finite_diff_bounds = strict_bounds(bounds.lb, bounds.ub,
                                           bounds.keep_feasible, n_vars)
    else:
        finite_diff_bounds = (-np.inf, np.inf)

    # Define Objective Function
    objective = ScalarFunction(fun, x0, args, grad, hess,
                               finite_diff_rel_step, finite_diff_bounds)

    # Put constraints in list format when needed.
    if isinstance(constraints, (NonlinearConstraint, LinearConstraint)):
        constraints = [constraints]

    # Prepare constraints.
    prepared_constraints = [
        PreparedConstraint(c, x0, sparse_jacobian, finite_diff_bounds)
        for c in constraints]

    # Check that all constraints are either sparse or dense.
    n_sparse = sum(c.fun.sparse_jacobian for c in prepared_constraints)
    if 0 < n_sparse < len(prepared_constraints):
        raise ValueError("All constraints must have the same kind of the "
                         "Jacobian --- either all sparse or all dense. "
                         "You can set the sparsity globally by setting "
                         "`sparse_jacobian` to either True of False.")
    if prepared_constraints:
        sparse_jacobian = n_sparse > 0

    # Bounds are appended as one extra prepared constraint, *after* the
    # user-supplied ones (see note 1 in the Returns section above).
    if bounds is not None:
        if sparse_jacobian is None:
            sparse_jacobian = True
        prepared_constraints.append(PreparedConstraint(bounds, x0,
                                                       sparse_jacobian))

    # Concatenate initial constraints to the canonical form.
    c_eq0, c_ineq0, J_eq0, J_ineq0 = initial_constraints_as_canonical(
        n_vars, prepared_constraints, sparse_jacobian)

    # Prepare all canonical constraints and concatenate it into one.
    canonical_all = [CanonicalConstraint.from_PreparedConstraint(c)
                     for c in prepared_constraints]

    if len(canonical_all) == 0:
        canonical = CanonicalConstraint.empty(n_vars)
    elif len(canonical_all) == 1:
        canonical = canonical_all[0]
    else:
        canonical = CanonicalConstraint.concatenate(canonical_all,
                                                    sparse_jacobian)

    # Generate the Hessian of the Lagrangian.
    lagrangian_hess = LagrangianHessian(n_vars, objective.hess, canonical.hess)

    # Choose appropriate method: SQP when only equality constraints remain,
    # interior point as soon as any inequality is present.
    if canonical.n_ineq == 0:
        method = 'equality_constrained_sqp'
    else:
        method = 'tr_interior_point'

    # Construct OptimizeResult
    state = OptimizeResult(
        nit=0, nfev=0, njev=0, nhev=0,
        cg_niter=0, cg_stop_cond=0,
        fun=objective.f, grad=objective.g,
        lagrangian_grad=np.copy(objective.g),
        constr=[c.fun.f for c in prepared_constraints],
        jac=[c.fun.J for c in prepared_constraints],
        constr_nfev=[0 for c in prepared_constraints],
        constr_njev=[0 for c in prepared_constraints],
        constr_nhev=[0 for c in prepared_constraints],
        v=[c.fun.v for c in prepared_constraints],
        method=method)

    # Start counting
    start_time = time.time()

    # Define stop criteria. Each closure updates `state` in place, handles
    # per-iteration reporting and the user callback, and returns True when
    # the outer loop of the chosen solver should terminate.
    if method == 'equality_constrained_sqp':
        def stop_criteria(state, x, last_iteration_failed,
                          optimality, constr_violation,
                          tr_radius, constr_penalty, cg_info):
            state = update_state_sqp(state, x, last_iteration_failed,
                                     objective, prepared_constraints,
                                     start_time, tr_radius, constr_penalty,
                                     cg_info)
            if verbose == 2:
                BasicReport.print_iteration(state.nit,
                                            state.nfev,
                                            state.cg_niter,
                                            state.fun,
                                            state.tr_radius,
                                            state.optimality,
                                            state.constr_violation)
            elif verbose > 2:
                SQPReport.print_iteration(state.nit,
                                          state.nfev,
                                          state.cg_niter,
                                          state.fun,
                                          state.tr_radius,
                                          state.optimality,
                                          state.constr_violation,
                                          state.constr_penalty,
                                          state.cg_stop_cond)
            state.status = None
            state.niter = state.nit  # Alias for callback (backward-compatibility)
            if callback is not None:
                # A callback may stop the run either by returning a truthy
                # value or by raising StopIteration.
                callback_stop = False
                try:
                    callback_stop = callback(state)
                except StopIteration:
                    callback_stop = True
                if callback_stop:
                    state.status = 3
                    return True
            if state.optimality < gtol and state.constr_violation < gtol:
                state.status = 1
            elif state.tr_radius < xtol:
                state.status = 2
            elif state.nit >= maxiter:
                state.status = 0
            return state.status in (0, 1, 2, 3)
    elif method == 'tr_interior_point':
        def stop_criteria(state, x, last_iteration_failed, tr_radius,
                          constr_penalty, cg_info, barrier_parameter,
                          barrier_tolerance):
            state = update_state_ip(state, x, last_iteration_failed,
                                    objective, prepared_constraints,
                                    start_time, tr_radius, constr_penalty,
                                    cg_info, barrier_parameter, barrier_tolerance)
            if verbose == 2:
                BasicReport.print_iteration(state.nit,
                                            state.nfev,
                                            state.cg_niter,
                                            state.fun,
                                            state.tr_radius,
                                            state.optimality,
                                            state.constr_violation)
            elif verbose > 2:
                IPReport.print_iteration(state.nit,
                                         state.nfev,
                                         state.cg_niter,
                                         state.fun,
                                         state.tr_radius,
                                         state.optimality,
                                         state.constr_violation,
                                         state.constr_penalty,
                                         state.barrier_parameter,
                                         state.cg_stop_cond)
            state.status = None
            state.niter = state.nit  # Alias for callback (backward compatibility)
            if callback is not None:
                # A callback may stop the run either by returning a truthy
                # value or by raising StopIteration.
                callback_stop = False
                try:
                    callback_stop = callback(state)
                except StopIteration:
                    callback_stop = True
                if callback_stop:
                    state.status = 3
                    return True
            if state.optimality < gtol and state.constr_violation < gtol:
                state.status = 1
            elif (state.tr_radius < xtol
                  and state.barrier_parameter < barrier_tol):
                # `xtol` alone is not enough here: the barrier must also
                # have been driven below `barrier_tol`.
                state.status = 2
            elif state.nit >= maxiter:
                state.status = 0
            return state.status in (0, 1, 2, 3)

    if verbose == 2:
        BasicReport.print_header()
    elif verbose > 2:
        if method == 'equality_constrained_sqp':
            SQPReport.print_header()
        elif method == 'tr_interior_point':
            IPReport.print_header()

    # Call inferior function to do the optimization
    if method == 'equality_constrained_sqp':
        def fun_and_constr(x):
            f = objective.fun(x)
            c_eq, _ = canonical.fun(x)
            return f, c_eq

        def grad_and_jac(x):
            g = objective.grad(x)
            J_eq, _ = canonical.jac(x)
            return g, J_eq

        _, result = equality_constrained_sqp(
            fun_and_constr, grad_and_jac, lagrangian_hess,
            x0, objective.f, objective.g,
            c_eq0, J_eq0,
            stop_criteria, state,
            initial_constr_penalty, initial_tr_radius,
            factorization_method)

    elif method == 'tr_interior_point':
        _, result = tr_interior_point(
            objective.fun, objective.grad, lagrangian_hess,
            n_vars, canonical.n_ineq, canonical.n_eq,
            canonical.fun, canonical.jac,
            x0, objective.f, objective.g,
            c_ineq0, J_ineq0, c_eq0, J_eq0,
            stop_criteria,
            canonical.keep_feasible,
            xtol, state, initial_barrier_parameter,
            initial_barrier_tolerance,
            initial_constr_penalty, initial_tr_radius,
            factorization_method)

    # Status 3 occurs when the callback function requests termination,
    # this is assumed to not be a success.
    result.success = True if result.status in (1, 2) else False
    result.message = TERMINATION_MESSAGES[result.status]

    # Alias (for backward compatibility with 1.1.0)
    result.niter = result.nit

    if verbose == 2:
        BasicReport.print_footer()
    elif verbose > 2:
        if method == 'equality_constrained_sqp':
            SQPReport.print_footer()
        elif method == 'tr_interior_point':
            IPReport.print_footer()
    if verbose >= 1:
        print(result.message)
        print("Number of iterations: {}, function evaluations: {}, "
              "CG iterations: {}, optimality: {:.2e}, "
              "constraint violation: {:.2e}, execution time: {:4.2} s."
              .format(result.nit, result.nfev, result.cg_niter,
                      result.optimality, result.constr_violation,
                      result.execution_time))
    return result
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/projections.py ADDED
@@ -0,0 +1,407 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Basic linear factorizations needed by the solver."""
2
+
3
+ from scipy.sparse import (bmat, csc_matrix, eye, issparse)
4
+ from scipy.sparse.linalg import LinearOperator
5
+ import scipy.linalg
6
+ import scipy.sparse.linalg
7
+ try:
8
+ from sksparse.cholmod import cholesky_AAt
9
+ sksparse_available = True
10
+ except ImportError:
11
+ import warnings
12
+ sksparse_available = False
13
+ import numpy as np
14
+ from warnings import warn
15
+
16
+ __all__ = [
17
+ 'orthogonality',
18
+ 'projections',
19
+ ]
20
+
21
+
22
def orthogonality(A, g):
    """Measure orthogonality between a vector and the null space of a matrix.

    Compute a measure of orthogonality between the null space
    of the (possibly sparse) matrix ``A`` and a given vector ``g``.

    The formula is a simplified (and cheaper) version of formula (3.13)
    from [1]_.
    ``orth = norm(A g, ord=2)/(norm(A, ord='fro')*norm(g, ord=2))``.

    References
    ----------
    .. [1] Gould, Nicholas IM, Mary E. Hribar, and Jorge Nocedal.
           "On the solution of equality constrained quadratic
           programming problems arising in optimization."
           SIAM Journal on Scientific Computing 23.4 (2001): 1376-1395.
    """
    norm_g = np.linalg.norm(g)
    # Frobenius norm of A; sparse and dense matrices need different helpers.
    if issparse(A):
        norm_A = scipy.sparse.linalg.norm(A, ord='fro')
    else:
        norm_A = np.linalg.norm(A, ord='fro')

    # A zero vector (or zero matrix) is treated as fully orthogonal.
    if norm_g == 0 or norm_A == 0:
        return 0

    return np.linalg.norm(A.dot(g)) / (norm_A * norm_g)
55
+
56
+
57
def normal_equation_projections(A, m, n, orth_tol, max_refin, tol):
    """Return linear operators for matrix A using ``NormalEquation`` approach.
    """
    # Cholesky factorization of A A.T (requires scikit-sparse).
    factor = cholesky_AAt(A)

    def null_space(x):
        # z = x - A.T inv(A A.T) A x
        z = x - A.T.dot(factor(A.dot(x)))

        # Iterative refinement to reduce roundoff errors,
        # as described in [2]_, algorithm 5.1.
        refinements = 0
        while orthogonality(A, z) > orth_tol and refinements < max_refin:
            # z_next = z - A.T inv(A A.T) A z
            z = z - A.T.dot(factor(A.dot(z)))
            refinements += 1
        return z

    def least_squares(x):
        # z = inv(A A.T) A x
        return factor(A.dot(x))

    def row_space(x):
        # z = A.T inv(A A.T) x
        return A.T.dot(factor(x))

    return null_space, least_squares, row_space
90
+
91
+
92
def augmented_system_projections(A, m, n, orth_tol, max_refin, tol):
    """Return linear operators for matrix A - ``AugmentedSystem``."""
    # Augmented system matrix K = [[I, A.T], [A, 0]].
    K = csc_matrix(bmat([[eye(n), A.T], [A, None]]))
    # LU factorization of K.
    # TODO: a symmetric indefinite factorization would solve the
    # system roughly twice as fast by exploiting the symmetry.
    try:
        solve = scipy.sparse.linalg.factorized(K)
    except RuntimeError:
        warn("Singular Jacobian matrix. Using dense SVD decomposition to "
             "perform the factorizations.",
             stacklevel=3)
        return svd_factorization_projections(A.toarray(),
                                             m, n, orth_tol,
                                             max_refin, tol)

    def null_space(x):
        # Solve [I A.T; A 0] [z; aux] = [x; 0];
        # then z = x - A.T inv(A A.T) A x.
        rhs = np.hstack([x, np.zeros(m)])
        sol = solve(rhs)
        z = sol[:n]

        # Iterative refinement to reduce roundoff errors,
        # as described in [2]_, algorithm 5.2.
        refinements = 0
        while orthogonality(A, z) > orth_tol and refinements < max_refin:
            # Residual of the augmented system, then a correction solve.
            residual = rhs - K.dot(sol)
            sol = sol + solve(residual)
            z = sol[:n]
            refinements += 1

        return z

    def least_squares(x):
        # Solve [I A.T; A 0] [aux; z] = [x; 0];
        # the trailing block is z = inv(A A.T) A x.
        sol = solve(np.hstack([x, np.zeros(m)]))
        return sol[n:m + n]

    def row_space(x):
        # Solve [I A.T; A 0] [z; aux] = [0; x];
        # the leading block is z = A.T inv(A A.T) x.
        sol = solve(np.hstack([np.zeros(n), x]))
        return sol[:n]

    return null_space, least_squares, row_space
173
+
174
+
175
def qr_factorization_projections(A, m, n, orth_tol, max_refin, tol):
    """Return linear operators for matrix A using ``QRFactorization`` approach.
    """
    # Pivoted economic QR of A.T: A.T P = Q R.
    Q, R, P = scipy.linalg.qr(A.T, pivoting=True, mode='economic')

    # A numerically zero last row of R signals rank deficiency;
    # fall back to the SVD-based projections in that case.
    if np.linalg.norm(R[-1, :], np.inf) < tol:
        warn('Singular Jacobian matrix. Using SVD decomposition to ' +
             'perform the factorizations.',
             stacklevel=3)
        return svd_factorization_projections(A, m, n,
                                             orth_tol,
                                             max_refin,
                                             tol)

    def null_space(x):
        # v = P inv(R) Q.T x, so z = x - A.T v projects x onto null(A).
        v = np.zeros(m)
        v[P] = scipy.linalg.solve_triangular(R, Q.T.dot(x), lower=False)
        z = x - A.T.dot(v)

        # Iterative refinement to reduce roundoff errors,
        # as described in [2]_, algorithm 5.1.
        refinements = 0
        while orthogonality(A, z) > orth_tol and refinements < max_refin:
            # v = P inv(R) Q.T z; z_next = z - A.T v
            v[P] = scipy.linalg.solve_triangular(R, Q.T.dot(z), lower=False)
            z = z - A.T.dot(v)
            refinements += 1

        return z

    def least_squares(x):
        # z = P inv(R) Q.T x = inv(A A.T) A x
        z = np.zeros(m)
        z[P] = scipy.linalg.solve_triangular(R, Q.T.dot(x), lower=False)
        return z

    def row_space(x):
        # z = Q inv(R.T) P.T x = A.T inv(A A.T) x
        y = scipy.linalg.solve_triangular(R, x[P],
                                          lower=False,
                                          trans='T')
        return Q.dot(y)

    return null_space, least_squares, row_space
235
+
236
+
237
def svd_factorization_projections(A, m, n, orth_tol, max_refin, tol):
    """Return linear operators for matrix A using ``SVDFactorization`` approach.
    """
    # Thin SVD: A = U diag(s) Vt.
    U, s, Vt = scipy.linalg.svd(A, full_matrices=False)

    # Drop directions associated with (near-)zero singular values.
    significant = s > tol
    U = U[:, significant]
    Vt = Vt[significant, :]
    s = s[significant]

    def null_space(x):
        # v = U diag(1/s) Vt x = inv(A A.T) A x; then z = x - A.T v.
        v = U.dot(Vt.dot(x) / s)
        z = x - A.T.dot(v)

        # Iterative refinement to reduce roundoff errors,
        # as described in [2]_, algorithm 5.1.
        refinements = 0
        while orthogonality(A, z) > orth_tol and refinements < max_refin:
            v = U.dot(Vt.dot(z) / s)
            z = z - A.T.dot(v)
            refinements += 1

        return z

    def least_squares(x):
        # z = U diag(1/s) Vt x = inv(A A.T) A x
        return U.dot(Vt.dot(x) / s)

    def row_space(x):
        # z = V diag(1/s) U.T x = A.T inv(A A.T) x
        return Vt.T.dot(U.T.dot(x) / s)

    return null_space, least_squares, row_space
289
+
290
+
291
def projections(A, method=None, orth_tol=1e-12, max_refin=3, tol=1e-15):
    """Return three linear operators related with a given matrix A.

    Parameters
    ----------
    A : sparse matrix (or ndarray), shape (m, n)
        Matrix ``A`` used in the projection.
    method : string, optional
        Method used for compute the given linear
        operators. Should be one of:

        - 'NormalEquation': The operators
           will be computed using the
           so-called normal equation approach
           explained in [1]_. In order to do
           so the Cholesky factorization of
           ``(A A.T)`` is computed. Exclusive
           for sparse matrices.
        - 'AugmentedSystem': The operators
           will be computed using the
           so-called augmented system approach
           explained in [1]_. Exclusive
           for sparse matrices.
        - 'QRFactorization': Compute projections
           using QR factorization. Exclusive for
           dense matrices.
        - 'SVDFactorization': Compute projections
           using SVD factorization. Exclusive for
           dense matrices.

    orth_tol : float, optional
        Tolerance for iterative refinements.
    max_refin : int, optional
        Maximum number of iterative refinements.
    tol : float, optional
        Tolerance for singular values.

    Returns
    -------
    Z : LinearOperator, shape (n, n)
        Null-space operator. For a given vector ``x``,
        the null space operator is equivalent to apply
        a projection matrix ``P = I - A.T inv(A A.T) A``
        to the vector. It can be shown that this is
        equivalent to project ``x`` into the null space
        of A.
    LS : LinearOperator, shape (m, n)
        Least-squares operator. For a given vector ``x``,
        the least-squares operator is equivalent to apply a
        pseudoinverse matrix ``pinv(A.T) = inv(A A.T) A``
        to the vector. It can be shown that this vector
        ``pinv(A.T) x`` is the least_square solution to
        ``A.T y = x``.
    Y : LinearOperator, shape (n, m)
        Row-space operator. For a given vector ``x``,
        the row-space operator is equivalent to apply a
        projection matrix ``Q = A.T inv(A A.T)``
        to the vector. It can be shown that this
        vector ``y = Q x`` the minimum norm solution
        of ``A y = x``.

    Notes
    -----
    Uses iterative refinements described in [1]
    during the computation of ``Z`` in order to
    cope with the possibility of large roundoff errors.

    References
    ----------
    .. [1] Gould, Nicholas IM, Mary E. Hribar, and Jorge Nocedal.
        "On the solution of equality constrained quadratic
        programming problems arising in optimization."
        SIAM Journal on Scientific Computing 23.4 (2001): 1376-1395.
    """
    m, n = np.shape(A)

    # The factorization of an empty matrix only works for
    # the sparse representation, so convert in that case.
    if m*n == 0:
        A = csc_matrix(A)

    # Validate the requested method against the matrix representation
    # and fill in the per-representation default.
    if issparse(A):
        if method is None:
            method = "AugmentedSystem"
        if method not in ("NormalEquation", "AugmentedSystem"):
            raise ValueError("Method not allowed for sparse matrix.")
        if method == "NormalEquation" and not sksparse_available:
            warnings.warn("Only accepts 'NormalEquation' option when "
                          "scikit-sparse is available. Using "
                          "'AugmentedSystem' option instead.",
                          ImportWarning, stacklevel=3)
            method = 'AugmentedSystem'
    else:
        if method is None:
            method = "QRFactorization"
        if method not in ("QRFactorization", "SVDFactorization"):
            raise ValueError("Method not allowed for dense array.")

    # Dispatch to the factorization backend. The validation above
    # guarantees `method` is one of these four keys.
    factorizations = {
        'NormalEquation': normal_equation_projections,
        'AugmentedSystem': augmented_system_projections,
        'QRFactorization': qr_factorization_projections,
        'SVDFactorization': svd_factorization_projections,
    }
    null_space, least_squares, row_space = factorizations[method](
        A, m, n, orth_tol, max_refin, tol)

    Z = LinearOperator((n, n), null_space)
    LS = LinearOperator((m, n), least_squares)
    Y = LinearOperator((n, m), row_space)

    return Z, LS, Y
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/qp_subproblem.py ADDED
@@ -0,0 +1,637 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Equality-constrained quadratic programming solvers."""
2
+
3
+ from scipy.sparse import (linalg, bmat, csc_matrix)
4
+ from math import copysign
5
+ import numpy as np
6
+ from numpy.linalg import norm
7
+
8
+ __all__ = [
9
+ 'eqp_kktfact',
10
+ 'sphere_intersections',
11
+ 'box_intersections',
12
+ 'box_sphere_intersections',
13
+ 'inside_box_boundaries',
14
+ 'modified_dogleg',
15
+ 'projected_cg'
16
+ ]
17
+
18
+
19
# For comparison with the projected CG
def eqp_kktfact(H, c, A, b):
    """Solve an equality-constrained quadratic program via KKT factorization.

    Solves ``min 1/2 x.T H x + x.T c`` subject to ``A x + b = 0`` by
    assembling the full Karush-Kuhn-Tucker system and factorizing it
    directly.

    Parameters
    ----------
    H : sparse matrix, shape (n, n)
        Hessian matrix of the EQP problem.
    c : array_like, shape (n,)
        Gradient of the quadratic objective function.
    A : sparse matrix
        Jacobian matrix of the EQP problem.
    b : array_like, shape (m,)
        Right-hand side of the constraint equation.

    Returns
    -------
    x : ndarray, shape (n,)
        Solution of the KKT problem.
    lagrange_multipliers : ndarray, shape (m,)
        Lagrange multipliers of the KKT problem.
    """
    n_params, = np.shape(c)   # number of parameters
    n_constr, = np.shape(b)   # number of constraints

    # Assemble the KKT coefficient matrix [[H, A.T], [A, 0]] as in
    # Nocedal & Wright, "Numerical Optimization", Eq. (16.4), p. 452.
    kkt_mat = csc_matrix(bmat([[H, A.T], [A, None]]))
    rhs = np.hstack([-c, -b])

    # TODO: a symmetric indefinite factorization would solve the system
    # roughly twice as fast as this plain LU, exploiting the symmetry.
    solution = linalg.splu(kkt_mat).solve(rhs)

    x = solution[:n_params]
    lagrange_multipliers = -solution[n_params:n_params + n_constr]
    return x, lagrange_multipliers
63
+
64
+
65
def sphere_intersections(z, d, trust_radius,
                         entire_line=False):
    """Intersect a segment (or line) with a ball centered at the origin.

    Computes the interval of ``t`` for which the point
    ``x(t) = z + t*d`` satisfies ``||x|| <= trust_radius``.

    Parameters
    ----------
    z : array_like, shape (n,)
        Initial point.
    d : array_like, shape (n,)
        Direction.
    trust_radius : float
        Ball radius.
    entire_line : bool, optional
        If ``True``, intersect the whole line (any ``t``); otherwise
        restrict to the segment ``0 <= t <= 1``.

    Returns
    -------
    ta, tb : float
        The line/segment is inside the ball for ``ta <= t <= tb``.
    intersect : bool
        ``True`` when the line/segment intersects the ball.
    """
    # Degenerate direction: there is no segment to intersect.
    if norm(d) == 0:
        return 0, 0, False
    # An infinite radius imposes no restriction at all.
    if np.isinf(trust_radius):
        if entire_line:
            return -np.inf, np.inf, True
        return 0, 1, True

    # Coefficients of the quadratic ||z + t*d||^2 = trust_radius^2.
    quad = np.dot(d, d)
    lin = 2 * np.dot(z, d)
    const = np.dot(z, z) - trust_radius**2
    disc = lin*lin - 4*quad*const
    if disc < 0:
        # The line misses the ball entirely.
        return 0, 0, False
    sqrt_disc = np.sqrt(disc)

    # Numerically stable quadratic roots: computing the larger-magnitude
    # root first avoids catastrophic cancellation. Mathematically this
    # equals (-lin -+ sqrt_disc) / (2*quad); see Golub & Van Loan,
    # "Matrix Computations", p. 97.
    aux = lin + copysign(sqrt_disc, lin)
    root_a = -aux / (2*quad)
    root_b = -2*const / aux
    t_lo, t_hi = sorted([root_a, root_b])

    if entire_line:
        return t_lo, t_hi, True

    # Restrict to the segment parametrized by 0 <= t <= 1.
    if t_hi < 0 or t_lo > 1:
        return 0, 0, False
    return max(0, t_lo), min(1, t_hi), True
149
+
150
+
151
+ def box_intersections(z, d, lb, ub,
152
+ entire_line=False):
153
+ """Find the intersection between segment (or line) and box constraints.
154
+
155
+ Find the intersection between the segment (or line) defined by the
156
+ parametric equation ``x(t) = z + t*d`` and the rectangular box
157
+ ``lb <= x <= ub``.
158
+
159
+ Parameters
160
+ ----------
161
+ z : array_like, shape (n,)
162
+ Initial point.
163
+ d : array_like, shape (n,)
164
+ Direction.
165
+ lb : array_like, shape (n,)
166
+ Lower bounds to each one of the components of ``x``. Used
167
+ to delimit the rectangular box.
168
+ ub : array_like, shape (n, )
169
+ Upper bounds to each one of the components of ``x``. Used
170
+ to delimit the rectangular box.
171
+ entire_line : bool, optional
172
+ When ``True``, the function returns the intersection between the line
173
+ ``x(t) = z + t*d`` (``t`` can assume any value) and the rectangular
174
+ box. When ``False``, the function returns the intersection between the segment
175
+ ``x(t) = z + t*d``, ``0 <= t <= 1``, and the rectangular box.
176
+
177
+ Returns
178
+ -------
179
+ ta, tb : float
180
+ The line/segment ``x(t) = z + t*d`` is inside the box for
181
+ for ``ta <= t <= tb``.
182
+ intersect : bool
183
+ When ``True``, there is a intersection between the line (or segment)
184
+ and the rectangular box. On the other hand, when ``False``, there is no
185
+ intersection.
186
+ """
187
+ # Make sure it is a numpy array
188
+ z = np.asarray(z)
189
+ d = np.asarray(d)
190
+ lb = np.asarray(lb)
191
+ ub = np.asarray(ub)
192
+ # Special case when d=0
193
+ if norm(d) == 0:
194
+ return 0, 0, False
195
+
196
+ # Get values for which d==0
197
+ zero_d = (d == 0)
198
+ # If the boundaries are not satisfied for some coordinate
199
+ # for which "d" is zero, there is no box-line intersection.
200
+ if (z[zero_d] < lb[zero_d]).any() or (z[zero_d] > ub[zero_d]).any():
201
+ intersect = False
202
+ return 0, 0, intersect
203
+ # Remove values for which d is zero
204
+ not_zero_d = np.logical_not(zero_d)
205
+ z = z[not_zero_d]
206
+ d = d[not_zero_d]
207
+ lb = lb[not_zero_d]
208
+ ub = ub[not_zero_d]
209
+
210
+ # Find a series of intervals (t_lb[i], t_ub[i]).
211
+ t_lb = (lb-z) / d
212
+ t_ub = (ub-z) / d
213
+ # Get the intersection of all those intervals.
214
+ ta = max(np.minimum(t_lb, t_ub))
215
+ tb = min(np.maximum(t_lb, t_ub))
216
+
217
+ # Check if intersection is feasible
218
+ if ta <= tb:
219
+ intersect = True
220
+ else:
221
+ intersect = False
222
+ # Checks to see if intersection happens within vectors length.
223
+ if not entire_line:
224
+ if tb < 0 or ta > 1:
225
+ intersect = False
226
+ ta = 0
227
+ tb = 0
228
+ else:
229
+ # Restrict intersection interval between 0 and 1.
230
+ ta = max(0, ta)
231
+ tb = min(1, tb)
232
+
233
+ return ta, tb, intersect
234
+
235
+
236
def box_sphere_intersections(z, d, lb, ub, trust_radius,
                             entire_line=False,
                             extra_info=False):
    """Intersect a segment (or line) with both box and ball constraints.

    Computes the interval of ``t`` for which ``x(t) = z + t*d`` lies
    inside the box ``lb <= x <= ub`` AND the ball
    ``||x|| <= trust_radius``.

    Parameters
    ----------
    z : array_like, shape (n,)
        Initial point.
    d : array_like, shape (n,)
        Direction.
    lb, ub : array_like, shape (n,)
        Lower and upper bounds delimiting the box.
    trust_radius : float
        Ball radius.
    entire_line : bool, optional
        If ``True``, intersect the whole line (any ``t``); otherwise
        restrict to the segment ``0 <= t <= 1``.
    extra_info : bool, optional
        If ``True``, also return per-constraint intersection details.

    Returns
    -------
    ta, tb : float
        The line/segment satisfies both constraints for ``ta <= t <= tb``.
    intersect : bool
        ``True`` when the line/segment intersects both constraints.
    sphere_info, box_info : dict, optional
        Only when ``extra_info`` is ``True``: dictionaries
        ``{'ta', 'tb', 'intersect'}`` describing the intersection with
        the ball and with the box separately.
    """
    box_ta, box_tb, box_hit = box_intersections(z, d, lb, ub,
                                                entire_line)
    sph_ta, sph_tb, sph_hit = sphere_intersections(z, d,
                                                   trust_radius,
                                                   entire_line)
    # The combined feasible interval is the overlap of both intervals.
    ta = np.maximum(box_ta, sph_ta)
    tb = np.minimum(box_tb, sph_tb)
    intersect = bool(box_hit and sph_hit and ta <= tb)

    if extra_info:
        sphere_info = {'ta': sph_ta, 'tb': sph_tb, 'intersect': sph_hit}
        box_info = {'ta': box_ta, 'tb': box_tb, 'intersect': box_hit}
        return ta, tb, intersect, sphere_info, box_info
    else:
        return ta, tb, intersect
303
+
304
+
305
def inside_box_boundaries(x, lb, ub):
    """Return whether every component of ``x`` satisfies ``lb <= x <= ub``."""
    within_lower = np.all(lb <= x)
    within_upper = np.all(x <= ub)
    return within_lower and within_upper
308
+
309
+
310
def reinforce_box_boundaries(x, lb, ub):
    """Return a copy of ``x`` with each component clipped to ``[lb, ub]``.

    Used to repair tiny violations of the box constraints caused by
    floating-point round-off. Assumes ``lb <= ub`` componentwise;
    infinite bounds are handled naturally (no clipping on that side).
    """
    # np.clip is the idiomatic (and equivalent) form of
    # np.minimum(np.maximum(x, lb), ub).
    return np.clip(x, lb, ub)
313
+
314
+
315
def modified_dogleg(A, Y, b, trust_radius, lb, ub):
    """Approximately minimize ``1/2*|| A x + b ||^2`` inside trust-region.

    Approximately solve the problem of minimizing ``1/2*|| A x + b ||^2``
    subject to ``||x|| < Delta`` and ``lb <= x <= ub`` using a modification
    of the classical dogleg approach.

    Parameters
    ----------
    A : LinearOperator (or sparse matrix or ndarray), shape (m, n)
        Matrix ``A`` in the minimization problem. It should have
        dimension ``(m, n)`` such that ``m < n``.
    Y : LinearOperator (or sparse matrix or ndarray), shape (n, m)
        LinearOperator that apply the projection matrix
        ``Q = A.T inv(A A.T)`` to the vector. The obtained vector
        ``y = Q x`` being the minimum norm solution of ``A y = x``.
    b : array_like, shape (m,)
        Vector ``b`` in the minimization problem.
    trust_radius: float
        Trust radius to be considered. Delimits a sphere boundary
        to the problem.
    lb : array_like, shape (n,)
        Lower bounds to each one of the components of ``x``.
        It is expected that ``lb <= 0``, otherwise the algorithm
        may fail. If ``lb[i] = -Inf``, the lower
        bound for the ith component is just ignored.
    ub : array_like, shape (n, )
        Upper bounds to each one of the components of ``x``.
        It is expected that ``ub >= 0``, otherwise the algorithm
        may fail. If ``ub[i] = Inf``, the upper bound for the ith
        component is just ignored.

    Returns
    -------
    x : array_like, shape (n,)
        Solution to the problem.

    Notes
    -----
    Based on implementations described in pp. 885-886 from [1]_.

    References
    ----------
    .. [1] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal.
        "An interior point algorithm for large-scale nonlinear
        programming." SIAM Journal on Optimization 9.4 (1999): 877-900.
    """
    # Compute minimum norm minimizer of 1/2*|| A x + b ||^2.
    newton_point = -Y.dot(b)
    # Check for interior point: if the unconstrained minimizer already
    # satisfies both the box and the trust-region, it is the answer.
    if inside_box_boundaries(newton_point, lb, ub) \
            and norm(newton_point) <= trust_radius:
        x = newton_point
        return x

    # Compute gradient vector ``g = A.T b``
    g = A.T.dot(b)
    # Compute Cauchy point (steepest-descent minimizer along -g):
    # `cauchy_point = g.T g / (g.T A.T A g)``.
    A_g = A.dot(g)
    cauchy_point = -np.dot(g, g) / np.dot(A_g, A_g) * g
    # Origin
    origin_point = np.zeros_like(cauchy_point)

    # Check the segment between cauchy_point and newton_point
    # for a possible solution (second leg of the dogleg path).
    z = cauchy_point
    p = newton_point - cauchy_point
    _, alpha, intersect = box_sphere_intersections(z, p, lb, ub,
                                                   trust_radius)
    if intersect:
        x1 = z + alpha*p
    else:
        # Check the segment between the origin and cauchy_point
        # for a possible solution (first leg of the dogleg path).
        z = origin_point
        p = cauchy_point
        _, alpha, _ = box_sphere_intersections(z, p, lb, ub,
                                               trust_radius)
        x1 = z + alpha*p

    # Check the segment between origin and newton_point
    # for a possible solution.
    z = origin_point
    p = newton_point
    _, alpha, _ = box_sphere_intersections(z, p, lb, ub,
                                           trust_radius)
    x2 = z + alpha*p

    # Return the best solution among x1 and x2 (smaller residual).
    if norm(A.dot(x1) + b) < norm(A.dot(x2) + b):
        return x1
    else:
        return x2
409
+
410
+
411
def projected_cg(H, c, Z, Y, b, trust_radius=np.inf,
                 lb=None, ub=None, tol=None,
                 max_iter=None, max_infeasible_iter=None,
                 return_all=False):
    """Solve EQP problem with projected CG method.

    Solve equality-constrained quadratic programming problem
    ``min 1/2 x.T H x + x.t c`` subject to ``A x + b = 0`` and,
    possibly, to trust region constraints ``||x|| < trust_radius``
    and box constraints ``lb <= x <= ub``.

    Parameters
    ----------
    H : LinearOperator (or sparse matrix or ndarray), shape (n, n)
        Operator for computing ``H v``.
    c : array_like, shape (n,)
        Gradient of the quadratic objective function.
    Z : LinearOperator (or sparse matrix or ndarray), shape (n, n)
        Operator for projecting ``x`` into the null space of A.
    Y : LinearOperator, sparse matrix, ndarray, shape (n, m)
        Operator that, for a given a vector ``b``, compute smallest
        norm solution of ``A x + b = 0``.
    b : array_like, shape (m,)
        Right-hand side of the constraint equation.
    trust_radius : float, optional
        Trust radius to be considered. By default, uses ``trust_radius=inf``,
        which means no trust radius at all.
    lb : array_like, shape (n,), optional
        Lower bounds to each one of the components of ``x``.
        If ``lb[i] = -Inf`` the lower bound for the i-th
        component is just ignored (default).
    ub : array_like, shape (n, ), optional
        Upper bounds to each one of the components of ``x``.
        If ``ub[i] = Inf`` the upper bound for the i-th
        component is just ignored (default).
    tol : float, optional
        Tolerance used to interrupt the algorithm.
    max_iter : int, optional
        Maximum algorithm iterations. Where ``max_inter <= n-m``.
        By default, uses ``max_iter = n-m``.
    max_infeasible_iter : int, optional
        Maximum infeasible (regarding box constraints) iterations the
        algorithm is allowed to take.
        By default, uses ``max_infeasible_iter = n-m``.
    return_all : bool, optional
        When ``true``, return the list of all vectors through the iterations.

    Returns
    -------
    x : array_like, shape (n,)
        Solution of the EQP problem.
    info : Dict
        Dictionary containing the following:

            - niter : Number of iterations.
            - stop_cond : Reason for algorithm termination:
                1. Iteration limit was reached;
                2. Reached the trust-region boundary;
                3. Negative curvature detected;
                4. Tolerance was satisfied.
            - allvecs : List containing all intermediary vectors (optional).
            - hits_boundary : True if the proposed step is on the boundary
              of the trust region.

    Notes
    -----
    Implementation of Algorithm 6.2 on [1]_.

    In the absence of spherical and box constraints, for sufficient
    iterations, the method returns a truly optimal result.
    In the presence of those constraints, the value returned is only
    a inexpensive approximation of the optimal value.

    References
    ----------
    .. [1] Gould, Nicholas IM, Mary E. Hribar, and Jorge Nocedal.
        "On the solution of equality constrained quadratic
        programming problems arising in optimization."
        SIAM Journal on Scientific Computing 23.4 (2001): 1376-1395.
    """
    CLOSE_TO_ZERO = 1e-25

    n, = np.shape(c)  # Number of parameters
    m, = np.shape(b)  # Number of constraints

    # Initial Values: x is the minimum-norm feasible point; the residual
    # is projected into the null space of A so every CG step stays feasible.
    x = Y.dot(-b)
    r = Z.dot(H.dot(x) + c)
    g = Z.dot(r)
    p = -g

    # Store ``x`` value
    if return_all:
        allvecs = [x]
    # Values for the first iteration
    H_p = H.dot(p)
    rt_g = norm(g)**2  # g.T g = r.T Z g = r.T g (ref [1]_ p.1389)

    # If x > trust-region the problem does not have a solution.
    tr_distance = trust_radius - norm(x)
    if tr_distance < 0:
        raise ValueError("Trust region problem does not have a solution.")
    # If x == trust_radius, then x is the solution
    # to the optimization problem, since x is the
    # minimum norm solution to Ax=b.
    elif tr_distance < CLOSE_TO_ZERO:
        info = {'niter': 0, 'stop_cond': 2, 'hits_boundary': True}
        if return_all:
            allvecs.append(x)
            info['allvecs'] = allvecs
        return x, info

    # Set default tolerance
    if tol is None:
        tol = max(min(0.01 * np.sqrt(rt_g), 0.1 * rt_g), CLOSE_TO_ZERO)
    # Set default lower and upper bounds
    if lb is None:
        lb = np.full(n, -np.inf)
    if ub is None:
        ub = np.full(n, np.inf)
    # Set maximum iterations (never more than the null-space dimension n-m)
    if max_iter is None:
        max_iter = n-m
    max_iter = min(max_iter, n-m)
    # Set maximum infeasible iterations
    if max_infeasible_iter is None:
        max_infeasible_iter = n-m

    hits_boundary = False
    stop_cond = 1
    counter = 0
    last_feasible_x = np.zeros_like(x)
    k = 0
    for i in range(max_iter):
        # Stop criteria - Tolerance : r.T g < tol
        if rt_g < tol:
            stop_cond = 4
            break
        k += 1
        # Compute curvature
        pt_H_p = H_p.dot(p)
        # Stop criteria - Negative curvature
        if pt_H_p <= 0:
            if np.isinf(trust_radius):
                raise ValueError("Negative curvature not allowed "
                                 "for unrestricted problems.")
            else:
                # Find intersection with constraints: follow the negative
                # curvature direction all the way to the boundary.
                _, alpha, intersect = box_sphere_intersections(
                    x, p, lb, ub, trust_radius, entire_line=True)
                # Update solution
                if intersect:
                    x = x + alpha*p
                # Reinforce variables are inside box constraints.
                # This is only necessary because of roundoff errors.
                x = reinforce_box_boundaries(x, lb, ub)
                # Attribute information
                stop_cond = 3
                hits_boundary = True
                break

        # Get next step
        alpha = rt_g / pt_H_p
        x_next = x + alpha*p

        # Stop criteria - Hits boundary
        if np.linalg.norm(x_next) >= trust_radius:
            # Find intersection with box constraints
            _, theta, intersect = box_sphere_intersections(x, alpha*p, lb, ub,
                                                           trust_radius)
            # Update solution
            if intersect:
                x = x + theta*alpha*p
            # Reinforce variables are inside box constraints.
            # This is only necessary because of roundoff errors.
            x = reinforce_box_boundaries(x, lb, ub)
            # Attribute information
            stop_cond = 2
            hits_boundary = True
            break

        # Check if ``x`` is inside the box and start counter if it is not.
        if inside_box_boundaries(x_next, lb, ub):
            counter = 0
        else:
            counter += 1
        # Whenever outside box constraints keep looking for intersections:
        # remember the last feasible point so it can be returned if the
        # iteration never re-enters the box.
        if counter > 0:
            _, theta, intersect = box_sphere_intersections(x, alpha*p, lb, ub,
                                                           trust_radius)
            if intersect:
                last_feasible_x = x + theta*alpha*p
                # Reinforce variables are inside box constraints.
                # This is only necessary because of roundoff errors.
                last_feasible_x = reinforce_box_boundaries(last_feasible_x,
                                                           lb, ub)
                counter = 0
        # Stop after too many infeasible (regarding box constraints) iteration.
        if counter > max_infeasible_iter:
            break
        # Store ``x_next`` value
        if return_all:
            allvecs.append(x_next)

        # Update residual
        r_next = r + alpha*H_p
        # Project residual g+ = Z r+
        g_next = Z.dot(r_next)
        # Compute conjugate direction step d
        rt_g_next = norm(g_next)**2  # g.T g = r.T g (ref [1]_ p.1389)
        beta = rt_g_next / rt_g
        p = - g_next + beta*p
        # Prepare for next iteration
        x = x_next
        g = g_next
        r = g_next
        rt_g = norm(g)**2  # g.T g = r.T Z g = r.T g (ref [1]_ p.1389)
        H_p = H.dot(p)

    # Fall back to the last feasible iterate when the final x violates
    # the box constraints.
    if not inside_box_boundaries(x, lb, ub):
        x = last_feasible_x
        hits_boundary = True
    info = {'niter': k, 'stop_cond': stop_cond,
            'hits_boundary': hits_boundary}
    if return_all:
        info['allvecs'] = allvecs
    return x, info
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/report.py ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Progress report printers."""
2
+
3
+ from __future__ import annotations
4
+
5
class ReportBase:
    """Base class for tabular progress printers.

    Subclasses define the column layout through three class attributes:
    ``COLUMN_NAMES`` (header labels), ``COLUMN_WIDTHS`` (field widths)
    and ``ITERATION_FORMATS`` (format specs applied to each value of a
    printed row).
    """

    COLUMN_NAMES: list[str] = NotImplemented
    COLUMN_WIDTHS: list[int] = NotImplemented
    ITERATION_FORMATS: list[str] = NotImplemented

    @classmethod
    def print_header(cls):
        """Print the column names followed by a dashed separator row."""
        cells = "|".join(f"{{:^{width}}}" for width in cls.COLUMN_WIDTHS)
        row_fmt = f"|{cells}|"
        print(row_fmt.format(*cls.COLUMN_NAMES))
        print(row_fmt.format(*("-" * width for width in cls.COLUMN_WIDTHS)))

    @classmethod
    def print_iteration(cls, *args):
        """Print one table row, formatting *args* per ITERATION_FORMATS."""
        cells = "|".join(f"{{:{spec}}}" for spec in cls.ITERATION_FORMATS)
        print(f"|{cells}|".format(*args))

    @classmethod
    def print_footer(cls):
        """Terminate the table with a blank line."""
        print()
28
+
29
+
30
class BasicReport(ReportBase):
    """Progress table with iteration counts, objective value, trust-region
    radius, optimality measure and constraint violation."""
    COLUMN_NAMES = ["niter", "f evals", "CG iter", "obj func", "tr radius",
                    "opt", "c viol"]
    COLUMN_WIDTHS = [7, 7, 7, 13, 10, 10, 10]
    ITERATION_FORMATS = ["^7", "^7", "^7", "^+13.4e",
                         "^10.2e", "^10.2e", "^10.2e"]
36
+
37
+
38
class SQPReport(ReportBase):
    """Progress table extending BasicReport's columns with the penalty
    parameter and the CG stopping condition."""
    COLUMN_NAMES = ["niter", "f evals", "CG iter", "obj func", "tr radius",
                    "opt", "c viol", "penalty", "CG stop"]
    COLUMN_WIDTHS = [7, 7, 7, 13, 10, 10, 10, 10, 7]
    ITERATION_FORMATS = ["^7", "^7", "^7", "^+13.4e", "^10.2e", "^10.2e",
                         "^10.2e", "^10.2e", "^7"]
44
+
45
+
46
class IPReport(ReportBase):
    """Progress table extending SQPReport's columns with the barrier
    parameter."""
    COLUMN_NAMES = ["niter", "f evals", "CG iter", "obj func", "tr radius",
                    "opt", "c viol", "penalty", "barrier param", "CG stop"]
    COLUMN_WIDTHS = [7, 7, 7, 13, 10, 10, 10, 10, 13, 7]
    ITERATION_FORMATS = ["^7", "^7", "^7", "^+13.4e", "^10.2e", "^10.2e",
                         "^10.2e", "^10.2e", "^13.2e", "^7"]
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (205 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__pycache__/test_canonical_constraint.cpython-310.pyc ADDED
Binary file (7.01 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__pycache__/test_projections.cpython-310.pyc ADDED
Binary file (6.57 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__pycache__/test_report.cpython-310.pyc ADDED
Binary file (1.44 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_canonical_constraint.py ADDED
@@ -0,0 +1,296 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from numpy.testing import assert_array_equal, assert_equal
3
+ from scipy.optimize._constraints import (NonlinearConstraint, Bounds,
4
+ PreparedConstraint)
5
+ from scipy.optimize._trustregion_constr.canonical_constraint \
6
+ import CanonicalConstraint, initial_constraints_as_canonical
7
+
8
+
9
def create_quadratic_function(n, m, rng):
    """Build a random vector-valued quadratic map with its Jacobian/Hessian.

    Returns closures ``fun``, ``jac`` and ``hess`` for
    ``fun(x) = a + A x + 0.5 * (H x) x`` with random coefficients drawn
    from ``rng``; ``hess(x, v)`` contracts the Hessian tensor with the
    multiplier vector ``v``.
    """
    # Draw coefficients in the same order (vector, matrix, tensor) so
    # the same rng seed reproduces the same function.
    offset = rng.rand(m)
    linear = rng.rand(m, n)
    quad = rng.rand(m, n, n)
    quad_t = np.transpose(quad, (1, 2, 0))

    def fun(x):
        return offset + linear.dot(x) + 0.5 * quad.dot(x).dot(x)

    def jac(x):
        return linear + quad.dot(x)

    def hess(x, v):
        # Lagrangian Hessian: sum_i v[i] * quad[i]; independent of x.
        return quad_t.dot(v)

    return fun, jac, hess
25
+
26
+
27
def test_bounds_cases():
    """Canonical form of ``Bounds`` constraints for four bound layouts:
    unbounded, lower-infinite, upper-infinite, and mixed interval/equality.
    """
    # Test 1: no constraints.
    user_constraint = Bounds(-np.inf, np.inf)
    x0 = np.array([-1, 2])
    prepared_constraint = PreparedConstraint(user_constraint, x0, False)
    c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint)

    assert_equal(c.n_eq, 0)
    assert_equal(c.n_ineq, 0)

    c_eq, c_ineq = c.fun(x0)
    assert_array_equal(c_eq, [])
    assert_array_equal(c_ineq, [])

    J_eq, J_ineq = c.jac(x0)
    assert_array_equal(J_eq, np.empty((0, 2)))
    assert_array_equal(J_ineq, np.empty((0, 2)))

    assert_array_equal(c.keep_feasible, [])

    # Test 2: infinite lower bound. Only the finite upper bounds
    # contribute inequalities of the form x - ub <= 0.
    user_constraint = Bounds(-np.inf, [0, np.inf, 1], [False, True, True])
    x0 = np.array([-1, -2, -3], dtype=float)
    prepared_constraint = PreparedConstraint(user_constraint, x0, False)
    c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint)

    assert_equal(c.n_eq, 0)
    assert_equal(c.n_ineq, 2)

    c_eq, c_ineq = c.fun(x0)
    assert_array_equal(c_eq, [])
    assert_array_equal(c_ineq, [-1, -4])

    J_eq, J_ineq = c.jac(x0)
    assert_array_equal(J_eq, np.empty((0, 3)))
    assert_array_equal(J_ineq, np.array([[1, 0, 0], [0, 0, 1]]))

    assert_array_equal(c.keep_feasible, [False, True])

    # Test 3: infinite upper bound. Only the finite lower bounds
    # contribute inequalities of the form lb - x <= 0.
    user_constraint = Bounds([0, 1, -np.inf], np.inf, [True, False, True])
    x0 = np.array([1, 2, 3], dtype=float)
    prepared_constraint = PreparedConstraint(user_constraint, x0, False)
    c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint)

    assert_equal(c.n_eq, 0)
    assert_equal(c.n_ineq, 2)

    c_eq, c_ineq = c.fun(x0)
    assert_array_equal(c_eq, [])
    assert_array_equal(c_ineq, [-1, -1])

    J_eq, J_ineq = c.jac(x0)
    assert_array_equal(J_eq, np.empty((0, 3)))
    assert_array_equal(J_ineq, np.array([[-1, 0, 0], [0, -1, 0]]))

    assert_array_equal(c.keep_feasible, [True, False])

    # Test 4: interval constraint. Equal lower/upper bounds (component 3)
    # become an equality; finite two-sided bounds yield two inequalities.
    user_constraint = Bounds([-1, -np.inf, 2, 3], [1, np.inf, 10, 3],
                             [False, True, True, True])
    x0 = np.array([0, 10, 8, 5])
    prepared_constraint = PreparedConstraint(user_constraint, x0, False)
    c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint)

    assert_equal(c.n_eq, 1)
    assert_equal(c.n_ineq, 4)

    c_eq, c_ineq = c.fun(x0)
    assert_array_equal(c_eq, [2])
    assert_array_equal(c_ineq, [-1, -2, -1, -6])

    J_eq, J_ineq = c.jac(x0)
    assert_array_equal(J_eq, [[0, 0, 0, 1]])
    assert_array_equal(J_ineq, [[1, 0, 0, 0],
                                [0, 0, 1, 0],
                                [-1, 0, 0, 0],
                                [0, 0, -1, 0]])

    assert_array_equal(c.keep_feasible, [False, True, False, True])
107
+
108
+
109
def test_nonlinear_constraint():
    """Canonical form of a ``NonlinearConstraint`` with mixed bound types,
    checked for both dense and sparse Jacobian representations.
    """
    n = 3
    m = 5
    rng = np.random.RandomState(0)
    x0 = rng.rand(n)

    fun, jac, hess = create_quadratic_function(n, m, rng)
    f = fun(x0)
    J = jac(x0)

    # Component 1 has lb == ub (equality); components 0, 3, 4 contribute
    # inequalities; component 2 is unbounded on both sides.
    lb = [-10, 3, -np.inf, -np.inf, -5]
    ub = [10, 3, np.inf, 3, np.inf]
    user_constraint = NonlinearConstraint(
        fun, lb, ub, jac, hess, [True, False, False, True, False])

    for sparse_jacobian in [False, True]:
        prepared_constraint = PreparedConstraint(user_constraint, x0,
                                                 sparse_jacobian)
        c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint)

        assert_array_equal(c.n_eq, 1)
        assert_array_equal(c.n_ineq, 4)

        c_eq, c_ineq = c.fun(x0)
        assert_array_equal(c_eq, [f[1] - lb[1]])
        assert_array_equal(c_ineq, [f[3] - ub[3], lb[4] - f[4],
                                    f[0] - ub[0], lb[0] - f[0]])

        J_eq, J_ineq = c.jac(x0)
        if sparse_jacobian:
            J_eq = J_eq.toarray()
            J_ineq = J_ineq.toarray()

        assert_array_equal(J_eq, J[1, None])
        assert_array_equal(J_ineq, np.vstack((J[3], -J[4], J[0], -J[0])))

        # The canonical Hessian must equal the original Hessian contracted
        # with multipliers mapped back to the user's constraint ordering.
        v_eq = rng.rand(c.n_eq)
        v_ineq = rng.rand(c.n_ineq)
        v = np.zeros(m)
        v[1] = v_eq[0]
        v[3] = v_ineq[0]
        v[4] = -v_ineq[1]
        v[0] = v_ineq[2] - v_ineq[3]
        assert_array_equal(c.hess(x0, v_eq, v_ineq), hess(x0, v))

        assert_array_equal(c.keep_feasible, [True, False, True, True])
155
+
156
+
157
def test_concatenation():
    """Concatenation of a ``Bounds`` and a ``NonlinearConstraint`` canonical
    constraint: equalities first, then the inequalities of each part,
    for both dense and sparse Jacobian representations.
    """
    rng = np.random.RandomState(0)
    n = 4
    x0 = rng.rand(n)

    # Bounds part: identity "function" with one equality (lb == ub) and
    # three inequality rows.
    f1 = x0
    J1 = np.eye(n)
    lb1 = [-1, -np.inf, -2, 3]
    ub1 = [1, np.inf, np.inf, 3]
    bounds = Bounds(lb1, ub1, [False, False, True, False])

    # Nonlinear part: random quadratic map with mixed bound types.
    fun, jac, hess = create_quadratic_function(n, 5, rng)
    f2 = fun(x0)
    J2 = jac(x0)
    lb2 = [-10, 3, -np.inf, -np.inf, -5]
    ub2 = [10, 3, np.inf, 5, np.inf]
    nonlinear = NonlinearConstraint(
        fun, lb2, ub2, jac, hess, [True, False, False, True, False])

    for sparse_jacobian in [False, True]:
        bounds_prepared = PreparedConstraint(bounds, x0, sparse_jacobian)
        nonlinear_prepared = PreparedConstraint(nonlinear, x0, sparse_jacobian)

        c1 = CanonicalConstraint.from_PreparedConstraint(bounds_prepared)
        c2 = CanonicalConstraint.from_PreparedConstraint(nonlinear_prepared)
        c = CanonicalConstraint.concatenate([c1, c2], sparse_jacobian)

        assert_equal(c.n_eq, 2)
        assert_equal(c.n_ineq, 7)

        c_eq, c_ineq = c.fun(x0)
        assert_array_equal(c_eq, [f1[3] - lb1[3], f2[1] - lb2[1]])
        assert_array_equal(c_ineq, [lb1[2] - f1[2], f1[0] - ub1[0],
                                    lb1[0] - f1[0], f2[3] - ub2[3],
                                    lb2[4] - f2[4], f2[0] - ub2[0],
                                    lb2[0] - f2[0]])

        J_eq, J_ineq = c.jac(x0)
        if sparse_jacobian:
            J_eq = J_eq.toarray()
            J_ineq = J_ineq.toarray()

        assert_array_equal(J_eq, np.vstack((J1[3], J2[1])))
        assert_array_equal(J_ineq, np.vstack((-J1[2], J1[0], -J1[0], J2[3],
                                              -J2[4], J2[0], -J2[0])))

        # Only the nonlinear part contributes second derivatives; map the
        # canonical multipliers back onto its five components.
        v_eq = rng.rand(c.n_eq)
        v_ineq = rng.rand(c.n_ineq)
        v = np.zeros(5)
        v[1] = v_eq[1]
        v[3] = v_ineq[3]
        v[4] = -v_ineq[4]
        v[0] = v_ineq[5] - v_ineq[6]
        H = c.hess(x0, v_eq, v_ineq).dot(np.eye(n))
        assert_array_equal(H, hess(x0, v))

        assert_array_equal(c.keep_feasible,
                           [True, False, False, True, False, True, True])
215
+
216
+
217
def test_empty():
    """An empty canonical constraint reports zero constraints and yields
    zero-sized function values, Jacobians and a zero Hessian operator."""
    point = np.array([1, 2, 3])
    empty = CanonicalConstraint.empty(3)

    assert_equal(empty.n_eq, 0)
    assert_equal(empty.n_ineq, 0)

    c_eq, c_ineq = empty.fun(point)
    assert_array_equal(c_eq, [])
    assert_array_equal(c_ineq, [])

    J_eq, J_ineq = empty.jac(point)
    assert_array_equal(J_eq, np.empty((0, 3)))
    assert_array_equal(J_ineq, np.empty((0, 3)))

    # The Hessian of "no constraints" is the zero operator.
    H = empty.hess(point, None, None).toarray()
    assert_array_equal(H, np.zeros((3, 3)))
235
def test_initial_constraints_as_canonical():
    """initial_constraints_as_canonical must reuse the values already stored
    in the prepared constraints rather than re-evaluating the functions."""
    # rng only seeds the coefficients of the quadratic map used by the
    # nonlinear constraint.
    rng = np.random.RandomState(0)

    x0 = np.array([0.5, 0.4, 0.3, 0.2])
    n = len(x0)

    lb1 = [-1, -np.inf, -2, 3]
    ub1 = [1, np.inf, np.inf, 3]
    bounds = Bounds(lb1, ub1, [False, False, True, False])

    fun, jac, hess = create_quadratic_function(n, 5, rng)
    lb2 = [-10, 3, -np.inf, -np.inf, -5]
    ub2 = [10, 3, np.inf, 5, np.inf]
    nonlinear = NonlinearConstraint(
        fun, lb2, ub2, jac, hess, [True, False, False, True, False])

    for use_sparse in (False, True):
        bounds_prepared = PreparedConstraint(bounds, x0, use_sparse)
        nonlinear_prepared = PreparedConstraint(nonlinear, x0, use_sparse)

        # Values cached at preparation time; the canonical form must be
        # built from exactly these.
        f1 = bounds_prepared.fun.f
        J1 = bounds_prepared.fun.J
        f2 = nonlinear_prepared.fun.f
        J2 = nonlinear_prepared.fun.J

        c_eq, c_ineq, J_eq, J_ineq = initial_constraints_as_canonical(
            n, [bounds_prepared, nonlinear_prepared], use_sparse)

        assert_array_equal(c_eq, [f1[3] - lb1[3], f2[1] - lb2[1]])
        assert_array_equal(c_ineq, [lb1[2] - f1[2], f1[0] - ub1[0],
                                    lb1[0] - f1[0], f2[3] - ub2[3],
                                    lb2[4] - f2[4], f2[0] - ub2[0],
                                    lb2[0] - f2[0]])

        if use_sparse:
            J1 = J1.toarray()
            J2 = J2.toarray()
            J_eq = J_eq.toarray()
            J_ineq = J_ineq.toarray()

        assert_array_equal(J_eq, np.vstack((J1[3], J2[1])))
        assert_array_equal(J_ineq, np.vstack((-J1[2], J1[0], -J1[0], J2[3],
                                              -J2[4], J2[0], -J2[0])))
282
def test_initial_constraints_as_canonical_empty():
    """With no constraints the canonical form is a pair of empty vectors
    and a pair of 0 x n Jacobians."""
    n = 3
    for use_sparse in (False, True):
        c_eq, c_ineq, J_eq, J_ineq = initial_constraints_as_canonical(
            n, [], use_sparse)

        assert_array_equal(c_eq, [])
        assert_array_equal(c_ineq, [])

        if use_sparse:
            J_eq = J_eq.toarray()
            J_ineq = J_ineq.toarray()

        assert_array_equal(J_eq, np.empty((0, n)))
        assert_array_equal(J_ineq, np.empty((0, n)))
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_projections.py ADDED
@@ -0,0 +1,214 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import scipy.linalg
3
+ from scipy.sparse import csc_matrix
4
+ from scipy.optimize._trustregion_constr.projections \
5
+ import projections, orthogonality
6
+ from numpy.testing import (TestCase, assert_array_almost_equal,
7
+ assert_equal, assert_allclose)
8
+
9
+ try:
10
+ from sksparse.cholmod import cholesky_AAt # noqa: F401
11
+ sksparse_available = True
12
+ available_sparse_methods = ("NormalEquation", "AugmentedSystem")
13
+ except ImportError:
14
+ sksparse_available = False
15
+ available_sparse_methods = ("AugmentedSystem",)
16
+ available_dense_methods = ('QRFactorization', 'SVDFactorization')
17
+
18
+
19
class TestProjections(TestCase):
    """Checks on the null-space, least-squares and row-space operators
    returned by projections(), in both dense and sparse form."""

    def test_nullspace_and_least_squares_sparse(self):
        dense = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
                          [0, 8, 7, 0, 1, 5, 9, 0],
                          [1, 0, 0, 0, 0, 1, 2, 3]])
        sparse = csc_matrix(dense)
        vectors = ([1, 2, 3, 4, 5, 6, 7, 8],
                   [1, 10, 3, 0, 1, 6, 7, 8],
                   [1.12, 10, 0, 0, 100000, 6, 0.7, 8])

        for method in available_sparse_methods:
            null_proj, lsq, _ = projections(sparse, method)
            for vec in vectors:
                # The projection must land in the null space of A ...
                x = null_proj.matvec(vec)
                assert_array_almost_equal(sparse.dot(x), 0)
                # ... and be orthogonal to the rows of A.
                assert_array_almost_equal(orthogonality(sparse, x), 0)
                # The least-squares operator must agree with direct lstsq.
                x = lsq.matvec(vec)
                reference = scipy.linalg.lstsq(dense.T, vec)[0]
                assert_array_almost_equal(x, reference)

    def test_iterative_refinements_sparse(self):
        dense = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
                          [0, 8, 7, 0, 1, 5, 9, 0],
                          [1, 0, 0, 0, 0, 1, 2, 3]])
        sparse = csc_matrix(dense)
        vectors = ([1, 2, 3, 4, 5, 6, 7, 8],
                   [1, 10, 3, 0, 1, 6, 7, 8],
                   [1.12, 10, 0, 0, 100000, 6, 0.7, 8],
                   [1, 0, 0, 0, 0, 1, 2, 3+1e-10])

        for method in available_sparse_methods:
            null_proj, _, _ = projections(sparse, method,
                                          orth_tol=1e-18, max_refin=100)
            for vec in vectors:
                x = null_proj.matvec(vec)
                # Tolerance scales with the magnitude of the projection.
                assert_allclose(sparse.dot(x), 0, atol=1e-13 * abs(x).max())
                assert_allclose(orthogonality(sparse, x), 0, atol=1e-13)

    def test_rowspace_sparse(self):
        dense = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
                          [0, 8, 7, 0, 1, 5, 9, 0],
                          [1, 0, 0, 0, 0, 1, 2, 3]])
        sparse = csc_matrix(dense)
        rhs_vectors = ([1, 2, 3],
                       [1, 10, 3],
                       [1.12, 10, 0])

        for method in available_sparse_methods:
            _, _, rowspace = projections(sparse, method)
            for rhs in rhs_vectors:
                # x solves A x = rhs ...
                x = rowspace.matvec(rhs)
                assert_array_almost_equal(sparse.dot(x), rhs)
                # ... and lies in the row space of A: appending it to the
                # rows must not increase the rank.
                stacked = np.vstack((dense, x))
                assert_equal(np.linalg.matrix_rank(dense),
                             np.linalg.matrix_rank(stacked))

    def test_nullspace_and_least_squares_dense(self):
        A = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
                      [0, 8, 7, 0, 1, 5, 9, 0],
                      [1, 0, 0, 0, 0, 1, 2, 3]])
        vectors = ([1, 2, 3, 4, 5, 6, 7, 8],
                   [1, 10, 3, 0, 1, 6, 7, 8],
                   [1.12, 10, 0, 0, 100000, 6, 0.7, 8])

        for method in available_dense_methods:
            null_proj, lsq, _ = projections(A, method)
            for vec in vectors:
                # Null-space membership and orthogonality to the rows.
                x = null_proj.matvec(vec)
                assert_array_almost_equal(A.dot(x), 0)
                assert_array_almost_equal(orthogonality(A, x), 0)
                # Least-squares solution against scipy.linalg.lstsq.
                x = lsq.matvec(vec)
                assert_array_almost_equal(x, scipy.linalg.lstsq(A.T, vec)[0])

    def test_compare_dense_and_sparse(self):
        # A wide 100 x 400 matrix built from four diagonal blocks; the
        # dense and sparse code paths must give matching operators.
        D = np.diag(range(1, 101))
        A = np.hstack([D, D, D, D])
        A_sparse = csc_matrix(A)
        np.random.seed(0)

        Z, LS, Y = projections(A)
        Z_s, LS_s, Y_s = projections(A_sparse)
        for _ in range(20):
            z = np.random.normal(size=(400,))
            assert_array_almost_equal(Z.dot(z), Z_s.dot(z))
            assert_array_almost_equal(LS.dot(z), LS_s.dot(z))
            x = np.random.normal(size=(100,))
            assert_array_almost_equal(Y.dot(x), Y_s.dot(x))

    def test_compare_dense_and_sparse2(self):
        # Same comparison on a small matrix with mixed-sign diagonals.
        D1 = np.diag([-1.7, 1, 0.5])
        D2 = np.diag([1, -0.6, -0.3])
        D3 = np.diag([-0.3, -1.5, 2])
        A = np.hstack([D1, D2, D3])
        A_sparse = csc_matrix(A)
        np.random.seed(0)

        Z, LS, Y = projections(A)
        Z_s, LS_s, Y_s = projections(A_sparse)
        z = np.random.normal(size=(9,))
        assert_array_almost_equal(Z.dot(z), Z_s.dot(z))
        assert_array_almost_equal(LS.dot(z), LS_s.dot(z))
        x = np.random.normal(size=(3,))
        assert_array_almost_equal(Y.dot(x), Y_s.dot(x))

    def test_iterative_refinements_dense(self):
        A = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
                      [0, 8, 7, 0, 1, 5, 9, 0],
                      [1, 0, 0, 0, 0, 1, 2, 3]])
        vectors = ([1, 2, 3, 4, 5, 6, 7, 8],
                   [1, 10, 3, 0, 1, 6, 7, 8],
                   [1, 0, 0, 0, 0, 1, 2, 3+1e-10])

        for method in available_dense_methods:
            null_proj, _, _ = projections(A, method,
                                          orth_tol=1e-18, max_refin=10)
            for vec in vectors:
                x = null_proj.matvec(vec)
                assert_allclose(A.dot(x), 0, rtol=0, atol=2.5e-14)
                assert_allclose(orthogonality(A, x), 0, rtol=0, atol=5e-16)

    def test_rowspace_dense(self):
        A = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
                      [0, 8, 7, 0, 1, 5, 9, 0],
                      [1, 0, 0, 0, 0, 1, 2, 3]])
        rhs_vectors = ([1, 2, 3],
                       [1, 10, 3],
                       [1.12, 10, 0])

        for method in available_dense_methods:
            _, _, rowspace = projections(A, method)
            for rhs in rhs_vectors:
                # x solves A x = rhs and lies in the row space of A.
                x = rowspace.matvec(rhs)
                assert_array_almost_equal(A.dot(x), rhs)
                stacked = np.vstack((A, x))
                assert_equal(np.linalg.matrix_rank(A),
                             np.linalg.matrix_rank(stacked))
175
class TestOrthogonality(TestCase):
    """orthogonality() should report ~0 for vectors orthogonal to the rows
    of A, regardless of the vector's magnitude."""

    def test_dense_matrix(self):
        A = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
                      [0, 8, 7, 0, 1, 5, 9, 0],
                      [1, 0, 0, 0, 0, 1, 2, 3]])
        # One vector of moderate scale, one of very large scale.
        vectors = ([-1.98931144, -1.56363389,
                    -0.84115584, 2.2864762,
                    5.599141, 0.09286976,
                    1.37040802, -0.28145812],
                   [697.92794044, -4091.65114008,
                    -3327.42316335, 836.86906951,
                    99434.98929065, -1285.37653682,
                    -4109.21503806, 2935.29289083])
        expected = (0, 0)

        for vec, orth in zip(vectors, expected):
            assert_array_almost_equal(orthogonality(A, vec), orth)

    def test_sparse_matrix(self):
        A = csc_matrix([[1, 2, 3, 4, 0, 5, 0, 7],
                        [0, 8, 7, 0, 1, 5, 9, 0],
                        [1, 0, 0, 0, 0, 1, 2, 3]])
        vectors = ([-1.98931144, -1.56363389,
                    -0.84115584, 2.2864762,
                    5.599141, 0.09286976,
                    1.37040802, -0.28145812],
                   [697.92794044, -4091.65114008,
                    -3327.42316335, 836.86906951,
                    99434.98929065, -1285.37653682,
                    -4109.21503806, 2935.29289083])
        expected = (0, 0)

        for vec, orth in zip(vectors, expected):
            assert_array_almost_equal(orthogonality(A, vec), orth)
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_qp_subproblem.py ADDED
@@ -0,0 +1,645 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from scipy.sparse import csc_matrix
3
+ from scipy.optimize._trustregion_constr.qp_subproblem \
4
+ import (eqp_kktfact,
5
+ projected_cg,
6
+ box_intersections,
7
+ sphere_intersections,
8
+ box_sphere_intersections,
9
+ modified_dogleg)
10
+ from scipy.optimize._trustregion_constr.projections \
11
+ import projections
12
+ from numpy.testing import TestCase, assert_array_almost_equal, assert_equal
13
+ import pytest
14
+
15
+
16
class TestEQPDirectFactorization(TestCase):

    def test_nocedal_example(self):
        """Example 16.2 from Nocedal & Wright, "Numerical Optimization",
        p. 452: an equality-constrained QP with a known KKT solution."""
        H = csc_matrix([[6, 2, 1],
                        [2, 5, 2],
                        [1, 2, 4]])
        A = csc_matrix([[1, 0, 1],
                        [0, 1, 1]])
        c = np.array([-8, -3, -3])
        b = -np.array([3, 0])

        solution, multipliers = eqp_kktfact(H, c, A, b)
        assert_array_almost_equal(solution, [2, -1, 1])
        assert_array_almost_equal(multipliers, [3, -2])
33
class TestSphericalBoundariesIntersections(TestCase):
    """sphere_intersections: intersection of the segment/line x0 + t*d
    with a sphere of a given radius centered at the origin."""

    def test_2d_sphere_constraints(self):
        # (x0, d, radius, expected (ta, tb) or None when no intersection)
        cases = [
            # Interior starting point.
            ([0, 0], [1, 0], 0.5, (0, 0.5)),
            # Line misses the circle entirely.
            ([2, 0], [0, 1], 1, None),
            # Outside the circle, pointing away from it.
            ([2, 0], [1, 0], 1, None),
            # Outside the circle, pointing toward it.
            ([2, 0], [-1, 0], 1.5, (0.5, 1)),
            # Starting point exactly on the boundary.
            ([2, 0], [1, 0], 2, (0, 0)),
        ]
        for x0, d, radius, expected in cases:
            ta, tb, intersect = sphere_intersections(x0, d, radius)
            if expected is None:
                assert_equal(intersect, False)
            else:
                assert_array_almost_equal([ta, tb], list(expected))
                assert_equal(intersect, True)

    def test_2d_sphere_constraints_line_intersections(self):
        # Same geometry, but considering the whole line (entire_line=True).
        cases = [
            # Interior starting point.
            ([0, 0], [1, 0], 0.5, (-0.5, 0.5)),
            # Line misses the circle entirely.
            ([2, 0], [0, 1], 1, None),
            # Outside, pointing away: intersection is at negative t.
            ([2, 0], [1, 0], 1, (-3, -1)),
            # Outside, pointing toward the circle.
            ([2, 0], [-1, 0], 1.5, (0.5, 3.5)),
            # Starting point on the boundary.
            ([2, 0], [1, 0], 2, (-4, 0)),
        ]
        for x0, d, radius, expected in cases:
            ta, tb, intersect = sphere_intersections(x0, d, radius,
                                                     entire_line=True)
            if expected is None:
                assert_equal(intersect, False)
            else:
                assert_array_almost_equal([ta, tb], list(expected))
                assert_equal(intersect, True)
100
class TestBoxBoundariesIntersections(TestCase):
    """box_intersections: intersection of the segment/line x0 + t*d with
    an axis-aligned box given by lower/upper bounds."""

    def test_2d_box_constraints(self):
        # (x0, d, lb, ub, expected (ta, tb) or None when no intersection)
        cases = [
            # Box reached along the direction of d.
            ([2, 0], [0, 2], [1, 1], [3, 3], (0.5, 1)),
            # Box lies in the negative direction.
            ([2, 0], [0, 2], [1, -3], [3, -1], None),
            # Some bounds absent (set to +/- inf).
            ([2, 0], [0, 2], [-np.inf, 1], [np.inf, np.inf], (0.5, 1)),
            # Intersection exactly on a face of the box.
            ([1, 0], [0, 1], [1, 1], [3, 3], (1, 1)),
            # Interior starting point.
            ([0, 0], [4, 4], [-2, -3], [3, 2], (0, 0.5)),
            # No intersection between line and box.
            ([2, 0], [0, 2], [-3, -3], [-1, -1], None),
            ([2, 0], [0, 2], [-3, 3], [-1, 1], None),
            ([2, 0], [0, 2], [-3, -np.inf], [-1, np.inf], None),
            ([0, 0], [1, 100], [1, 1], [3, 3], None),
            ([0.99, 0], [0, 2], [1, 1], [3, 3], None),
            # Starting point on the boundary.
            ([2, 2], [0, 1], [-2, -2], [2, 2], (0, 0)),
        ]
        for x0, d, lb, ub, expected in cases:
            ta, tb, intersect = box_intersections(x0, d, lb, ub)
            if expected is None:
                assert_equal(intersect, False)
            else:
                assert_array_almost_equal([ta, tb], list(expected))
                assert_equal(intersect, True)

    def test_2d_box_constraints_entire_line(self):
        # Same geometry, but considering the whole line (entire_line=True).
        cases = [
            # Box reached along the direction of d.
            ([2, 0], [0, 2], [1, 1], [3, 3], (0.5, 1.5)),
            # Box in the negative direction: negative t-interval.
            ([2, 0], [0, 2], [1, -3], [3, -1], (-1.5, -0.5)),
            # Some bounds absent (set to +/- inf).
            ([2, 0], [0, 2], [-np.inf, 1], [np.inf, np.inf], (0.5, np.inf)),
            # Intersection on a face of the box.
            ([1, 0], [0, 1], [1, 1], [3, 3], (1, 3)),
            # Interior starting point.
            ([0, 0], [4, 4], [-2, -3], [3, 2], (-0.5, 0.5)),
            # No intersection between line and box.
            ([2, 0], [0, 2], [-3, -3], [-1, -1], None),
            ([2, 0], [0, 2], [-3, 3], [-1, 1], None),
            ([2, 0], [0, 2], [-3, -np.inf], [-1, np.inf], None),
            ([0, 0], [1, 100], [1, 1], [3, 3], None),
            ([0.99, 0], [0, 2], [1, 1], [3, 3], None),
            # Starting point on the boundary.
            ([2, 2], [0, 1], [-2, -2], [2, 2], (-4, 0)),
        ]
        for x0, d, lb, ub, expected in cases:
            ta, tb, intersect = box_intersections(x0, d, lb, ub,
                                                  entire_line=True)
            if expected is None:
                assert_equal(intersect, False)
            else:
                assert_array_almost_equal([ta, tb], list(expected))
                assert_equal(intersect, True)

    def test_3d_box_constraints(self):
        cases = [
            # Simple case: box reached along d.
            ([1, 1, 0], [0, 0, 1], (1, 1)),
            # Box lies in the negative direction.
            ([1, 1, 0], [0, 0, -1], None),
            # Interior starting point.
            ([2, 2, 2], [0, -1, 1], (0, 1)),
        ]
        for x0, d, expected in cases:
            ta, tb, intersect = box_intersections(x0, d,
                                                  [1, 1, 1], [3, 3, 3])
            if expected is None:
                assert_equal(intersect, False)
            else:
                assert_array_almost_equal([ta, tb], list(expected))
                assert_equal(intersect, True)

    def test_3d_box_constraints_entire_line(self):
        cases = [
            # Simple case: box reached along d.
            ([1, 1, 0], [0, 0, 1], (1, 3)),
            # Box in the negative direction: negative t-interval.
            ([1, 1, 0], [0, 0, -1], (-3, -1)),
            # Interior starting point.
            ([2, 2, 2], [0, -1, 1], (-1, 1)),
        ]
        for x0, d, expected in cases:
            ta, tb, intersect = box_intersections(x0, d,
                                                  [1, 1, 1], [3, 3, 3],
                                                  entire_line=True)
            assert_array_almost_equal([ta, tb], list(expected))
            assert_equal(intersect, True)
+
265
class TestBoxSphereBoundariesIntersections(TestCase):
    """box_sphere_intersections: feasible t-interval of x0 + t*d subject
    simultaneously to box bounds and a trust-region sphere."""

    def test_2d_box_constraints(self):
        # (x0, d, lb, ub, radius, expected (ta, tb) or None if infeasible)
        cases = [
            # Both the box and the sphere restrict the interval.
            ([1, 1], [-2, 2], [-1, -2], [1, 2], 2, (0, 0.5)),
            # Neither constraint is active.
            ([1, 1], [-1, 1], [-1, -3], [1, 3], 10, (0, 1)),
            # Only the box constraints are active.
            ([1, 1], [-4, 4], [-1, -3], [1, 3], 10, (0, 0.5)),
            # Only the spherical constraint is active.
            ([1, 1], [-4, 4], [-1, -3], [1, 3], 2, (0, 0.25)),
            # Infeasible problems.
            ([2, 2], [-4, 4], [-1, -3], [1, 3], 2, None),
            ([1, 1], [-4, 4], [2, 4], [2, 4], 2, None),
        ]
        for x0, d, lb, ub, radius, expected in cases:
            ta, tb, intersect = box_sphere_intersections(x0, d, lb, ub,
                                                         radius,
                                                         entire_line=False)
            if expected is None:
                assert_equal(intersect, False)
            else:
                assert_array_almost_equal([ta, tb], list(expected))
                assert_equal(intersect, True)

    def test_2d_box_constraints_entire_line(self):
        # Same geometry, but considering the whole line (entire_line=True);
        # only the "no active constraint" case gains a wider interval.
        cases = [
            # Both the box and the sphere restrict the interval.
            ([1, 1], [-2, 2], [-1, -2], [1, 2], 2, (0, 0.5)),
            # Neither constraint is active.
            ([1, 1], [-1, 1], [-1, -3], [1, 3], 10, (0, 2)),
            # Only the box constraints are active.
            ([1, 1], [-4, 4], [-1, -3], [1, 3], 10, (0, 0.5)),
            # Only the spherical constraint is active.
            ([1, 1], [-4, 4], [-1, -3], [1, 3], 2, (0, 0.25)),
            # Infeasible problems.
            ([2, 2], [-4, 4], [-1, -3], [1, 3], 2, None),
            ([1, 1], [-4, 4], [2, 4], [2, 4], 2, None),
        ]
        for x0, d, lb, ub, radius, expected in cases:
            ta, tb, intersect = box_sphere_intersections(x0, d, lb, ub,
                                                         radius,
                                                         entire_line=True)
            if expected is None:
                assert_equal(intersect, False)
            else:
                assert_array_almost_equal([ta, tb], list(expected))
                assert_equal(intersect, True)
+
346
class TestModifiedDogleg(TestCase):
    """modified_dogleg: dogleg step for the normal subproblem, restricted
    by a trust-region radius and box bounds."""

    def test_cauchypoint_equalsto_newtonpoint(self):
        A = np.array([[1, 8]])
        b = np.array([-16])
        _, _, Y = projections(A)
        newton_point = np.array([0.24615385, 1.96923077])

        # No active constraint: the step is the Newton point itself.
        step = modified_dogleg(A, Y, b, 2, [-np.inf, -np.inf],
                               [np.inf, np.inf])
        assert_array_almost_equal(step, newton_point)

        # Trust-region radius active: Newton point rescaled to the sphere.
        step = modified_dogleg(A, Y, b, 1, [-np.inf, -np.inf],
                               [np.inf, np.inf])
        assert_array_almost_equal(
            step, newton_point/np.linalg.norm(newton_point))

        # Upper bound on the first coordinate active.
        step = modified_dogleg(A, Y, b, 2, [-np.inf, -np.inf],
                               [0.1, np.inf])
        assert_array_almost_equal(step, (newton_point/newton_point[0]) * 0.1)

    def test_3d_example(self):
        A = np.array([[1, 8, 1],
                      [4, 2, 2]])
        b = np.array([-16, 2])
        _, _, Y = projections(A)

        newton_point = np.array([-1.37090909, 2.23272727, -0.49090909])
        cauchy_point = np.array([0.11165723, 1.73068711, 0.16748585])
        origin = np.zeros_like(newton_point)

        # Newton point inside all boundaries.
        x = modified_dogleg(A, Y, b, 3, [-np.inf, -np.inf, -np.inf],
                            [np.inf, np.inf, np.inf])
        assert_array_almost_equal(x, newton_point)

        # Best point on the cauchy -> newton segment; the spherical
        # constraint is active (all components share the same t).
        x = modified_dogleg(A, Y, b, 2, [-np.inf, -np.inf, -np.inf],
                            [np.inf, np.inf, np.inf])
        t = (x - cauchy_point) / (newton_point - cauchy_point)
        assert_array_almost_equal(t, np.full(3, 0.40807330))
        assert_array_almost_equal(np.linalg.norm(x), 2)

        # Best point on the cauchy -> newton segment; box active.
        x = modified_dogleg(A, Y, b, 5, [-1, -np.inf, -np.inf],
                            [np.inf, np.inf, np.inf])
        t = (x - cauchy_point) / (newton_point - cauchy_point)
        assert_array_almost_equal(t, np.full(3, 0.7498195))
        assert_array_almost_equal(x[0], -1)

        # Best point on the origin -> cauchy segment; sphere active.
        x = modified_dogleg(A, Y, b, 1, [-np.inf, -np.inf, -np.inf],
                            [np.inf, np.inf, np.inf])
        t = (x - origin) / cauchy_point
        assert_array_almost_equal(t, np.full(3, 0.573936265))
        assert_array_almost_equal(np.linalg.norm(x), 1)

        # Best point on the origin -> newton segment; box active.
        x = modified_dogleg(A, Y, b, 2, [-np.inf, -np.inf, -np.inf],
                            [np.inf, 1, np.inf])
        t = (x - origin) / newton_point
        assert_array_almost_equal(t, np.full(3, 0.4478827364))
        assert_array_almost_equal(x[1], 1)
422
+ class TestProjectCG(TestCase):
423
+
424
+ # From Example 16.2 Nocedal/Wright "Numerical
425
+ # Optimization" p.452.
426
+ def test_nocedal_example(self):
427
+ H = csc_matrix([[6, 2, 1],
428
+ [2, 5, 2],
429
+ [1, 2, 4]])
430
+ A = csc_matrix([[1, 0, 1],
431
+ [0, 1, 1]])
432
+ c = np.array([-8, -3, -3])
433
+ b = -np.array([3, 0])
434
+ Z, _, Y = projections(A)
435
+ x, info = projected_cg(H, c, Z, Y, b)
436
+ assert_equal(info["stop_cond"], 4)
437
+ assert_equal(info["hits_boundary"], False)
438
+ assert_array_almost_equal(x, [2, -1, 1])
439
+
440
+ def test_compare_with_direct_fact(self):
441
+ H = csc_matrix([[6, 2, 1, 3],
442
+ [2, 5, 2, 4],
443
+ [1, 2, 4, 5],
444
+ [3, 4, 5, 7]])
445
+ A = csc_matrix([[1, 0, 1, 0],
446
+ [0, 1, 1, 1]])
447
+ c = np.array([-2, -3, -3, 1])
448
+ b = -np.array([3, 0])
449
+ Z, _, Y = projections(A)
450
+ x, info = projected_cg(H, c, Z, Y, b, tol=0)
451
+ x_kkt, _ = eqp_kktfact(H, c, A, b)
452
+ assert_equal(info["stop_cond"], 1)
453
+ assert_equal(info["hits_boundary"], False)
454
+ assert_array_almost_equal(x, x_kkt)
455
+
456
+ def test_trust_region_infeasible(self):
457
+ H = csc_matrix([[6, 2, 1, 3],
458
+ [2, 5, 2, 4],
459
+ [1, 2, 4, 5],
460
+ [3, 4, 5, 7]])
461
+ A = csc_matrix([[1, 0, 1, 0],
462
+ [0, 1, 1, 1]])
463
+ c = np.array([-2, -3, -3, 1])
464
+ b = -np.array([3, 0])
465
+ trust_radius = 1
466
+ Z, _, Y = projections(A)
467
+ with pytest.raises(ValueError):
468
+ projected_cg(H, c, Z, Y, b, trust_radius=trust_radius)
469
+
470
+ def test_trust_region_barely_feasible(self):
471
+ H = csc_matrix([[6, 2, 1, 3],
472
+ [2, 5, 2, 4],
473
+ [1, 2, 4, 5],
474
+ [3, 4, 5, 7]])
475
+ A = csc_matrix([[1, 0, 1, 0],
476
+ [0, 1, 1, 1]])
477
+ c = np.array([-2, -3, -3, 1])
478
+ b = -np.array([3, 0])
479
+ trust_radius = 2.32379000772445021283
480
+ Z, _, Y = projections(A)
481
+ x, info = projected_cg(H, c, Z, Y, b,
482
+ tol=0,
483
+ trust_radius=trust_radius)
484
+ assert_equal(info["stop_cond"], 2)
485
+ assert_equal(info["hits_boundary"], True)
486
+ assert_array_almost_equal(np.linalg.norm(x), trust_radius)
487
+ assert_array_almost_equal(x, -Y.dot(b))
488
+
489
+ def test_hits_boundary(self):
490
+ H = csc_matrix([[6, 2, 1, 3],
491
+ [2, 5, 2, 4],
492
+ [1, 2, 4, 5],
493
+ [3, 4, 5, 7]])
494
+ A = csc_matrix([[1, 0, 1, 0],
495
+ [0, 1, 1, 1]])
496
+ c = np.array([-2, -3, -3, 1])
497
+ b = -np.array([3, 0])
498
+ trust_radius = 3
499
+ Z, _, Y = projections(A)
500
+ x, info = projected_cg(H, c, Z, Y, b,
501
+ tol=0,
502
+ trust_radius=trust_radius)
503
+ assert_equal(info["stop_cond"], 2)
504
+ assert_equal(info["hits_boundary"], True)
505
+ assert_array_almost_equal(np.linalg.norm(x), trust_radius)
506
+
507
+ def test_negative_curvature_unconstrained(self):
508
+ H = csc_matrix([[1, 2, 1, 3],
509
+ [2, 0, 2, 4],
510
+ [1, 2, 0, 2],
511
+ [3, 4, 2, 0]])
512
+ A = csc_matrix([[1, 0, 1, 0],
513
+ [0, 1, 0, 1]])
514
+ c = np.array([-2, -3, -3, 1])
515
+ b = -np.array([3, 0])
516
+ Z, _, Y = projections(A)
517
+ with pytest.raises(ValueError):
518
+ projected_cg(H, c, Z, Y, b, tol=0)
519
+
520
+ def test_negative_curvature(self):
521
+ H = csc_matrix([[1, 2, 1, 3],
522
+ [2, 0, 2, 4],
523
+ [1, 2, 0, 2],
524
+ [3, 4, 2, 0]])
525
+ A = csc_matrix([[1, 0, 1, 0],
526
+ [0, 1, 0, 1]])
527
+ c = np.array([-2, -3, -3, 1])
528
+ b = -np.array([3, 0])
529
+ Z, _, Y = projections(A)
530
+ trust_radius = 1000
531
+ x, info = projected_cg(H, c, Z, Y, b,
532
+ tol=0,
533
+ trust_radius=trust_radius)
534
+ assert_equal(info["stop_cond"], 3)
535
+ assert_equal(info["hits_boundary"], True)
536
+ assert_array_almost_equal(np.linalg.norm(x), trust_radius)
537
+
538
+ # The box constraints are inactive at the solution but
539
+ # are active during the iterations.
540
+ def test_inactive_box_constraints(self):
541
+ H = csc_matrix([[6, 2, 1, 3],
542
+ [2, 5, 2, 4],
543
+ [1, 2, 4, 5],
544
+ [3, 4, 5, 7]])
545
+ A = csc_matrix([[1, 0, 1, 0],
546
+ [0, 1, 1, 1]])
547
+ c = np.array([-2, -3, -3, 1])
548
+ b = -np.array([3, 0])
549
+ Z, _, Y = projections(A)
550
+ x, info = projected_cg(H, c, Z, Y, b,
551
+ tol=0,
552
+ lb=[0.5, -np.inf,
553
+ -np.inf, -np.inf],
554
+ return_all=True)
555
+ x_kkt, _ = eqp_kktfact(H, c, A, b)
556
+ assert_equal(info["stop_cond"], 1)
557
+ assert_equal(info["hits_boundary"], False)
558
+ assert_array_almost_equal(x, x_kkt)
559
+
560
+ # The box constraints active and the termination is
561
+ # by maximum iterations (infeasible interaction).
562
+ def test_active_box_constraints_maximum_iterations_reached(self):
563
+ H = csc_matrix([[6, 2, 1, 3],
564
+ [2, 5, 2, 4],
565
+ [1, 2, 4, 5],
566
+ [3, 4, 5, 7]])
567
+ A = csc_matrix([[1, 0, 1, 0],
568
+ [0, 1, 1, 1]])
569
+ c = np.array([-2, -3, -3, 1])
570
+ b = -np.array([3, 0])
571
+ Z, _, Y = projections(A)
572
+ x, info = projected_cg(H, c, Z, Y, b,
573
+ tol=0,
574
+ lb=[0.8, -np.inf,
575
+ -np.inf, -np.inf],
576
+ return_all=True)
577
+ assert_equal(info["stop_cond"], 1)
578
+ assert_equal(info["hits_boundary"], True)
579
+ assert_array_almost_equal(A.dot(x), -b)
580
+ assert_array_almost_equal(x[0], 0.8)
581
+
582
+ # The box constraints are active and the termination is
583
+ # because it hits boundary (without infeasible interaction).
584
+ def test_active_box_constraints_hits_boundaries(self):
585
+ H = csc_matrix([[6, 2, 1, 3],
586
+ [2, 5, 2, 4],
587
+ [1, 2, 4, 5],
588
+ [3, 4, 5, 7]])
589
+ A = csc_matrix([[1, 0, 1, 0],
590
+ [0, 1, 1, 1]])
591
+ c = np.array([-2, -3, -3, 1])
592
+ b = -np.array([3, 0])
593
+ trust_radius = 3
594
+ Z, _, Y = projections(A)
595
+ x, info = projected_cg(H, c, Z, Y, b,
596
+ tol=0,
597
+ ub=[np.inf, np.inf, 1.6, np.inf],
598
+ trust_radius=trust_radius,
599
+ return_all=True)
600
+ assert_equal(info["stop_cond"], 2)
601
+ assert_equal(info["hits_boundary"], True)
602
+ assert_array_almost_equal(x[2], 1.6)
603
+
604
+ # The box constraints are active and the termination is
605
+ # because it hits boundary (infeasible interaction).
606
+ def test_active_box_constraints_hits_boundaries_infeasible_iter(self):
607
+ H = csc_matrix([[6, 2, 1, 3],
608
+ [2, 5, 2, 4],
609
+ [1, 2, 4, 5],
610
+ [3, 4, 5, 7]])
611
+ A = csc_matrix([[1, 0, 1, 0],
612
+ [0, 1, 1, 1]])
613
+ c = np.array([-2, -3, -3, 1])
614
+ b = -np.array([3, 0])
615
+ trust_radius = 4
616
+ Z, _, Y = projections(A)
617
+ x, info = projected_cg(H, c, Z, Y, b,
618
+ tol=0,
619
+ ub=[np.inf, 0.1, np.inf, np.inf],
620
+ trust_radius=trust_radius,
621
+ return_all=True)
622
+ assert_equal(info["stop_cond"], 2)
623
+ assert_equal(info["hits_boundary"], True)
624
+ assert_array_almost_equal(x[1], 0.1)
625
+
626
+ # The box constraints are active and the termination is
627
+ # because it hits boundary (no infeasible interaction).
628
+ def test_active_box_constraints_negative_curvature(self):
629
+ H = csc_matrix([[1, 2, 1, 3],
630
+ [2, 0, 2, 4],
631
+ [1, 2, 0, 2],
632
+ [3, 4, 2, 0]])
633
+ A = csc_matrix([[1, 0, 1, 0],
634
+ [0, 1, 0, 1]])
635
+ c = np.array([-2, -3, -3, 1])
636
+ b = -np.array([3, 0])
637
+ Z, _, Y = projections(A)
638
+ trust_radius = 1000
639
+ x, info = projected_cg(H, c, Z, Y, b,
640
+ tol=0,
641
+ ub=[np.inf, np.inf, 100, np.inf],
642
+ trust_radius=trust_radius)
643
+ assert_equal(info["stop_cond"], 3)
644
+ assert_equal(info["hits_boundary"], True)
645
+ assert_array_almost_equal(x[2], 100)
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_report.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from scipy.optimize import minimize, Bounds
3
+
4
def test_gh10880():
    # Regression test for gh-10880: verbose reporting must not crash
    # trust-constr on bound-constrained problems.
    bnds = Bounds(1, 2)
    for verbosity in (2, 3):
        opts = {'maxiter': 1000, 'verbose': verbosity}
        minimize(lambda x: x**2, x0=2., method='trust-constr',
                 bounds=bnds, options=opts)
16
def test_gh12922():
    # Regression test for gh-12922: verbose reporting must not crash
    # trust-constr in the presence of general (nonlinear) constraints.
    def objective(x):
        return np.array([(np.sum((x+1)**4))])

    cons = {'type': 'ineq', 'fun': lambda x: -x[0]**2}
    n = 25
    x0 = np.linspace(-5, 5, n)

    for verbosity in (2, 3):
        opts = {'maxiter': 1000, 'verbose': verbosity}
        minimize(objective, x0=x0, method='trust-constr',
                 constraints=cons, options=opts)
env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tr_interior_point.py ADDED
@@ -0,0 +1,346 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Trust-region interior point method.
2
+
3
+ References
4
+ ----------
5
+ .. [1] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal.
6
+ "An interior point algorithm for large-scale nonlinear
7
+ programming." SIAM Journal on Optimization 9.4 (1999): 877-900.
8
+ .. [2] Byrd, Richard H., Guanghui Liu, and Jorge Nocedal.
9
+ "On the local behavior of an interior point method for
10
+ nonlinear programming." Numerical analysis 1997 (1997): 37-56.
11
+ .. [3] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization"
12
+ Second Edition (2006).
13
+ """
14
+
15
+ import scipy.sparse as sps
16
+ import numpy as np
17
+ from .equality_constrained_sqp import equality_constrained_sqp
18
+ from scipy.sparse.linalg import LinearOperator
19
+
20
+ __all__ = ['tr_interior_point']
21
+
22
+
23
class BarrierSubproblem:
    """
    Barrier optimization problem:
        minimize fun(x) - barrier_parameter*sum(log(s))
        subject to: constr_eq(x) = 0
                    constr_ineq(x) + s = 0

    Wraps the original inequality-constrained problem into an
    equality-constrained one over the augmented variable z = [x, s],
    where ``s`` are the slack variables for the inequality constraints.
    """

    def __init__(self, x0, s0, fun, grad, lagr_hess, n_vars, n_ineq, n_eq,
                 constr, jac, barrier_parameter, tolerance,
                 enforce_feasibility, global_stop_criteria,
                 xtol, fun0, grad0, constr_ineq0, jac_ineq0, constr_eq0,
                 jac_eq0):
        # Store parameters
        self.n_vars = n_vars
        self.x0 = x0
        self.s0 = s0
        self.fun = fun
        self.grad = grad
        self.lagr_hess = lagr_hess
        self.constr = constr
        self.jac = jac
        self.barrier_parameter = barrier_parameter
        self.tolerance = tolerance
        self.n_eq = n_eq
        self.n_ineq = n_ineq
        self.enforce_feasibility = enforce_feasibility
        self.global_stop_criteria = global_stop_criteria
        self.xtol = xtol
        # Precompute barrier function/gradient/constraints/Jacobian at the
        # starting point from the caller-supplied initial evaluations, so
        # no extra function evaluation is needed for iteration zero.
        self.fun0 = self._compute_function(fun0, constr_ineq0, s0)
        self.grad0 = self._compute_gradient(grad0)
        self.constr0 = self._compute_constr(constr_ineq0, constr_eq0, s0)
        self.jac0 = self._compute_jacobian(jac_eq0, jac_ineq0, s0)
        # Set by stop_criteria when the *global* stop test fires, telling
        # the outer loop in tr_interior_point to finish.
        self.terminate = False

    def update(self, barrier_parameter, tolerance):
        """Set barrier parameter and subproblem tolerance for next solve."""
        self.barrier_parameter = barrier_parameter
        self.tolerance = tolerance

    def get_slack(self, z):
        """Return the slack part ``s`` of the augmented vector ``z=[x, s]``."""
        return z[self.n_vars:self.n_vars+self.n_ineq]

    def get_variables(self, z):
        """Return the original variables ``x`` of ``z = [x, s]``."""
        return z[:self.n_vars]

    def function_and_constraints(self, z):
        """Returns barrier function and constraints at given point.

        For z = [x, s], returns barrier function:
            function(z) = fun(x) - barrier_parameter*sum(log(s))
        and barrier constraints:
            constraints(z) = [   constr_eq(x)     ]
                             [ constr_ineq(x) + s ]

        """
        # Get variables and slack variables
        x = self.get_variables(z)
        s = self.get_slack(z)
        # Compute function and constraints
        f = self.fun(x)
        c_eq, c_ineq = self.constr(x)
        # Return objective function and constraints
        return (self._compute_function(f, c_ineq, s),
                self._compute_constr(c_ineq, c_eq, s))

    def _compute_function(self, f, c_ineq, s):
        # Use technique from Nocedal and Wright book, ref [3]_, p.576,
        # to guarantee constraints from `enforce_feasibility`
        # stay feasible along iterations.
        # NOTE: this mutates `s` in place, which also updates the caller's
        # slack portion of `z` for the enforced components.
        s[self.enforce_feasibility] = -c_ineq[self.enforce_feasibility]
        # Guard against non-positive slacks: log is -inf there, making the
        # barrier objective +inf (i.e. the point is rejected).
        log_s = [np.log(s_i) if s_i > 0 else -np.inf for s_i in s]
        # Compute barrier objective function
        return f - self.barrier_parameter*np.sum(log_s)

    def _compute_constr(self, c_ineq, c_eq, s):
        # Compute barrier constraint
        return np.hstack((c_eq,
                          c_ineq + s))

    def scaling(self, z):
        """Returns scaling vector.
        Given by:
            scaling = [ones(n_vars), s]
        """
        s = self.get_slack(z)
        diag_elements = np.hstack((np.ones(self.n_vars), s))

        # Diagonal matrix
        def matvec(vec):
            return diag_elements*vec
        return LinearOperator((self.n_vars+self.n_ineq,
                               self.n_vars+self.n_ineq),
                              matvec)

    def gradient_and_jacobian(self, z):
        """Returns scaled gradient.

        Return scaled gradient:
            gradient = [             grad(x)             ]
                       [ -barrier_parameter*ones(n_ineq) ]
        and scaled Jacobian matrix:
            jacobian = [  jac_eq(x)  0  ]
                       [ jac_ineq(x) S  ]
        Both of them scaled by the previously defined scaling factor.
        """
        # Get variables and slack variables
        x = self.get_variables(z)
        s = self.get_slack(z)
        # Compute first derivatives
        g = self.grad(x)
        J_eq, J_ineq = self.jac(x)
        # Return gradient and Jacobian
        return (self._compute_gradient(g),
                self._compute_jacobian(J_eq, J_ineq, s))

    def _compute_gradient(self, g):
        # Barrier gradient w.r.t. slacks, already scaled by S:
        # S * (-mu/s) = -mu * ones(n_ineq).
        return np.hstack((g, -self.barrier_parameter*np.ones(self.n_ineq)))

    def _compute_jacobian(self, J_eq, J_ineq, s):
        # With no inequality constraints there are no slacks and the
        # barrier Jacobian reduces to the equality-constraint Jacobian.
        if self.n_ineq == 0:
            return J_eq
        else:
            if sps.issparse(J_eq) or sps.issparse(J_ineq):
                # It is expected that J_eq and J_ineq
                # are already `csr_matrix` because of
                # the way ``BoxConstraint``, ``NonlinearConstraint``
                # and ``LinearConstraint`` are defined.
                J_eq = sps.csr_matrix(J_eq)
                J_ineq = sps.csr_matrix(J_ineq)
                return self._assemble_sparse_jacobian(J_eq, J_ineq, s)
            else:
                S = np.diag(s)
                zeros = np.zeros((self.n_eq, self.n_ineq))
                # Convert to matrix
                # NOTE(review): this branch is only reached when neither
                # input is sparse, so these conversions look redundant —
                # kept as a defensive measure.
                if sps.issparse(J_ineq):
                    J_ineq = J_ineq.toarray()
                if sps.issparse(J_eq):
                    J_eq = J_eq.toarray()
                # Concatenate matrices
                return np.block([[J_eq, zeros],
                                 [J_ineq, S]])

    def _assemble_sparse_jacobian(self, J_eq, J_ineq, s):
        """Assemble sparse Jacobian given its components.

        Given ``J_eq``, ``J_ineq`` and ``s`` returns:
            jacobian = [ J_eq,     0     ]
                       [ J_ineq, diag(s) ]

        It is equivalent to:
            sps.bmat([[ J_eq,   None    ],
                      [ J_ineq, diag(s) ]], "csr")
        but significantly more efficient for this
        given structure.
        """
        n_vars, n_ineq, n_eq = self.n_vars, self.n_ineq, self.n_eq
        # Stack both Jacobians; the diag(s) block is then spliced into the
        # CSR arrays directly: each inequality row gains exactly one extra
        # entry at its end, placed in column n_vars + row index.
        J_aux = sps.vstack([J_eq, J_ineq], "csr")
        indptr, indices, data = J_aux.indptr, J_aux.indices, J_aux.data
        # Equality rows keep their lengths; inequality row i grows by i+1
        # accumulated slots.
        new_indptr = indptr + np.hstack((np.zeros(n_eq, dtype=int),
                                         np.arange(n_ineq+1, dtype=int)))
        size = indices.size+n_ineq
        new_indices = np.empty(size)
        new_data = np.empty(size)
        # `mask` marks the last slot of each inequality row, which holds
        # the new diagonal slack entry.
        mask = np.full(size, False, bool)
        mask[new_indptr[-n_ineq:]-1] = True
        new_indices[mask] = n_vars+np.arange(n_ineq)
        new_indices[~mask] = indices
        new_data[mask] = s
        new_data[~mask] = data
        J = sps.csr_matrix((new_data, new_indices, new_indptr),
                           (n_eq + n_ineq, n_vars + n_ineq))
        return J

    def lagrangian_hessian_x(self, z, v):
        """Returns Lagrangian Hessian (in relation to `x`) -> Hx"""
        x = self.get_variables(z)
        # Get lagrange multipliers related to nonlinear equality constraints
        v_eq = v[:self.n_eq]
        # Get lagrange multipliers related to nonlinear ineq. constraints
        v_ineq = v[self.n_eq:self.n_eq+self.n_ineq]
        lagr_hess = self.lagr_hess
        return lagr_hess(x, v_eq, v_ineq)

    def lagrangian_hessian_s(self, z, v):
        """Returns scaled Lagrangian Hessian (in relation to`s`) -> S Hs S"""
        s = self.get_slack(z)
        # Using the primal formulation:
        #     S Hs S = diag(s)*diag(barrier_parameter/s**2)*diag(s).
        # Reference [1]_ p. 882, formula (3.1)
        primal = self.barrier_parameter
        # Using the primal-dual formulation
        #     S Hs S = diag(s)*diag(v/s)*diag(s)
        # Reference [1]_ p. 883, formula (3.11)
        primal_dual = v[-self.n_ineq:]*s
        # Uses the primal-dual formulation for
        # positives values of v_ineq, and primal
        # formulation for the remaining ones.
        return np.where(v[-self.n_ineq:] > 0, primal_dual, primal)

    def lagrangian_hessian(self, z, v):
        """Returns scaled Lagrangian Hessian"""
        # Compute Hessian in relation to x and s
        Hx = self.lagrangian_hessian_x(z, v)
        if self.n_ineq > 0:
            S_Hs_S = self.lagrangian_hessian_s(z, v)

        # The scaled Lagrangian Hessian is:
        #     [ Hx    0    ]
        #     [ 0   S Hs S ]
        def matvec(vec):
            vec_x = self.get_variables(vec)
            vec_s = self.get_slack(vec)
            if self.n_ineq > 0:
                return np.hstack((Hx.dot(vec_x), S_Hs_S*vec_s))
            else:
                return Hx.dot(vec_x)
        return LinearOperator((self.n_vars+self.n_ineq,
                               self.n_vars+self.n_ineq),
                              matvec)

    def stop_criteria(self, state, z, last_iteration_failed,
                      optimality, constr_violation,
                      trust_radius, penalty, cg_info):
        """Stop criteria to the barrier problem.
        The criteria here proposed is similar to formula (2.3)
        from [1]_, p.879.
        """
        x = self.get_variables(z)
        # First check the user's global criteria; if it fires, flag
        # `terminate` so the outer interior-point loop stops as well.
        if self.global_stop_criteria(state, x,
                                     last_iteration_failed,
                                     trust_radius, penalty,
                                     cg_info,
                                     self.barrier_parameter,
                                     self.tolerance):
            self.terminate = True
            return True
        else:
            # Otherwise stop this barrier subproblem once optimality and
            # feasibility are below the (current) subproblem tolerance,
            # or the trust region has shrunk below xtol.
            g_cond = (optimality < self.tolerance and
                      constr_violation < self.tolerance)
            x_cond = trust_radius < self.xtol
            return g_cond or x_cond
264
+
265
+
266
def tr_interior_point(fun, grad, lagr_hess, n_vars, n_ineq, n_eq,
                      constr, jac, x0, fun0, grad0,
                      constr_ineq0, jac_ineq0, constr_eq0,
                      jac_eq0, stop_criteria,
                      enforce_feasibility, xtol, state,
                      initial_barrier_parameter,
                      initial_tolerance,
                      initial_penalty,
                      initial_trust_radius,
                      factorization_method):
    """Trust-region interior points method.

    Solve problem:
        minimize fun(x)
        subject to: constr_ineq(x) <= 0
                    constr_eq(x) = 0
    using trust-region interior point method described in [1]_.

    Solves a sequence of equality-constrained barrier subproblems
    (via ``equality_constrained_sqp``), decreasing the barrier
    parameter and subproblem tolerance between solves, until the
    global ``stop_criteria`` fires.
    """
    # BOUNDARY_PARAMETER controls the decrease on the slack
    # variables. Represents ``tau`` from [1]_ p.885, formula (3.18).
    BOUNDARY_PARAMETER = 0.995
    # BARRIER_DECAY_RATIO controls the decay of the barrier parameter
    # and of the subproblem tolerance. Represents ``theta`` from [1]_ p.879.
    BARRIER_DECAY_RATIO = 0.2
    # TRUST_ENLARGEMENT controls the enlargement on trust radius
    # after each iteration
    TRUST_ENLARGEMENT = 5

    # Default enforce_feasibility: no inequality is forced to stay feasible.
    if enforce_feasibility is None:
        enforce_feasibility = np.zeros(n_ineq, bool)
    # Initial Values
    barrier_parameter = initial_barrier_parameter
    tolerance = initial_tolerance
    trust_radius = initial_trust_radius
    # Define initial value for the slack variables: keep them at least 1,
    # or 1.5x the initial inequality violation if that is larger.
    s0 = np.maximum(-1.5*constr_ineq0, np.ones(n_ineq))
    # Define barrier subproblem
    subprob = BarrierSubproblem(
        x0, s0, fun, grad, lagr_hess, n_vars, n_ineq, n_eq, constr, jac,
        barrier_parameter, tolerance, enforce_feasibility,
        stop_criteria, xtol, fun0, grad0, constr_ineq0, jac_ineq0,
        constr_eq0, jac_eq0)
    # Define initial parameter for the first iteration.
    z = np.hstack((x0, s0))
    fun0_subprob, constr0_subprob = subprob.fun0, subprob.constr0
    grad0_subprob, jac0_subprob = subprob.grad0, subprob.jac0
    # Define trust region bounds: x is unbounded, while scaled slack steps
    # may decrease by at most BOUNDARY_PARAMETER (keeps s > 0).
    trust_lb = np.hstack((np.full(subprob.n_vars, -np.inf),
                          np.full(subprob.n_ineq, -BOUNDARY_PARAMETER)))
    trust_ub = np.full(subprob.n_vars+subprob.n_ineq, np.inf)

    # Solves a sequence of barrier problems
    while True:
        # Solve SQP subproblem
        z, state = equality_constrained_sqp(
            subprob.function_and_constraints,
            subprob.gradient_and_jacobian,
            subprob.lagrangian_hessian,
            z, fun0_subprob, grad0_subprob,
            constr0_subprob, jac0_subprob, subprob.stop_criteria,
            state, initial_penalty, trust_radius,
            factorization_method, trust_lb, trust_ub, subprob.scaling)
        # `terminate` is set by BarrierSubproblem.stop_criteria when the
        # global (outer) stop test is satisfied.
        if subprob.terminate:
            break
        # Update parameters: re-enlarge the trust radius for the next
        # barrier subproblem, but never below its initial value.
        trust_radius = max(initial_trust_radius,
                           TRUST_ENLARGEMENT*state.tr_radius)
        # TODO: Use more advanced strategies from [2]_
        # to update these parameters.
        barrier_parameter *= BARRIER_DECAY_RATIO
        tolerance *= BARRIER_DECAY_RATIO
        # Update Barrier Problem
        subprob.update(barrier_parameter, tolerance)
        # Compute initial values for next iteration (must be re-evaluated
        # because the barrier parameter just changed).
        fun0_subprob, constr0_subprob = subprob.function_and_constraints(z)
        grad0_subprob, jac0_subprob = subprob.gradient_and_jacobian(z)

    # Get x and s: return only the original variables, dropping slacks.
    x = subprob.get_variables(z)
    return x, state
+ return x, state
env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (185 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__differential_evolution.cpython-310.pyc ADDED
Binary file (49.6 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__dual_annealing.cpython-310.pyc ADDED
Binary file (13.2 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__linprog_clean_inputs.cpython-310.pyc ADDED
Binary file (8.04 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__numdiff.cpython-310.pyc ADDED
Binary file (24.4 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__remove_redundancy.cpython-310.pyc ADDED
Binary file (7.6 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__root.cpython-310.pyc ADDED
Binary file (4.97 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__shgo.cpython-310.pyc ADDED
Binary file (40.9 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__spectral.cpython-310.pyc ADDED
Binary file (6.99 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_bracket.cpython-310.pyc ADDED
Binary file (23.3 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_chandrupatla.cpython-310.pyc ADDED
Binary file (26.6 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_cobyla.cpython-310.pyc ADDED
Binary file (7.24 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_constraint_conversion.cpython-310.pyc ADDED
Binary file (11.7 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_constraints.cpython-310.pyc ADDED
Binary file (9.05 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_cython_optimize.cpython-310.pyc ADDED
Binary file (2.34 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_differentiable_functions.cpython-310.pyc ADDED
Binary file (16.8 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_differentiate.cpython-310.pyc ADDED
Binary file (15.7 kB). View file