diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/bvls.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/bvls.py new file mode 100644 index 0000000000000000000000000000000000000000..8f34ead4a1fc4edbb3c2ab50a204aa9a3cc21cff --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/bvls.py @@ -0,0 +1,183 @@ +"""Bounded-variable least-squares algorithm.""" +import numpy as np +from numpy.linalg import norm, lstsq +from scipy.optimize import OptimizeResult + +from .common import print_header_linear, print_iteration_linear + + +def compute_kkt_optimality(g, on_bound): + """Compute the maximum violation of KKT conditions.""" + g_kkt = g * on_bound + free_set = on_bound == 0 + g_kkt[free_set] = np.abs(g[free_set]) + return np.max(g_kkt) + + +def bvls(A, b, x_lsq, lb, ub, tol, max_iter, verbose, rcond=None): + m, n = A.shape + + x = x_lsq.copy() + on_bound = np.zeros(n) + + mask = x <= lb + x[mask] = lb[mask] + on_bound[mask] = -1 + + mask = x >= ub + x[mask] = ub[mask] + on_bound[mask] = 1 + + free_set = on_bound == 0 + active_set = ~free_set + free_set, = np.nonzero(free_set) + + r = A.dot(x) - b + cost = 0.5 * np.dot(r, r) + initial_cost = cost + g = A.T.dot(r) + + cost_change = None + step_norm = None + iteration = 0 + + if verbose == 2: + print_header_linear() + + # This is the initialization loop. The requirement is that the + # least-squares solution on free variables is feasible before BVLS starts. + # One possible initialization is to set all variables to lower or upper + # bounds, but many iterations may be required from this state later on. + # The implemented ad-hoc procedure which intuitively should give a better + # initial state: find the least-squares solution on current free variables, + # if its feasible then stop, otherwise, set violating variables to + # corresponding bounds and continue on the reduced set of free variables. 
+ + while free_set.size > 0: + if verbose == 2: + optimality = compute_kkt_optimality(g, on_bound) + print_iteration_linear(iteration, cost, cost_change, step_norm, + optimality) + + iteration += 1 + x_free_old = x[free_set].copy() + + A_free = A[:, free_set] + b_free = b - A.dot(x * active_set) + z = lstsq(A_free, b_free, rcond=rcond)[0] + + lbv = z < lb[free_set] + ubv = z > ub[free_set] + v = lbv | ubv + + if np.any(lbv): + ind = free_set[lbv] + x[ind] = lb[ind] + active_set[ind] = True + on_bound[ind] = -1 + + if np.any(ubv): + ind = free_set[ubv] + x[ind] = ub[ind] + active_set[ind] = True + on_bound[ind] = 1 + + ind = free_set[~v] + x[ind] = z[~v] + + r = A.dot(x) - b + cost_new = 0.5 * np.dot(r, r) + cost_change = cost - cost_new + cost = cost_new + g = A.T.dot(r) + step_norm = norm(x[free_set] - x_free_old) + + if np.any(v): + free_set = free_set[~v] + else: + break + + if max_iter is None: + max_iter = n + max_iter += iteration + + termination_status = None + + # Main BVLS loop. 
+ + optimality = compute_kkt_optimality(g, on_bound) + for iteration in range(iteration, max_iter): # BVLS Loop A + if verbose == 2: + print_iteration_linear(iteration, cost, cost_change, + step_norm, optimality) + + if optimality < tol: + termination_status = 1 + + if termination_status is not None: + break + + move_to_free = np.argmax(g * on_bound) + on_bound[move_to_free] = 0 + + while True: # BVLS Loop B + + free_set = on_bound == 0 + active_set = ~free_set + free_set, = np.nonzero(free_set) + + x_free = x[free_set] + x_free_old = x_free.copy() + lb_free = lb[free_set] + ub_free = ub[free_set] + + A_free = A[:, free_set] + b_free = b - A.dot(x * active_set) + z = lstsq(A_free, b_free, rcond=rcond)[0] + + lbv, = np.nonzero(z < lb_free) + ubv, = np.nonzero(z > ub_free) + v = np.hstack((lbv, ubv)) + + if v.size > 0: + alphas = np.hstack(( + lb_free[lbv] - x_free[lbv], + ub_free[ubv] - x_free[ubv])) / (z[v] - x_free[v]) + + i = np.argmin(alphas) + i_free = v[i] + alpha = alphas[i] + + x_free *= 1 - alpha + x_free += alpha * z + x[free_set] = x_free + + if i < lbv.size: + on_bound[free_set[i_free]] = -1 + else: + on_bound[free_set[i_free]] = 1 + else: + x_free = z + x[free_set] = x_free + break + + step_norm = norm(x_free - x_free_old) + + r = A.dot(x) - b + cost_new = 0.5 * np.dot(r, r) + cost_change = cost - cost_new + + if cost_change < tol * cost: + termination_status = 2 + cost = cost_new + + g = A.T.dot(r) + optimality = compute_kkt_optimality(g, on_bound) + + if termination_status is None: + termination_status = 0 + + return OptimizeResult( + x=x, fun=r, cost=cost, optimality=optimality, active_mask=on_bound, + nit=iteration + 1, status=termination_status, + initial_cost=initial_cost) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/givens_elimination.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/givens_elimination.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 
0000000000000000000000000000000000000000..fe54821d9e631ec5a1c8da8b3057ad997af8de00 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_lsq/givens_elimination.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__init__.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b342a6ab1eac999765a861df387cc9fb390d6f95 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/_complex.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/_complex.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..25741e7825d3bcd6cf703188bb97ba45f4af7b70 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/_complex.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/_vertex.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/_vertex.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d7b671edecf0a0e8902bfcd47cf9099f07ec6a41 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/_vertex.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/_complex.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/_complex.py new file mode 100644 index 0000000000000000000000000000000000000000..e725c00cc6d238008afb333b1cee9e3fc5400caa --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/_complex.py @@ -0,0 +1,1225 @@ +"""Base classes for low memory simplicial complex structures.""" +import copy +import logging +import itertools +import decimal +from functools import cache + +import numpy + +from ._vertex import (VertexCacheField, VertexCacheIndex) + + +class Complex: + """ + Base class for a simplicial complex described as a cache of vertices + together with their connections. + + Important methods: + Domain triangulation: + Complex.triangulate, Complex.split_generation + Triangulating arbitrary points (must be traingulable, + may exist outside domain): + Complex.triangulate(sample_set) + Converting another simplicial complex structure data type to the + structure used in Complex (ex. OBJ wavefront) + Complex.convert(datatype, data) + + Important objects: + HC.V: The cache of vertices and their connection + HC.H: Storage structure of all vertex groups + + Parameters + ---------- + dim : int + Spatial dimensionality of the complex R^dim + domain : list of tuples, optional + The bounds [x_l, x_u]^dim of the hyperrectangle space + ex. 
The default domain is the hyperrectangle [0, 1]^dim + Note: The domain must be convex, non-convex spaces can be cut + away from this domain using the non-linear + g_cons functions to define any arbitrary domain + (these domains may also be disconnected from each other) + sfield : + A scalar function defined in the associated domain f: R^dim --> R + sfield_args : tuple + Additional arguments to be passed to `sfield` + vfield : + A scalar function defined in the associated domain + f: R^dim --> R^m + (for example a gradient function of the scalar field) + vfield_args : tuple + Additional arguments to be passed to vfield + symmetry : None or list + Specify if the objective function contains symmetric variables. + The search space (and therefore performance) is decreased by up to + O(n!) times in the fully symmetric case. + + E.g. f(x) = (x_1 + x_2 + x_3) + (x_4)**2 + (x_5)**2 + (x_6)**2 + + In this equation x_2 and x_3 are symmetric to x_1, while x_5 and + x_6 are symmetric to x_4, this can be specified to the solver as: + + symmetry = [0, # Variable 1 + 0, # symmetric to variable 1 + 0, # symmetric to variable 1 + 3, # Variable 4 + 3, # symmetric to variable 4 + 3, # symmetric to variable 4 + ] + + constraints : dict or sequence of dict, optional + Constraints definition. + Function(s) ``R**n`` in the form:: + + g(x) <= 0 applied as g : R^n -> R^m + h(x) == 0 applied as h : R^n -> R^p + + Each constraint is defined in a dictionary with fields: + + type : str + Constraint type: 'eq' for equality, 'ineq' for inequality. + fun : callable + The function defining the constraint. + jac : callable, optional + The Jacobian of `fun` (only for SLSQP). + args : sequence, optional + Extra arguments to be passed to the function and Jacobian. + + Equality constraint means that the constraint function result is to + be zero whereas inequality means that it is to be + non-negative.constraints : dict or sequence of dict, optional + Constraints definition. 
+ Function(s) ``R**n`` in the form:: + + g(x) <= 0 applied as g : R^n -> R^m + h(x) == 0 applied as h : R^n -> R^p + + Each constraint is defined in a dictionary with fields: + + type : str + Constraint type: 'eq' for equality, 'ineq' for inequality. + fun : callable + The function defining the constraint. + jac : callable, optional + The Jacobian of `fun` (unused). + args : sequence, optional + Extra arguments to be passed to the function and Jacobian. + + Equality constraint means that the constraint function result is to + be zero whereas inequality means that it is to be non-negative. + + workers : int optional + Uses `multiprocessing.Pool `) to compute the field + functions in parallel. + """ + def __init__(self, dim, domain=None, sfield=None, sfield_args=(), + symmetry=None, constraints=None, workers=1): + self.dim = dim + + # Domains + self.domain = domain + if domain is None: + self.bounds = [(0.0, 1.0), ] * dim + else: + self.bounds = domain + self.symmetry = symmetry + # here in init to avoid if checks + + # Field functions + self.sfield = sfield + self.sfield_args = sfield_args + + # Process constraints + # Constraints + # Process constraint dict sequence: + if constraints is not None: + self.min_cons = constraints + self.g_cons = [] + self.g_args = [] + if not isinstance(constraints, (tuple, list)): + constraints = (constraints,) + + for cons in constraints: + if cons['type'] in ('ineq'): + self.g_cons.append(cons['fun']) + try: + self.g_args.append(cons['args']) + except KeyError: + self.g_args.append(()) + self.g_cons = tuple(self.g_cons) + self.g_args = tuple(self.g_args) + else: + self.g_cons = None + self.g_args = None + + # Homology properties + self.gen = 0 + self.perm_cycle = 0 + + # Every cell is stored in a list of its generation, + # ex. the initial cell is stored in self.H[0] + # 1st get new cells are stored in self.H[1] etc. 
+ # When a cell is sub-generated it is removed from this list + + self.H = [] # Storage structure of vertex groups + + # Cache of all vertices + if (sfield is not None) or (self.g_cons is not None): + # Initiate a vertex cache and an associated field cache, note that + # the field case is always initiated inside the vertex cache if an + # associated field scalar field is defined: + if sfield is not None: + self.V = VertexCacheField(field=sfield, field_args=sfield_args, + g_cons=self.g_cons, + g_cons_args=self.g_args, + workers=workers) + elif self.g_cons is not None: + self.V = VertexCacheField(field=sfield, field_args=sfield_args, + g_cons=self.g_cons, + g_cons_args=self.g_args, + workers=workers) + else: + self.V = VertexCacheIndex() + + self.V_non_symm = [] # List of non-symmetric vertices + + def __call__(self): + return self.H + + # %% Triangulation methods + def cyclic_product(self, bounds, origin, supremum, centroid=True): + """Generate initial triangulation using cyclic product""" + # Define current hyperrectangle + vot = tuple(origin) + vut = tuple(supremum) # Hyperrectangle supremum + self.V[vot] + vo = self.V[vot] + yield vo.x + self.V[vut].connect(self.V[vot]) + yield vut + # Cyclic group approach with second x_l --- x_u operation. + + # These containers store the "lower" and "upper" vertices + # corresponding to the origin or supremum of every C2 group. + # It has the structure of `dim` times embedded lists each containing + # these vertices as the entire complex grows. Bounds[0] has to be done + # outside the loops before we have symmetric containers. 
+ # NOTE: This means that bounds[0][1] must always exist + C0x = [[self.V[vot]]] + a_vo = copy.copy(list(origin)) + a_vo[0] = vut[0] # Update aN Origin + a_vo = self.V[tuple(a_vo)] + # self.V[vot].connect(self.V[tuple(a_vo)]) + self.V[vot].connect(a_vo) + yield a_vo.x + C1x = [[a_vo]] + # C1x = [[self.V[tuple(a_vo)]]] + ab_C = [] # Container for a + b operations + + # Loop over remaining bounds + for i, x in enumerate(bounds[1:]): + # Update lower and upper containers + C0x.append([]) + C1x.append([]) + # try to access a second bound (if not, C1 is symmetric) + try: + # Early try so that we don't have to copy the cache before + # moving on to next C1/C2: Try to add the operation of a new + # C2 product by accessing the upper bound + x[1] + # Copy lists for iteration + cC0x = [x[:] for x in C0x[:i + 1]] + cC1x = [x[:] for x in C1x[:i + 1]] + for j, (VL, VU) in enumerate(zip(cC0x, cC1x)): + for k, (vl, vu) in enumerate(zip(VL, VU)): + # Build aN vertices for each lower-upper pair in N: + a_vl = list(vl.x) + a_vu = list(vu.x) + a_vl[i + 1] = vut[i + 1] + a_vu[i + 1] = vut[i + 1] + a_vl = self.V[tuple(a_vl)] + + # Connect vertices in N to corresponding vertices + # in aN: + vl.connect(a_vl) + + yield a_vl.x + + a_vu = self.V[tuple(a_vu)] + # Connect vertices in N to corresponding vertices + # in aN: + vu.connect(a_vu) + + # Connect new vertex pair in aN: + a_vl.connect(a_vu) + + # Connect lower pair to upper (triangulation + # operation of a + b (two arbitrary operations): + vl.connect(a_vu) + ab_C.append((vl, a_vu)) + + # Update the containers + C0x[i + 1].append(vl) + C0x[i + 1].append(vu) + C1x[i + 1].append(a_vl) + C1x[i + 1].append(a_vu) + + # Update old containers + C0x[j].append(a_vl) + C1x[j].append(a_vu) + + # Yield new points + yield a_vu.x + + # Try to connect aN lower source of previous a + b + # operation with a aN vertex + ab_Cc = copy.copy(ab_C) + + for vp in ab_Cc: + b_v = list(vp[0].x) + ab_v = list(vp[1].x) + b_v[i + 1] = vut[i + 1] + ab_v[i + 1] = 
vut[i + 1] + b_v = self.V[tuple(b_v)] # b + vl + ab_v = self.V[tuple(ab_v)] # b + a_vl + # Note o---o is already connected + vp[0].connect(ab_v) # o-s + b_v.connect(ab_v) # s-s + + # Add new list of cross pairs + ab_C.append((vp[0], ab_v)) + ab_C.append((b_v, ab_v)) + + except IndexError: + cC0x = C0x[i] + cC1x = C1x[i] + VL, VU = cC0x, cC1x + for k, (vl, vu) in enumerate(zip(VL, VU)): + # Build aN vertices for each lower-upper pair in N: + a_vu = list(vu.x) + a_vu[i + 1] = vut[i + 1] + # Connect vertices in N to corresponding vertices + # in aN: + a_vu = self.V[tuple(a_vu)] + # Connect vertices in N to corresponding vertices + # in aN: + vu.connect(a_vu) + # Connect new vertex pair in aN: + # a_vl.connect(a_vu) + # Connect lower pair to upper (triangulation + # operation of a + b (two arbitrary operations): + vl.connect(a_vu) + ab_C.append((vl, a_vu)) + C0x[i + 1].append(vu) + C1x[i + 1].append(a_vu) + # Yield new points + a_vu.connect(self.V[vut]) + yield a_vu.x + ab_Cc = copy.copy(ab_C) + for vp in ab_Cc: + if vp[1].x[i] == vut[i]: + ab_v = list(vp[1].x) + ab_v[i + 1] = vut[i + 1] + ab_v = self.V[tuple(ab_v)] # b + a_vl + # Note o---o is already connected + vp[0].connect(ab_v) # o-s + + # Add new list of cross pairs + ab_C.append((vp[0], ab_v)) + + # Clean class trash + try: + del C0x + del cC0x + del C1x + del cC1x + del ab_C + del ab_Cc + except UnboundLocalError: + pass + + # Extra yield to ensure that the triangulation is completed + if centroid: + vo = self.V[vot] + vs = self.V[vut] + # Disconnect the origin and supremum + vo.disconnect(vs) + # Build centroid + vc = self.split_edge(vot, vut) + for v in vo.nn: + v.connect(vc) + yield vc.x + return vc.x + else: + yield vut + return vut + + def triangulate(self, n=None, symmetry=None, centroid=True, + printout=False): + """ + Triangulate the initial domain, if n is not None then a limited number + of points will be generated + + Parameters + ---------- + n : int, Number of points to be sampled. 
+ symmetry : + + Ex. Dictionary/hashtable + f(x) = (x_1 + x_2 + x_3) + (x_4)**2 + (x_5)**2 + (x_6)**2 + + symmetry = symmetry[0]: 0, # Variable 1 + symmetry[1]: 0, # symmetric to variable 1 + symmetry[2]: 0, # symmetric to variable 1 + symmetry[3]: 3, # Variable 4 + symmetry[4]: 3, # symmetric to variable 4 + symmetry[5]: 3, # symmetric to variable 4 + } + centroid : bool, if True add a central point to the hypercube + printout : bool, if True print out results + + NOTES: + ------ + Rather than using the combinatorial algorithm to connect vertices we + make the following observation: + + The bound pairs are similar a C2 cyclic group and the structure is + formed using the cartesian product: + + H = C2 x C2 x C2 ... x C2 (dim times) + + So construct any normal subgroup N and consider H/N first, we connect + all vertices within N (ex. N is C2 (the first dimension), then we move + to a left coset aN (an operation moving around the defined H/N group by + for example moving from the lower bound in C2 (dimension 2) to the + higher bound in C2. During this operation connection all the vertices. + Now repeat the N connections. Note that these elements can be connected + in parallel. 
+ """ + # Inherit class arguments + if symmetry is None: + symmetry = self.symmetry + # Build origin and supremum vectors + origin = [i[0] for i in self.bounds] + self.origin = origin + supremum = [i[1] for i in self.bounds] + + self.supremum = supremum + + if symmetry is None: + cbounds = self.bounds + else: + cbounds = copy.copy(self.bounds) + for i, j in enumerate(symmetry): + if i is not j: + # pop second entry on second symmetry vars + cbounds[i] = [self.bounds[symmetry[i]][0]] + # Sole (first) entry is the sup value and there is no + # origin: + cbounds[i] = [self.bounds[symmetry[i]][1]] + if (self.bounds[symmetry[i]] is not + self.bounds[symmetry[j]]): + logging.warning(f"Variable {i} was specified as " + f"symmetetric to variable {j}, however" + f", the bounds {i} =" + f" {self.bounds[symmetry[i]]} and {j}" + f" =" + f" {self.bounds[symmetry[j]]} do not " + f"match, the mismatch was ignored in " + f"the initial triangulation.") + cbounds[i] = self.bounds[symmetry[j]] + + if n is None: + # Build generator + self.cp = self.cyclic_product(cbounds, origin, supremum, centroid) + for i in self.cp: + i + + try: + self.triangulated_vectors.append((tuple(self.origin), + tuple(self.supremum))) + except (AttributeError, KeyError): + self.triangulated_vectors = [(tuple(self.origin), + tuple(self.supremum))] + + else: + # Check if generator already exists + try: + self.cp + except (AttributeError, KeyError): + self.cp = self.cyclic_product(cbounds, origin, supremum, + centroid) + + try: + while len(self.V.cache) < n: + next(self.cp) + except StopIteration: + try: + self.triangulated_vectors.append((tuple(self.origin), + tuple(self.supremum))) + except (AttributeError, KeyError): + self.triangulated_vectors = [(tuple(self.origin), + tuple(self.supremum))] + + if printout: + # for v in self.C0(): + # v.print_out() + for v in self.V.cache: + self.V[v].print_out() + + return + + def refine(self, n=1): + if n is None: + try: + self.triangulated_vectors + self.refine_all() + 
return + except AttributeError as ae: + if str(ae) == "'Complex' object has no attribute " \ + "'triangulated_vectors'": + self.triangulate(symmetry=self.symmetry) + return + else: + raise + + nt = len(self.V.cache) + n # Target number of total vertices + # In the outer while loop we iterate until we have added an extra `n` + # vertices to the complex: + while len(self.V.cache) < nt: # while loop 1 + try: # try 1 + # Try to access triangulated_vectors, this should only be + # defined if an initial triangulation has already been + # performed: + self.triangulated_vectors + # Try a usual iteration of the current generator, if it + # does not exist or is exhausted then produce a new generator + try: # try 2 + next(self.rls) + except (AttributeError, StopIteration, KeyError): + vp = self.triangulated_vectors[0] + self.rls = self.refine_local_space(*vp, bounds=self.bounds) + next(self.rls) + + except (AttributeError, KeyError): + # If an initial triangulation has not been completed, then + # we start/continue the initial triangulation targeting `nt` + # vertices, if nt is greater than the initial number of + # vertices then the `refine` routine will move back to try 1. + self.triangulate(nt, self.symmetry) + return + + def refine_all(self, centroids=True): + """Refine the entire domain of the current complex.""" + try: + self.triangulated_vectors + tvs = copy.copy(self.triangulated_vectors) + for i, vp in enumerate(tvs): + self.rls = self.refine_local_space(*vp, bounds=self.bounds) + for i in self.rls: + i + except AttributeError as ae: + if str(ae) == "'Complex' object has no attribute " \ + "'triangulated_vectors'": + self.triangulate(symmetry=self.symmetry, centroid=centroids) + else: + raise + + # This adds a centroid to every new sub-domain generated and defined + # by self.triangulated_vectors, in addition the vertices ! 
to complete + # the triangulation + return + + def refine_local_space(self, origin, supremum, bounds, centroid=1): + # Copy for later removal + origin_c = copy.copy(origin) + supremum_c = copy.copy(supremum) + + # Initiate local variables redefined in later inner `for` loop: + vl, vu, a_vu = None, None, None + + # Change the vector orientation so that it is only increasing + s_ov = list(origin) + s_origin = list(origin) + s_sv = list(supremum) + s_supremum = list(supremum) + for i, vi in enumerate(s_origin): + if s_ov[i] > s_sv[i]: + s_origin[i] = s_sv[i] + s_supremum[i] = s_ov[i] + + vot = tuple(s_origin) + vut = tuple(s_supremum) # Hyperrectangle supremum + + vo = self.V[vot] # initiate if doesn't exist yet + vs = self.V[vut] + # Start by finding the old centroid of the new space: + vco = self.split_edge(vo.x, vs.x) # Split in case not centroid arg + + # Find set of extreme vertices in current local space + sup_set = copy.copy(vco.nn) + # Cyclic group approach with second x_l --- x_u operation. + + # These containers store the "lower" and "upper" vertices + # corresponding to the origin or supremum of every C2 group. + # It has the structure of `dim` times embedded lists each containing + # these vertices as the entire complex grows. Bounds[0] has to be done + # outside the loops before we have symmetric containers. 
+ # NOTE: This means that bounds[0][1] must always exist + + a_vl = copy.copy(list(vot)) + a_vl[0] = vut[0] # Update aN Origin + if tuple(a_vl) not in self.V.cache: + vo = self.V[vot] # initiate if doesn't exist yet + vs = self.V[vut] + # Start by finding the old centroid of the new space: + vco = self.split_edge(vo.x, vs.x) # Split in case not centroid arg + + # Find set of extreme vertices in current local space + sup_set = copy.copy(vco.nn) + a_vl = copy.copy(list(vot)) + a_vl[0] = vut[0] # Update aN Origin + a_vl = self.V[tuple(a_vl)] + else: + a_vl = self.V[tuple(a_vl)] + + c_v = self.split_edge(vo.x, a_vl.x) + c_v.connect(vco) + yield c_v.x + Cox = [[vo]] + Ccx = [[c_v]] + Cux = [[a_vl]] + ab_C = [] # Container for a + b operations + s_ab_C = [] # Container for symmetric a + b operations + + # Loop over remaining bounds + for i, x in enumerate(bounds[1:]): + # Update lower and upper containers + Cox.append([]) + Ccx.append([]) + Cux.append([]) + # try to access a second bound (if not, C1 is symmetric) + try: + t_a_vl = list(vot) + t_a_vl[i + 1] = vut[i + 1] + + # New: lists are used anyway, so copy all + # %% + # Copy lists for iteration + cCox = [x[:] for x in Cox[:i + 1]] + cCcx = [x[:] for x in Ccx[:i + 1]] + cCux = [x[:] for x in Cux[:i + 1]] + # Try to connect aN lower source of previous a + b + # operation with a aN vertex + ab_Cc = copy.copy(ab_C) # NOTE: We append ab_C in the + # (VL, VC, VU) for-loop, but we use the copy of the list in the + # ab_Cc for-loop. 
+ s_ab_Cc = copy.copy(s_ab_C) + + # Early try so that we don't have to copy the cache before + # moving on to next C1/C2: Try to add the operation of a new + # C2 product by accessing the upper bound + if tuple(t_a_vl) not in self.V.cache: + # Raise error to continue symmetric refine + raise IndexError + t_a_vu = list(vut) + t_a_vu[i + 1] = vut[i + 1] + if tuple(t_a_vu) not in self.V.cache: + # Raise error to continue symmetric refine: + raise IndexError + + for vectors in s_ab_Cc: + # s_ab_C.append([c_vc, vl, vu, a_vu]) + bc_vc = list(vectors[0].x) + b_vl = list(vectors[1].x) + b_vu = list(vectors[2].x) + ba_vu = list(vectors[3].x) + + bc_vc[i + 1] = vut[i + 1] + b_vl[i + 1] = vut[i + 1] + b_vu[i + 1] = vut[i + 1] + ba_vu[i + 1] = vut[i + 1] + + bc_vc = self.V[tuple(bc_vc)] + bc_vc.connect(vco) # NOTE: Unneeded? + yield bc_vc + + # Split to centre, call this centre group "d = 0.5*a" + d_bc_vc = self.split_edge(vectors[0].x, bc_vc.x) + d_bc_vc.connect(bc_vc) + d_bc_vc.connect(vectors[1]) # Connect all to centroid + d_bc_vc.connect(vectors[2]) # Connect all to centroid + d_bc_vc.connect(vectors[3]) # Connect all to centroid + yield d_bc_vc.x + b_vl = self.V[tuple(b_vl)] + bc_vc.connect(b_vl) # Connect aN cross pairs + d_bc_vc.connect(b_vl) # Connect all to centroid + + yield b_vl + b_vu = self.V[tuple(b_vu)] + bc_vc.connect(b_vu) # Connect aN cross pairs + d_bc_vc.connect(b_vu) # Connect all to centroid + + b_vl_c = self.split_edge(b_vu.x, b_vl.x) + bc_vc.connect(b_vl_c) + + yield b_vu + ba_vu = self.V[tuple(ba_vu)] + bc_vc.connect(ba_vu) # Connect aN cross pairs + d_bc_vc.connect(ba_vu) # Connect all to centroid + + # Split the a + b edge of the initial triangulation: + os_v = self.split_edge(vectors[1].x, ba_vu.x) # o-s + ss_v = self.split_edge(b_vl.x, ba_vu.x) # s-s + b_vu_c = self.split_edge(b_vu.x, ba_vu.x) + bc_vc.connect(b_vu_c) + yield os_v.x # often equal to vco, but not always + yield ss_v.x # often equal to bc_vu, but not always + yield ba_vu + # Split 
remaining to centre, call this centre group + # "d = 0.5*a" + d_bc_vc = self.split_edge(vectors[0].x, bc_vc.x) + d_bc_vc.connect(vco) # NOTE: Unneeded? + yield d_bc_vc.x + d_b_vl = self.split_edge(vectors[1].x, b_vl.x) + d_bc_vc.connect(vco) # NOTE: Unneeded? + d_bc_vc.connect(d_b_vl) # Connect dN cross pairs + yield d_b_vl.x + d_b_vu = self.split_edge(vectors[2].x, b_vu.x) + d_bc_vc.connect(vco) # NOTE: Unneeded? + d_bc_vc.connect(d_b_vu) # Connect dN cross pairs + yield d_b_vu.x + d_ba_vu = self.split_edge(vectors[3].x, ba_vu.x) + d_bc_vc.connect(vco) # NOTE: Unneeded? + d_bc_vc.connect(d_ba_vu) # Connect dN cross pairs + yield d_ba_vu + + # comb = [c_vc, vl, vu, a_vl, a_vu, + # bc_vc, b_vl, b_vu, ba_vl, ba_vu] + comb = [vl, vu, a_vu, + b_vl, b_vu, ba_vu] + comb_iter = itertools.combinations(comb, 2) + for vecs in comb_iter: + self.split_edge(vecs[0].x, vecs[1].x) + # Add new list of cross pairs + ab_C.append((d_bc_vc, vectors[1], b_vl, a_vu, ba_vu)) + ab_C.append((d_bc_vc, vl, b_vl, a_vu, ba_vu)) # = prev + + for vectors in ab_Cc: + bc_vc = list(vectors[0].x) + b_vl = list(vectors[1].x) + b_vu = list(vectors[2].x) + ba_vl = list(vectors[3].x) + ba_vu = list(vectors[4].x) + bc_vc[i + 1] = vut[i + 1] + b_vl[i + 1] = vut[i + 1] + b_vu[i + 1] = vut[i + 1] + ba_vl[i + 1] = vut[i + 1] + ba_vu[i + 1] = vut[i + 1] + bc_vc = self.V[tuple(bc_vc)] + bc_vc.connect(vco) # NOTE: Unneeded? 
+ yield bc_vc + + # Split to centre, call this centre group "d = 0.5*a" + d_bc_vc = self.split_edge(vectors[0].x, bc_vc.x) + d_bc_vc.connect(bc_vc) + d_bc_vc.connect(vectors[1]) # Connect all to centroid + d_bc_vc.connect(vectors[2]) # Connect all to centroid + d_bc_vc.connect(vectors[3]) # Connect all to centroid + d_bc_vc.connect(vectors[4]) # Connect all to centroid + yield d_bc_vc.x + b_vl = self.V[tuple(b_vl)] + bc_vc.connect(b_vl) # Connect aN cross pairs + d_bc_vc.connect(b_vl) # Connect all to centroid + yield b_vl + b_vu = self.V[tuple(b_vu)] + bc_vc.connect(b_vu) # Connect aN cross pairs + d_bc_vc.connect(b_vu) # Connect all to centroid + yield b_vu + ba_vl = self.V[tuple(ba_vl)] + bc_vc.connect(ba_vl) # Connect aN cross pairs + d_bc_vc.connect(ba_vl) # Connect all to centroid + self.split_edge(b_vu.x, ba_vl.x) + yield ba_vl + ba_vu = self.V[tuple(ba_vu)] + bc_vc.connect(ba_vu) # Connect aN cross pairs + d_bc_vc.connect(ba_vu) # Connect all to centroid + # Split the a + b edge of the initial triangulation: + os_v = self.split_edge(vectors[1].x, ba_vu.x) # o-s + ss_v = self.split_edge(b_vl.x, ba_vu.x) # s-s + yield os_v.x # often equal to vco, but not always + yield ss_v.x # often equal to bc_vu, but not always + yield ba_vu + # Split remaining to centre, call this centre group + # "d = 0.5*a" + d_bc_vc = self.split_edge(vectors[0].x, bc_vc.x) + d_bc_vc.connect(vco) # NOTE: Unneeded? + yield d_bc_vc.x + d_b_vl = self.split_edge(vectors[1].x, b_vl.x) + d_bc_vc.connect(vco) # NOTE: Unneeded? + d_bc_vc.connect(d_b_vl) # Connect dN cross pairs + yield d_b_vl.x + d_b_vu = self.split_edge(vectors[2].x, b_vu.x) + d_bc_vc.connect(vco) # NOTE: Unneeded? + d_bc_vc.connect(d_b_vu) # Connect dN cross pairs + yield d_b_vu.x + d_ba_vl = self.split_edge(vectors[3].x, ba_vl.x) + d_bc_vc.connect(vco) # NOTE: Unneeded? 
+ d_bc_vc.connect(d_ba_vl) # Connect dN cross pairs + yield d_ba_vl + d_ba_vu = self.split_edge(vectors[4].x, ba_vu.x) + d_bc_vc.connect(vco) # NOTE: Unneeded? + d_bc_vc.connect(d_ba_vu) # Connect dN cross pairs + yield d_ba_vu + c_vc, vl, vu, a_vl, a_vu = vectors + + comb = [vl, vu, a_vl, a_vu, + b_vl, b_vu, ba_vl, ba_vu] + comb_iter = itertools.combinations(comb, 2) + for vecs in comb_iter: + self.split_edge(vecs[0].x, vecs[1].x) + + # Add new list of cross pairs + ab_C.append((bc_vc, b_vl, b_vu, ba_vl, ba_vu)) + ab_C.append((d_bc_vc, d_b_vl, d_b_vu, d_ba_vl, d_ba_vu)) + ab_C.append((d_bc_vc, vectors[1], b_vl, a_vu, ba_vu)) + ab_C.append((d_bc_vc, vu, b_vu, a_vl, ba_vl)) + + for j, (VL, VC, VU) in enumerate(zip(cCox, cCcx, cCux)): + for k, (vl, vc, vu) in enumerate(zip(VL, VC, VU)): + # Build aN vertices for each lower-upper C3 group in N: + a_vl = list(vl.x) + a_vu = list(vu.x) + a_vl[i + 1] = vut[i + 1] + a_vu[i + 1] = vut[i + 1] + a_vl = self.V[tuple(a_vl)] + a_vu = self.V[tuple(a_vu)] + # Note, build (a + vc) later for consistent yields + # Split the a + b edge of the initial triangulation: + c_vc = self.split_edge(vl.x, a_vu.x) + self.split_edge(vl.x, vu.x) # Equal to vc + # Build cN vertices for each lower-upper C3 group in N: + c_vc.connect(vco) + c_vc.connect(vc) + c_vc.connect(vl) # Connect c + ac operations + c_vc.connect(vu) # Connect c + ac operations + c_vc.connect(a_vl) # Connect c + ac operations + c_vc.connect(a_vu) # Connect c + ac operations + yield c_vc.x + c_vl = self.split_edge(vl.x, a_vl.x) + c_vl.connect(vco) + c_vc.connect(c_vl) # Connect cN group vertices + yield c_vl.x + # yield at end of loop: + c_vu = self.split_edge(vu.x, a_vu.x) + c_vu.connect(vco) + # Connect remaining cN group vertices + c_vc.connect(c_vu) # Connect cN group vertices + yield c_vu.x + + a_vc = self.split_edge(a_vl.x, a_vu.x) # is (a + vc) ? 
+ a_vc.connect(vco) + a_vc.connect(c_vc) + + # Storage for connecting c + ac operations: + ab_C.append((c_vc, vl, vu, a_vl, a_vu)) + + # Update the containers + Cox[i + 1].append(vl) + Cox[i + 1].append(vc) + Cox[i + 1].append(vu) + Ccx[i + 1].append(c_vl) + Ccx[i + 1].append(c_vc) + Ccx[i + 1].append(c_vu) + Cux[i + 1].append(a_vl) + Cux[i + 1].append(a_vc) + Cux[i + 1].append(a_vu) + + # Update old containers + Cox[j].append(c_vl) # ! + Cox[j].append(a_vl) + Ccx[j].append(c_vc) # ! + Ccx[j].append(a_vc) # ! + Cux[j].append(c_vu) # ! + Cux[j].append(a_vu) + + # Yield new points + yield a_vc.x + + except IndexError: + for vectors in ab_Cc: + ba_vl = list(vectors[3].x) + ba_vu = list(vectors[4].x) + ba_vl[i + 1] = vut[i + 1] + ba_vu[i + 1] = vut[i + 1] + ba_vu = self.V[tuple(ba_vu)] + yield ba_vu + d_bc_vc = self.split_edge(vectors[1].x, ba_vu.x) # o-s + yield ba_vu + d_bc_vc.connect(vectors[1]) # Connect all to centroid + d_bc_vc.connect(vectors[2]) # Connect all to centroid + d_bc_vc.connect(vectors[3]) # Connect all to centroid + d_bc_vc.connect(vectors[4]) # Connect all to centroid + yield d_bc_vc.x + ba_vl = self.V[tuple(ba_vl)] + yield ba_vl + d_ba_vl = self.split_edge(vectors[3].x, ba_vl.x) + d_ba_vu = self.split_edge(vectors[4].x, ba_vu.x) + d_ba_vc = self.split_edge(d_ba_vl.x, d_ba_vu.x) + yield d_ba_vl + yield d_ba_vu + yield d_ba_vc + c_vc, vl, vu, a_vl, a_vu = vectors + comb = [vl, vu, a_vl, a_vu, + ba_vl, + ba_vu] + comb_iter = itertools.combinations(comb, 2) + for vecs in comb_iter: + self.split_edge(vecs[0].x, vecs[1].x) + + # Copy lists for iteration + cCox = Cox[i] + cCcx = Ccx[i] + cCux = Cux[i] + VL, VC, VU = cCox, cCcx, cCux + for k, (vl, vc, vu) in enumerate(zip(VL, VC, VU)): + # Build aN vertices for each lower-upper pair in N: + a_vu = list(vu.x) + a_vu[i + 1] = vut[i + 1] + + # Connect vertices in N to corresponding vertices + # in aN: + a_vu = self.V[tuple(a_vu)] + yield a_vl.x + # Split the a + b edge of the initial triangulation: + c_vc = 
self.split_edge(vl.x, a_vu.x) + self.split_edge(vl.x, vu.x) # Equal to vc + c_vc.connect(vco) + c_vc.connect(vc) + c_vc.connect(vl) # Connect c + ac operations + c_vc.connect(vu) # Connect c + ac operations + c_vc.connect(a_vu) # Connect c + ac operations + yield (c_vc.x) + c_vu = self.split_edge(vu.x, + a_vu.x) # yield at end of loop + c_vu.connect(vco) + # Connect remaining cN group vertices + c_vc.connect(c_vu) # Connect cN group vertices + yield (c_vu.x) + + # Update the containers + Cox[i + 1].append(vu) + Ccx[i + 1].append(c_vu) + Cux[i + 1].append(a_vu) + + # Update old containers + s_ab_C.append([c_vc, vl, vu, a_vu]) + + yield a_vu.x + + # Clean class trash + try: + del Cox + del Ccx + del Cux + del ab_C + del ab_Cc + except UnboundLocalError: + pass + + try: + self.triangulated_vectors.remove((tuple(origin_c), + tuple(supremum_c))) + except ValueError: + # Turn this into a logging warning? + pass + # Add newly triangulated vectors: + for vs in sup_set: + self.triangulated_vectors.append((tuple(vco.x), tuple(vs.x))) + + # Extra yield to ensure that the triangulation is completed + if centroid: + vcn_set = set() + c_nn_lists = [] + for vs in sup_set: + # Build centroid + c_nn = self.vpool(vco.x, vs.x) + try: + c_nn.remove(vcn_set) + except KeyError: + pass + c_nn_lists.append(c_nn) + + for c_nn in c_nn_lists: + try: + c_nn.remove(vcn_set) + except KeyError: + pass + + for vs, c_nn in zip(sup_set, c_nn_lists): + # Build centroid + vcn = self.split_edge(vco.x, vs.x) + vcn_set.add(vcn) + try: # Shouldn't be needed? 
+ c_nn.remove(vcn_set) + except KeyError: + pass + for vnn in c_nn: + vcn.connect(vnn) + yield vcn.x + else: + pass + + yield vut + return + + def refine_star(self, v): + """Refine the star domain of a vertex `v`.""" + # Copy lists before iteration + vnn = copy.copy(v.nn) + v1nn = [] + d_v0v1_set = set() + for v1 in vnn: + v1nn.append(copy.copy(v1.nn)) + + for v1, v1nn in zip(vnn, v1nn): + vnnu = v1nn.intersection(vnn) + + d_v0v1 = self.split_edge(v.x, v1.x) + for o_d_v0v1 in d_v0v1_set: + d_v0v1.connect(o_d_v0v1) + d_v0v1_set.add(d_v0v1) + for v2 in vnnu: + d_v1v2 = self.split_edge(v1.x, v2.x) + d_v0v1.connect(d_v1v2) + return + + @cache + def split_edge(self, v1, v2): + v1 = self.V[v1] + v2 = self.V[v2] + # Destroy original edge, if it exists: + v1.disconnect(v2) + # Compute vertex on centre of edge: + try: + vct = (v2.x_a - v1.x_a) / 2.0 + v1.x_a + except TypeError: # Allow for decimal operations + vct = (v2.x_a - v1.x_a) / decimal.Decimal(2.0) + v1.x_a + + vc = self.V[tuple(vct)] + # Connect to original 2 vertices to the new centre vertex + vc.connect(v1) + vc.connect(v2) + return vc + + def vpool(self, origin, supremum): + vot = tuple(origin) + vst = tuple(supremum) + # Initiate vertices in case they don't exist + vo = self.V[vot] + vs = self.V[vst] + + # Remove origin - supremum disconnect + + # Find the lower/upper bounds of the refinement hyperrectangle + bl = list(vot) + bu = list(vst) + for i, (voi, vsi) in enumerate(zip(vot, vst)): + if bl[i] > vsi: + bl[i] = vsi + if bu[i] < voi: + bu[i] = voi + + # NOTE: This is mostly done with sets/lists because we aren't sure + # how well the numpy arrays will scale to thousands of + # dimensions. 
+        vn_pool = set()
+        vn_pool.update(vo.nn)
+        vn_pool.update(vs.nn)
+        cvn_pool = copy.copy(vn_pool)
+        for vn in cvn_pool:
+            for i, xi in enumerate(vn.x):
+                if bl[i] <= xi <= bu[i]:
+                    pass
+                else:
+                    try:
+                        vn_pool.remove(vn)
+                    except KeyError:
+                        pass  # NOTE: Not all neighbours are in initial pool
+        return vn_pool
+
+    def vf_to_vv(self, vertices, simplices):
+        """
+        Convert a vertex-face mesh to a vertex-vertex mesh used by this class
+
+        Parameters
+        ----------
+        vertices : list
+            Vertices
+        simplices : list
+            Simplices
+        """
+        if self.dim > 1:
+            for s in simplices:
+                edges = itertools.combinations(s, self.dim)
+                for e in edges:
+                    self.V[tuple(vertices[e[0]])].connect(
+                        self.V[tuple(vertices[e[1]])])
+        else:
+            for e in simplices:
+                self.V[tuple(vertices[e[0]])].connect(
+                    self.V[tuple(vertices[e[1]])])
+        return
+
+    def connect_vertex_non_symm(self, v_x, near=None):
+        """
+        Adds a vertex at coords v_x to the complex that is not symmetric to the
+        initial triangulation and sub-triangulation.
+
+        If near is specified (for example, a star domain or collections of
+        cells known to contain v) then only those simplices contained in near
+        will be searched, which greatly speeds up the process.
+
+        If near is not specified this method will search the entire simplicial
+        complex structure.
+ + Parameters + ---------- + v_x : tuple + Coordinates of non-symmetric vertex + near : set or list + List of vertices, these are points near v to check for + """ + if near is None: + star = self.V + else: + star = near + # Create the vertex origin + if tuple(v_x) in self.V.cache: + if self.V[v_x] in self.V_non_symm: + pass + else: + return + + self.V[v_x] + found_nn = False + S_rows = [] + for v in star: + S_rows.append(v.x) + + S_rows = numpy.array(S_rows) + A = numpy.array(S_rows) - numpy.array(v_x) + # Iterate through all the possible simplices of S_rows + for s_i in itertools.combinations(range(S_rows.shape[0]), + r=self.dim + 1): + # Check if connected, else s_i is not a simplex + valid_simplex = True + for i in itertools.combinations(s_i, r=2): + # Every combination of vertices must be connected, we check of + # the current iteration of all combinations of s_i are + # connected we break the loop if it is not. + if ((self.V[tuple(S_rows[i[1]])] not in + self.V[tuple(S_rows[i[0]])].nn) + and (self.V[tuple(S_rows[i[0]])] not in + self.V[tuple(S_rows[i[1]])].nn)): + valid_simplex = False + break + + S = S_rows[tuple([s_i])] + if valid_simplex: + if self.deg_simplex(S, proj=None): + valid_simplex = False + + # If s_i is a valid simplex we can test if v_x is inside si + if valid_simplex: + # Find the A_j0 value from the precalculated values + A_j0 = A[tuple([s_i])] + if self.in_simplex(S, v_x, A_j0): + found_nn = True + # breaks the main for loop, s_i is the target simplex: + break + + # Connect the simplex to point + if found_nn: + for i in s_i: + self.V[v_x].connect(self.V[tuple(S_rows[i])]) + # Attached the simplex to storage for all non-symmetric vertices + self.V_non_symm.append(self.V[v_x]) + # this bool value indicates a successful connection if True: + return found_nn + + def in_simplex(self, S, v_x, A_j0=None): + """Check if a vector v_x is in simplex `S`. 
+ + Parameters + ---------- + S : array_like + Array containing simplex entries of vertices as rows + v_x : + A candidate vertex + A_j0 : array, optional, + Allows for A_j0 to be pre-calculated + + Returns + ------- + res : boolean + True if `v_x` is in `S` + """ + A_11 = numpy.delete(S, 0, 0) - S[0] + + sign_det_A_11 = numpy.sign(numpy.linalg.det(A_11)) + if sign_det_A_11 == 0: + # NOTE: We keep the variable A_11, but we loop through A_jj + # ind= + # while sign_det_A_11 == 0: + # A_11 = numpy.delete(S, ind, 0) - S[ind] + # sign_det_A_11 = numpy.sign(numpy.linalg.det(A_11)) + + sign_det_A_11 = -1 # TODO: Choose another det of j instead? + # TODO: Unlikely to work in many cases + + if A_j0 is None: + A_j0 = S - v_x + + for d in range(self.dim + 1): + det_A_jj = (-1)**d * sign_det_A_11 + # TODO: Note that scipy might be faster to add as an optional + # dependency + sign_det_A_j0 = numpy.sign(numpy.linalg.det(numpy.delete(A_j0, d, + 0))) + # TODO: Note if sign_det_A_j0 == then the point is coplanar to the + # current simplex facet, so perhaps return True and attach? + if det_A_jj == sign_det_A_j0: + continue + else: + return False + + return True + + def deg_simplex(self, S, proj=None): + """Test a simplex S for degeneracy (linear dependence in R^dim). + + Parameters + ---------- + S : np.array + Simplex with rows as vertex vectors + proj : array, optional, + If the projection S[1:] - S[0] is already + computed it can be added as an optional argument. + """ + # Strategy: we test all combination of faces, if any of the + # determinants are zero then the vectors lie on the same face and is + # therefore linearly dependent in the space of R^dim + if proj is None: + proj = S[1:] - S[0] + + # TODO: Is checking the projection of one vertex against faces of other + # vertices sufficient? Or do we need to check more vertices in + # dimensions higher than 2? + # TODO: Literature seems to suggest using proj.T, but why is this + # needed? 
+        if numpy.linalg.det(proj) == 0.0:  # TODO: Replace with tolerance?
+            return True  # Simplex is degenerate
+        else:
+            return False  # Simplex is not degenerate
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/_vertex.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/_vertex.py
new file mode 100644
index 0000000000000000000000000000000000000000..e47558ee7b9a181638841c34bb63603b5d37e221
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/_vertex.py
@@ -0,0 +1,460 @@
+import collections
+from abc import ABC, abstractmethod
+
+import numpy as np
+
+from scipy._lib._util import MapWrapper
+
+
+class VertexBase(ABC):
+    """
+    Base class for a vertex.
+    """
+    def __init__(self, x, nn=None, index=None):
+        """
+        Initiation of a vertex object.
+
+        Parameters
+        ----------
+        x : tuple or vector
+            The geometric location (domain).
+        nn : list, optional
+            Nearest neighbour list.
+        index : int, optional
+            Index of vertex.
+        """
+        self.x = x
+        self.hash = hash(self.x)  # Save precomputed hash
+
+        if nn is not None:
+            self.nn = set(nn)  # can use .indexupdate to add a new list
+        else:
+            self.nn = set()
+
+        self.index = index
+
+    def __hash__(self):
+        return self.hash
+
+    def __getattr__(self, item):
+        if item not in ['x_a']:
+            raise AttributeError(f"{type(self)} object has no attribute "
+                                 f"'{item}'")
+        if item == 'x_a':
+            self.x_a = np.array(self.x)
+            return self.x_a
+
+    @abstractmethod
+    def connect(self, v):
+        raise NotImplementedError("This method is only implemented with an "
+                                  "associated child of the base class.")
+
+    @abstractmethod
+    def disconnect(self, v):
+        raise NotImplementedError("This method is only implemented with an "
+                                  "associated child of the base class.")
+
+    def star(self):
+        """Returns the star domain ``st(v)`` of the vertex.
+ + Parameters + ---------- + v : + The vertex ``v`` in ``st(v)`` + + Returns + ------- + st : set + A set containing all the vertices in ``st(v)`` + """ + self.st = self.nn + self.st.add(self) + return self.st + + +class VertexScalarField(VertexBase): + """ + Add homology properties of a scalar field f: R^n --> R associated with + the geometry built from the VertexBase class + """ + + def __init__(self, x, field=None, nn=None, index=None, field_args=(), + g_cons=None, g_cons_args=()): + """ + Parameters + ---------- + x : tuple, + vector of vertex coordinates + field : callable, optional + a scalar field f: R^n --> R associated with the geometry + nn : list, optional + list of nearest neighbours + index : int, optional + index of the vertex + field_args : tuple, optional + additional arguments to be passed to field + g_cons : callable, optional + constraints on the vertex + g_cons_args : tuple, optional + additional arguments to be passed to g_cons + + """ + super().__init__(x, nn=nn, index=index) + + # Note Vertex is only initiated once for all x so only + # evaluated once + # self.feasible = None + + # self.f is externally defined by the cache to allow parallel + # processing + # None type that will break arithmetic operations unless defined + # self.f = None + + self.check_min = True + self.check_max = True + + def connect(self, v): + """Connects self to another vertex object v. 
+ + Parameters + ---------- + v : VertexBase or VertexScalarField object + """ + if v is not self and v not in self.nn: + self.nn.add(v) + v.nn.add(self) + + # Flags for checking homology properties: + self.check_min = True + self.check_max = True + v.check_min = True + v.check_max = True + + def disconnect(self, v): + if v in self.nn: + self.nn.remove(v) + v.nn.remove(self) + + # Flags for checking homology properties: + self.check_min = True + self.check_max = True + v.check_min = True + v.check_max = True + + def minimiser(self): + """Check whether this vertex is strictly less than all its + neighbours""" + if self.check_min: + self._min = all(self.f < v.f for v in self.nn) + self.check_min = False + + return self._min + + def maximiser(self): + """ + Check whether this vertex is strictly greater than all its + neighbours. + """ + if self.check_max: + self._max = all(self.f > v.f for v in self.nn) + self.check_max = False + + return self._max + + +class VertexVectorField(VertexBase): + """ + Add homology properties of a scalar field f: R^n --> R^m associated with + the geometry built from the VertexBase class. 
+ """ + + def __init__(self, x, sfield=None, vfield=None, field_args=(), + vfield_args=(), g_cons=None, + g_cons_args=(), nn=None, index=None): + super().__init__(x, nn=nn, index=index) + + raise NotImplementedError("This class is still a work in progress") + + +class VertexCacheBase: + """Base class for a vertex cache for a simplicial complex.""" + def __init__(self): + + self.cache = collections.OrderedDict() + self.nfev = 0 # Feasible points + self.index = -1 + + def __iter__(self): + for v in self.cache: + yield self.cache[v] + return + + def size(self): + """Returns the size of the vertex cache.""" + return self.index + 1 + + def print_out(self): + headlen = len(f"Vertex cache of size: {len(self.cache)}:") + print('=' * headlen) + print(f"Vertex cache of size: {len(self.cache)}:") + print('=' * headlen) + for v in self.cache: + self.cache[v].print_out() + + +class VertexCube(VertexBase): + """Vertex class to be used for a pure simplicial complex with no associated + differential geometry (single level domain that exists in R^n)""" + def __init__(self, x, nn=None, index=None): + super().__init__(x, nn=nn, index=index) + + def connect(self, v): + if v is not self and v not in self.nn: + self.nn.add(v) + v.nn.add(self) + + def disconnect(self, v): + if v in self.nn: + self.nn.remove(v) + v.nn.remove(self) + + +class VertexCacheIndex(VertexCacheBase): + def __init__(self): + """ + Class for a vertex cache for a simplicial complex without an associated + field. Useful only for building and visualising a domain complex. 
+
+        Parameters
+        ----------
+        """
+        super().__init__()
+        self.Vertex = VertexCube
+
+    def __getitem__(self, x, nn=None):
+        try:
+            return self.cache[x]
+        except KeyError:
+            self.index += 1
+            xval = self.Vertex(x, index=self.index)
+            # logging.info("New generated vertex at x = {}".format(x))
+            # NOTE: Surprisingly high performance increase if logging
+            # is commented out
+            self.cache[x] = xval
+            return self.cache[x]
+
+
+class VertexCacheField(VertexCacheBase):
+    def __init__(self, field=None, field_args=(), g_cons=None, g_cons_args=(),
+                 workers=1):
+        """
+        Class for a vertex cache for a simplicial complex with an associated
+        field.
+
+        Parameters
+        ----------
+        field : callable
+            Scalar or vector field callable.
+        field_args : tuple, optional
+            Any additional fixed parameters needed to completely specify the
+            field function
+        g_cons : dict or sequence of dict, optional
+            Constraints definition.
+            Function(s) ``R**n`` in the form::
+        g_cons_args : tuple, optional
+            Any additional fixed parameters needed to completely specify the
+            constraint functions
+        workers : int, optional
+            Uses `multiprocessing.Pool` to compute the field
+            functions in parallel.
+ + """ + super().__init__() + self.index = -1 + self.Vertex = VertexScalarField + self.field = field + self.field_args = field_args + self.wfield = FieldWrapper(field, field_args) # if workers is not 1 + + self.g_cons = g_cons + self.g_cons_args = g_cons_args + self.wgcons = ConstraintWrapper(g_cons, g_cons_args) + self.gpool = set() # A set of tuples to process for feasibility + + # Field processing objects + self.fpool = set() # A set of tuples to process for scalar function + self.sfc_lock = False # True if self.fpool is non-Empty + + self.workers = workers + self._mapwrapper = MapWrapper(workers) + + if workers == 1: + self.process_gpool = self.proc_gpool + if g_cons is None: + self.process_fpool = self.proc_fpool_nog + else: + self.process_fpool = self.proc_fpool_g + else: + self.process_gpool = self.pproc_gpool + if g_cons is None: + self.process_fpool = self.pproc_fpool_nog + else: + self.process_fpool = self.pproc_fpool_g + + def __getitem__(self, x, nn=None): + try: + return self.cache[x] + except KeyError: + self.index += 1 + xval = self.Vertex(x, field=self.field, nn=nn, index=self.index, + field_args=self.field_args, + g_cons=self.g_cons, + g_cons_args=self.g_cons_args) + + self.cache[x] = xval # Define in cache + self.gpool.add(xval) # Add to pool for processing feasibility + self.fpool.add(xval) # Add to pool for processing field values + return self.cache[x] + + def __getstate__(self): + self_dict = self.__dict__.copy() + del self_dict['pool'] + return self_dict + + def process_pools(self): + if self.g_cons is not None: + self.process_gpool() + self.process_fpool() + self.proc_minimisers() + + def feasibility_check(self, v): + v.feasible = True + for g, args in zip(self.g_cons, self.g_cons_args): + # constraint may return more than 1 value. + if np.any(g(v.x_a, *args) < 0.0): + v.f = np.inf + v.feasible = False + break + + def compute_sfield(self, v): + """Compute the scalar field values of a vertex object `v`. 
+ + Parameters + ---------- + v : VertexBase or VertexScalarField object + """ + try: + v.f = self.field(v.x_a, *self.field_args) + self.nfev += 1 + except AttributeError: + v.f = np.inf + # logging.warning(f"Field function not found at x = {self.x_a}") + if np.isnan(v.f): + v.f = np.inf + + def proc_gpool(self): + """Process all constraints.""" + if self.g_cons is not None: + for v in self.gpool: + self.feasibility_check(v) + # Clean the pool + self.gpool = set() + + def pproc_gpool(self): + """Process all constraints in parallel.""" + gpool_l = [] + for v in self.gpool: + gpool_l.append(v.x_a) + + G = self._mapwrapper(self.wgcons.gcons, gpool_l) + for v, g in zip(self.gpool, G): + v.feasible = g # set vertex object attribute v.feasible = g (bool) + + def proc_fpool_g(self): + """Process all field functions with constraints supplied.""" + for v in self.fpool: + if v.feasible: + self.compute_sfield(v) + # Clean the pool + self.fpool = set() + + def proc_fpool_nog(self): + """Process all field functions with no constraints supplied.""" + for v in self.fpool: + self.compute_sfield(v) + # Clean the pool + self.fpool = set() + + def pproc_fpool_g(self): + """ + Process all field functions with constraints supplied in parallel. + """ + self.wfield.func + fpool_l = [] + for v in self.fpool: + if v.feasible: + fpool_l.append(v.x_a) + else: + v.f = np.inf + F = self._mapwrapper(self.wfield.func, fpool_l) + for va, f in zip(fpool_l, F): + vt = tuple(va) + self[vt].f = f # set vertex object attribute v.f = f + self.nfev += 1 + # Clean the pool + self.fpool = set() + + def pproc_fpool_nog(self): + """ + Process all field functions with no constraints supplied in parallel. 
+ """ + self.wfield.func + fpool_l = [] + for v in self.fpool: + fpool_l.append(v.x_a) + F = self._mapwrapper(self.wfield.func, fpool_l) + for va, f in zip(fpool_l, F): + vt = tuple(va) + self[vt].f = f # set vertex object attribute v.f = f + self.nfev += 1 + # Clean the pool + self.fpool = set() + + def proc_minimisers(self): + """Check for minimisers.""" + for v in self: + v.minimiser() + v.maximiser() + + +class ConstraintWrapper: + """Object to wrap constraints to pass to `multiprocessing.Pool`.""" + def __init__(self, g_cons, g_cons_args): + self.g_cons = g_cons + self.g_cons_args = g_cons_args + + def gcons(self, v_x_a): + vfeasible = True + for g, args in zip(self.g_cons, self.g_cons_args): + # constraint may return more than 1 value. + if np.any(g(v_x_a, *args) < 0.0): + vfeasible = False + break + return vfeasible + + +class FieldWrapper: + """Object to wrap field to pass to `multiprocessing.Pool`.""" + def __init__(self, field, field_args): + self.field = field + self.field_args = field_args + + def func(self, v_x_a): + try: + v_f = self.field(v_x_a, *self.field_args) + except Exception: + v_f = np.inf + if np.isnan(v_f): + v_f = np.inf + + return v_f diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__init__.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..549cfb9760dda474cb858b7b36d236af48111067 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__init__.py @@ -0,0 +1,6 @@ +"""This module contains the equality constrained SQP solver.""" + + +from .minimize_trustregion_constr import _minimize_trustregion_constr + +__all__ = ['_minimize_trustregion_constr'] diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/__init__.cpython-310.pyc 
b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2595a832d4ea3a4e696e862f51401d7f0b3f40be Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/canonical_constraint.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/canonical_constraint.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..93c70128653982227d15fa69050eb2e147df1325 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/canonical_constraint.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/equality_constrained_sqp.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/equality_constrained_sqp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c1dc6451839a8200070a092a7829deb0b745e55c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/equality_constrained_sqp.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/minimize_trustregion_constr.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/minimize_trustregion_constr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2a620f5af705cb7bcdedfd19fe5709a9034b8f48 Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/minimize_trustregion_constr.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/projections.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/projections.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4ca10028f502d36937bb5104cb3e83d0cb9a3dba Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/projections.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/qp_subproblem.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/qp_subproblem.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8041cba72c617b79bca29ff28d84b4d3c268d8d3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/qp_subproblem.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/report.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/report.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..52a9ee05a9c5209ceba90079c6e9e55d97d4f622 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/report.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/tr_interior_point.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/tr_interior_point.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..83291c6a24ea71e01ad826f1e18501966ea32b12 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/tr_interior_point.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/canonical_constraint.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/canonical_constraint.py new file mode 100644 index 0000000000000000000000000000000000000000..e1ad583bb8eee524d35c2e5bb16934f78629cd69 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/canonical_constraint.py @@ -0,0 +1,390 @@ +import numpy as np +import scipy.sparse as sps + + +class CanonicalConstraint: + """Canonical constraint to use with trust-constr algorithm. + + It represents the set of constraints of the form:: + + f_eq(x) = 0 + f_ineq(x) <= 0 + + where ``f_eq`` and ``f_ineq`` are evaluated by a single function, see + below. + + The class is supposed to be instantiated by factory methods, which + should prepare the parameters listed below. + + Parameters + ---------- + n_eq, n_ineq : int + Number of equality and inequality constraints respectively. + fun : callable + Function defining the constraints. The signature is + ``fun(x) -> c_eq, c_ineq``, where ``c_eq`` is ndarray with `n_eq` + components and ``c_ineq`` is ndarray with `n_ineq` components. + jac : callable + Function to evaluate the Jacobian of the constraint. The signature + is ``jac(x) -> J_eq, J_ineq``, where ``J_eq`` and ``J_ineq`` are + either ndarray of csr_matrix of shapes (n_eq, n) and (n_ineq, n), + respectively. + hess : callable + Function to evaluate the Hessian of the constraints multiplied + by Lagrange multipliers, that is + ``dot(f_eq, v_eq) + dot(f_ineq, v_ineq)``. 
The signature is + ``hess(x, v_eq, v_ineq) -> H``, where ``H`` has an implied + shape (n, n) and provide a matrix-vector product operation + ``H.dot(p)``. + keep_feasible : ndarray, shape (n_ineq,) + Mask indicating which inequality constraints should be kept feasible. + """ + def __init__(self, n_eq, n_ineq, fun, jac, hess, keep_feasible): + self.n_eq = n_eq + self.n_ineq = n_ineq + self.fun = fun + self.jac = jac + self.hess = hess + self.keep_feasible = keep_feasible + + @classmethod + def from_PreparedConstraint(cls, constraint): + """Create an instance from `PreparedConstrained` object.""" + lb, ub = constraint.bounds + cfun = constraint.fun + keep_feasible = constraint.keep_feasible + + if np.all(lb == -np.inf) and np.all(ub == np.inf): + return cls.empty(cfun.n) + + if np.all(lb == -np.inf) and np.all(ub == np.inf): + return cls.empty(cfun.n) + elif np.all(lb == ub): + return cls._equal_to_canonical(cfun, lb) + elif np.all(lb == -np.inf): + return cls._less_to_canonical(cfun, ub, keep_feasible) + elif np.all(ub == np.inf): + return cls._greater_to_canonical(cfun, lb, keep_feasible) + else: + return cls._interval_to_canonical(cfun, lb, ub, keep_feasible) + + @classmethod + def empty(cls, n): + """Create an "empty" instance. + + This "empty" instance is required to allow working with unconstrained + problems as if they have some constraints. + """ + empty_fun = np.empty(0) + empty_jac = np.empty((0, n)) + empty_hess = sps.csr_matrix((n, n)) + + def fun(x): + return empty_fun, empty_fun + + def jac(x): + return empty_jac, empty_jac + + def hess(x, v_eq, v_ineq): + return empty_hess + + return cls(0, 0, fun, jac, hess, np.empty(0, dtype=np.bool_)) + + @classmethod + def concatenate(cls, canonical_constraints, sparse_jacobian): + """Concatenate multiple `CanonicalConstraint` into one. + + `sparse_jacobian` (bool) determines the Jacobian format of the + concatenated constraint. 
Note that items in `canonical_constraints` + must have their Jacobians in the same format. + """ + def fun(x): + if canonical_constraints: + eq_all, ineq_all = zip( + *[c.fun(x) for c in canonical_constraints]) + else: + eq_all, ineq_all = [], [] + + return np.hstack(eq_all), np.hstack(ineq_all) + + if sparse_jacobian: + vstack = sps.vstack + else: + vstack = np.vstack + + def jac(x): + if canonical_constraints: + eq_all, ineq_all = zip( + *[c.jac(x) for c in canonical_constraints]) + else: + eq_all, ineq_all = [], [] + + return vstack(eq_all), vstack(ineq_all) + + def hess(x, v_eq, v_ineq): + hess_all = [] + index_eq = 0 + index_ineq = 0 + for c in canonical_constraints: + vc_eq = v_eq[index_eq:index_eq + c.n_eq] + vc_ineq = v_ineq[index_ineq:index_ineq + c.n_ineq] + hess_all.append(c.hess(x, vc_eq, vc_ineq)) + index_eq += c.n_eq + index_ineq += c.n_ineq + + def matvec(p): + result = np.zeros_like(p) + for h in hess_all: + result += h.dot(p) + return result + + n = x.shape[0] + return sps.linalg.LinearOperator((n, n), matvec, dtype=float) + + n_eq = sum(c.n_eq for c in canonical_constraints) + n_ineq = sum(c.n_ineq for c in canonical_constraints) + keep_feasible = np.hstack([c.keep_feasible for c in + canonical_constraints]) + + return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible) + + @classmethod + def _equal_to_canonical(cls, cfun, value): + empty_fun = np.empty(0) + n = cfun.n + + n_eq = value.shape[0] + n_ineq = 0 + keep_feasible = np.empty(0, dtype=bool) + + if cfun.sparse_jacobian: + empty_jac = sps.csr_matrix((0, n)) + else: + empty_jac = np.empty((0, n)) + + def fun(x): + return cfun.fun(x) - value, empty_fun + + def jac(x): + return cfun.jac(x), empty_jac + + def hess(x, v_eq, v_ineq): + return cfun.hess(x, v_eq) + + empty_fun = np.empty(0) + n = cfun.n + if cfun.sparse_jacobian: + empty_jac = sps.csr_matrix((0, n)) + else: + empty_jac = np.empty((0, n)) + + return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible) + + @classmethod + def 
_less_to_canonical(cls, cfun, ub, keep_feasible): + empty_fun = np.empty(0) + n = cfun.n + if cfun.sparse_jacobian: + empty_jac = sps.csr_matrix((0, n)) + else: + empty_jac = np.empty((0, n)) + + finite_ub = ub < np.inf + n_eq = 0 + n_ineq = np.sum(finite_ub) + + if np.all(finite_ub): + def fun(x): + return empty_fun, cfun.fun(x) - ub + + def jac(x): + return empty_jac, cfun.jac(x) + + def hess(x, v_eq, v_ineq): + return cfun.hess(x, v_ineq) + else: + finite_ub = np.nonzero(finite_ub)[0] + keep_feasible = keep_feasible[finite_ub] + ub = ub[finite_ub] + + def fun(x): + return empty_fun, cfun.fun(x)[finite_ub] - ub + + def jac(x): + return empty_jac, cfun.jac(x)[finite_ub] + + def hess(x, v_eq, v_ineq): + v = np.zeros(cfun.m) + v[finite_ub] = v_ineq + return cfun.hess(x, v) + + return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible) + + @classmethod + def _greater_to_canonical(cls, cfun, lb, keep_feasible): + empty_fun = np.empty(0) + n = cfun.n + if cfun.sparse_jacobian: + empty_jac = sps.csr_matrix((0, n)) + else: + empty_jac = np.empty((0, n)) + + finite_lb = lb > -np.inf + n_eq = 0 + n_ineq = np.sum(finite_lb) + + if np.all(finite_lb): + def fun(x): + return empty_fun, lb - cfun.fun(x) + + def jac(x): + return empty_jac, -cfun.jac(x) + + def hess(x, v_eq, v_ineq): + return cfun.hess(x, -v_ineq) + else: + finite_lb = np.nonzero(finite_lb)[0] + keep_feasible = keep_feasible[finite_lb] + lb = lb[finite_lb] + + def fun(x): + return empty_fun, lb - cfun.fun(x)[finite_lb] + + def jac(x): + return empty_jac, -cfun.jac(x)[finite_lb] + + def hess(x, v_eq, v_ineq): + v = np.zeros(cfun.m) + v[finite_lb] = -v_ineq + return cfun.hess(x, v) + + return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible) + + @classmethod + def _interval_to_canonical(cls, cfun, lb, ub, keep_feasible): + lb_inf = lb == -np.inf + ub_inf = ub == np.inf + equal = lb == ub + less = lb_inf & ~ub_inf + greater = ub_inf & ~lb_inf + interval = ~equal & ~lb_inf & ~ub_inf + + equal = np.nonzero(equal)[0] + 
less = np.nonzero(less)[0] + greater = np.nonzero(greater)[0] + interval = np.nonzero(interval)[0] + n_less = less.shape[0] + n_greater = greater.shape[0] + n_interval = interval.shape[0] + n_ineq = n_less + n_greater + 2 * n_interval + n_eq = equal.shape[0] + + keep_feasible = np.hstack((keep_feasible[less], + keep_feasible[greater], + keep_feasible[interval], + keep_feasible[interval])) + + def fun(x): + f = cfun.fun(x) + eq = f[equal] - lb[equal] + le = f[less] - ub[less] + ge = lb[greater] - f[greater] + il = f[interval] - ub[interval] + ig = lb[interval] - f[interval] + return eq, np.hstack((le, ge, il, ig)) + + def jac(x): + J = cfun.jac(x) + eq = J[equal] + le = J[less] + ge = -J[greater] + il = J[interval] + ig = -il + if sps.issparse(J): + ineq = sps.vstack((le, ge, il, ig)) + else: + ineq = np.vstack((le, ge, il, ig)) + return eq, ineq + + def hess(x, v_eq, v_ineq): + n_start = 0 + v_l = v_ineq[n_start:n_start + n_less] + n_start += n_less + v_g = v_ineq[n_start:n_start + n_greater] + n_start += n_greater + v_il = v_ineq[n_start:n_start + n_interval] + n_start += n_interval + v_ig = v_ineq[n_start:n_start + n_interval] + + v = np.zeros_like(lb) + v[equal] = v_eq + v[less] = v_l + v[greater] = -v_g + v[interval] = v_il - v_ig + + return cfun.hess(x, v) + + return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible) + + +def initial_constraints_as_canonical(n, prepared_constraints, sparse_jacobian): + """Convert initial values of the constraints to the canonical format. + + The purpose to avoid one additional call to the constraints at the initial + point. It takes saved values in `PreparedConstraint`, modififies and + concatenates them to the canonical constraint format. 
+ """ + c_eq = [] + c_ineq = [] + J_eq = [] + J_ineq = [] + + for c in prepared_constraints: + f = c.fun.f + J = c.fun.J + lb, ub = c.bounds + if np.all(lb == ub): + c_eq.append(f - lb) + J_eq.append(J) + elif np.all(lb == -np.inf): + finite_ub = ub < np.inf + c_ineq.append(f[finite_ub] - ub[finite_ub]) + J_ineq.append(J[finite_ub]) + elif np.all(ub == np.inf): + finite_lb = lb > -np.inf + c_ineq.append(lb[finite_lb] - f[finite_lb]) + J_ineq.append(-J[finite_lb]) + else: + lb_inf = lb == -np.inf + ub_inf = ub == np.inf + equal = lb == ub + less = lb_inf & ~ub_inf + greater = ub_inf & ~lb_inf + interval = ~equal & ~lb_inf & ~ub_inf + + c_eq.append(f[equal] - lb[equal]) + c_ineq.append(f[less] - ub[less]) + c_ineq.append(lb[greater] - f[greater]) + c_ineq.append(f[interval] - ub[interval]) + c_ineq.append(lb[interval] - f[interval]) + + J_eq.append(J[equal]) + J_ineq.append(J[less]) + J_ineq.append(-J[greater]) + J_ineq.append(J[interval]) + J_ineq.append(-J[interval]) + + c_eq = np.hstack(c_eq) if c_eq else np.empty(0) + c_ineq = np.hstack(c_ineq) if c_ineq else np.empty(0) + + if sparse_jacobian: + vstack = sps.vstack + empty = sps.csr_matrix((0, n)) + else: + vstack = np.vstack + empty = np.empty((0, n)) + + J_eq = vstack(J_eq) if J_eq else empty + J_ineq = vstack(J_ineq) if J_ineq else empty + + return c_eq, c_ineq, J_eq, J_ineq diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/equality_constrained_sqp.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/equality_constrained_sqp.py new file mode 100644 index 0000000000000000000000000000000000000000..d50e1e792bef91207aa20447bd36386925d38d28 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/equality_constrained_sqp.py @@ -0,0 +1,217 @@ +"""Byrd-Omojokun Trust-Region SQP method.""" + +from scipy.sparse import eye as speye +from .projections import projections +from .qp_subproblem import modified_dogleg, 
projected_cg, box_intersections +import numpy as np +from numpy.linalg import norm + +__all__ = ['equality_constrained_sqp'] + + +def default_scaling(x): + n, = np.shape(x) + return speye(n) + + +def equality_constrained_sqp(fun_and_constr, grad_and_jac, lagr_hess, + x0, fun0, grad0, constr0, + jac0, stop_criteria, + state, + initial_penalty, + initial_trust_radius, + factorization_method, + trust_lb=None, + trust_ub=None, + scaling=default_scaling): + """Solve nonlinear equality-constrained problem using trust-region SQP. + + Solve optimization problem: + + minimize fun(x) + subject to: constr(x) = 0 + + using Byrd-Omojokun Trust-Region SQP method described in [1]_. Several + implementation details are based on [2]_ and [3]_, p. 549. + + References + ---------- + .. [1] Lalee, Marucha, Jorge Nocedal, and Todd Plantenga. "On the + implementation of an algorithm for large-scale equality + constrained optimization." SIAM Journal on + Optimization 8.3 (1998): 682-706. + .. [2] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal. + "An interior point algorithm for large-scale nonlinear + programming." SIAM Journal on Optimization 9.4 (1999): 877-900. + .. [3] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization" + Second Edition (2006). + """ + PENALTY_FACTOR = 0.3 # Rho from formula (3.51), reference [2]_, p.891. + LARGE_REDUCTION_RATIO = 0.9 + INTERMEDIARY_REDUCTION_RATIO = 0.3 + SUFFICIENT_REDUCTION_RATIO = 1e-8 # Eta from reference [2]_, p.892. + TRUST_ENLARGEMENT_FACTOR_L = 7.0 + TRUST_ENLARGEMENT_FACTOR_S = 2.0 + MAX_TRUST_REDUCTION = 0.5 + MIN_TRUST_REDUCTION = 0.1 + SOC_THRESHOLD = 0.1 + TR_FACTOR = 0.8 # Zeta from formula (3.21), reference [2]_, p.885. + BOX_FACTOR = 0.5 + + n, = np.shape(x0) # Number of parameters + + # Set default lower and upper bounds. 
+ if trust_lb is None: + trust_lb = np.full(n, -np.inf) + if trust_ub is None: + trust_ub = np.full(n, np.inf) + + # Initial values + x = np.copy(x0) + trust_radius = initial_trust_radius + penalty = initial_penalty + # Compute Values + f = fun0 + c = grad0 + b = constr0 + A = jac0 + S = scaling(x) + # Get projections + Z, LS, Y = projections(A, factorization_method) + # Compute least-square lagrange multipliers + v = -LS.dot(c) + # Compute Hessian + H = lagr_hess(x, v) + + # Update state parameters + optimality = norm(c + A.T.dot(v), np.inf) + constr_violation = norm(b, np.inf) if len(b) > 0 else 0 + cg_info = {'niter': 0, 'stop_cond': 0, + 'hits_boundary': False} + + last_iteration_failed = False + while not stop_criteria(state, x, last_iteration_failed, + optimality, constr_violation, + trust_radius, penalty, cg_info): + # Normal Step - `dn` + # minimize 1/2*||A dn + b||^2 + # subject to: + # ||dn|| <= TR_FACTOR * trust_radius + # BOX_FACTOR * lb <= dn <= BOX_FACTOR * ub. + dn = modified_dogleg(A, Y, b, + TR_FACTOR*trust_radius, + BOX_FACTOR*trust_lb, + BOX_FACTOR*trust_ub) + + # Tangential Step - `dt` + # Solve the QP problem: + # minimize 1/2 dt.T H dt + dt.T (H dn + c) + # subject to: + # A dt = 0 + # ||dt|| <= sqrt(trust_radius**2 - ||dn||**2) + # lb - dn <= dt <= ub - dn + c_t = H.dot(dn) + c + b_t = np.zeros_like(b) + trust_radius_t = np.sqrt(trust_radius**2 - np.linalg.norm(dn)**2) + lb_t = trust_lb - dn + ub_t = trust_ub - dn + dt, cg_info = projected_cg(H, c_t, Z, Y, b_t, + trust_radius_t, + lb_t, ub_t) + + # Compute update (normal + tangential steps). + d = dn + dt + + # Compute second order model: 1/2 d H d + c.T d + f. + quadratic_model = 1/2*(H.dot(d)).dot(d) + c.T.dot(d) + # Compute linearized constraint: l = A d + b. + linearized_constr = A.dot(d)+b + # Compute new penalty parameter according to formula (3.52), + # reference [2]_, p.891. 
+ vpred = norm(b) - norm(linearized_constr) + # Guarantee `vpred` always positive, + # regardless of roundoff errors. + vpred = max(1e-16, vpred) + previous_penalty = penalty + if quadratic_model > 0: + new_penalty = quadratic_model / ((1-PENALTY_FACTOR)*vpred) + penalty = max(penalty, new_penalty) + # Compute predicted reduction according to formula (3.52), + # reference [2]_, p.891. + predicted_reduction = -quadratic_model + penalty*vpred + + # Compute merit function at current point + merit_function = f + penalty*norm(b) + # Evaluate function and constraints at trial point + x_next = x + S.dot(d) + f_next, b_next = fun_and_constr(x_next) + # Compute merit function at trial point + merit_function_next = f_next + penalty*norm(b_next) + # Compute actual reduction according to formula (3.54), + # reference [2]_, p.892. + actual_reduction = merit_function - merit_function_next + # Compute reduction ratio + reduction_ratio = actual_reduction / predicted_reduction + + # Second order correction (SOC), reference [2]_, p.892. + if reduction_ratio < SUFFICIENT_REDUCTION_RATIO and \ + norm(dn) <= SOC_THRESHOLD * norm(dt): + # Compute second order correction + y = -Y.dot(b_next) + # Make sure increment is inside box constraints + _, t, intersect = box_intersections(d, y, trust_lb, trust_ub) + # Compute tentative point + x_soc = x + S.dot(d + t*y) + f_soc, b_soc = fun_and_constr(x_soc) + # Recompute actual reduction + merit_function_soc = f_soc + penalty*norm(b_soc) + actual_reduction_soc = merit_function - merit_function_soc + # Recompute reduction ratio + reduction_ratio_soc = actual_reduction_soc / predicted_reduction + if intersect and reduction_ratio_soc >= SUFFICIENT_REDUCTION_RATIO: + x_next = x_soc + f_next = f_soc + b_next = b_soc + reduction_ratio = reduction_ratio_soc + + # Readjust trust region step, formula (3.55), reference [2]_, p.892. 
+ if reduction_ratio >= LARGE_REDUCTION_RATIO: + trust_radius = max(TRUST_ENLARGEMENT_FACTOR_L * norm(d), + trust_radius) + elif reduction_ratio >= INTERMEDIARY_REDUCTION_RATIO: + trust_radius = max(TRUST_ENLARGEMENT_FACTOR_S * norm(d), + trust_radius) + # Reduce trust region step, according to reference [3]_, p.696. + elif reduction_ratio < SUFFICIENT_REDUCTION_RATIO: + trust_reduction = ((1-SUFFICIENT_REDUCTION_RATIO) / + (1-reduction_ratio)) + new_trust_radius = trust_reduction * norm(d) + if new_trust_radius >= MAX_TRUST_REDUCTION * trust_radius: + trust_radius *= MAX_TRUST_REDUCTION + elif new_trust_radius >= MIN_TRUST_REDUCTION * trust_radius: + trust_radius = new_trust_radius + else: + trust_radius *= MIN_TRUST_REDUCTION + + # Update iteration + if reduction_ratio >= SUFFICIENT_REDUCTION_RATIO: + x = x_next + f, b = f_next, b_next + c, A = grad_and_jac(x) + S = scaling(x) + # Get projections + Z, LS, Y = projections(A, factorization_method) + # Compute least-square lagrange multipliers + v = -LS.dot(c) + # Compute Hessian + H = lagr_hess(x, v) + # Set Flag + last_iteration_failed = False + # Otimality values + optimality = norm(c + A.T.dot(v), np.inf) + constr_violation = norm(b, np.inf) if len(b) > 0 else 0 + else: + penalty = previous_penalty + last_iteration_failed = True + + return x, state diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/minimize_trustregion_constr.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/minimize_trustregion_constr.py new file mode 100644 index 0000000000000000000000000000000000000000..2835ea5445c0eafc303f0cb1ab8543f48b7e3bb9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/minimize_trustregion_constr.py @@ -0,0 +1,564 @@ +import time +import numpy as np +from scipy.sparse.linalg import LinearOperator +from .._differentiable_functions import VectorFunction +from .._constraints import ( + NonlinearConstraint, 
LinearConstraint, PreparedConstraint, Bounds, strict_bounds) +from .._hessian_update_strategy import BFGS +from .._optimize import OptimizeResult +from .._differentiable_functions import ScalarFunction +from .equality_constrained_sqp import equality_constrained_sqp +from .canonical_constraint import (CanonicalConstraint, + initial_constraints_as_canonical) +from .tr_interior_point import tr_interior_point +from .report import BasicReport, SQPReport, IPReport + + +TERMINATION_MESSAGES = { + 0: "The maximum number of function evaluations is exceeded.", + 1: "`gtol` termination condition is satisfied.", + 2: "`xtol` termination condition is satisfied.", + 3: "`callback` function requested termination." +} + + +class HessianLinearOperator: + """Build LinearOperator from hessp""" + def __init__(self, hessp, n): + self.hessp = hessp + self.n = n + + def __call__(self, x, *args): + def matvec(p): + return self.hessp(x, p, *args) + + return LinearOperator((self.n, self.n), matvec=matvec) + + +class LagrangianHessian: + """The Hessian of the Lagrangian as LinearOperator. + + The Lagrangian is computed as the objective function plus all the + constraints multiplied with some numbers (Lagrange multipliers). 
+ """ + def __init__(self, n, objective_hess, constraints_hess): + self.n = n + self.objective_hess = objective_hess + self.constraints_hess = constraints_hess + + def __call__(self, x, v_eq=np.empty(0), v_ineq=np.empty(0)): + H_objective = self.objective_hess(x) + H_constraints = self.constraints_hess(x, v_eq, v_ineq) + + def matvec(p): + return H_objective.dot(p) + H_constraints.dot(p) + + return LinearOperator((self.n, self.n), matvec) + + +def update_state_sqp(state, x, last_iteration_failed, objective, prepared_constraints, + start_time, tr_radius, constr_penalty, cg_info): + state.nit += 1 + state.nfev = objective.nfev + state.njev = objective.ngev + state.nhev = objective.nhev + state.constr_nfev = [c.fun.nfev if isinstance(c.fun, VectorFunction) else 0 + for c in prepared_constraints] + state.constr_njev = [c.fun.njev if isinstance(c.fun, VectorFunction) else 0 + for c in prepared_constraints] + state.constr_nhev = [c.fun.nhev if isinstance(c.fun, VectorFunction) else 0 + for c in prepared_constraints] + + if not last_iteration_failed: + state.x = x + state.fun = objective.f + state.grad = objective.g + state.v = [c.fun.v for c in prepared_constraints] + state.constr = [c.fun.f for c in prepared_constraints] + state.jac = [c.fun.J for c in prepared_constraints] + # Compute Lagrangian Gradient + state.lagrangian_grad = np.copy(state.grad) + for c in prepared_constraints: + state.lagrangian_grad += c.fun.J.T.dot(c.fun.v) + state.optimality = np.linalg.norm(state.lagrangian_grad, np.inf) + # Compute maximum constraint violation + state.constr_violation = 0 + for i in range(len(prepared_constraints)): + lb, ub = prepared_constraints[i].bounds + c = state.constr[i] + state.constr_violation = np.max([state.constr_violation, + np.max(lb - c), + np.max(c - ub)]) + + state.execution_time = time.time() - start_time + state.tr_radius = tr_radius + state.constr_penalty = constr_penalty + state.cg_niter += cg_info["niter"] + state.cg_stop_cond = cg_info["stop_cond"] + + 
return state + + +def update_state_ip(state, x, last_iteration_failed, objective, + prepared_constraints, start_time, + tr_radius, constr_penalty, cg_info, + barrier_parameter, barrier_tolerance): + state = update_state_sqp(state, x, last_iteration_failed, objective, + prepared_constraints, start_time, tr_radius, + constr_penalty, cg_info) + state.barrier_parameter = barrier_parameter + state.barrier_tolerance = barrier_tolerance + return state + + +def _minimize_trustregion_constr(fun, x0, args, grad, + hess, hessp, bounds, constraints, + xtol=1e-8, gtol=1e-8, + barrier_tol=1e-8, + sparse_jacobian=None, + callback=None, maxiter=1000, + verbose=0, finite_diff_rel_step=None, + initial_constr_penalty=1.0, initial_tr_radius=1.0, + initial_barrier_parameter=0.1, + initial_barrier_tolerance=0.1, + factorization_method=None, + disp=False): + """Minimize a scalar function subject to constraints. + + Parameters + ---------- + gtol : float, optional + Tolerance for termination by the norm of the Lagrangian gradient. + The algorithm will terminate when both the infinity norm (i.e., max + abs value) of the Lagrangian gradient and the constraint violation + are smaller than ``gtol``. Default is 1e-8. + xtol : float, optional + Tolerance for termination by the change of the independent variable. + The algorithm will terminate when ``tr_radius < xtol``, where + ``tr_radius`` is the radius of the trust region used in the algorithm. + Default is 1e-8. + barrier_tol : float, optional + Threshold on the barrier parameter for the algorithm termination. + When inequality constraints are present, the algorithm will terminate + only when the barrier parameter is less than `barrier_tol`. + Default is 1e-8. + sparse_jacobian : {bool, None}, optional + Determines how to represent Jacobians of the constraints. If bool, + then Jacobians of all the constraints will be converted to the + corresponding format. 
If None (default), then Jacobians won't be + converted, but the algorithm can proceed only if they all have the + same format. + initial_tr_radius: float, optional + Initial trust radius. The trust radius gives the maximum distance + between solution points in consecutive iterations. It reflects the + trust the algorithm puts in the local approximation of the optimization + problem. For an accurate local approximation the trust-region should be + large and for an approximation valid only close to the current point it + should be a small one. The trust radius is automatically updated throughout + the optimization process, with ``initial_tr_radius`` being its initial value. + Default is 1 (recommended in [1]_, p. 19). + initial_constr_penalty : float, optional + Initial constraints penalty parameter. The penalty parameter is used for + balancing the requirements of decreasing the objective function + and satisfying the constraints. It is used for defining the merit function: + ``merit_function(x) = fun(x) + constr_penalty * constr_norm_l2(x)``, + where ``constr_norm_l2(x)`` is the l2 norm of a vector containing all + the constraints. The merit function is used for accepting or rejecting + trial points and ``constr_penalty`` weights the two conflicting goals + of reducing objective function and constraints. The penalty is automatically + updated throughout the optimization process, with + ``initial_constr_penalty`` being its initial value. Default is 1 + (recommended in [1]_, p 19). + initial_barrier_parameter, initial_barrier_tolerance: float, optional + Initial barrier parameter and initial tolerance for the barrier subproblem. + Both are used only when inequality constraints are present. 
For dealing with + optimization problems ``min_x f(x)`` subject to inequality constraints + ``c(x) <= 0`` the algorithm introduces slack variables, solving the problem + ``min_(x,s) f(x) + barrier_parameter*sum(ln(s))`` subject to the equality + constraints ``c(x) + s = 0`` instead of the original problem. This subproblem + is solved for decreasing values of ``barrier_parameter`` and with decreasing + tolerances for the termination, starting with ``initial_barrier_parameter`` + for the barrier parameter and ``initial_barrier_tolerance`` for the + barrier tolerance. Default is 0.1 for both values (recommended in [1]_ p. 19). + Also note that ``barrier_parameter`` and ``barrier_tolerance`` are updated + with the same prefactor. + factorization_method : string or None, optional + Method to factorize the Jacobian of the constraints. Use None (default) + for the auto selection or one of: + + - 'NormalEquation' (requires scikit-sparse) + - 'AugmentedSystem' + - 'QRFactorization' + - 'SVDFactorization' + + The methods 'NormalEquation' and 'AugmentedSystem' can be used only + with sparse constraints. The projections required by the algorithm + will be computed using, respectively, the normal equation and the + augmented system approaches explained in [1]_. 'NormalEquation' + computes the Cholesky factorization of ``A A.T`` and 'AugmentedSystem' + performs the LU factorization of an augmented system. They usually + provide similar results. 'AugmentedSystem' is used by default for + sparse matrices. + + The methods 'QRFactorization' and 'SVDFactorization' can be used + only with dense constraints. They compute the required projections + using, respectively, QR and SVD factorizations. The 'SVDFactorization' + method can cope with Jacobian matrices with deficient row rank and will + be used whenever other factorization methods fail (which may imply the + conversion of sparse matrices to a dense format when required). + By default, 'QRFactorization' is used for dense matrices. 
+ finite_diff_rel_step : None or array_like, optional + Relative step size for the finite difference approximation. + maxiter : int, optional + Maximum number of algorithm iterations. Default is 1000. + verbose : {0, 1, 2}, optional + Level of algorithm's verbosity: + + * 0 (default) : work silently. + * 1 : display a termination report. + * 2 : display progress during iterations. + * 3 : display progress during iterations (more complete report). + + disp : bool, optional + If True (default), then `verbose` will be set to 1 if it was 0. + + Returns + ------- + `OptimizeResult` with the fields documented below. Note the following: + + 1. All values corresponding to the constraints are ordered as they + were passed to the solver. And values corresponding to `bounds` + constraints are put *after* other constraints. + 2. All numbers of function, Jacobian or Hessian evaluations correspond + to numbers of actual Python function calls. It means, for example, + that if a Jacobian is estimated by finite differences, then the + number of Jacobian evaluations will be zero and the number of + function evaluations will be incremented by all calls during the + finite difference estimation. + + x : ndarray, shape (n,) + Solution found. + optimality : float + Infinity norm of the Lagrangian gradient at the solution. + constr_violation : float + Maximum constraint violation at the solution. + fun : float + Objective function at the solution. + grad : ndarray, shape (n,) + Gradient of the objective function at the solution. + lagrangian_grad : ndarray, shape (n,) + Gradient of the Lagrangian function at the solution. + nit : int + Total number of iterations. + nfev : integer + Number of the objective function evaluations. + njev : integer + Number of the objective function gradient evaluations. + nhev : integer + Number of the objective function Hessian evaluations. + cg_niter : int + Total number of the conjugate gradient method iterations. 
+ method : {'equality_constrained_sqp', 'tr_interior_point'} + Optimization method used. + constr : list of ndarray + List of constraint values at the solution. + jac : list of {ndarray, sparse matrix} + List of the Jacobian matrices of the constraints at the solution. + v : list of ndarray + List of the Lagrange multipliers for the constraints at the solution. + For an inequality constraint a positive multiplier means that the upper + bound is active, a negative multiplier means that the lower bound is + active and if a multiplier is zero it means the constraint is not + active. + constr_nfev : list of int + Number of constraint evaluations for each of the constraints. + constr_njev : list of int + Number of Jacobian matrix evaluations for each of the constraints. + constr_nhev : list of int + Number of Hessian evaluations for each of the constraints. + tr_radius : float + Radius of the trust region at the last iteration. + constr_penalty : float + Penalty parameter at the last iteration, see `initial_constr_penalty`. + barrier_tolerance : float + Tolerance for the barrier subproblem at the last iteration. + Only for problems with inequality constraints. + barrier_parameter : float + Barrier parameter at the last iteration. Only for problems + with inequality constraints. + execution_time : float + Total execution time. + message : str + Termination message. + status : {0, 1, 2, 3} + Termination status: + + * 0 : The maximum number of function evaluations is exceeded. + * 1 : `gtol` termination condition is satisfied. + * 2 : `xtol` termination condition is satisfied. + * 3 : `callback` function requested termination. + + cg_stop_cond : int + Reason for CG subproblem termination at the last iteration: + + * 0 : CG subproblem not evaluated. + * 1 : Iteration limit was reached. + * 2 : Reached the trust-region boundary. + * 3 : Negative curvature detected. + * 4 : Tolerance was satisfied. + + References + ---------- + .. [1] Conn, A. R., Gould, N. I., & Toint, P. L. 
+ Trust region methods. 2000. Siam. pp. 19. + """ + x0 = np.atleast_1d(x0).astype(float) + n_vars = np.size(x0) + if hess is None: + if callable(hessp): + hess = HessianLinearOperator(hessp, n_vars) + else: + hess = BFGS() + if disp and verbose == 0: + verbose = 1 + + if bounds is not None: + modified_lb = np.nextafter(bounds.lb, -np.inf, where=bounds.lb > -np.inf) + modified_ub = np.nextafter(bounds.ub, np.inf, where=bounds.ub < np.inf) + modified_lb = np.where(np.isfinite(bounds.lb), modified_lb, bounds.lb) + modified_ub = np.where(np.isfinite(bounds.ub), modified_ub, bounds.ub) + bounds = Bounds(modified_lb, modified_ub, keep_feasible=bounds.keep_feasible) + finite_diff_bounds = strict_bounds(bounds.lb, bounds.ub, + bounds.keep_feasible, n_vars) + else: + finite_diff_bounds = (-np.inf, np.inf) + + # Define Objective Function + objective = ScalarFunction(fun, x0, args, grad, hess, + finite_diff_rel_step, finite_diff_bounds) + + # Put constraints in list format when needed. + if isinstance(constraints, (NonlinearConstraint, LinearConstraint)): + constraints = [constraints] + + # Prepare constraints. + prepared_constraints = [ + PreparedConstraint(c, x0, sparse_jacobian, finite_diff_bounds) + for c in constraints] + + # Check that all constraints are either sparse or dense. + n_sparse = sum(c.fun.sparse_jacobian for c in prepared_constraints) + if 0 < n_sparse < len(prepared_constraints): + raise ValueError("All constraints must have the same kind of the " + "Jacobian --- either all sparse or all dense. " + "You can set the sparsity globally by setting " + "`sparse_jacobian` to either True of False.") + if prepared_constraints: + sparse_jacobian = n_sparse > 0 + + if bounds is not None: + if sparse_jacobian is None: + sparse_jacobian = True + prepared_constraints.append(PreparedConstraint(bounds, x0, + sparse_jacobian)) + + # Concatenate initial constraints to the canonical form. 
+ c_eq0, c_ineq0, J_eq0, J_ineq0 = initial_constraints_as_canonical( + n_vars, prepared_constraints, sparse_jacobian) + + # Prepare all canonical constraints and concatenate it into one. + canonical_all = [CanonicalConstraint.from_PreparedConstraint(c) + for c in prepared_constraints] + + if len(canonical_all) == 0: + canonical = CanonicalConstraint.empty(n_vars) + elif len(canonical_all) == 1: + canonical = canonical_all[0] + else: + canonical = CanonicalConstraint.concatenate(canonical_all, + sparse_jacobian) + + # Generate the Hessian of the Lagrangian. + lagrangian_hess = LagrangianHessian(n_vars, objective.hess, canonical.hess) + + # Choose appropriate method + if canonical.n_ineq == 0: + method = 'equality_constrained_sqp' + else: + method = 'tr_interior_point' + + # Construct OptimizeResult + state = OptimizeResult( + nit=0, nfev=0, njev=0, nhev=0, + cg_niter=0, cg_stop_cond=0, + fun=objective.f, grad=objective.g, + lagrangian_grad=np.copy(objective.g), + constr=[c.fun.f for c in prepared_constraints], + jac=[c.fun.J for c in prepared_constraints], + constr_nfev=[0 for c in prepared_constraints], + constr_njev=[0 for c in prepared_constraints], + constr_nhev=[0 for c in prepared_constraints], + v=[c.fun.v for c in prepared_constraints], + method=method) + + # Start counting + start_time = time.time() + + # Define stop criteria + if method == 'equality_constrained_sqp': + def stop_criteria(state, x, last_iteration_failed, + optimality, constr_violation, + tr_radius, constr_penalty, cg_info): + state = update_state_sqp(state, x, last_iteration_failed, + objective, prepared_constraints, + start_time, tr_radius, constr_penalty, + cg_info) + if verbose == 2: + BasicReport.print_iteration(state.nit, + state.nfev, + state.cg_niter, + state.fun, + state.tr_radius, + state.optimality, + state.constr_violation) + elif verbose > 2: + SQPReport.print_iteration(state.nit, + state.nfev, + state.cg_niter, + state.fun, + state.tr_radius, + state.optimality, + 
state.constr_violation, + state.constr_penalty, + state.cg_stop_cond) + state.status = None + state.niter = state.nit # Alias for callback (backward-compatibility) + if callback is not None: + callback_stop = False + try: + callback_stop = callback(state) + except StopIteration: + callback_stop = True + if callback_stop: + state.status = 3 + return True + if state.optimality < gtol and state.constr_violation < gtol: + state.status = 1 + elif state.tr_radius < xtol: + state.status = 2 + elif state.nit >= maxiter: + state.status = 0 + return state.status in (0, 1, 2, 3) + elif method == 'tr_interior_point': + def stop_criteria(state, x, last_iteration_failed, tr_radius, + constr_penalty, cg_info, barrier_parameter, + barrier_tolerance): + state = update_state_ip(state, x, last_iteration_failed, + objective, prepared_constraints, + start_time, tr_radius, constr_penalty, + cg_info, barrier_parameter, barrier_tolerance) + if verbose == 2: + BasicReport.print_iteration(state.nit, + state.nfev, + state.cg_niter, + state.fun, + state.tr_radius, + state.optimality, + state.constr_violation) + elif verbose > 2: + IPReport.print_iteration(state.nit, + state.nfev, + state.cg_niter, + state.fun, + state.tr_radius, + state.optimality, + state.constr_violation, + state.constr_penalty, + state.barrier_parameter, + state.cg_stop_cond) + state.status = None + state.niter = state.nit # Alias for callback (backward compatibility) + if callback is not None: + callback_stop = False + try: + callback_stop = callback(state) + except StopIteration: + callback_stop = True + if callback_stop: + state.status = 3 + return True + if state.optimality < gtol and state.constr_violation < gtol: + state.status = 1 + elif (state.tr_radius < xtol + and state.barrier_parameter < barrier_tol): + state.status = 2 + elif state.nit >= maxiter: + state.status = 0 + return state.status in (0, 1, 2, 3) + + if verbose == 2: + BasicReport.print_header() + elif verbose > 2: + if method == 
'equality_constrained_sqp': + SQPReport.print_header() + elif method == 'tr_interior_point': + IPReport.print_header() + + # Call inferior function to do the optimization + if method == 'equality_constrained_sqp': + def fun_and_constr(x): + f = objective.fun(x) + c_eq, _ = canonical.fun(x) + return f, c_eq + + def grad_and_jac(x): + g = objective.grad(x) + J_eq, _ = canonical.jac(x) + return g, J_eq + + _, result = equality_constrained_sqp( + fun_and_constr, grad_and_jac, lagrangian_hess, + x0, objective.f, objective.g, + c_eq0, J_eq0, + stop_criteria, state, + initial_constr_penalty, initial_tr_radius, + factorization_method) + + elif method == 'tr_interior_point': + _, result = tr_interior_point( + objective.fun, objective.grad, lagrangian_hess, + n_vars, canonical.n_ineq, canonical.n_eq, + canonical.fun, canonical.jac, + x0, objective.f, objective.g, + c_ineq0, J_ineq0, c_eq0, J_eq0, + stop_criteria, + canonical.keep_feasible, + xtol, state, initial_barrier_parameter, + initial_barrier_tolerance, + initial_constr_penalty, initial_tr_radius, + factorization_method) + + # Status 3 occurs when the callback function requests termination, + # this is assumed to not be a success. + result.success = True if result.status in (1, 2) else False + result.message = TERMINATION_MESSAGES[result.status] + + # Alias (for backward compatibility with 1.1.0) + result.niter = result.nit + + if verbose == 2: + BasicReport.print_footer() + elif verbose > 2: + if method == 'equality_constrained_sqp': + SQPReport.print_footer() + elif method == 'tr_interior_point': + IPReport.print_footer() + if verbose >= 1: + print(result.message) + print("Number of iterations: {}, function evaluations: {}, " + "CG iterations: {}, optimality: {:.2e}, " + "constraint violation: {:.2e}, execution time: {:4.2} s." 
+ .format(result.nit, result.nfev, result.cg_niter, + result.optimality, result.constr_violation, + result.execution_time)) + return result diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/projections.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/projections.py new file mode 100644 index 0000000000000000000000000000000000000000..a07b836bdbad688a265ae34ce91a361fd5050eb1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/projections.py @@ -0,0 +1,407 @@ +"""Basic linear factorizations needed by the solver.""" + +from scipy.sparse import (bmat, csc_matrix, eye, issparse) +from scipy.sparse.linalg import LinearOperator +import scipy.linalg +import scipy.sparse.linalg +try: + from sksparse.cholmod import cholesky_AAt + sksparse_available = True +except ImportError: + import warnings + sksparse_available = False +import numpy as np +from warnings import warn + +__all__ = [ + 'orthogonality', + 'projections', +] + + +def orthogonality(A, g): + """Measure orthogonality between a vector and the null space of a matrix. + + Compute a measure of orthogonality between the null space + of the (possibly sparse) matrix ``A`` and a given vector ``g``. + + The formula is a simplified (and cheaper) version of formula (3.13) + from [1]_. + ``orth = norm(A g, ord=2)/(norm(A, ord='fro')*norm(g, ord=2))``. + + References + ---------- + .. [1] Gould, Nicholas IM, Mary E. Hribar, and Jorge Nocedal. + "On the solution of equality constrained quadratic + programming problems arising in optimization." + SIAM Journal on Scientific Computing 23.4 (2001): 1376-1395. 
+ """ + # Compute vector norms + norm_g = np.linalg.norm(g) + # Compute Froebnius norm of the matrix A + if issparse(A): + norm_A = scipy.sparse.linalg.norm(A, ord='fro') + else: + norm_A = np.linalg.norm(A, ord='fro') + + # Check if norms are zero + if norm_g == 0 or norm_A == 0: + return 0 + + norm_A_g = np.linalg.norm(A.dot(g)) + # Orthogonality measure + orth = norm_A_g / (norm_A*norm_g) + return orth + + +def normal_equation_projections(A, m, n, orth_tol, max_refin, tol): + """Return linear operators for matrix A using ``NormalEquation`` approach. + """ + # Cholesky factorization + factor = cholesky_AAt(A) + + # z = x - A.T inv(A A.T) A x + def null_space(x): + v = factor(A.dot(x)) + z = x - A.T.dot(v) + + # Iterative refinement to improve roundoff + # errors described in [2]_, algorithm 5.1. + k = 0 + while orthogonality(A, z) > orth_tol: + if k >= max_refin: + break + # z_next = z - A.T inv(A A.T) A z + v = factor(A.dot(z)) + z = z - A.T.dot(v) + k += 1 + + return z + + # z = inv(A A.T) A x + def least_squares(x): + return factor(A.dot(x)) + + # z = A.T inv(A A.T) x + def row_space(x): + return A.T.dot(factor(x)) + + return null_space, least_squares, row_space + + +def augmented_system_projections(A, m, n, orth_tol, max_refin, tol): + """Return linear operators for matrix A - ``AugmentedSystem``.""" + # Form augmented system + K = csc_matrix(bmat([[eye(n), A.T], [A, None]])) + # LU factorization + # TODO: Use a symmetric indefinite factorization + # to solve the system twice as fast (because + # of the symmetry). + try: + solve = scipy.sparse.linalg.factorized(K) + except RuntimeError: + warn("Singular Jacobian matrix. 
Using dense SVD decomposition to " + "perform the factorizations.", + stacklevel=3) + return svd_factorization_projections(A.toarray(), + m, n, orth_tol, + max_refin, tol) + + # z = x - A.T inv(A A.T) A x + # is computed solving the extended system: + # [I A.T] * [ z ] = [x] + # [A O ] [aux] [0] + def null_space(x): + # v = [x] + # [0] + v = np.hstack([x, np.zeros(m)]) + # lu_sol = [ z ] + # [aux] + lu_sol = solve(v) + z = lu_sol[:n] + + # Iterative refinement to improve roundoff + # errors described in [2]_, algorithm 5.2. + k = 0 + while orthogonality(A, z) > orth_tol: + if k >= max_refin: + break + # new_v = [x] - [I A.T] * [ z ] + # [0] [A O ] [aux] + new_v = v - K.dot(lu_sol) + # [I A.T] * [delta z ] = new_v + # [A O ] [delta aux] + lu_update = solve(new_v) + # [ z ] += [delta z ] + # [aux] [delta aux] + lu_sol += lu_update + z = lu_sol[:n] + k += 1 + + # return z = x - A.T inv(A A.T) A x + return z + + # z = inv(A A.T) A x + # is computed solving the extended system: + # [I A.T] * [aux] = [x] + # [A O ] [ z ] [0] + def least_squares(x): + # v = [x] + # [0] + v = np.hstack([x, np.zeros(m)]) + # lu_sol = [aux] + # [ z ] + lu_sol = solve(v) + # return z = inv(A A.T) A x + return lu_sol[n:m+n] + + # z = A.T inv(A A.T) x + # is computed solving the extended system: + # [I A.T] * [ z ] = [0] + # [A O ] [aux] [x] + def row_space(x): + # v = [0] + # [x] + v = np.hstack([np.zeros(n), x]) + # lu_sol = [ z ] + # [aux] + lu_sol = solve(v) + # return z = A.T inv(A A.T) x + return lu_sol[:n] + + return null_space, least_squares, row_space + + +def qr_factorization_projections(A, m, n, orth_tol, max_refin, tol): + """Return linear operators for matrix A using ``QRFactorization`` approach. + """ + # QRFactorization + Q, R, P = scipy.linalg.qr(A.T, pivoting=True, mode='economic') + + if np.linalg.norm(R[-1, :], np.inf) < tol: + warn('Singular Jacobian matrix. 
Using SVD decomposition to ' + + 'perform the factorizations.', + stacklevel=3) + return svd_factorization_projections(A, m, n, + orth_tol, + max_refin, + tol) + + # z = x - A.T inv(A A.T) A x + def null_space(x): + # v = P inv(R) Q.T x + aux1 = Q.T.dot(x) + aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False) + v = np.zeros(m) + v[P] = aux2 + z = x - A.T.dot(v) + + # Iterative refinement to improve roundoff + # errors described in [2]_, algorithm 5.1. + k = 0 + while orthogonality(A, z) > orth_tol: + if k >= max_refin: + break + # v = P inv(R) Q.T x + aux1 = Q.T.dot(z) + aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False) + v[P] = aux2 + # z_next = z - A.T v + z = z - A.T.dot(v) + k += 1 + + return z + + # z = inv(A A.T) A x + def least_squares(x): + # z = P inv(R) Q.T x + aux1 = Q.T.dot(x) + aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False) + z = np.zeros(m) + z[P] = aux2 + return z + + # z = A.T inv(A A.T) x + def row_space(x): + # z = Q inv(R.T) P.T x + aux1 = x[P] + aux2 = scipy.linalg.solve_triangular(R, aux1, + lower=False, + trans='T') + z = Q.dot(aux2) + return z + + return null_space, least_squares, row_space + + +def svd_factorization_projections(A, m, n, orth_tol, max_refin, tol): + """Return linear operators for matrix A using ``SVDFactorization`` approach. + """ + # SVD Factorization + U, s, Vt = scipy.linalg.svd(A, full_matrices=False) + + # Remove dimensions related with very small singular values + U = U[:, s > tol] + Vt = Vt[s > tol, :] + s = s[s > tol] + + # z = x - A.T inv(A A.T) A x + def null_space(x): + # v = U 1/s V.T x = inv(A A.T) A x + aux1 = Vt.dot(x) + aux2 = 1/s*aux1 + v = U.dot(aux2) + z = x - A.T.dot(v) + + # Iterative refinement to improve roundoff + # errors described in [2]_, algorithm 5.1. 
+ k = 0 + while orthogonality(A, z) > orth_tol: + if k >= max_refin: + break + # v = U 1/s V.T x = inv(A A.T) A x + aux1 = Vt.dot(z) + aux2 = 1/s*aux1 + v = U.dot(aux2) + # z_next = z - A.T v + z = z - A.T.dot(v) + k += 1 + + return z + + # z = inv(A A.T) A x + def least_squares(x): + # z = U 1/s V.T x = inv(A A.T) A x + aux1 = Vt.dot(x) + aux2 = 1/s*aux1 + z = U.dot(aux2) + return z + + # z = A.T inv(A A.T) x + def row_space(x): + # z = V 1/s U.T x + aux1 = U.T.dot(x) + aux2 = 1/s*aux1 + z = Vt.T.dot(aux2) + return z + + return null_space, least_squares, row_space + + +def projections(A, method=None, orth_tol=1e-12, max_refin=3, tol=1e-15): + """Return three linear operators related with a given matrix A. + + Parameters + ---------- + A : sparse matrix (or ndarray), shape (m, n) + Matrix ``A`` used in the projection. + method : string, optional + Method used for compute the given linear + operators. Should be one of: + + - 'NormalEquation': The operators + will be computed using the + so-called normal equation approach + explained in [1]_. In order to do + so the Cholesky factorization of + ``(A A.T)`` is computed. Exclusive + for sparse matrices. + - 'AugmentedSystem': The operators + will be computed using the + so-called augmented system approach + explained in [1]_. Exclusive + for sparse matrices. + - 'QRFactorization': Compute projections + using QR factorization. Exclusive for + dense matrices. + - 'SVDFactorization': Compute projections + using SVD factorization. Exclusive for + dense matrices. + + orth_tol : float, optional + Tolerance for iterative refinements. + max_refin : int, optional + Maximum number of iterative refinements. + tol : float, optional + Tolerance for singular values. + + Returns + ------- + Z : LinearOperator, shape (n, n) + Null-space operator. For a given vector ``x``, + the null space operator is equivalent to apply + a projection matrix ``P = I - A.T inv(A A.T) A`` + to the vector. 
It can be shown that this is + equivalent to project ``x`` into the null space + of A. + LS : LinearOperator, shape (m, n) + Least-squares operator. For a given vector ``x``, + the least-squares operator is equivalent to apply a + pseudoinverse matrix ``pinv(A.T) = inv(A A.T) A`` + to the vector. It can be shown that this vector + ``pinv(A.T) x`` is the least_square solution to + ``A.T y = x``. + Y : LinearOperator, shape (n, m) + Row-space operator. For a given vector ``x``, + the row-space operator is equivalent to apply a + projection matrix ``Q = A.T inv(A A.T)`` + to the vector. It can be shown that this + vector ``y = Q x`` the minimum norm solution + of ``A y = x``. + + Notes + ----- + Uses iterative refinements described in [1] + during the computation of ``Z`` in order to + cope with the possibility of large roundoff errors. + + References + ---------- + .. [1] Gould, Nicholas IM, Mary E. Hribar, and Jorge Nocedal. + "On the solution of equality constrained quadratic + programming problems arising in optimization." + SIAM Journal on Scientific Computing 23.4 (2001): 1376-1395. + """ + m, n = np.shape(A) + + # The factorization of an empty matrix + # only works for the sparse representation. + if m*n == 0: + A = csc_matrix(A) + + # Check Argument + if issparse(A): + if method is None: + method = "AugmentedSystem" + if method not in ("NormalEquation", "AugmentedSystem"): + raise ValueError("Method not allowed for sparse matrix.") + if method == "NormalEquation" and not sksparse_available: + warnings.warn("Only accepts 'NormalEquation' option when " + "scikit-sparse is available. 
Using " + "'AugmentedSystem' option instead.", + ImportWarning, stacklevel=3) + method = 'AugmentedSystem' + else: + if method is None: + method = "QRFactorization" + if method not in ("QRFactorization", "SVDFactorization"): + raise ValueError("Method not allowed for dense array.") + + if method == 'NormalEquation': + null_space, least_squares, row_space \ + = normal_equation_projections(A, m, n, orth_tol, max_refin, tol) + elif method == 'AugmentedSystem': + null_space, least_squares, row_space \ + = augmented_system_projections(A, m, n, orth_tol, max_refin, tol) + elif method == "QRFactorization": + null_space, least_squares, row_space \ + = qr_factorization_projections(A, m, n, orth_tol, max_refin, tol) + elif method == "SVDFactorization": + null_space, least_squares, row_space \ + = svd_factorization_projections(A, m, n, orth_tol, max_refin, tol) + + Z = LinearOperator((n, n), null_space) + LS = LinearOperator((m, n), least_squares) + Y = LinearOperator((n, m), row_space) + + return Z, LS, Y diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/qp_subproblem.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/qp_subproblem.py new file mode 100644 index 0000000000000000000000000000000000000000..a039a7738c283f90f30fd7c4583bf9e1a8f559d5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/qp_subproblem.py @@ -0,0 +1,637 @@ +"""Equality-constrained quadratic programming solvers.""" + +from scipy.sparse import (linalg, bmat, csc_matrix) +from math import copysign +import numpy as np +from numpy.linalg import norm + +__all__ = [ + 'eqp_kktfact', + 'sphere_intersections', + 'box_intersections', + 'box_sphere_intersections', + 'inside_box_boundaries', + 'modified_dogleg', + 'projected_cg' +] + + +# For comparison with the projected CG +def eqp_kktfact(H, c, A, b): + """Solve equality-constrained quadratic programming (EQP) problem. 
+ + Solve ``min 1/2 x.T H x + x.t c`` subject to ``A x + b = 0`` + using direct factorization of the KKT system. + + Parameters + ---------- + H : sparse matrix, shape (n, n) + Hessian matrix of the EQP problem. + c : array_like, shape (n,) + Gradient of the quadratic objective function. + A : sparse matrix + Jacobian matrix of the EQP problem. + b : array_like, shape (m,) + Right-hand side of the constraint equation. + + Returns + ------- + x : array_like, shape (n,) + Solution of the KKT problem. + lagrange_multipliers : ndarray, shape (m,) + Lagrange multipliers of the KKT problem. + """ + n, = np.shape(c) # Number of parameters + m, = np.shape(b) # Number of constraints + + # Karush-Kuhn-Tucker matrix of coefficients. + # Defined as in Nocedal/Wright "Numerical + # Optimization" p.452 in Eq. (16.4). + kkt_matrix = csc_matrix(bmat([[H, A.T], [A, None]])) + # Vector of coefficients. + kkt_vec = np.hstack([-c, -b]) + + # TODO: Use a symmetric indefinite factorization + # to solve the system twice as fast (because + # of the symmetry). + lu = linalg.splu(kkt_matrix) + kkt_sol = lu.solve(kkt_vec) + x = kkt_sol[:n] + lagrange_multipliers = -kkt_sol[n:n+m] + + return x, lagrange_multipliers + + +def sphere_intersections(z, d, trust_radius, + entire_line=False): + """Find the intersection between segment (or line) and spherical constraints. + + Find the intersection between the segment (or line) defined by the + parametric equation ``x(t) = z + t*d`` and the ball + ``||x|| <= trust_radius``. + + Parameters + ---------- + z : array_like, shape (n,) + Initial point. + d : array_like, shape (n,) + Direction. + trust_radius : float + Ball radius. + entire_line : bool, optional + When ``True``, the function returns the intersection between the line + ``x(t) = z + t*d`` (``t`` can assume any value) and the ball + ``||x|| <= trust_radius``. When ``False``, the function returns the intersection + between the segment ``x(t) = z + t*d``, ``0 <= t <= 1``, and the ball. 
+ + Returns + ------- + ta, tb : float + The line/segment ``x(t) = z + t*d`` is inside the ball for + for ``ta <= t <= tb``. + intersect : bool + When ``True``, there is a intersection between the line/segment + and the sphere. On the other hand, when ``False``, there is no + intersection. + """ + # Special case when d=0 + if norm(d) == 0: + return 0, 0, False + # Check for inf trust_radius + if np.isinf(trust_radius): + if entire_line: + ta = -np.inf + tb = np.inf + else: + ta = 0 + tb = 1 + intersect = True + return ta, tb, intersect + + a = np.dot(d, d) + b = 2 * np.dot(z, d) + c = np.dot(z, z) - trust_radius**2 + discriminant = b*b - 4*a*c + if discriminant < 0: + intersect = False + return 0, 0, intersect + sqrt_discriminant = np.sqrt(discriminant) + + # The following calculation is mathematically + # equivalent to: + # ta = (-b - sqrt_discriminant) / (2*a) + # tb = (-b + sqrt_discriminant) / (2*a) + # but produce smaller round off errors. + # Look at Matrix Computation p.97 + # for a better justification. + aux = b + copysign(sqrt_discriminant, b) + ta = -aux / (2*a) + tb = -2*c / aux + ta, tb = sorted([ta, tb]) + + if entire_line: + intersect = True + else: + # Checks to see if intersection happens + # within vectors length. + if tb < 0 or ta > 1: + intersect = False + ta = 0 + tb = 0 + else: + intersect = True + # Restrict intersection interval + # between 0 and 1. + ta = max(0, ta) + tb = min(1, tb) + + return ta, tb, intersect + + +def box_intersections(z, d, lb, ub, + entire_line=False): + """Find the intersection between segment (or line) and box constraints. + + Find the intersection between the segment (or line) defined by the + parametric equation ``x(t) = z + t*d`` and the rectangular box + ``lb <= x <= ub``. + + Parameters + ---------- + z : array_like, shape (n,) + Initial point. + d : array_like, shape (n,) + Direction. + lb : array_like, shape (n,) + Lower bounds to each one of the components of ``x``. Used + to delimit the rectangular box. 
+ ub : array_like, shape (n, ) + Upper bounds to each one of the components of ``x``. Used + to delimit the rectangular box. + entire_line : bool, optional + When ``True``, the function returns the intersection between the line + ``x(t) = z + t*d`` (``t`` can assume any value) and the rectangular + box. When ``False``, the function returns the intersection between the segment + ``x(t) = z + t*d``, ``0 <= t <= 1``, and the rectangular box. + + Returns + ------- + ta, tb : float + The line/segment ``x(t) = z + t*d`` is inside the box for + for ``ta <= t <= tb``. + intersect : bool + When ``True``, there is a intersection between the line (or segment) + and the rectangular box. On the other hand, when ``False``, there is no + intersection. + """ + # Make sure it is a numpy array + z = np.asarray(z) + d = np.asarray(d) + lb = np.asarray(lb) + ub = np.asarray(ub) + # Special case when d=0 + if norm(d) == 0: + return 0, 0, False + + # Get values for which d==0 + zero_d = (d == 0) + # If the boundaries are not satisfied for some coordinate + # for which "d" is zero, there is no box-line intersection. + if (z[zero_d] < lb[zero_d]).any() or (z[zero_d] > ub[zero_d]).any(): + intersect = False + return 0, 0, intersect + # Remove values for which d is zero + not_zero_d = np.logical_not(zero_d) + z = z[not_zero_d] + d = d[not_zero_d] + lb = lb[not_zero_d] + ub = ub[not_zero_d] + + # Find a series of intervals (t_lb[i], t_ub[i]). + t_lb = (lb-z) / d + t_ub = (ub-z) / d + # Get the intersection of all those intervals. + ta = max(np.minimum(t_lb, t_ub)) + tb = min(np.maximum(t_lb, t_ub)) + + # Check if intersection is feasible + if ta <= tb: + intersect = True + else: + intersect = False + # Checks to see if intersection happens within vectors length. + if not entire_line: + if tb < 0 or ta > 1: + intersect = False + ta = 0 + tb = 0 + else: + # Restrict intersection interval between 0 and 1. 
+ ta = max(0, ta) + tb = min(1, tb) + + return ta, tb, intersect + + +def box_sphere_intersections(z, d, lb, ub, trust_radius, + entire_line=False, + extra_info=False): + """Find the intersection between segment (or line) and box/sphere constraints. + + Find the intersection between the segment (or line) defined by the + parametric equation ``x(t) = z + t*d``, the rectangular box + ``lb <= x <= ub`` and the ball ``||x|| <= trust_radius``. + + Parameters + ---------- + z : array_like, shape (n,) + Initial point. + d : array_like, shape (n,) + Direction. + lb : array_like, shape (n,) + Lower bounds to each one of the components of ``x``. Used + to delimit the rectangular box. + ub : array_like, shape (n, ) + Upper bounds to each one of the components of ``x``. Used + to delimit the rectangular box. + trust_radius : float + Ball radius. + entire_line : bool, optional + When ``True``, the function returns the intersection between the line + ``x(t) = z + t*d`` (``t`` can assume any value) and the constraints. + When ``False``, the function returns the intersection between the segment + ``x(t) = z + t*d``, ``0 <= t <= 1`` and the constraints. + extra_info : bool, optional + When ``True``, the function returns ``intersect_sphere`` and ``intersect_box``. + + Returns + ------- + ta, tb : float + The line/segment ``x(t) = z + t*d`` is inside the rectangular box and + inside the ball for ``ta <= t <= tb``. + intersect : bool + When ``True``, there is a intersection between the line (or segment) + and both constraints. On the other hand, when ``False``, there is no + intersection. + sphere_info : dict, optional + Dictionary ``{ta, tb, intersect}`` containing the interval ``[ta, tb]`` + for which the line intercepts the ball. And a boolean value indicating + whether the sphere is intersected by the line. + box_info : dict, optional + Dictionary ``{ta, tb, intersect}`` containing the interval ``[ta, tb]`` + for which the line intercepts the box. 
And a boolean value indicating + whether the box is intersected by the line. + """ + ta_b, tb_b, intersect_b = box_intersections(z, d, lb, ub, + entire_line) + ta_s, tb_s, intersect_s = sphere_intersections(z, d, + trust_radius, + entire_line) + ta = np.maximum(ta_b, ta_s) + tb = np.minimum(tb_b, tb_s) + if intersect_b and intersect_s and ta <= tb: + intersect = True + else: + intersect = False + + if extra_info: + sphere_info = {'ta': ta_s, 'tb': tb_s, 'intersect': intersect_s} + box_info = {'ta': ta_b, 'tb': tb_b, 'intersect': intersect_b} + return ta, tb, intersect, sphere_info, box_info + else: + return ta, tb, intersect + + +def inside_box_boundaries(x, lb, ub): + """Check if lb <= x <= ub.""" + return (lb <= x).all() and (x <= ub).all() + + +def reinforce_box_boundaries(x, lb, ub): + """Return clipped value of x""" + return np.minimum(np.maximum(x, lb), ub) + + +def modified_dogleg(A, Y, b, trust_radius, lb, ub): + """Approximately minimize ``1/2*|| A x + b ||^2`` inside trust-region. + + Approximately solve the problem of minimizing ``1/2*|| A x + b ||^2`` + subject to ``||x|| < Delta`` and ``lb <= x <= ub`` using a modification + of the classical dogleg approach. + + Parameters + ---------- + A : LinearOperator (or sparse matrix or ndarray), shape (m, n) + Matrix ``A`` in the minimization problem. It should have + dimension ``(m, n)`` such that ``m < n``. + Y : LinearOperator (or sparse matrix or ndarray), shape (n, m) + LinearOperator that apply the projection matrix + ``Q = A.T inv(A A.T)`` to the vector. The obtained vector + ``y = Q x`` being the minimum norm solution of ``A y = x``. + b : array_like, shape (m,) + Vector ``b``in the minimization problem. + trust_radius: float + Trust radius to be considered. Delimits a sphere boundary + to the problem. + lb : array_like, shape (n,) + Lower bounds to each one of the components of ``x``. + It is expected that ``lb <= 0``, otherwise the algorithm + may fail. 
If ``lb[i] = -Inf``, the lower + bound for the ith component is just ignored. + ub : array_like, shape (n, ) + Upper bounds to each one of the components of ``x``. + It is expected that ``ub >= 0``, otherwise the algorithm + may fail. If ``ub[i] = Inf``, the upper bound for the ith + component is just ignored. + + Returns + ------- + x : array_like, shape (n,) + Solution to the problem. + + Notes + ----- + Based on implementations described in pp. 885-886 from [1]_. + + References + ---------- + .. [1] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal. + "An interior point algorithm for large-scale nonlinear + programming." SIAM Journal on Optimization 9.4 (1999): 877-900. + """ + # Compute minimum norm minimizer of 1/2*|| A x + b ||^2. + newton_point = -Y.dot(b) + # Check for interior point + if inside_box_boundaries(newton_point, lb, ub) \ + and norm(newton_point) <= trust_radius: + x = newton_point + return x + + # Compute gradient vector ``g = A.T b`` + g = A.T.dot(b) + # Compute Cauchy point + # `cauchy_point = g.T g / (g.T A.T A g)``. + A_g = A.dot(g) + cauchy_point = -np.dot(g, g) / np.dot(A_g, A_g) * g + # Origin + origin_point = np.zeros_like(cauchy_point) + + # Check the segment between cauchy_point and newton_point + # for a possible solution. + z = cauchy_point + p = newton_point - cauchy_point + _, alpha, intersect = box_sphere_intersections(z, p, lb, ub, + trust_radius) + if intersect: + x1 = z + alpha*p + else: + # Check the segment between the origin and cauchy_point + # for a possible solution. + z = origin_point + p = cauchy_point + _, alpha, _ = box_sphere_intersections(z, p, lb, ub, + trust_radius) + x1 = z + alpha*p + + # Check the segment between origin and newton_point + # for a possible solution. + z = origin_point + p = newton_point + _, alpha, _ = box_sphere_intersections(z, p, lb, ub, + trust_radius) + x2 = z + alpha*p + + # Return the best solution among x1 and x2. 
+ if norm(A.dot(x1) + b) < norm(A.dot(x2) + b): + return x1 + else: + return x2 + + +def projected_cg(H, c, Z, Y, b, trust_radius=np.inf, + lb=None, ub=None, tol=None, + max_iter=None, max_infeasible_iter=None, + return_all=False): + """Solve EQP problem with projected CG method. + + Solve equality-constrained quadratic programming problem + ``min 1/2 x.T H x + x.t c`` subject to ``A x + b = 0`` and, + possibly, to trust region constraints ``||x|| < trust_radius`` + and box constraints ``lb <= x <= ub``. + + Parameters + ---------- + H : LinearOperator (or sparse matrix or ndarray), shape (n, n) + Operator for computing ``H v``. + c : array_like, shape (n,) + Gradient of the quadratic objective function. + Z : LinearOperator (or sparse matrix or ndarray), shape (n, n) + Operator for projecting ``x`` into the null space of A. + Y : LinearOperator, sparse matrix, ndarray, shape (n, m) + Operator that, for a given a vector ``b``, compute smallest + norm solution of ``A x + b = 0``. + b : array_like, shape (m,) + Right-hand side of the constraint equation. + trust_radius : float, optional + Trust radius to be considered. By default, uses ``trust_radius=inf``, + which means no trust radius at all. + lb : array_like, shape (n,), optional + Lower bounds to each one of the components of ``x``. + If ``lb[i] = -Inf`` the lower bound for the i-th + component is just ignored (default). + ub : array_like, shape (n, ), optional + Upper bounds to each one of the components of ``x``. + If ``ub[i] = Inf`` the upper bound for the i-th + component is just ignored (default). + tol : float, optional + Tolerance used to interrupt the algorithm. + max_iter : int, optional + Maximum algorithm iterations. Where ``max_inter <= n-m``. + By default, uses ``max_iter = n-m``. + max_infeasible_iter : int, optional + Maximum infeasible (regarding box constraints) iterations the + algorithm is allowed to take. + By default, uses ``max_infeasible_iter = n-m``. 
+ return_all : bool, optional + When ``true``, return the list of all vectors through the iterations. + + Returns + ------- + x : array_like, shape (n,) + Solution of the EQP problem. + info : Dict + Dictionary containing the following: + + - niter : Number of iterations. + - stop_cond : Reason for algorithm termination: + 1. Iteration limit was reached; + 2. Reached the trust-region boundary; + 3. Negative curvature detected; + 4. Tolerance was satisfied. + - allvecs : List containing all intermediary vectors (optional). + - hits_boundary : True if the proposed step is on the boundary + of the trust region. + + Notes + ----- + Implementation of Algorithm 6.2 on [1]_. + + In the absence of spherical and box constraints, for sufficient + iterations, the method returns a truly optimal result. + In the presence of those constraints, the value returned is only + a inexpensive approximation of the optimal value. + + References + ---------- + .. [1] Gould, Nicholas IM, Mary E. Hribar, and Jorge Nocedal. + "On the solution of equality constrained quadratic + programming problems arising in optimization." + SIAM Journal on Scientific Computing 23.4 (2001): 1376-1395. + """ + CLOSE_TO_ZERO = 1e-25 + + n, = np.shape(c) # Number of parameters + m, = np.shape(b) # Number of constraints + + # Initial Values + x = Y.dot(-b) + r = Z.dot(H.dot(x) + c) + g = Z.dot(r) + p = -g + + # Store ``x`` value + if return_all: + allvecs = [x] + # Values for the first iteration + H_p = H.dot(p) + rt_g = norm(g)**2 # g.T g = r.T Z g = r.T g (ref [1]_ p.1389) + + # If x > trust-region the problem does not have a solution. + tr_distance = trust_radius - norm(x) + if tr_distance < 0: + raise ValueError("Trust region problem does not have a solution.") + # If x == trust_radius, then x is the solution + # to the optimization problem, since x is the + # minimum norm solution to Ax=b. 
+ elif tr_distance < CLOSE_TO_ZERO: + info = {'niter': 0, 'stop_cond': 2, 'hits_boundary': True} + if return_all: + allvecs.append(x) + info['allvecs'] = allvecs + return x, info + + # Set default tolerance + if tol is None: + tol = max(min(0.01 * np.sqrt(rt_g), 0.1 * rt_g), CLOSE_TO_ZERO) + # Set default lower and upper bounds + if lb is None: + lb = np.full(n, -np.inf) + if ub is None: + ub = np.full(n, np.inf) + # Set maximum iterations + if max_iter is None: + max_iter = n-m + max_iter = min(max_iter, n-m) + # Set maximum infeasible iterations + if max_infeasible_iter is None: + max_infeasible_iter = n-m + + hits_boundary = False + stop_cond = 1 + counter = 0 + last_feasible_x = np.zeros_like(x) + k = 0 + for i in range(max_iter): + # Stop criteria - Tolerance : r.T g < tol + if rt_g < tol: + stop_cond = 4 + break + k += 1 + # Compute curvature + pt_H_p = H_p.dot(p) + # Stop criteria - Negative curvature + if pt_H_p <= 0: + if np.isinf(trust_radius): + raise ValueError("Negative curvature not allowed " + "for unrestricted problems.") + else: + # Find intersection with constraints + _, alpha, intersect = box_sphere_intersections( + x, p, lb, ub, trust_radius, entire_line=True) + # Update solution + if intersect: + x = x + alpha*p + # Reinforce variables are inside box constraints. + # This is only necessary because of roundoff errors. + x = reinforce_box_boundaries(x, lb, ub) + # Attribute information + stop_cond = 3 + hits_boundary = True + break + + # Get next step + alpha = rt_g / pt_H_p + x_next = x + alpha*p + + # Stop criteria - Hits boundary + if np.linalg.norm(x_next) >= trust_radius: + # Find intersection with box constraints + _, theta, intersect = box_sphere_intersections(x, alpha*p, lb, ub, + trust_radius) + # Update solution + if intersect: + x = x + theta*alpha*p + # Reinforce variables are inside box constraints. + # This is only necessary because of roundoff errors. 
+ x = reinforce_box_boundaries(x, lb, ub) + # Attribute information + stop_cond = 2 + hits_boundary = True + break + + # Check if ``x`` is inside the box and start counter if it is not. + if inside_box_boundaries(x_next, lb, ub): + counter = 0 + else: + counter += 1 + # Whenever outside box constraints keep looking for intersections. + if counter > 0: + _, theta, intersect = box_sphere_intersections(x, alpha*p, lb, ub, + trust_radius) + if intersect: + last_feasible_x = x + theta*alpha*p + # Reinforce variables are inside box constraints. + # This is only necessary because of roundoff errors. + last_feasible_x = reinforce_box_boundaries(last_feasible_x, + lb, ub) + counter = 0 + # Stop after too many infeasible (regarding box constraints) iteration. + if counter > max_infeasible_iter: + break + # Store ``x_next`` value + if return_all: + allvecs.append(x_next) + + # Update residual + r_next = r + alpha*H_p + # Project residual g+ = Z r+ + g_next = Z.dot(r_next) + # Compute conjugate direction step d + rt_g_next = norm(g_next)**2 # g.T g = r.T g (ref [1]_ p.1389) + beta = rt_g_next / rt_g + p = - g_next + beta*p + # Prepare for next iteration + x = x_next + g = g_next + r = g_next + rt_g = norm(g)**2 # g.T g = r.T Z g = r.T g (ref [1]_ p.1389) + H_p = H.dot(p) + + if not inside_box_boundaries(x, lb, ub): + x = last_feasible_x + hits_boundary = True + info = {'niter': k, 'stop_cond': stop_cond, + 'hits_boundary': hits_boundary} + if return_all: + info['allvecs'] = allvecs + return x, info diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/report.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/report.py new file mode 100644 index 0000000000000000000000000000000000000000..5c3b2fb4ef5c90da78ae3f181159140e87393dcf --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/report.py @@ -0,0 +1,51 @@ +"""Progress report printers.""" + +from __future__ import 
annotations + +class ReportBase: + COLUMN_NAMES: list[str] = NotImplemented + COLUMN_WIDTHS: list[int] = NotImplemented + ITERATION_FORMATS: list[str] = NotImplemented + + @classmethod + def print_header(cls): + fmt = ("|" + + "|".join([f"{{:^{x}}}" for x in cls.COLUMN_WIDTHS]) + + "|") + separators = ['-' * x for x in cls.COLUMN_WIDTHS] + print(fmt.format(*cls.COLUMN_NAMES)) + print(fmt.format(*separators)) + + @classmethod + def print_iteration(cls, *args): + iteration_format = [f"{{:{x}}}" for x in cls.ITERATION_FORMATS] + fmt = "|" + "|".join(iteration_format) + "|" + print(fmt.format(*args)) + + @classmethod + def print_footer(cls): + print() + + +class BasicReport(ReportBase): + COLUMN_NAMES = ["niter", "f evals", "CG iter", "obj func", "tr radius", + "opt", "c viol"] + COLUMN_WIDTHS = [7, 7, 7, 13, 10, 10, 10] + ITERATION_FORMATS = ["^7", "^7", "^7", "^+13.4e", + "^10.2e", "^10.2e", "^10.2e"] + + +class SQPReport(ReportBase): + COLUMN_NAMES = ["niter", "f evals", "CG iter", "obj func", "tr radius", + "opt", "c viol", "penalty", "CG stop"] + COLUMN_WIDTHS = [7, 7, 7, 13, 10, 10, 10, 10, 7] + ITERATION_FORMATS = ["^7", "^7", "^7", "^+13.4e", "^10.2e", "^10.2e", + "^10.2e", "^10.2e", "^7"] + + +class IPReport(ReportBase): + COLUMN_NAMES = ["niter", "f evals", "CG iter", "obj func", "tr radius", + "opt", "c viol", "penalty", "barrier param", "CG stop"] + COLUMN_WIDTHS = [7, 7, 7, 13, 10, 10, 10, 10, 13, 7] + ITERATION_FORMATS = ["^7", "^7", "^7", "^+13.4e", "^10.2e", "^10.2e", + "^10.2e", "^10.2e", "^13.2e", "^7"] diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__init__.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__pycache__/__init__.cpython-310.pyc 
b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3828fb6b77e748ac678ac2bbaec69f2d9efb27cb Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__pycache__/test_canonical_constraint.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__pycache__/test_canonical_constraint.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ce9570ed86640d22ec6ed40c3aed9561aa0823a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__pycache__/test_canonical_constraint.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__pycache__/test_projections.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__pycache__/test_projections.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9490d2b680948723607e42a934e406abe36d6f3d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__pycache__/test_projections.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__pycache__/test_report.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__pycache__/test_report.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cd971cc77e19581b33eb77345cc0678a758f1c08 Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/__pycache__/test_report.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_canonical_constraint.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_canonical_constraint.py new file mode 100644 index 0000000000000000000000000000000000000000..452b327d02da3b3bd3fab9592bdef4d56d6aff57 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_canonical_constraint.py @@ -0,0 +1,296 @@ +import numpy as np +from numpy.testing import assert_array_equal, assert_equal +from scipy.optimize._constraints import (NonlinearConstraint, Bounds, + PreparedConstraint) +from scipy.optimize._trustregion_constr.canonical_constraint \ + import CanonicalConstraint, initial_constraints_as_canonical + + +def create_quadratic_function(n, m, rng): + a = rng.rand(m) + A = rng.rand(m, n) + H = rng.rand(m, n, n) + HT = np.transpose(H, (1, 2, 0)) + + def fun(x): + return a + A.dot(x) + 0.5 * H.dot(x).dot(x) + + def jac(x): + return A + H.dot(x) + + def hess(x, v): + return HT.dot(v) + + return fun, jac, hess + + +def test_bounds_cases(): + # Test 1: no constraints. + user_constraint = Bounds(-np.inf, np.inf) + x0 = np.array([-1, 2]) + prepared_constraint = PreparedConstraint(user_constraint, x0, False) + c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint) + + assert_equal(c.n_eq, 0) + assert_equal(c.n_ineq, 0) + + c_eq, c_ineq = c.fun(x0) + assert_array_equal(c_eq, []) + assert_array_equal(c_ineq, []) + + J_eq, J_ineq = c.jac(x0) + assert_array_equal(J_eq, np.empty((0, 2))) + assert_array_equal(J_ineq, np.empty((0, 2))) + + assert_array_equal(c.keep_feasible, []) + + # Test 2: infinite lower bound. 
+ user_constraint = Bounds(-np.inf, [0, np.inf, 1], [False, True, True]) + x0 = np.array([-1, -2, -3], dtype=float) + prepared_constraint = PreparedConstraint(user_constraint, x0, False) + c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint) + + assert_equal(c.n_eq, 0) + assert_equal(c.n_ineq, 2) + + c_eq, c_ineq = c.fun(x0) + assert_array_equal(c_eq, []) + assert_array_equal(c_ineq, [-1, -4]) + + J_eq, J_ineq = c.jac(x0) + assert_array_equal(J_eq, np.empty((0, 3))) + assert_array_equal(J_ineq, np.array([[1, 0, 0], [0, 0, 1]])) + + assert_array_equal(c.keep_feasible, [False, True]) + + # Test 3: infinite upper bound. + user_constraint = Bounds([0, 1, -np.inf], np.inf, [True, False, True]) + x0 = np.array([1, 2, 3], dtype=float) + prepared_constraint = PreparedConstraint(user_constraint, x0, False) + c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint) + + assert_equal(c.n_eq, 0) + assert_equal(c.n_ineq, 2) + + c_eq, c_ineq = c.fun(x0) + assert_array_equal(c_eq, []) + assert_array_equal(c_ineq, [-1, -1]) + + J_eq, J_ineq = c.jac(x0) + assert_array_equal(J_eq, np.empty((0, 3))) + assert_array_equal(J_ineq, np.array([[-1, 0, 0], [0, -1, 0]])) + + assert_array_equal(c.keep_feasible, [True, False]) + + # Test 4: interval constraint. 
+ user_constraint = Bounds([-1, -np.inf, 2, 3], [1, np.inf, 10, 3], + [False, True, True, True]) + x0 = np.array([0, 10, 8, 5]) + prepared_constraint = PreparedConstraint(user_constraint, x0, False) + c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint) + + assert_equal(c.n_eq, 1) + assert_equal(c.n_ineq, 4) + + c_eq, c_ineq = c.fun(x0) + assert_array_equal(c_eq, [2]) + assert_array_equal(c_ineq, [-1, -2, -1, -6]) + + J_eq, J_ineq = c.jac(x0) + assert_array_equal(J_eq, [[0, 0, 0, 1]]) + assert_array_equal(J_ineq, [[1, 0, 0, 0], + [0, 0, 1, 0], + [-1, 0, 0, 0], + [0, 0, -1, 0]]) + + assert_array_equal(c.keep_feasible, [False, True, False, True]) + + +def test_nonlinear_constraint(): + n = 3 + m = 5 + rng = np.random.RandomState(0) + x0 = rng.rand(n) + + fun, jac, hess = create_quadratic_function(n, m, rng) + f = fun(x0) + J = jac(x0) + + lb = [-10, 3, -np.inf, -np.inf, -5] + ub = [10, 3, np.inf, 3, np.inf] + user_constraint = NonlinearConstraint( + fun, lb, ub, jac, hess, [True, False, False, True, False]) + + for sparse_jacobian in [False, True]: + prepared_constraint = PreparedConstraint(user_constraint, x0, + sparse_jacobian) + c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint) + + assert_array_equal(c.n_eq, 1) + assert_array_equal(c.n_ineq, 4) + + c_eq, c_ineq = c.fun(x0) + assert_array_equal(c_eq, [f[1] - lb[1]]) + assert_array_equal(c_ineq, [f[3] - ub[3], lb[4] - f[4], + f[0] - ub[0], lb[0] - f[0]]) + + J_eq, J_ineq = c.jac(x0) + if sparse_jacobian: + J_eq = J_eq.toarray() + J_ineq = J_ineq.toarray() + + assert_array_equal(J_eq, J[1, None]) + assert_array_equal(J_ineq, np.vstack((J[3], -J[4], J[0], -J[0]))) + + v_eq = rng.rand(c.n_eq) + v_ineq = rng.rand(c.n_ineq) + v = np.zeros(m) + v[1] = v_eq[0] + v[3] = v_ineq[0] + v[4] = -v_ineq[1] + v[0] = v_ineq[2] - v_ineq[3] + assert_array_equal(c.hess(x0, v_eq, v_ineq), hess(x0, v)) + + assert_array_equal(c.keep_feasible, [True, False, True, True]) + + +def test_concatenation(): + 
rng = np.random.RandomState(0) + n = 4 + x0 = rng.rand(n) + + f1 = x0 + J1 = np.eye(n) + lb1 = [-1, -np.inf, -2, 3] + ub1 = [1, np.inf, np.inf, 3] + bounds = Bounds(lb1, ub1, [False, False, True, False]) + + fun, jac, hess = create_quadratic_function(n, 5, rng) + f2 = fun(x0) + J2 = jac(x0) + lb2 = [-10, 3, -np.inf, -np.inf, -5] + ub2 = [10, 3, np.inf, 5, np.inf] + nonlinear = NonlinearConstraint( + fun, lb2, ub2, jac, hess, [True, False, False, True, False]) + + for sparse_jacobian in [False, True]: + bounds_prepared = PreparedConstraint(bounds, x0, sparse_jacobian) + nonlinear_prepared = PreparedConstraint(nonlinear, x0, sparse_jacobian) + + c1 = CanonicalConstraint.from_PreparedConstraint(bounds_prepared) + c2 = CanonicalConstraint.from_PreparedConstraint(nonlinear_prepared) + c = CanonicalConstraint.concatenate([c1, c2], sparse_jacobian) + + assert_equal(c.n_eq, 2) + assert_equal(c.n_ineq, 7) + + c_eq, c_ineq = c.fun(x0) + assert_array_equal(c_eq, [f1[3] - lb1[3], f2[1] - lb2[1]]) + assert_array_equal(c_ineq, [lb1[2] - f1[2], f1[0] - ub1[0], + lb1[0] - f1[0], f2[3] - ub2[3], + lb2[4] - f2[4], f2[0] - ub2[0], + lb2[0] - f2[0]]) + + J_eq, J_ineq = c.jac(x0) + if sparse_jacobian: + J_eq = J_eq.toarray() + J_ineq = J_ineq.toarray() + + assert_array_equal(J_eq, np.vstack((J1[3], J2[1]))) + assert_array_equal(J_ineq, np.vstack((-J1[2], J1[0], -J1[0], J2[3], + -J2[4], J2[0], -J2[0]))) + + v_eq = rng.rand(c.n_eq) + v_ineq = rng.rand(c.n_ineq) + v = np.zeros(5) + v[1] = v_eq[1] + v[3] = v_ineq[3] + v[4] = -v_ineq[4] + v[0] = v_ineq[5] - v_ineq[6] + H = c.hess(x0, v_eq, v_ineq).dot(np.eye(n)) + assert_array_equal(H, hess(x0, v)) + + assert_array_equal(c.keep_feasible, + [True, False, False, True, False, True, True]) + + +def test_empty(): + x = np.array([1, 2, 3]) + c = CanonicalConstraint.empty(3) + assert_equal(c.n_eq, 0) + assert_equal(c.n_ineq, 0) + + c_eq, c_ineq = c.fun(x) + assert_array_equal(c_eq, []) + assert_array_equal(c_ineq, []) + + J_eq, J_ineq = c.jac(x) + 
assert_array_equal(J_eq, np.empty((0, 3))) + assert_array_equal(J_ineq, np.empty((0, 3))) + + H = c.hess(x, None, None).toarray() + assert_array_equal(H, np.zeros((3, 3))) + + +def test_initial_constraints_as_canonical(): + # rng is only used to generate the coefficients of the quadratic + # function that is used by the nonlinear constraint. + rng = np.random.RandomState(0) + + x0 = np.array([0.5, 0.4, 0.3, 0.2]) + n = len(x0) + + lb1 = [-1, -np.inf, -2, 3] + ub1 = [1, np.inf, np.inf, 3] + bounds = Bounds(lb1, ub1, [False, False, True, False]) + + fun, jac, hess = create_quadratic_function(n, 5, rng) + lb2 = [-10, 3, -np.inf, -np.inf, -5] + ub2 = [10, 3, np.inf, 5, np.inf] + nonlinear = NonlinearConstraint( + fun, lb2, ub2, jac, hess, [True, False, False, True, False]) + + for sparse_jacobian in [False, True]: + bounds_prepared = PreparedConstraint(bounds, x0, sparse_jacobian) + nonlinear_prepared = PreparedConstraint(nonlinear, x0, sparse_jacobian) + + f1 = bounds_prepared.fun.f + J1 = bounds_prepared.fun.J + f2 = nonlinear_prepared.fun.f + J2 = nonlinear_prepared.fun.J + + c_eq, c_ineq, J_eq, J_ineq = initial_constraints_as_canonical( + n, [bounds_prepared, nonlinear_prepared], sparse_jacobian) + + assert_array_equal(c_eq, [f1[3] - lb1[3], f2[1] - lb2[1]]) + assert_array_equal(c_ineq, [lb1[2] - f1[2], f1[0] - ub1[0], + lb1[0] - f1[0], f2[3] - ub2[3], + lb2[4] - f2[4], f2[0] - ub2[0], + lb2[0] - f2[0]]) + + if sparse_jacobian: + J1 = J1.toarray() + J2 = J2.toarray() + J_eq = J_eq.toarray() + J_ineq = J_ineq.toarray() + + assert_array_equal(J_eq, np.vstack((J1[3], J2[1]))) + assert_array_equal(J_ineq, np.vstack((-J1[2], J1[0], -J1[0], J2[3], + -J2[4], J2[0], -J2[0]))) + + +def test_initial_constraints_as_canonical_empty(): + n = 3 + for sparse_jacobian in [False, True]: + c_eq, c_ineq, J_eq, J_ineq = initial_constraints_as_canonical( + n, [], sparse_jacobian) + + assert_array_equal(c_eq, []) + assert_array_equal(c_ineq, []) + + if sparse_jacobian: + J_eq = 
J_eq.toarray() + J_ineq = J_ineq.toarray() + + assert_array_equal(J_eq, np.empty((0, n))) + assert_array_equal(J_ineq, np.empty((0, n))) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_projections.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_projections.py new file mode 100644 index 0000000000000000000000000000000000000000..6ff3c39d649d0ac663d9b71bb906f1daac021118 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_projections.py @@ -0,0 +1,214 @@ +import numpy as np +import scipy.linalg +from scipy.sparse import csc_matrix +from scipy.optimize._trustregion_constr.projections \ + import projections, orthogonality +from numpy.testing import (TestCase, assert_array_almost_equal, + assert_equal, assert_allclose) + +try: + from sksparse.cholmod import cholesky_AAt # noqa: F401 + sksparse_available = True + available_sparse_methods = ("NormalEquation", "AugmentedSystem") +except ImportError: + sksparse_available = False + available_sparse_methods = ("AugmentedSystem",) +available_dense_methods = ('QRFactorization', 'SVDFactorization') + + +class TestProjections(TestCase): + + def test_nullspace_and_least_squares_sparse(self): + A_dense = np.array([[1, 2, 3, 4, 0, 5, 0, 7], + [0, 8, 7, 0, 1, 5, 9, 0], + [1, 0, 0, 0, 0, 1, 2, 3]]) + At_dense = A_dense.T + A = csc_matrix(A_dense) + test_points = ([1, 2, 3, 4, 5, 6, 7, 8], + [1, 10, 3, 0, 1, 6, 7, 8], + [1.12, 10, 0, 0, 100000, 6, 0.7, 8]) + + for method in available_sparse_methods: + Z, LS, _ = projections(A, method) + for z in test_points: + # Test if x is in the null_space + x = Z.matvec(z) + assert_array_almost_equal(A.dot(x), 0) + # Test orthogonality + assert_array_almost_equal(orthogonality(A, x), 0) + # Test if x is the least square solution + x = LS.matvec(z) + x2 = scipy.linalg.lstsq(At_dense, z)[0] + assert_array_almost_equal(x, x2) + + def 
test_iterative_refinements_sparse(self): + A_dense = np.array([[1, 2, 3, 4, 0, 5, 0, 7], + [0, 8, 7, 0, 1, 5, 9, 0], + [1, 0, 0, 0, 0, 1, 2, 3]]) + A = csc_matrix(A_dense) + test_points = ([1, 2, 3, 4, 5, 6, 7, 8], + [1, 10, 3, 0, 1, 6, 7, 8], + [1.12, 10, 0, 0, 100000, 6, 0.7, 8], + [1, 0, 0, 0, 0, 1, 2, 3+1e-10]) + + for method in available_sparse_methods: + Z, LS, _ = projections(A, method, orth_tol=1e-18, max_refin=100) + for z in test_points: + # Test if x is in the null_space + x = Z.matvec(z) + atol = 1e-13 * abs(x).max() + assert_allclose(A.dot(x), 0, atol=atol) + # Test orthogonality + assert_allclose(orthogonality(A, x), 0, atol=1e-13) + + def test_rowspace_sparse(self): + A_dense = np.array([[1, 2, 3, 4, 0, 5, 0, 7], + [0, 8, 7, 0, 1, 5, 9, 0], + [1, 0, 0, 0, 0, 1, 2, 3]]) + A = csc_matrix(A_dense) + test_points = ([1, 2, 3], + [1, 10, 3], + [1.12, 10, 0]) + + for method in available_sparse_methods: + _, _, Y = projections(A, method) + for z in test_points: + # Test if x is solution of A x = z + x = Y.matvec(z) + assert_array_almost_equal(A.dot(x), z) + # Test if x is in the return row space of A + A_ext = np.vstack((A_dense, x)) + assert_equal(np.linalg.matrix_rank(A_dense), + np.linalg.matrix_rank(A_ext)) + + def test_nullspace_and_least_squares_dense(self): + A = np.array([[1, 2, 3, 4, 0, 5, 0, 7], + [0, 8, 7, 0, 1, 5, 9, 0], + [1, 0, 0, 0, 0, 1, 2, 3]]) + At = A.T + test_points = ([1, 2, 3, 4, 5, 6, 7, 8], + [1, 10, 3, 0, 1, 6, 7, 8], + [1.12, 10, 0, 0, 100000, 6, 0.7, 8]) + + for method in available_dense_methods: + Z, LS, _ = projections(A, method) + for z in test_points: + # Test if x is in the null_space + x = Z.matvec(z) + assert_array_almost_equal(A.dot(x), 0) + # Test orthogonality + assert_array_almost_equal(orthogonality(A, x), 0) + # Test if x is the least square solution + x = LS.matvec(z) + x2 = scipy.linalg.lstsq(At, z)[0] + assert_array_almost_equal(x, x2) + + def test_compare_dense_and_sparse(self): + D = np.diag(range(1, 101)) + A = 
np.hstack([D, D, D, D]) + A_sparse = csc_matrix(A) + np.random.seed(0) + + Z, LS, Y = projections(A) + Z_sparse, LS_sparse, Y_sparse = projections(A_sparse) + for k in range(20): + z = np.random.normal(size=(400,)) + assert_array_almost_equal(Z.dot(z), Z_sparse.dot(z)) + assert_array_almost_equal(LS.dot(z), LS_sparse.dot(z)) + x = np.random.normal(size=(100,)) + assert_array_almost_equal(Y.dot(x), Y_sparse.dot(x)) + + def test_compare_dense_and_sparse2(self): + D1 = np.diag([-1.7, 1, 0.5]) + D2 = np.diag([1, -0.6, -0.3]) + D3 = np.diag([-0.3, -1.5, 2]) + A = np.hstack([D1, D2, D3]) + A_sparse = csc_matrix(A) + np.random.seed(0) + + Z, LS, Y = projections(A) + Z_sparse, LS_sparse, Y_sparse = projections(A_sparse) + for k in range(1): + z = np.random.normal(size=(9,)) + assert_array_almost_equal(Z.dot(z), Z_sparse.dot(z)) + assert_array_almost_equal(LS.dot(z), LS_sparse.dot(z)) + x = np.random.normal(size=(3,)) + assert_array_almost_equal(Y.dot(x), Y_sparse.dot(x)) + + def test_iterative_refinements_dense(self): + A = np.array([[1, 2, 3, 4, 0, 5, 0, 7], + [0, 8, 7, 0, 1, 5, 9, 0], + [1, 0, 0, 0, 0, 1, 2, 3]]) + test_points = ([1, 2, 3, 4, 5, 6, 7, 8], + [1, 10, 3, 0, 1, 6, 7, 8], + [1, 0, 0, 0, 0, 1, 2, 3+1e-10]) + + for method in available_dense_methods: + Z, LS, _ = projections(A, method, orth_tol=1e-18, max_refin=10) + for z in test_points: + # Test if x is in the null_space + x = Z.matvec(z) + assert_allclose(A.dot(x), 0, rtol=0, atol=2.5e-14) + # Test orthogonality + assert_allclose(orthogonality(A, x), 0, rtol=0, atol=5e-16) + + def test_rowspace_dense(self): + A = np.array([[1, 2, 3, 4, 0, 5, 0, 7], + [0, 8, 7, 0, 1, 5, 9, 0], + [1, 0, 0, 0, 0, 1, 2, 3]]) + test_points = ([1, 2, 3], + [1, 10, 3], + [1.12, 10, 0]) + + for method in available_dense_methods: + _, _, Y = projections(A, method) + for z in test_points: + # Test if x is solution of A x = z + x = Y.matvec(z) + assert_array_almost_equal(A.dot(x), z) + # Test if x is in the return row space of A + A_ext 
= np.vstack((A, x)) + assert_equal(np.linalg.matrix_rank(A), + np.linalg.matrix_rank(A_ext)) + + +class TestOrthogonality(TestCase): + + def test_dense_matrix(self): + A = np.array([[1, 2, 3, 4, 0, 5, 0, 7], + [0, 8, 7, 0, 1, 5, 9, 0], + [1, 0, 0, 0, 0, 1, 2, 3]]) + test_vectors = ([-1.98931144, -1.56363389, + -0.84115584, 2.2864762, + 5.599141, 0.09286976, + 1.37040802, -0.28145812], + [697.92794044, -4091.65114008, + -3327.42316335, 836.86906951, + 99434.98929065, -1285.37653682, + -4109.21503806, 2935.29289083]) + test_expected_orth = (0, 0) + + for i in range(len(test_vectors)): + x = test_vectors[i] + orth = test_expected_orth[i] + assert_array_almost_equal(orthogonality(A, x), orth) + + def test_sparse_matrix(self): + A = np.array([[1, 2, 3, 4, 0, 5, 0, 7], + [0, 8, 7, 0, 1, 5, 9, 0], + [1, 0, 0, 0, 0, 1, 2, 3]]) + A = csc_matrix(A) + test_vectors = ([-1.98931144, -1.56363389, + -0.84115584, 2.2864762, + 5.599141, 0.09286976, + 1.37040802, -0.28145812], + [697.92794044, -4091.65114008, + -3327.42316335, 836.86906951, + 99434.98929065, -1285.37653682, + -4109.21503806, 2935.29289083]) + test_expected_orth = (0, 0) + + for i in range(len(test_vectors)): + x = test_vectors[i] + orth = test_expected_orth[i] + assert_array_almost_equal(orthogonality(A, x), orth) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_qp_subproblem.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_qp_subproblem.py new file mode 100644 index 0000000000000000000000000000000000000000..e0235caace6c19563efc31fdf4b8e41d9d81819b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_qp_subproblem.py @@ -0,0 +1,645 @@ +import numpy as np +from scipy.sparse import csc_matrix +from scipy.optimize._trustregion_constr.qp_subproblem \ + import (eqp_kktfact, + projected_cg, + box_intersections, + sphere_intersections, + box_sphere_intersections, + 
modified_dogleg) +from scipy.optimize._trustregion_constr.projections \ + import projections +from numpy.testing import TestCase, assert_array_almost_equal, assert_equal +import pytest + + +class TestEQPDirectFactorization(TestCase): + + # From Example 16.2 Nocedal/Wright "Numerical + # Optimization" p.452. + def test_nocedal_example(self): + H = csc_matrix([[6, 2, 1], + [2, 5, 2], + [1, 2, 4]]) + A = csc_matrix([[1, 0, 1], + [0, 1, 1]]) + c = np.array([-8, -3, -3]) + b = -np.array([3, 0]) + x, lagrange_multipliers = eqp_kktfact(H, c, A, b) + assert_array_almost_equal(x, [2, -1, 1]) + assert_array_almost_equal(lagrange_multipliers, [3, -2]) + + +class TestSphericalBoundariesIntersections(TestCase): + + def test_2d_sphere_constraints(self): + # Interior inicial point + ta, tb, intersect = sphere_intersections([0, 0], + [1, 0], 0.5) + assert_array_almost_equal([ta, tb], [0, 0.5]) + assert_equal(intersect, True) + + # No intersection between line and circle + ta, tb, intersect = sphere_intersections([2, 0], + [0, 1], 1) + assert_equal(intersect, False) + + # Outside initial point pointing toward outside the circle + ta, tb, intersect = sphere_intersections([2, 0], + [1, 0], 1) + assert_equal(intersect, False) + + # Outside initial point pointing toward inside the circle + ta, tb, intersect = sphere_intersections([2, 0], + [-1, 0], 1.5) + assert_array_almost_equal([ta, tb], [0.5, 1]) + assert_equal(intersect, True) + + # Initial point on the boundary + ta, tb, intersect = sphere_intersections([2, 0], + [1, 0], 2) + assert_array_almost_equal([ta, tb], [0, 0]) + assert_equal(intersect, True) + + def test_2d_sphere_constraints_line_intersections(self): + # Interior initial point + ta, tb, intersect = sphere_intersections([0, 0], + [1, 0], 0.5, + entire_line=True) + assert_array_almost_equal([ta, tb], [-0.5, 0.5]) + assert_equal(intersect, True) + + # No intersection between line and circle + ta, tb, intersect = sphere_intersections([2, 0], + [0, 1], 1, + entire_line=True) 
+ assert_equal(intersect, False) + + # Outside initial point pointing toward outside the circle + ta, tb, intersect = sphere_intersections([2, 0], + [1, 0], 1, + entire_line=True) + assert_array_almost_equal([ta, tb], [-3, -1]) + assert_equal(intersect, True) + + # Outside initial point pointing toward inside the circle + ta, tb, intersect = sphere_intersections([2, 0], + [-1, 0], 1.5, + entire_line=True) + assert_array_almost_equal([ta, tb], [0.5, 3.5]) + assert_equal(intersect, True) + + # Initial point on the boundary + ta, tb, intersect = sphere_intersections([2, 0], + [1, 0], 2, + entire_line=True) + assert_array_almost_equal([ta, tb], [-4, 0]) + assert_equal(intersect, True) + + +class TestBoxBoundariesIntersections(TestCase): + + def test_2d_box_constraints(self): + # Box constraint in the direction of vector d + ta, tb, intersect = box_intersections([2, 0], [0, 2], + [1, 1], [3, 3]) + assert_array_almost_equal([ta, tb], [0.5, 1]) + assert_equal(intersect, True) + + # Negative direction + ta, tb, intersect = box_intersections([2, 0], [0, 2], + [1, -3], [3, -1]) + assert_equal(intersect, False) + + # Some constraints are absent (set to +/- inf) + ta, tb, intersect = box_intersections([2, 0], [0, 2], + [-np.inf, 1], + [np.inf, np.inf]) + assert_array_almost_equal([ta, tb], [0.5, 1]) + assert_equal(intersect, True) + + # Intersect on the face of the box + ta, tb, intersect = box_intersections([1, 0], [0, 1], + [1, 1], [3, 3]) + assert_array_almost_equal([ta, tb], [1, 1]) + assert_equal(intersect, True) + + # Interior initial point + ta, tb, intersect = box_intersections([0, 0], [4, 4], + [-2, -3], [3, 2]) + assert_array_almost_equal([ta, tb], [0, 0.5]) + assert_equal(intersect, True) + + # No intersection between line and box constraints + ta, tb, intersect = box_intersections([2, 0], [0, 2], + [-3, -3], [-1, -1]) + assert_equal(intersect, False) + ta, tb, intersect = box_intersections([2, 0], [0, 2], + [-3, 3], [-1, 1]) + assert_equal(intersect, False) + ta, 
tb, intersect = box_intersections([2, 0], [0, 2], + [-3, -np.inf], + [-1, np.inf]) + assert_equal(intersect, False) + ta, tb, intersect = box_intersections([0, 0], [1, 100], + [1, 1], [3, 3]) + assert_equal(intersect, False) + ta, tb, intersect = box_intersections([0.99, 0], [0, 2], + [1, 1], [3, 3]) + assert_equal(intersect, False) + + # Initial point on the boundary + ta, tb, intersect = box_intersections([2, 2], [0, 1], + [-2, -2], [2, 2]) + assert_array_almost_equal([ta, tb], [0, 0]) + assert_equal(intersect, True) + + def test_2d_box_constraints_entire_line(self): + # Box constraint in the direction of vector d + ta, tb, intersect = box_intersections([2, 0], [0, 2], + [1, 1], [3, 3], + entire_line=True) + assert_array_almost_equal([ta, tb], [0.5, 1.5]) + assert_equal(intersect, True) + + # Negative direction + ta, tb, intersect = box_intersections([2, 0], [0, 2], + [1, -3], [3, -1], + entire_line=True) + assert_array_almost_equal([ta, tb], [-1.5, -0.5]) + assert_equal(intersect, True) + + # Some constraints are absent (set to +/- inf) + ta, tb, intersect = box_intersections([2, 0], [0, 2], + [-np.inf, 1], + [np.inf, np.inf], + entire_line=True) + assert_array_almost_equal([ta, tb], [0.5, np.inf]) + assert_equal(intersect, True) + + # Intersect on the face of the box + ta, tb, intersect = box_intersections([1, 0], [0, 1], + [1, 1], [3, 3], + entire_line=True) + assert_array_almost_equal([ta, tb], [1, 3]) + assert_equal(intersect, True) + + # Interior initial pointoint + ta, tb, intersect = box_intersections([0, 0], [4, 4], + [-2, -3], [3, 2], + entire_line=True) + assert_array_almost_equal([ta, tb], [-0.5, 0.5]) + assert_equal(intersect, True) + + # No intersection between line and box constraints + ta, tb, intersect = box_intersections([2, 0], [0, 2], + [-3, -3], [-1, -1], + entire_line=True) + assert_equal(intersect, False) + ta, tb, intersect = box_intersections([2, 0], [0, 2], + [-3, 3], [-1, 1], + entire_line=True) + assert_equal(intersect, False) + ta, 
tb, intersect = box_intersections([2, 0], [0, 2], + [-3, -np.inf], + [-1, np.inf], + entire_line=True) + assert_equal(intersect, False) + ta, tb, intersect = box_intersections([0, 0], [1, 100], + [1, 1], [3, 3], + entire_line=True) + assert_equal(intersect, False) + ta, tb, intersect = box_intersections([0.99, 0], [0, 2], + [1, 1], [3, 3], + entire_line=True) + assert_equal(intersect, False) + + # Initial point on the boundary + ta, tb, intersect = box_intersections([2, 2], [0, 1], + [-2, -2], [2, 2], + entire_line=True) + assert_array_almost_equal([ta, tb], [-4, 0]) + assert_equal(intersect, True) + + def test_3d_box_constraints(self): + # Simple case + ta, tb, intersect = box_intersections([1, 1, 0], [0, 0, 1], + [1, 1, 1], [3, 3, 3]) + assert_array_almost_equal([ta, tb], [1, 1]) + assert_equal(intersect, True) + + # Negative direction + ta, tb, intersect = box_intersections([1, 1, 0], [0, 0, -1], + [1, 1, 1], [3, 3, 3]) + assert_equal(intersect, False) + + # Interior point + ta, tb, intersect = box_intersections([2, 2, 2], [0, -1, 1], + [1, 1, 1], [3, 3, 3]) + assert_array_almost_equal([ta, tb], [0, 1]) + assert_equal(intersect, True) + + def test_3d_box_constraints_entire_line(self): + # Simple case + ta, tb, intersect = box_intersections([1, 1, 0], [0, 0, 1], + [1, 1, 1], [3, 3, 3], + entire_line=True) + assert_array_almost_equal([ta, tb], [1, 3]) + assert_equal(intersect, True) + + # Negative direction + ta, tb, intersect = box_intersections([1, 1, 0], [0, 0, -1], + [1, 1, 1], [3, 3, 3], + entire_line=True) + assert_array_almost_equal([ta, tb], [-3, -1]) + assert_equal(intersect, True) + + # Interior point + ta, tb, intersect = box_intersections([2, 2, 2], [0, -1, 1], + [1, 1, 1], [3, 3, 3], + entire_line=True) + assert_array_almost_equal([ta, tb], [-1, 1]) + assert_equal(intersect, True) + + +class TestBoxSphereBoundariesIntersections(TestCase): + + def test_2d_box_constraints(self): + # Both constraints are active + ta, tb, intersect = 
box_sphere_intersections([1, 1], [-2, 2], + [-1, -2], [1, 2], 2, + entire_line=False) + assert_array_almost_equal([ta, tb], [0, 0.5]) + assert_equal(intersect, True) + + # None of the constraints are active + ta, tb, intersect = box_sphere_intersections([1, 1], [-1, 1], + [-1, -3], [1, 3], 10, + entire_line=False) + assert_array_almost_equal([ta, tb], [0, 1]) + assert_equal(intersect, True) + + # Box constraints are active + ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4], + [-1, -3], [1, 3], 10, + entire_line=False) + assert_array_almost_equal([ta, tb], [0, 0.5]) + assert_equal(intersect, True) + + # Spherical constraints are active + ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4], + [-1, -3], [1, 3], 2, + entire_line=False) + assert_array_almost_equal([ta, tb], [0, 0.25]) + assert_equal(intersect, True) + + # Infeasible problems + ta, tb, intersect = box_sphere_intersections([2, 2], [-4, 4], + [-1, -3], [1, 3], 2, + entire_line=False) + assert_equal(intersect, False) + ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4], + [2, 4], [2, 4], 2, + entire_line=False) + assert_equal(intersect, False) + + def test_2d_box_constraints_entire_line(self): + # Both constraints are active + ta, tb, intersect = box_sphere_intersections([1, 1], [-2, 2], + [-1, -2], [1, 2], 2, + entire_line=True) + assert_array_almost_equal([ta, tb], [0, 0.5]) + assert_equal(intersect, True) + + # None of the constraints are active + ta, tb, intersect = box_sphere_intersections([1, 1], [-1, 1], + [-1, -3], [1, 3], 10, + entire_line=True) + assert_array_almost_equal([ta, tb], [0, 2]) + assert_equal(intersect, True) + + # Box constraints are active + ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4], + [-1, -3], [1, 3], 10, + entire_line=True) + assert_array_almost_equal([ta, tb], [0, 0.5]) + assert_equal(intersect, True) + + # Spherical constraints are active + ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4], + [-1, -3], [1, 3], 2, + 
entire_line=True) + assert_array_almost_equal([ta, tb], [0, 0.25]) + assert_equal(intersect, True) + + # Infeasible problems + ta, tb, intersect = box_sphere_intersections([2, 2], [-4, 4], + [-1, -3], [1, 3], 2, + entire_line=True) + assert_equal(intersect, False) + ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4], + [2, 4], [2, 4], 2, + entire_line=True) + assert_equal(intersect, False) + + +class TestModifiedDogleg(TestCase): + + def test_cauchypoint_equalsto_newtonpoint(self): + A = np.array([[1, 8]]) + b = np.array([-16]) + _, _, Y = projections(A) + newton_point = np.array([0.24615385, 1.96923077]) + + # Newton point inside boundaries + x = modified_dogleg(A, Y, b, 2, [-np.inf, -np.inf], [np.inf, np.inf]) + assert_array_almost_equal(x, newton_point) + + # Spherical constraint active + x = modified_dogleg(A, Y, b, 1, [-np.inf, -np.inf], [np.inf, np.inf]) + assert_array_almost_equal(x, newton_point/np.linalg.norm(newton_point)) + + # Box constraints active + x = modified_dogleg(A, Y, b, 2, [-np.inf, -np.inf], [0.1, np.inf]) + assert_array_almost_equal(x, (newton_point/newton_point[0]) * 0.1) + + def test_3d_example(self): + A = np.array([[1, 8, 1], + [4, 2, 2]]) + b = np.array([-16, 2]) + Z, LS, Y = projections(A) + + newton_point = np.array([-1.37090909, 2.23272727, -0.49090909]) + cauchy_point = np.array([0.11165723, 1.73068711, 0.16748585]) + origin = np.zeros_like(newton_point) + + # newton_point inside boundaries + x = modified_dogleg(A, Y, b, 3, [-np.inf, -np.inf, -np.inf], + [np.inf, np.inf, np.inf]) + assert_array_almost_equal(x, newton_point) + + # line between cauchy_point and newton_point contains best point + # (spherical constraint is active). 
+ x = modified_dogleg(A, Y, b, 2, [-np.inf, -np.inf, -np.inf], + [np.inf, np.inf, np.inf]) + z = cauchy_point + d = newton_point-cauchy_point + t = ((x-z)/(d)) + assert_array_almost_equal(t, np.full(3, 0.40807330)) + assert_array_almost_equal(np.linalg.norm(x), 2) + + # line between cauchy_point and newton_point contains best point + # (box constraint is active). + x = modified_dogleg(A, Y, b, 5, [-1, -np.inf, -np.inf], + [np.inf, np.inf, np.inf]) + z = cauchy_point + d = newton_point-cauchy_point + t = ((x-z)/(d)) + assert_array_almost_equal(t, np.full(3, 0.7498195)) + assert_array_almost_equal(x[0], -1) + + # line between origin and cauchy_point contains best point + # (spherical constraint is active). + x = modified_dogleg(A, Y, b, 1, [-np.inf, -np.inf, -np.inf], + [np.inf, np.inf, np.inf]) + z = origin + d = cauchy_point + t = ((x-z)/(d)) + assert_array_almost_equal(t, np.full(3, 0.573936265)) + assert_array_almost_equal(np.linalg.norm(x), 1) + + # line between origin and newton_point contains best point + # (box constraint is active). + x = modified_dogleg(A, Y, b, 2, [-np.inf, -np.inf, -np.inf], + [np.inf, 1, np.inf]) + z = origin + d = newton_point + t = ((x-z)/(d)) + assert_array_almost_equal(t, np.full(3, 0.4478827364)) + assert_array_almost_equal(x[1], 1) + + +class TestProjectCG(TestCase): + + # From Example 16.2 Nocedal/Wright "Numerical + # Optimization" p.452. 
+ def test_nocedal_example(self): + H = csc_matrix([[6, 2, 1], + [2, 5, 2], + [1, 2, 4]]) + A = csc_matrix([[1, 0, 1], + [0, 1, 1]]) + c = np.array([-8, -3, -3]) + b = -np.array([3, 0]) + Z, _, Y = projections(A) + x, info = projected_cg(H, c, Z, Y, b) + assert_equal(info["stop_cond"], 4) + assert_equal(info["hits_boundary"], False) + assert_array_almost_equal(x, [2, -1, 1]) + + def test_compare_with_direct_fact(self): + H = csc_matrix([[6, 2, 1, 3], + [2, 5, 2, 4], + [1, 2, 4, 5], + [3, 4, 5, 7]]) + A = csc_matrix([[1, 0, 1, 0], + [0, 1, 1, 1]]) + c = np.array([-2, -3, -3, 1]) + b = -np.array([3, 0]) + Z, _, Y = projections(A) + x, info = projected_cg(H, c, Z, Y, b, tol=0) + x_kkt, _ = eqp_kktfact(H, c, A, b) + assert_equal(info["stop_cond"], 1) + assert_equal(info["hits_boundary"], False) + assert_array_almost_equal(x, x_kkt) + + def test_trust_region_infeasible(self): + H = csc_matrix([[6, 2, 1, 3], + [2, 5, 2, 4], + [1, 2, 4, 5], + [3, 4, 5, 7]]) + A = csc_matrix([[1, 0, 1, 0], + [0, 1, 1, 1]]) + c = np.array([-2, -3, -3, 1]) + b = -np.array([3, 0]) + trust_radius = 1 + Z, _, Y = projections(A) + with pytest.raises(ValueError): + projected_cg(H, c, Z, Y, b, trust_radius=trust_radius) + + def test_trust_region_barely_feasible(self): + H = csc_matrix([[6, 2, 1, 3], + [2, 5, 2, 4], + [1, 2, 4, 5], + [3, 4, 5, 7]]) + A = csc_matrix([[1, 0, 1, 0], + [0, 1, 1, 1]]) + c = np.array([-2, -3, -3, 1]) + b = -np.array([3, 0]) + trust_radius = 2.32379000772445021283 + Z, _, Y = projections(A) + x, info = projected_cg(H, c, Z, Y, b, + tol=0, + trust_radius=trust_radius) + assert_equal(info["stop_cond"], 2) + assert_equal(info["hits_boundary"], True) + assert_array_almost_equal(np.linalg.norm(x), trust_radius) + assert_array_almost_equal(x, -Y.dot(b)) + + def test_hits_boundary(self): + H = csc_matrix([[6, 2, 1, 3], + [2, 5, 2, 4], + [1, 2, 4, 5], + [3, 4, 5, 7]]) + A = csc_matrix([[1, 0, 1, 0], + [0, 1, 1, 1]]) + c = np.array([-2, -3, -3, 1]) + b = -np.array([3, 0]) + 
trust_radius = 3 + Z, _, Y = projections(A) + x, info = projected_cg(H, c, Z, Y, b, + tol=0, + trust_radius=trust_radius) + assert_equal(info["stop_cond"], 2) + assert_equal(info["hits_boundary"], True) + assert_array_almost_equal(np.linalg.norm(x), trust_radius) + + def test_negative_curvature_unconstrained(self): + H = csc_matrix([[1, 2, 1, 3], + [2, 0, 2, 4], + [1, 2, 0, 2], + [3, 4, 2, 0]]) + A = csc_matrix([[1, 0, 1, 0], + [0, 1, 0, 1]]) + c = np.array([-2, -3, -3, 1]) + b = -np.array([3, 0]) + Z, _, Y = projections(A) + with pytest.raises(ValueError): + projected_cg(H, c, Z, Y, b, tol=0) + + def test_negative_curvature(self): + H = csc_matrix([[1, 2, 1, 3], + [2, 0, 2, 4], + [1, 2, 0, 2], + [3, 4, 2, 0]]) + A = csc_matrix([[1, 0, 1, 0], + [0, 1, 0, 1]]) + c = np.array([-2, -3, -3, 1]) + b = -np.array([3, 0]) + Z, _, Y = projections(A) + trust_radius = 1000 + x, info = projected_cg(H, c, Z, Y, b, + tol=0, + trust_radius=trust_radius) + assert_equal(info["stop_cond"], 3) + assert_equal(info["hits_boundary"], True) + assert_array_almost_equal(np.linalg.norm(x), trust_radius) + + # The box constraints are inactive at the solution but + # are active during the iterations. + def test_inactive_box_constraints(self): + H = csc_matrix([[6, 2, 1, 3], + [2, 5, 2, 4], + [1, 2, 4, 5], + [3, 4, 5, 7]]) + A = csc_matrix([[1, 0, 1, 0], + [0, 1, 1, 1]]) + c = np.array([-2, -3, -3, 1]) + b = -np.array([3, 0]) + Z, _, Y = projections(A) + x, info = projected_cg(H, c, Z, Y, b, + tol=0, + lb=[0.5, -np.inf, + -np.inf, -np.inf], + return_all=True) + x_kkt, _ = eqp_kktfact(H, c, A, b) + assert_equal(info["stop_cond"], 1) + assert_equal(info["hits_boundary"], False) + assert_array_almost_equal(x, x_kkt) + + # The box constraints active and the termination is + # by maximum iterations (infeasible interaction). 
+ def test_active_box_constraints_maximum_iterations_reached(self): + H = csc_matrix([[6, 2, 1, 3], + [2, 5, 2, 4], + [1, 2, 4, 5], + [3, 4, 5, 7]]) + A = csc_matrix([[1, 0, 1, 0], + [0, 1, 1, 1]]) + c = np.array([-2, -3, -3, 1]) + b = -np.array([3, 0]) + Z, _, Y = projections(A) + x, info = projected_cg(H, c, Z, Y, b, + tol=0, + lb=[0.8, -np.inf, + -np.inf, -np.inf], + return_all=True) + assert_equal(info["stop_cond"], 1) + assert_equal(info["hits_boundary"], True) + assert_array_almost_equal(A.dot(x), -b) + assert_array_almost_equal(x[0], 0.8) + + # The box constraints are active and the termination is + # because it hits boundary (without infeasible interaction). + def test_active_box_constraints_hits_boundaries(self): + H = csc_matrix([[6, 2, 1, 3], + [2, 5, 2, 4], + [1, 2, 4, 5], + [3, 4, 5, 7]]) + A = csc_matrix([[1, 0, 1, 0], + [0, 1, 1, 1]]) + c = np.array([-2, -3, -3, 1]) + b = -np.array([3, 0]) + trust_radius = 3 + Z, _, Y = projections(A) + x, info = projected_cg(H, c, Z, Y, b, + tol=0, + ub=[np.inf, np.inf, 1.6, np.inf], + trust_radius=trust_radius, + return_all=True) + assert_equal(info["stop_cond"], 2) + assert_equal(info["hits_boundary"], True) + assert_array_almost_equal(x[2], 1.6) + + # The box constraints are active and the termination is + # because it hits boundary (infeasible interaction). 
+ def test_active_box_constraints_hits_boundaries_infeasible_iter(self): + H = csc_matrix([[6, 2, 1, 3], + [2, 5, 2, 4], + [1, 2, 4, 5], + [3, 4, 5, 7]]) + A = csc_matrix([[1, 0, 1, 0], + [0, 1, 1, 1]]) + c = np.array([-2, -3, -3, 1]) + b = -np.array([3, 0]) + trust_radius = 4 + Z, _, Y = projections(A) + x, info = projected_cg(H, c, Z, Y, b, + tol=0, + ub=[np.inf, 0.1, np.inf, np.inf], + trust_radius=trust_radius, + return_all=True) + assert_equal(info["stop_cond"], 2) + assert_equal(info["hits_boundary"], True) + assert_array_almost_equal(x[1], 0.1) + + # The box constraints are active and the termination is + # because it hits boundary (no infeasible interaction). + def test_active_box_constraints_negative_curvature(self): + H = csc_matrix([[1, 2, 1, 3], + [2, 0, 2, 4], + [1, 2, 0, 2], + [3, 4, 2, 0]]) + A = csc_matrix([[1, 0, 1, 0], + [0, 1, 0, 1]]) + c = np.array([-2, -3, -3, 1]) + b = -np.array([3, 0]) + Z, _, Y = projections(A) + trust_radius = 1000 + x, info = projected_cg(H, c, Z, Y, b, + tol=0, + ub=[np.inf, np.inf, 100, np.inf], + trust_radius=trust_radius) + assert_equal(info["stop_cond"], 3) + assert_equal(info["hits_boundary"], True) + assert_array_almost_equal(x[2], 100) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_report.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_report.py new file mode 100644 index 0000000000000000000000000000000000000000..e08d3e8490044caceb1bf3e6ab2cd6c37c5c8f70 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_report.py @@ -0,0 +1,32 @@ +import numpy as np +from scipy.optimize import minimize, Bounds + +def test_gh10880(): + # checks that verbose reporting works with trust-constr for + # bound-contrained problems + bnds = Bounds(1, 2) + opts = {'maxiter': 1000, 'verbose': 2} + minimize(lambda x: x**2, x0=2., method='trust-constr', + bounds=bnds, options=opts) + + opts = 
{'maxiter': 1000, 'verbose': 3} + minimize(lambda x: x**2, x0=2., method='trust-constr', + bounds=bnds, options=opts) + +def test_gh12922(): + # checks that verbose reporting works with trust-constr for + # general constraints + def objective(x): + return np.array([(np.sum((x+1)**4))]) + + cons = {'type': 'ineq', 'fun': lambda x: -x[0]**2} + n = 25 + x0 = np.linspace(-5, 5, n) + + opts = {'maxiter': 1000, 'verbose': 2} + minimize(objective, x0=x0, method='trust-constr', + constraints=cons, options=opts) + + opts = {'maxiter': 1000, 'verbose': 3} + minimize(objective, x0=x0, method='trust-constr', + constraints=cons, options=opts) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tr_interior_point.py b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tr_interior_point.py new file mode 100644 index 0000000000000000000000000000000000000000..121143fad2a8df3a8986beffc5043622d9ace993 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tr_interior_point.py @@ -0,0 +1,346 @@ +"""Trust-region interior point method. + +References +---------- +.. [1] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal. + "An interior point algorithm for large-scale nonlinear + programming." SIAM Journal on Optimization 9.4 (1999): 877-900. +.. [2] Byrd, Richard H., Guanghui Liu, and Jorge Nocedal. + "On the local behavior of an interior point method for + nonlinear programming." Numerical analysis 1997 (1997): 37-56. +.. [3] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization" + Second Edition (2006). 
+""" + +import scipy.sparse as sps +import numpy as np +from .equality_constrained_sqp import equality_constrained_sqp +from scipy.sparse.linalg import LinearOperator + +__all__ = ['tr_interior_point'] + + +class BarrierSubproblem: + """ + Barrier optimization problem: + minimize fun(x) - barrier_parameter*sum(log(s)) + subject to: constr_eq(x) = 0 + constr_ineq(x) + s = 0 + """ + + def __init__(self, x0, s0, fun, grad, lagr_hess, n_vars, n_ineq, n_eq, + constr, jac, barrier_parameter, tolerance, + enforce_feasibility, global_stop_criteria, + xtol, fun0, grad0, constr_ineq0, jac_ineq0, constr_eq0, + jac_eq0): + # Store parameters + self.n_vars = n_vars + self.x0 = x0 + self.s0 = s0 + self.fun = fun + self.grad = grad + self.lagr_hess = lagr_hess + self.constr = constr + self.jac = jac + self.barrier_parameter = barrier_parameter + self.tolerance = tolerance + self.n_eq = n_eq + self.n_ineq = n_ineq + self.enforce_feasibility = enforce_feasibility + self.global_stop_criteria = global_stop_criteria + self.xtol = xtol + self.fun0 = self._compute_function(fun0, constr_ineq0, s0) + self.grad0 = self._compute_gradient(grad0) + self.constr0 = self._compute_constr(constr_ineq0, constr_eq0, s0) + self.jac0 = self._compute_jacobian(jac_eq0, jac_ineq0, s0) + self.terminate = False + + def update(self, barrier_parameter, tolerance): + self.barrier_parameter = barrier_parameter + self.tolerance = tolerance + + def get_slack(self, z): + return z[self.n_vars:self.n_vars+self.n_ineq] + + def get_variables(self, z): + return z[:self.n_vars] + + def function_and_constraints(self, z): + """Returns barrier function and constraints at given point. 
+ + For z = [x, s], returns barrier function: + function(z) = fun(x) - barrier_parameter*sum(log(s)) + and barrier constraints: + constraints(z) = [ constr_eq(x) ] + [ constr_ineq(x) + s ] + + """ + # Get variables and slack variables + x = self.get_variables(z) + s = self.get_slack(z) + # Compute function and constraints + f = self.fun(x) + c_eq, c_ineq = self.constr(x) + # Return objective function and constraints + return (self._compute_function(f, c_ineq, s), + self._compute_constr(c_ineq, c_eq, s)) + + def _compute_function(self, f, c_ineq, s): + # Use technique from Nocedal and Wright book, ref [3]_, p.576, + # to guarantee constraints from `enforce_feasibility` + # stay feasible along iterations. + s[self.enforce_feasibility] = -c_ineq[self.enforce_feasibility] + log_s = [np.log(s_i) if s_i > 0 else -np.inf for s_i in s] + # Compute barrier objective function + return f - self.barrier_parameter*np.sum(log_s) + + def _compute_constr(self, c_ineq, c_eq, s): + # Compute barrier constraint + return np.hstack((c_eq, + c_ineq + s)) + + def scaling(self, z): + """Returns scaling vector. + Given by: + scaling = [ones(n_vars), s] + """ + s = self.get_slack(z) + diag_elements = np.hstack((np.ones(self.n_vars), s)) + + # Diagonal matrix + def matvec(vec): + return diag_elements*vec + return LinearOperator((self.n_vars+self.n_ineq, + self.n_vars+self.n_ineq), + matvec) + + def gradient_and_jacobian(self, z): + """Returns scaled gradient. + + Return scaled gradient: + gradient = [ grad(x) ] + [ -barrier_parameter*ones(n_ineq) ] + and scaled Jacobian matrix: + jacobian = [ jac_eq(x) 0 ] + [ jac_ineq(x) S ] + Both of them scaled by the previously defined scaling factor. 
+ """ + # Get variables and slack variables + x = self.get_variables(z) + s = self.get_slack(z) + # Compute first derivatives + g = self.grad(x) + J_eq, J_ineq = self.jac(x) + # Return gradient and Jacobian + return (self._compute_gradient(g), + self._compute_jacobian(J_eq, J_ineq, s)) + + def _compute_gradient(self, g): + return np.hstack((g, -self.barrier_parameter*np.ones(self.n_ineq))) + + def _compute_jacobian(self, J_eq, J_ineq, s): + if self.n_ineq == 0: + return J_eq + else: + if sps.issparse(J_eq) or sps.issparse(J_ineq): + # It is expected that J_eq and J_ineq + # are already `csr_matrix` because of + # the way ``BoxConstraint``, ``NonlinearConstraint`` + # and ``LinearConstraint`` are defined. + J_eq = sps.csr_matrix(J_eq) + J_ineq = sps.csr_matrix(J_ineq) + return self._assemble_sparse_jacobian(J_eq, J_ineq, s) + else: + S = np.diag(s) + zeros = np.zeros((self.n_eq, self.n_ineq)) + # Convert to matrix + if sps.issparse(J_ineq): + J_ineq = J_ineq.toarray() + if sps.issparse(J_eq): + J_eq = J_eq.toarray() + # Concatenate matrices + return np.block([[J_eq, zeros], + [J_ineq, S]]) + + def _assemble_sparse_jacobian(self, J_eq, J_ineq, s): + """Assemble sparse Jacobian given its components. + + Given ``J_eq``, ``J_ineq`` and ``s`` returns: + jacobian = [ J_eq, 0 ] + [ J_ineq, diag(s) ] + + It is equivalent to: + sps.bmat([[ J_eq, None ], + [ J_ineq, diag(s) ]], "csr") + but significantly more efficient for this + given structure. 
+ """ + n_vars, n_ineq, n_eq = self.n_vars, self.n_ineq, self.n_eq + J_aux = sps.vstack([J_eq, J_ineq], "csr") + indptr, indices, data = J_aux.indptr, J_aux.indices, J_aux.data + new_indptr = indptr + np.hstack((np.zeros(n_eq, dtype=int), + np.arange(n_ineq+1, dtype=int))) + size = indices.size+n_ineq + new_indices = np.empty(size) + new_data = np.empty(size) + mask = np.full(size, False, bool) + mask[new_indptr[-n_ineq:]-1] = True + new_indices[mask] = n_vars+np.arange(n_ineq) + new_indices[~mask] = indices + new_data[mask] = s + new_data[~mask] = data + J = sps.csr_matrix((new_data, new_indices, new_indptr), + (n_eq + n_ineq, n_vars + n_ineq)) + return J + + def lagrangian_hessian_x(self, z, v): + """Returns Lagrangian Hessian (in relation to `x`) -> Hx""" + x = self.get_variables(z) + # Get lagrange multipliers related to nonlinear equality constraints + v_eq = v[:self.n_eq] + # Get lagrange multipliers related to nonlinear ineq. constraints + v_ineq = v[self.n_eq:self.n_eq+self.n_ineq] + lagr_hess = self.lagr_hess + return lagr_hess(x, v_eq, v_ineq) + + def lagrangian_hessian_s(self, z, v): + """Returns scaled Lagrangian Hessian (in relation to`s`) -> S Hs S""" + s = self.get_slack(z) + # Using the primal formulation: + # S Hs S = diag(s)*diag(barrier_parameter/s**2)*diag(s). + # Reference [1]_ p. 882, formula (3.1) + primal = self.barrier_parameter + # Using the primal-dual formulation + # S Hs S = diag(s)*diag(v/s)*diag(s) + # Reference [1]_ p. 883, formula (3.11) + primal_dual = v[-self.n_ineq:]*s + # Uses the primal-dual formulation for + # positives values of v_ineq, and primal + # formulation for the remaining ones. 
+ return np.where(v[-self.n_ineq:] > 0, primal_dual, primal) + + def lagrangian_hessian(self, z, v): + """Returns scaled Lagrangian Hessian""" + # Compute Hessian in relation to x and s + Hx = self.lagrangian_hessian_x(z, v) + if self.n_ineq > 0: + S_Hs_S = self.lagrangian_hessian_s(z, v) + + # The scaled Lagragian Hessian is: + # [ Hx 0 ] + # [ 0 S Hs S ] + def matvec(vec): + vec_x = self.get_variables(vec) + vec_s = self.get_slack(vec) + if self.n_ineq > 0: + return np.hstack((Hx.dot(vec_x), S_Hs_S*vec_s)) + else: + return Hx.dot(vec_x) + return LinearOperator((self.n_vars+self.n_ineq, + self.n_vars+self.n_ineq), + matvec) + + def stop_criteria(self, state, z, last_iteration_failed, + optimality, constr_violation, + trust_radius, penalty, cg_info): + """Stop criteria to the barrier problem. + The criteria here proposed is similar to formula (2.3) + from [1]_, p.879. + """ + x = self.get_variables(z) + if self.global_stop_criteria(state, x, + last_iteration_failed, + trust_radius, penalty, + cg_info, + self.barrier_parameter, + self.tolerance): + self.terminate = True + return True + else: + g_cond = (optimality < self.tolerance and + constr_violation < self.tolerance) + x_cond = trust_radius < self.xtol + return g_cond or x_cond + + +def tr_interior_point(fun, grad, lagr_hess, n_vars, n_ineq, n_eq, + constr, jac, x0, fun0, grad0, + constr_ineq0, jac_ineq0, constr_eq0, + jac_eq0, stop_criteria, + enforce_feasibility, xtol, state, + initial_barrier_parameter, + initial_tolerance, + initial_penalty, + initial_trust_radius, + factorization_method): + """Trust-region interior points method. + + Solve problem: + minimize fun(x) + subject to: constr_ineq(x) <= 0 + constr_eq(x) = 0 + using trust-region interior point method described in [1]_. + """ + # BOUNDARY_PARAMETER controls the decrease on the slack + # variables. Represents ``tau`` from [1]_ p.885, formula (3.18). 
+ BOUNDARY_PARAMETER = 0.995 + # BARRIER_DECAY_RATIO controls the decay of the barrier parameter + # and of the subproblem toloerance. Represents ``theta`` from [1]_ p.879. + BARRIER_DECAY_RATIO = 0.2 + # TRUST_ENLARGEMENT controls the enlargement on trust radius + # after each iteration + TRUST_ENLARGEMENT = 5 + + # Default enforce_feasibility + if enforce_feasibility is None: + enforce_feasibility = np.zeros(n_ineq, bool) + # Initial Values + barrier_parameter = initial_barrier_parameter + tolerance = initial_tolerance + trust_radius = initial_trust_radius + # Define initial value for the slack variables + s0 = np.maximum(-1.5*constr_ineq0, np.ones(n_ineq)) + # Define barrier subproblem + subprob = BarrierSubproblem( + x0, s0, fun, grad, lagr_hess, n_vars, n_ineq, n_eq, constr, jac, + barrier_parameter, tolerance, enforce_feasibility, + stop_criteria, xtol, fun0, grad0, constr_ineq0, jac_ineq0, + constr_eq0, jac_eq0) + # Define initial parameter for the first iteration. + z = np.hstack((x0, s0)) + fun0_subprob, constr0_subprob = subprob.fun0, subprob.constr0 + grad0_subprob, jac0_subprob = subprob.grad0, subprob.jac0 + # Define trust region bounds + trust_lb = np.hstack((np.full(subprob.n_vars, -np.inf), + np.full(subprob.n_ineq, -BOUNDARY_PARAMETER))) + trust_ub = np.full(subprob.n_vars+subprob.n_ineq, np.inf) + + # Solves a sequence of barrier problems + while True: + # Solve SQP subproblem + z, state = equality_constrained_sqp( + subprob.function_and_constraints, + subprob.gradient_and_jacobian, + subprob.lagrangian_hessian, + z, fun0_subprob, grad0_subprob, + constr0_subprob, jac0_subprob, subprob.stop_criteria, + state, initial_penalty, trust_radius, + factorization_method, trust_lb, trust_ub, subprob.scaling) + if subprob.terminate: + break + # Update parameters + trust_radius = max(initial_trust_radius, + TRUST_ENLARGEMENT*state.tr_radius) + # TODO: Use more advanced strategies from [2]_ + # to update this parameters. 
+ barrier_parameter *= BARRIER_DECAY_RATIO + tolerance *= BARRIER_DECAY_RATIO + # Update Barrier Problem + subprob.update(barrier_parameter, tolerance) + # Compute initial values for next iteration + fun0_subprob, constr0_subprob = subprob.function_and_constraints(z) + grad0_subprob, jac0_subprob = subprob.gradient_and_jacobian(z) + + # Get x and s + x = subprob.get_variables(z) + return x, state diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2a66a6b4c2e5eb8580ecc3c1f06c3cea5256417d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__differential_evolution.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__differential_evolution.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff073a95bcf9ec829b78efed69bc795e2641e4bb Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__differential_evolution.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__dual_annealing.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__dual_annealing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..07b3f5feb140a5c9147e4fedcd3ee7a24d250ae5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__dual_annealing.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__linprog_clean_inputs.cpython-310.pyc 
b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__linprog_clean_inputs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e816baeaaf593de3c85d2443966d4165694c1e1b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__linprog_clean_inputs.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__numdiff.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__numdiff.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a8886e47efb0ce89149246171982e516b17563e8 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__numdiff.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__remove_redundancy.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__remove_redundancy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8aa6b4c042351325aec04456e0127eeec191fcd6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__remove_redundancy.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__root.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__root.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d884993f491ae2cb71709a761e5c47d0d645f961 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__root.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__shgo.cpython-310.pyc 
b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__shgo.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..304b795324e8cbe09b0ff6a94303964b8ee65fde Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__shgo.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__spectral.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__spectral.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b665b6179bdbbc44d13567a1b8af53e1168d5672 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__spectral.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_bracket.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_bracket.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..57dd1d7cdf2bccdc404e6eff188293bb9bd1eac4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_bracket.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_chandrupatla.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_chandrupatla.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..200b2ab2acdd7cf147bdd1b3c7d7c0a625f23b12 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_chandrupatla.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_cobyla.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_cobyla.cpython-310.pyc 
new file mode 100644 index 0000000000000000000000000000000000000000..d0b0cbb6d9b0e6e6a34d487059c1b69090c1da3c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_cobyla.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_constraint_conversion.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_constraint_conversion.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89f3043ad245a528248b7954b55b088ffb143354 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_constraint_conversion.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_constraints.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_constraints.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f4fe115d3834a3c5cec7f949da22501bffd09b74 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_constraints.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_cython_optimize.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_cython_optimize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..242676724d8cd8252a9de0f33227f45d058d2907 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_cython_optimize.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_differentiable_functions.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_differentiable_functions.cpython-310.pyc new file 
mode 100644 index 0000000000000000000000000000000000000000..c3d756bb429cd584a131649f394020353a64f4e7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_differentiable_functions.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_differentiate.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_differentiate.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..940538f8d9a2a2f388e8a036d43f9e64cbd9597f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_differentiate.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_direct.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_direct.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..257a320957fed2e8fa03b9cc6aec38c2a0995d41 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_direct.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_hessian_update_strategy.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_hessian_update_strategy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8516c4ddbb810c6170e4e359f9197b6703ed7bff Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_hessian_update_strategy.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_isotonic_regression.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_isotonic_regression.cpython-310.pyc new file mode 100644 
index 0000000000000000000000000000000000000000..93095f85212d53b82bf3deb284f905aef4fbb77c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_isotonic_regression.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_lbfgsb_hessinv.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_lbfgsb_hessinv.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..599c16200914eaf2b2d34199851563242d829768 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_lbfgsb_hessinv.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_lbfgsb_setulb.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_lbfgsb_setulb.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5fdc0c61addb76bfe16ee1a2da9dae015349f290 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_lbfgsb_setulb.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_least_squares.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_least_squares.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..682d3df428fa8bcf27dc2945131eee3b89c4f0d2 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_least_squares.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_linear_assignment.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_linear_assignment.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..a6179d4289a8308b1621f731ce23f61a01a08ff6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_linear_assignment.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_linesearch.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_linesearch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..013dae557f46d23179eb84ea68e0d489c38b852d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_linesearch.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_linprog.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_linprog.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a6bb6fb7e5b160a4a1bcb096fec9f874af9a8cb0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_linprog.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_lsq_common.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_lsq_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae23bcd71874a90918918a9b8a650ead4cea9e29 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_lsq_common.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_lsq_linear.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_lsq_linear.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2165381ee733a46bacd09ced8091e6c9eeca67d0 
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_lsq_linear.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_milp.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_milp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ebe25e315a2a817abc856863ab781d237ca749ac Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_milp.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_minimize_constrained.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_minimize_constrained.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f7a45ab0bea7e6252ca5d2bae44aa146466a6498 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_minimize_constrained.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_minpack.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_minpack.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d2ece213fe8545767e8e5d3c72d7194781323895 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_minpack.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_nnls.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_nnls.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1086f642cc33ed5fc1ffc0e2f8bdf7b61d84d78a Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_nnls.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_nonlin.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_nonlin.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6dcb247e2e8ebd77bd720165cbe4640c5b9b94a9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_nonlin.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_optimize.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_optimize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..75fcab56957b716aed030d0336417d17c96d7828 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_optimize.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_quadratic_assignment.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_quadratic_assignment.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9f5856bccb1c22124c12179875ff628ee8750795 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_quadratic_assignment.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_regression.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_regression.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9df4154c2fcbd3401016de8140146a60a44e9aca Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_regression.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_slsqp.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_slsqp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b10852f9f8ffae2bbc377ef6ab3a220bf71631ad Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_slsqp.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_tnc.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_tnc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5829594acfbf50f2fdcf6be7a7d524a15b237830 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_tnc.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_trustregion.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_trustregion.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ba6fa26d31abcbaacd6e8656e4e0f841bc6ad766 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_trustregion.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_trustregion_exact.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_trustregion_exact.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5d5d7c73715cfa7584ea5770179a46dc8d82433a Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_trustregion_exact.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_trustregion_krylov.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_trustregion_krylov.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2b363be64f015991eb96f89da3aa768a106050ed Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_trustregion_krylov.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_zeros.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_zeros.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..813ba1a5eac4fc51c4bec138290a758afd1c5e67 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_zeros.cpython-310.pyc differ