diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_bglu_dense.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_bglu_dense.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..3a22e9f404e7537b549f3c3a85a4f4529f8c0e6f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_bglu_dense.cpython-310-x86_64-linux-gnu.so differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_bracket.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_bracket.py new file mode 100644 index 0000000000000000000000000000000000000000..bb7726c234a0d50237e3e3a4f5e1c7f0681dc601 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_bracket.py @@ -0,0 +1,663 @@ +import numpy as np +import scipy._lib._elementwise_iterative_method as eim +from scipy._lib._util import _RichResult + +_ELIMITS = -1 # used in _bracket_root +_ESTOPONESIDE = 2 # used in _bracket_root + +def _bracket_root_iv(func, xl0, xr0, xmin, xmax, factor, args, maxiter): + + if not callable(func): + raise ValueError('`func` must be callable.') + + if not np.iterable(args): + args = (args,) + + xl0 = np.asarray(xl0)[()] + if not np.issubdtype(xl0.dtype, np.number) or np.iscomplex(xl0).any(): + raise ValueError('`xl0` must be numeric and real.') + + xr0 = xl0 + 1 if xr0 is None else xr0 + xmin = -np.inf if xmin is None else xmin + xmax = np.inf if xmax is None else xmax + factor = 2. if factor is None else factor + xl0, xr0, xmin, xmax, factor = np.broadcast_arrays(xl0, xr0, xmin, xmax, factor) + + if not np.issubdtype(xr0.dtype, np.number) or np.iscomplex(xr0).any(): + raise ValueError('`xr0` must be numeric and real.') + + if not np.issubdtype(xmin.dtype, np.number) or np.iscomplex(xmin).any(): + raise ValueError('`xmin` must be numeric and real.') + + if not np.issubdtype(xmax.dtype, np.number) or np.iscomplex(xmax).any(): + raise ValueError('`xmax` must be numeric and real.') + + if not np.issubdtype(factor.dtype, np.number) or np.iscomplex(factor).any(): + raise ValueError('`factor` must be numeric and real.') + if not np.all(factor > 1): + raise ValueError('All elements of `factor` must be greater than 1.') + + maxiter = np.asarray(maxiter) + message = '`maxiter` must be a non-negative integer.' + if (not np.issubdtype(maxiter.dtype, np.number) or maxiter.shape != tuple() + or np.iscomplex(maxiter)): + raise ValueError(message) + maxiter_int = int(maxiter[()]) + if not maxiter == maxiter_int or maxiter < 0: + raise ValueError(message) + + if not np.all((xmin <= xl0) & (xl0 < xr0) & (xr0 <= xmax)): + raise ValueError('`xmin <= xl0 < xr0 <= xmax` must be True (elementwise).') + + return func, xl0, xr0, xmin, xmax, factor, args, maxiter + + +def _bracket_root(func, xl0, xr0=None, *, xmin=None, xmax=None, factor=None, + args=(), maxiter=1000): + """Bracket the root of a monotonic scalar function of one variable + + This function works elementwise when `xl0`, `xr0`, `xmin`, `xmax`, `factor`, and + the elements of `args` are broadcastable arrays. + + Parameters + ---------- + func : callable + The function for which the root is to be bracketed. + The signature must be:: + + func(x: ndarray, *args) -> ndarray + + where each element of ``x`` is a finite real and ``args`` is a tuple, + which may contain an arbitrary number of arrays that are broadcastable + with `x`. 
``func`` must be an elementwise function: each element + ``func(x)[i]`` must equal ``func(x[i])`` for all indices ``i``. + xl0, xr0: float array_like + Starting guess of bracket, which need not contain a root. If `xr0` is + not provided, ``xr0 = xl0 + 1``. Must be broadcastable with one another. + xmin, xmax : float array_like, optional + Minimum and maximum allowable endpoints of the bracket, inclusive. Must + be broadcastable with `xl0` and `xr0`. + factor : float array_like, default: 2 + The factor used to grow the bracket. See notes for details. + args : tuple, optional + Additional positional arguments to be passed to `func`. Must be arrays + broadcastable with `xl0`, `xr0`, `xmin`, and `xmax`. If the callable to be + bracketed requires arguments that are not broadcastable with these + arrays, wrap that callable with `func` such that `func` accepts + only `x` and broadcastable arrays. + maxiter : int, optional + The maximum number of iterations of the algorithm to perform. + + Returns + ------- + res : _RichResult + An instance of `scipy._lib._util._RichResult` with the following + attributes. The descriptions are written as though the values will be + scalars; however, if `func` returns an array, the outputs will be + arrays of the same shape. + + xl, xr : float + The lower and upper ends of the bracket, if the algorithm + terminated successfully. + fl, fr : float + The function value at the lower and upper ends of the bracket. + nfev : int + The number of function evaluations required to find the bracket. + This is distinct from the number of times `func` is *called* + because the function may evaluated at multiple points in a single + call. + nit : int + The number of iterations of the algorithm that were performed. + status : int + An integer representing the exit status of the algorithm. + + - ``0`` : The algorithm produced a valid bracket. + - ``-1`` : The bracket expanded to the allowable limits without finding a bracket. + - ``-2`` : The maximum number of iterations was reached. + - ``-3`` : A non-finite value was encountered. + - ``-4`` : Iteration was terminated by `callback`. + - ``1`` : The algorithm is proceeding normally (in `callback` only). + - ``2`` : A bracket was found in the opposite search direction (in `callback` only). + + success : bool + ``True`` when the algorithm terminated successfully (status ``0``). + + Notes + ----- + This function generalizes an algorithm found in pieces throughout + `scipy.stats`. The strategy is to iteratively grow the bracket `(l, r)` + until ``func(l) < 0 < func(r)``. The bracket grows to the left as follows. + + - If `xmin` is not provided, the distance between `xl0` and `l` is iteratively + increased by `factor`. + - If `xmin` is provided, the distance between `xmin` and `l` is iteratively + decreased by `factor`. Note that this also *increases* the bracket size. + + Growth of the bracket to the right is analogous. + + Growth of the bracket in one direction stops when the endpoint is no longer + finite, the function value at the endpoint is no longer finite, or the + endpoint reaches its limiting value (`xmin` or `xmax`). Iteration terminates + when the bracket stops growing in both directions, the bracket surrounds + the root, or a root is found (accidentally). + + If two brackets are found - that is, a bracket is found on both sides in + the same iteration, the smaller of the two is returned. + If roots of the function are found, both `l` and `r` are set to the + leftmost root. 
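+
+    Examples
+    --------
+    An illustrative usage sketch (added for exposition; ``f`` below is a
+    hypothetical test function, and only the sign change across the returned
+    bracket is checked, not the exact endpoints):
+
+    >>> from scipy.optimize._bracket import _bracket_root
+    >>> def f(x):
+    ...     return x**3 - x - 2  # single real root near x ~ 1.52
+    >>> res = _bracket_root(f, 0.1)
+    >>> bool(res.success)
+    True
+    >>> bool(f(res.xl) < 0 < f(res.xr))
+    True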
+ + """ # noqa: E501 + # Todo: + # - find bracket with sign change in specified direction + # - Add tolerance + # - allow factor < 1? + + callback = None # works; I just don't want to test it + temp = _bracket_root_iv(func, xl0, xr0, xmin, xmax, factor, args, maxiter) + func, xl0, xr0, xmin, xmax, factor, args, maxiter = temp + + xs = (xl0, xr0) + temp = eim._initialize(func, xs, args) + func, xs, fs, args, shape, dtype = temp # line split for PEP8 + + # The approach is to treat the left and right searches as though they were + # (almost) totally independent one-sided bracket searches. (The interaction + # is considered when checking for termination and preparing the result + # object.) + # `x` is the "moving" end of the bracket + x = np.concatenate(xs) + f = np.concatenate(fs) + n = len(x) // 2 + + # `x_last` is the previous location of the moving end of the bracket. If + # the signs of `f` and `f_last` are different, `x` and `x_last` form a + # bracket. + x_last = np.concatenate((x[n:], x[:n])) + f_last = np.concatenate((f[n:], f[:n])) + # `x0` is the "fixed" end of the bracket. + x0 = x_last + # We don't need to retain the corresponding function value, since the + # fixed end of the bracket is only needed to compute the new value of the + # moving end; it is never returned. + + xmin = np.broadcast_to(xmin, shape).astype(dtype, copy=False).ravel() + xmax = np.broadcast_to(xmax, shape).astype(dtype, copy=False).ravel() + limit = np.concatenate((xmin, xmax)) + + factor = np.broadcast_to(factor, shape).astype(dtype, copy=False).ravel() + factor = np.concatenate((factor, factor)) + + active = np.arange(2*n) + args = [np.concatenate((arg, arg)) for arg in args] + + # This is needed due to inner workings of `eim._loop`. + # We're abusing it a tiny bit. + shape = shape + (2,) + + # `d` is for "distance". + # For searches without a limit, the distance between the fixed end of the + # bracket `x0` and the moving end `x` will grow by `factor` each iteration. + # For searches with a limit, the distance between the `limit` and moving + # end of the bracket `x` will shrink by `factor` each iteration. + i = np.isinf(limit) + ni = ~i + d = np.zeros_like(x) + d[i] = x[i] - x0[i] + d[ni] = limit[ni] - x[ni] + + status = np.full_like(x, eim._EINPROGRESS, dtype=int) # in progress + nit, nfev = 0, 1 # one function evaluation per side performed above + + work = _RichResult(x=x, x0=x0, f=f, limit=limit, factor=factor, + active=active, d=d, x_last=x_last, f_last=f_last, + nit=nit, nfev=nfev, status=status, args=args, + xl=None, xr=None, fl=None, fr=None, n=n) + res_work_pairs = [('status', 'status'), ('xl', 'xl'), ('xr', 'xr'), + ('nit', 'nit'), ('nfev', 'nfev'), ('fl', 'fl'), + ('fr', 'fr'), ('x', 'x'), ('f', 'f'), + ('x_last', 'x_last'), ('f_last', 'f_last')] + + def pre_func_eval(work): + # Initialize moving end of bracket + x = np.zeros_like(work.x) + + # Unlimited brackets grow by `factor` by increasing distance from fixed + # end to moving end. + i = np.isinf(work.limit) # indices of unlimited brackets + work.d[i] *= work.factor[i] + x[i] = work.x0[i] + work.d[i] + + # Limited brackets grow by decreasing the distance from the limit to + # the moving end. + ni = ~i # indices of limited brackets + work.d[ni] /= work.factor[ni] + x[ni] = work.limit[ni] - work.d[ni] + + return x + + def post_func_eval(x, f, work): + # Keep track of the previous location of the moving end so that we can + # return a narrower bracket. 
(The alternative is to remember the + # original fixed end, but then the bracket would be wider than needed.) + work.x_last = work.x + work.f_last = work.f + work.x = x + work.f = f + + def check_termination(work): + stop = np.zeros_like(work.x, dtype=bool) + + # Condition 1: a valid bracket (or the root itself) has been found + sf = np.sign(work.f) + sf_last = np.sign(work.f_last) + i = (sf_last == -sf) | (sf_last == 0) | (sf == 0) + work.status[i] = eim._ECONVERGED + stop[i] = True + + # Condition 2: the other side's search found a valid bracket. + # (If we just found a bracket with the rightward search, we can stop + # the leftward search, and vice-versa.) + # To do this, we need to set the status of the other side's search; + # this is tricky because `work.status` contains only the *active* + # elements, so we don't immediately know the index of the element we + # need to set - or even if it's still there. (That search may have + # terminated already, e.g. by reaching its `limit`.) + # To facilitate this, `work.active` contains a unit integer index of + # each search. Index `k` (`k < n)` and `k + n` correspond with a + # leftward and rightward search, respectively. Elements are removed + # from `work.active` just as they are removed from `work.status`, so + # we use `work.active` to help find the right location in + # `work.status`. + # Get the integer indices of the elements that can also stop + also_stop = (work.active[i] + work.n) % (2*work.n) + # Check whether they are still active. + # To start, we need to find out where in `work.active` they would + # appear if they are indeed there. + j = np.searchsorted(work.active, also_stop) + # If the location exceeds the length of the `work.active`, they are + # not there. + j = j[j < len(work.active)] + # Check whether they are still there. + j = j[also_stop == work.active[j]] + # Now convert these to boolean indices to use with `work.status`. + i = np.zeros_like(stop) + i[j] = True # boolean indices of elements that can also stop + i = i & ~stop + work.status[i] = _ESTOPONESIDE + stop[i] = True + + # Condition 3: moving end of bracket reaches limit + i = (work.x == work.limit) & ~stop + work.status[i] = _ELIMITS + stop[i] = True + + # Condition 4: non-finite value encountered + i = ~(np.isfinite(work.x) & np.isfinite(work.f)) & ~stop + work.status[i] = eim._EVALUEERR + stop[i] = True + + return stop + + def post_termination_check(work): + pass + + def customize_result(res, shape): + n = len(res['x']) // 2 + + # To avoid ambiguity, below we refer to `xl0`, the initial left endpoint + # as `a` and `xr0`, the initial right endpoint, as `b`. + # Because we treat the two one-sided searches as though they were + # independent, what we keep track of in `work` and what we want to + # return in `res` look quite different. Combine the results from the + # two one-sided searches before reporting the results to the user. + # - "a" refers to the leftward search (the moving end started at `a`) + # - "b" refers to the rightward search (the moving end started at `b`) + # - "l" refers to the left end of the bracket (closer to -oo) + # - "r" refers to the right end of the bracket (closer to +oo) + xal = res['x'][:n] + xar = res['x_last'][:n] + xbl = res['x_last'][n:] + xbr = res['x'][n:] + + fal = res['f'][:n] + far = res['f_last'][:n] + fbl = res['f_last'][n:] + fbr = res['f'][n:] + + # Initialize the brackets and corresponding function values to return + # to the user. Brackets may not be valid (e.g. 
there is no root, + # there weren't enough iterations, NaN encountered), but we still need + # to return something. One option would be all NaNs, but what I've + # chosen here is the left- and right-most points at which the function + # has been evaluated. This gives the user some information about what + # interval of the real line has been searched and shows that there is + # no sign change between the two ends. + xl = xal.copy() + fl = fal.copy() + xr = xbr.copy() + fr = fbr.copy() + + # `status` indicates whether the bracket is valid or not. If so, + # we want to adjust the bracket we return to be the narrowest possible + # given the points at which we evaluated the function. + # For example if bracket "a" is valid and smaller than bracket "b" OR + # if bracket "a" is valid and bracket "b" is not valid, we want to + # return bracket "a" (and vice versa). + sa = res['status'][:n] + sb = res['status'][n:] + + da = xar - xal + db = xbr - xbl + + i1 = ((da <= db) & (sa == 0)) | ((sa == 0) & (sb != 0)) + i2 = ((db <= da) & (sb == 0)) | ((sb == 0) & (sa != 0)) + + xr[i1] = xar[i1] + fr[i1] = far[i1] + xl[i2] = xbl[i2] + fl[i2] = fbl[i2] + + # Finish assembling the result object + res['xl'] = xl + res['xr'] = xr + res['fl'] = fl + res['fr'] = fr + + res['nit'] = np.maximum(res['nit'][:n], res['nit'][n:]) + res['nfev'] = res['nfev'][:n] + res['nfev'][n:] + # If the status on one side is zero, the status is zero. In any case, + # report the status from one side only. + res['status'] = np.choose(sa == 0, (sb, sa)) + res['success'] = (res['status'] == 0) + + del res['x'] + del res['f'] + del res['x_last'] + del res['f_last'] + + return shape[:-1] + + return eim._loop(work, callback, shape, maxiter, func, args, dtype, + pre_func_eval, post_func_eval, check_termination, + post_termination_check, customize_result, res_work_pairs) + + +def _bracket_minimum_iv(func, xm0, xl0, xr0, xmin, xmax, factor, args, maxiter): + + if not callable(func): + raise ValueError('`func` must be callable.') + + if not np.iterable(args): + args = (args,) + + xm0 = np.asarray(xm0)[()] + if not np.issubdtype(xm0.dtype, np.number) or np.iscomplex(xm0).any(): + raise ValueError('`xm0` must be numeric and real.') + + xmin = -np.inf if xmin is None else xmin + xmax = np.inf if xmax is None else xmax + + xl0_not_supplied = False + if xl0 is None: + xl0 = xm0 - 0.5 + xl0_not_supplied = True + + xr0_not_supplied = False + if xr0 is None: + xr0 = xm0 + 0.5 + xr0_not_supplied = True + + factor = 2.0 if factor is None else factor + xl0, xm0, xr0, xmin, xmax, factor = np.broadcast_arrays( + xl0, xm0, xr0, xmin, xmax, factor + ) + + if not np.issubdtype(xl0.dtype, np.number) or np.iscomplex(xl0).any(): + raise ValueError('`xl0` must be numeric and real.') + + if not np.issubdtype(xr0.dtype, np.number) or np.iscomplex(xr0).any(): + raise ValueError('`xr0` must be numeric and real.') + + if not np.issubdtype(xmin.dtype, np.number) or np.iscomplex(xmin).any(): + raise ValueError('`xmin` must be numeric and real.') + + if not np.issubdtype(xmax.dtype, np.number) or np.iscomplex(xmax).any(): + raise ValueError('`xmax` must be numeric and real.') + + if not np.issubdtype(factor.dtype, np.number) or np.iscomplex(factor).any(): + raise ValueError('`factor` must be numeric and real.') + if not np.all(factor > 1): + raise ValueError('All elements of `factor` must be greater than 1.') + + # Default choices for xl or xr might have exceeded xmin or xmax. Adjust + # to make sure this doesn't happen. 
We replace with copies because xl, and xr + # are read-only views produced by broadcast_arrays. + if xl0_not_supplied: + xl0 = xl0.copy() + cond = ~np.isinf(xmin) & (xl0 < xmin) + xl0[cond] = ( + xm0[cond] - xmin[cond] + ) / np.array(16, dtype=xl0.dtype) + if xr0_not_supplied: + xr0 = xr0.copy() + cond = ~np.isinf(xmax) & (xmax < xr0) + xr0[cond] = ( + xmax[cond] - xm0[cond] + ) / np.array(16, dtype=xr0.dtype) + + maxiter = np.asarray(maxiter) + message = '`maxiter` must be a non-negative integer.' + if (not np.issubdtype(maxiter.dtype, np.number) or maxiter.shape != tuple() + or np.iscomplex(maxiter)): + raise ValueError(message) + maxiter_int = int(maxiter[()]) + if not maxiter == maxiter_int or maxiter < 0: + raise ValueError(message) + + if not np.all((xmin <= xl0) & (xl0 < xm0) & (xm0 < xr0) & (xr0 <= xmax)): + raise ValueError( + '`xmin <= xl0 < xm0 < xr0 <= xmax` must be True (elementwise).' + ) + + return func, xm0, xl0, xr0, xmin, xmax, factor, args, maxiter + + +def _bracket_minimum(func, xm0, *, xl0=None, xr0=None, xmin=None, xmax=None, + factor=None, args=(), maxiter=1000): + """Bracket the minimum of a unimodal scalar function of one variable + + This function works elementwise when `xm0`, `xl0`, `xr0`, `xmin`, `xmax`, + and the elements of `args` are broadcastable arrays. + + Parameters + ---------- + func : callable + The function for which the minimum is to be bracketed. + The signature must be:: + + func(x: ndarray, *args) -> ndarray + + where each element of ``x`` is a finite real and ``args`` is a tuple, + which may contain an arbitrary number of arrays that are broadcastable + with ``x``. `func` must be an elementwise function: each element + ``func(x)[i]`` must equal ``func(x[i])`` for all indices `i`. + xm0: float array_like + Starting guess for middle point of bracket. + xl0, xr0: float array_like, optional + Starting guesses for left and right endpoints of the bracket. Must be + broadcastable with one another and with `xm0`. + xmin, xmax : float array_like, optional + Minimum and maximum allowable endpoints of the bracket, inclusive. Must + be broadcastable with `xl0`, `xm0`, and `xr0`. + factor : float array_like, optional + Controls expansion of bracket endpoint in downhill direction. Works + differently in the cases where a limit is set in the downhill direction + with `xmax` or `xmin`. See Notes. + args : tuple, optional + Additional positional arguments to be passed to `func`. Must be arrays + broadcastable with `xl0`, `xm0`, `xr0`, `xmin`, and `xmax`. If the + callable to be bracketed requires arguments that are not broadcastable + with these arrays, wrap that callable with `func` such that `func` + accepts only ``x`` and broadcastable arrays. + maxiter : int, optional + The maximum number of iterations of the algorithm to perform. The number + of function evaluations is three greater than the number of iterations. + + Returns + ------- + res : _RichResult + An instance of `scipy._lib._util._RichResult` with the following + attributes. The descriptions are written as though the values will be + scalars; however, if `func` returns an array, the outputs will be + arrays of the same shape. + + xl, xm, xr : float + The left, middle, and right points of the bracket, if the algorithm + terminated successfully. + fl, fm, fr : float + The function value at the left, middle, and right points of the bracket. + nfev : int + The number of function evaluations required to find the bracket. + nit : int + The number of iterations of the algorithm that were performed. 
+ status : int + An integer representing the exit status of the algorithm. + + - ``0`` : The algorithm produced a valid bracket. + - ``-1`` : The bracket expanded to the allowable limits. Assuming + unimodality, this implies the endpoint at the limit is a + minimizer. + - ``-2`` : The maximum number of iterations was reached. + - ``-3`` : A non-finite value was encountered. + + success : bool + ``True`` when the algorithm terminated successfully (status ``0``). + + Notes + ----- + Similar to `scipy.optimize.bracket`, this function seeks to find real + points ``xl < xm < xr`` such that ``f(xl) >= f(xm)`` and ``f(xr) >= f(xm)``, + where at least one of the inequalities is strict. Unlike `scipy.optimize.bracket`, + this function can operate in a vectorized manner on array input, so long as + the input arrays are broadcastable with each other. Also unlike + `scipy.optimize.bracket`, users may specify minimum and maximum endpoints + for the desired bracket. + + Given an initial trio of points ``xl = xl0``, ``xm = xm0``, ``xr = xr0``, + the algorithm checks if these points already give a valid bracket. If not, + a new endpoint, ``w`` is chosen in the "downhill" direction, ``xm`` becomes the new + opposite endpoint, and either `xl` or `xr` becomes the new middle point, + depending on which direction is downhill. The algorithm repeats from here. + + The new endpoint `w` is chosen differently depending on whether or not a + boundary `xmin` or `xmax` has been set in the downhill direction. Without + loss of generality, suppose the downhill direction is to the right, so that + ``f(xl) > f(xm) > f(xr)``. If there is no boundary to the right, then `w` + is chosen to be ``xr + factor * (xr - xm)`` where `factor` is controlled by + the user (defaults to 2.0) so that step sizes increase in geometric proportion. + If there is a boundary, `xmax` in this case, then `w` is chosen to be + ``xmax - (xmax - xr)/factor``, with steps slowing to a stop at + `xmax`. This cautious approach ensures that a minimum near but distinct from + the boundary isn't missed while also detecting whether or not the `xmax` is + a minimizer when `xmax` is reached after a finite number of steps. + """ # noqa: E501 + callback = None # works; I just don't want to test it + + temp = _bracket_minimum_iv(func, xm0, xl0, xr0, xmin, xmax, factor, args, maxiter) + func, xm0, xl0, xr0, xmin, xmax, factor, args, maxiter = temp + + xs = (xl0, xm0, xr0) + func, xs, fs, args, shape, dtype = eim._initialize(func, xs, args) + + xl0, xm0, xr0 = xs + fl0, fm0, fr0 = fs + xmin = np.broadcast_to(xmin, shape).astype(dtype, copy=False).ravel() + xmax = np.broadcast_to(xmax, shape).astype(dtype, copy=False).ravel() + # We will modify factor later on so make a copy. np.broadcast_to returns + # a read-only view. + factor = np.broadcast_to(factor, shape).astype(dtype, copy=True).ravel() + + # To simplify the logic, swap xl and xr if f(xl) < f(xr). We should always be + # marching downhill in the direction from xl to xr. + comp = fl0 < fr0 + xl0[comp], xr0[comp] = xr0[comp], xl0[comp] + fl0[comp], fr0[comp] = fr0[comp], fl0[comp] + # We only need the boundary in the direction we're traveling. + limit = np.where(comp, xmin, xmax) + + unlimited = np.isinf(limit) + limited = ~unlimited + step = np.empty_like(xl0) + + step[unlimited] = (xr0[unlimited] - xm0[unlimited]) + step[limited] = (limit[limited] - xr0[limited]) + + # Step size is divided by factor for case where there is a limit. 
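+    # (Each iteration multiplies `step` by this reciprocal factor, so the
+    # distance between the moving endpoint and the limit shrinks geometrically;
+    # the endpoint approaches the limit without overshooting it.)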
+ factor[limited] = 1 / factor[limited] + + status = np.full_like(xl0, eim._EINPROGRESS, dtype=int) + nit, nfev = 0, 3 + + work = _RichResult(xl=xl0, xm=xm0, xr=xr0, xr0=xr0, fl=fl0, fm=fm0, fr=fr0, + step=step, limit=limit, limited=limited, factor=factor, nit=nit, + nfev=nfev, status=status, args=args) + + res_work_pairs = [('status', 'status'), ('xl', 'xl'), ('xm', 'xm'), ('xr', 'xr'), + ('nit', 'nit'), ('nfev', 'nfev'), ('fl', 'fl'), ('fm', 'fm'), + ('fr', 'fr')] + + def pre_func_eval(work): + work.step *= work.factor + x = np.empty_like(work.xr) + x[~work.limited] = work.xr0[~work.limited] + work.step[~work.limited] + x[work.limited] = work.limit[work.limited] - work.step[work.limited] + # Since the new bracket endpoint is calculated from an offset with the + # limit, it may be the case that the new endpoint equals the old endpoint, + # when the old endpoint is sufficiently close to the limit. We use the + # limit itself as the new endpoint in these cases. + x[work.limited] = np.where( + x[work.limited] == work.xr[work.limited], + work.limit[work.limited], + x[work.limited], + ) + return x + + def post_func_eval(x, f, work): + work.xl, work.xm, work.xr = work.xm, work.xr, x + work.fl, work.fm, work.fr = work.fm, work.fr, f + + def check_termination(work): + # Condition 1: A valid bracket has been found. + stop = ( + (work.fl >= work.fm) & (work.fr > work.fm) + | (work.fl > work.fm) & (work.fr >= work.fm) + ) + work.status[stop] = eim._ECONVERGED + + # Condition 2: Moving end of bracket reaches limit. + i = (work.xr == work.limit) & ~stop + work.status[i] = _ELIMITS + stop[i] = True + + # Condition 3: non-finite value encountered + i = ~(np.isfinite(work.xr) & np.isfinite(work.fr)) & ~stop + work.status[i] = eim._EVALUEERR + stop[i] = True + + return stop + + def post_termination_check(work): + pass + + def customize_result(res, shape): + # Reorder entries of xl and xr if they were swapped due to f(xl0) < f(xr0). + comp = res['xl'] > res['xr'] + res['xl'][comp], res['xr'][comp] = res['xr'][comp], res['xl'][comp] + res['fl'][comp], res['fr'][comp] = res['fr'][comp], res['fl'][comp] + return shape + + return eim._loop(work, callback, shape, + maxiter, func, args, dtype, + pre_func_eval, post_func_eval, + check_termination, post_termination_check, + customize_result, res_work_pairs) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_chandrupatla.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_chandrupatla.py new file mode 100644 index 0000000000000000000000000000000000000000..02cc746b1a825bb4c419e11d54da5bbc9d43cc1a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_chandrupatla.py @@ -0,0 +1,524 @@ +import numpy as np +from ._zeros_py import _xtol, _rtol, _iter +import scipy._lib._elementwise_iterative_method as eim +from scipy._lib._util import _RichResult + +def _chandrupatla(func, a, b, *, args=(), xatol=_xtol, xrtol=_rtol, + fatol=None, frtol=0, maxiter=_iter, callback=None): + """Find the root of an elementwise function using Chandrupatla's algorithm. + + For each element of the output of `func`, `chandrupatla` seeks the scalar + root that makes the element 0. This function allows for `a`, `b`, and the + output of `func` to be of any broadcastable shapes. + + Parameters + ---------- + func : callable + The function whose root is desired. 
The signature must be:: + + func(x: ndarray, *args) -> ndarray + + where each element of ``x`` is a finite real and ``args`` is a tuple, + which may contain an arbitrary number of components of any type(s). + ``func`` must be an elementwise function: each element ``func(x)[i]`` + must equal ``func(x[i])`` for all indices ``i``. `_chandrupatla` + seeks an array ``x`` such that ``func(x)`` is an array of zeros. + a, b : array_like + The lower and upper bounds of the root of the function. Must be + broadcastable with one another. + args : tuple, optional + Additional positional arguments to be passed to `func`. + xatol, xrtol, fatol, frtol : float, optional + Absolute and relative tolerances on the root and function value. + See Notes for details. + maxiter : int, optional + The maximum number of iterations of the algorithm to perform. + callback : callable, optional + An optional user-supplied function to be called before the first + iteration and after each iteration. + Called as ``callback(res)``, where ``res`` is a ``_RichResult`` + similar to that returned by `_chandrupatla` (but containing the current + iterate's values of all variables). If `callback` raises a + ``StopIteration``, the algorithm will terminate immediately and + `_chandrupatla` will return a result. + + Returns + ------- + res : _RichResult + An instance of `scipy._lib._util._RichResult` with the following + attributes. The descriptions are written as though the values will be + scalars; however, if `func` returns an array, the outputs will be + arrays of the same shape. + + x : float + The root of the function, if the algorithm terminated successfully. + nfev : int + The number of times the function was called to find the root. + nit : int + The number of iterations of Chandrupatla's algorithm performed. + status : int + An integer representing the exit status of the algorithm. + ``0`` : The algorithm converged to the specified tolerances. + ``-1`` : The algorithm encountered an invalid bracket. + ``-2`` : The maximum number of iterations was reached. + ``-3`` : A non-finite value was encountered. + ``-4`` : Iteration was terminated by `callback`. + ``1`` : The algorithm is proceeding normally (in `callback` only). + success : bool + ``True`` when the algorithm terminated successfully (status ``0``). + fun : float + The value of `func` evaluated at `x`. + xl, xr : float + The lower and upper ends of the bracket. + fl, fr : float + The function value at the lower and upper ends of the bracket. + + Notes + ----- + Implemented based on Chandrupatla's original paper [1]_. + + If ``xl`` and ``xr`` are the left and right ends of the bracket, + ``xmin = xl if abs(func(xl)) <= abs(func(xr)) else xr``, + and ``fmin0 = min(func(a), func(b))``, then the algorithm is considered to + have converged when ``abs(xr - xl) < xatol + abs(xmin) * xrtol`` or + ``fun(xmin) <= fatol + abs(fmin0) * frtol``. This is equivalent to the + termination condition described in [1]_ with ``xrtol = 4e-10``, + ``xatol = 1e-5``, and ``fatol = frtol = 0``. The default values are + ``xatol = 2e-12``, ``xrtol = 4 * np.finfo(float).eps``, ``frtol = 0``, + and ``fatol`` is the smallest normal number of the ``dtype`` returned + by ``func``. + + References + ---------- + + .. [1] Chandrupatla, Tirupathi R. + "A new hybrid quadratic/bisection algorithm for finding the zero of a + nonlinear function without using derivatives". + Advances in Engineering Software, 28(3), 145-149. 
+ https://doi.org/10.1016/s0965-9978(96)00051-8 + + See Also + -------- + brentq, brenth, ridder, bisect, newton + + Examples + -------- + >>> from scipy import optimize + >>> def f(x, c): + ... return x**3 - 2*x - c + >>> c = 5 + >>> res = optimize._chandrupatla._chandrupatla(f, 0, 3, args=(c,)) + >>> res.x + 2.0945514818937463 + + >>> c = [3, 4, 5] + >>> res = optimize._chandrupatla._chandrupatla(f, 0, 3, args=(c,)) + >>> res.x + array([1.8932892 , 2. , 2.09455148]) + + """ + res = _chandrupatla_iv(func, args, xatol, xrtol, + fatol, frtol, maxiter, callback) + func, args, xatol, xrtol, fatol, frtol, maxiter, callback = res + + # Initialization + temp = eim._initialize(func, (a, b), args) + func, xs, fs, args, shape, dtype = temp + x1, x2 = xs + f1, f2 = fs + status = np.full_like(x1, eim._EINPROGRESS, dtype=int) # in progress + nit, nfev = 0, 2 # two function evaluations performed above + xatol = _xtol if xatol is None else xatol + xrtol = _rtol if xrtol is None else xrtol + fatol = np.finfo(dtype).tiny if fatol is None else fatol + frtol = frtol * np.minimum(np.abs(f1), np.abs(f2)) + work = _RichResult(x1=x1, f1=f1, x2=x2, f2=f2, x3=None, f3=None, t=0.5, + xatol=xatol, xrtol=xrtol, fatol=fatol, frtol=frtol, + nit=nit, nfev=nfev, status=status) + res_work_pairs = [('status', 'status'), ('x', 'xmin'), ('fun', 'fmin'), + ('nit', 'nit'), ('nfev', 'nfev'), ('xl', 'x1'), + ('fl', 'f1'), ('xr', 'x2'), ('fr', 'f2')] + + def pre_func_eval(work): + # [1] Figure 1 (first box) + x = work.x1 + work.t * (work.x2 - work.x1) + return x + + def post_func_eval(x, f, work): + # [1] Figure 1 (first diamond and boxes) + # Note: y/n are reversed in figure; compare to BASIC in appendix + work.x3, work.f3 = work.x2.copy(), work.f2.copy() + j = np.sign(f) == np.sign(work.f1) + nj = ~j + work.x3[j], work.f3[j] = work.x1[j], work.f1[j] + work.x2[nj], work.f2[nj] = work.x1[nj], work.f1[nj] + work.x1, work.f1 = x, f + + def check_termination(work): + # [1] Figure 1 (second diamond) + # Check for all terminal conditions and record statuses. + + # See [1] Section 4 (first two sentences) + i = np.abs(work.f1) < np.abs(work.f2) + work.xmin = np.choose(i, (work.x2, work.x1)) + work.fmin = np.choose(i, (work.f2, work.f1)) + stop = np.zeros_like(work.x1, dtype=bool) # termination condition met + + # This is the convergence criterion used in bisect. Chandrupatla's + # criterion is equivalent to this except with a factor of 4 on `xrtol`. + work.dx = abs(work.x2 - work.x1) + work.tol = abs(work.xmin) * work.xrtol + work.xatol + i = work.dx < work.tol + # Modify in place to incorporate tolerance on function value. Note that + # `frtol` has been redefined as `frtol = frtol * np.minimum(f1, f2)`, + # where `f1` and `f2` are the function evaluated at the original ends of + # the bracket. 
+ i |= np.abs(work.fmin) <= work.fatol + work.frtol + work.status[i] = eim._ECONVERGED + stop[i] = True + + i = (np.sign(work.f1) == np.sign(work.f2)) & ~stop + work.xmin[i], work.fmin[i], work.status[i] = np.nan, np.nan, eim._ESIGNERR + stop[i] = True + + i = ~((np.isfinite(work.x1) & np.isfinite(work.x2) + & np.isfinite(work.f1) & np.isfinite(work.f2)) | stop) + work.xmin[i], work.fmin[i], work.status[i] = np.nan, np.nan, eim._EVALUEERR + stop[i] = True + + return stop + + def post_termination_check(work): + # [1] Figure 1 (third diamond and boxes / Equation 1) + xi1 = (work.x1 - work.x2) / (work.x3 - work.x2) + phi1 = (work.f1 - work.f2) / (work.f3 - work.f2) + alpha = (work.x3 - work.x1) / (work.x2 - work.x1) + j = ((1 - np.sqrt(1 - xi1)) < phi1) & (phi1 < np.sqrt(xi1)) + + f1j, f2j, f3j, alphaj = work.f1[j], work.f2[j], work.f3[j], alpha[j] + t = np.full_like(alpha, 0.5) + t[j] = (f1j / (f1j - f2j) * f3j / (f3j - f2j) + - alphaj * f1j / (f3j - f1j) * f2j / (f2j - f3j)) + + # [1] Figure 1 (last box; see also BASIC in appendix with comment + # "Adjust T Away from the Interval Boundary") + tl = 0.5 * work.tol / work.dx + work.t = np.clip(t, tl, 1 - tl) + + def customize_result(res, shape): + xl, xr, fl, fr = res['xl'], res['xr'], res['fl'], res['fr'] + i = res['xl'] < res['xr'] + res['xl'] = np.choose(i, (xr, xl)) + res['xr'] = np.choose(i, (xl, xr)) + res['fl'] = np.choose(i, (fr, fl)) + res['fr'] = np.choose(i, (fl, fr)) + return shape + + return eim._loop(work, callback, shape, maxiter, func, args, dtype, + pre_func_eval, post_func_eval, check_termination, + post_termination_check, customize_result, res_work_pairs) + + +def _chandrupatla_iv(func, args, xatol, xrtol, + fatol, frtol, maxiter, callback): + # Input validation for `_chandrupatla` + + if not callable(func): + raise ValueError('`func` must be callable.') + + if not np.iterable(args): + args = (args,) + + tols = np.asarray([xatol if xatol is not None else 1, + xrtol if xrtol is not None else 1, + fatol if fatol is not None else 1, + frtol if frtol is not None else 1]) + if (not np.issubdtype(tols.dtype, np.number) or np.any(tols < 0) + or np.any(np.isnan(tols)) or tols.shape != (4,)): + raise ValueError('Tolerances must be non-negative scalars.') + + maxiter_int = int(maxiter) + if maxiter != maxiter_int or maxiter < 0: + raise ValueError('`maxiter` must be a non-negative integer.') + + if callback is not None and not callable(callback): + raise ValueError('`callback` must be callable.') + + return func, args, xatol, xrtol, fatol, frtol, maxiter, callback + + +def _chandrupatla_minimize(func, x1, x2, x3, *, args=(), xatol=None, + xrtol=None, fatol=None, frtol=None, maxiter=100, + callback=None): + """Find the minimizer of an elementwise function. + + For each element of the output of `func`, `_chandrupatla_minimize` seeks + the scalar minimizer that minimizes the element. This function allows for + `x1`, `x2`, `x3`, and the elements of `args` to be arrays of any + broadcastable shapes. + + Parameters + ---------- + func : callable + The function whose minimizer is desired. The signature must be:: + + func(x: ndarray, *args) -> ndarray + + where each element of ``x`` is a finite real and ``args`` is a tuple, + which may contain an arbitrary number of arrays that are broadcastable + with `x`. ``func`` must be an elementwise function: each element + ``func(x)[i]`` must equal ``func(x[i])`` for all indices ``i``. + `_chandrupatla` seeks an array ``x`` such that ``func(x)`` is an array + of minima. 
+ x1, x2, x3 : array_like + The abscissae of a standard scalar minimization bracket. A bracket is + valid if ``x1 < x2 < x3`` and ``func(x1) > func(x2) <= func(x3)``. + Must be broadcastable with one another and `args`. + args : tuple, optional + Additional positional arguments to be passed to `func`. Must be arrays + broadcastable with `x1`, `x2`, and `x3`. If the callable to be + differentiated requires arguments that are not broadcastable with `x`, + wrap that callable with `func` such that `func` accepts only `x` and + broadcastable arrays. + xatol, xrtol, fatol, frtol : float, optional + Absolute and relative tolerances on the minimizer and function value. + See Notes for details. + maxiter : int, optional + The maximum number of iterations of the algorithm to perform. + callback : callable, optional + An optional user-supplied function to be called before the first + iteration and after each iteration. + Called as ``callback(res)``, where ``res`` is a ``_RichResult`` + similar to that returned by `_chandrupatla_minimize` (but containing + the current iterate's values of all variables). If `callback` raises a + ``StopIteration``, the algorithm will terminate immediately and + `_chandrupatla_minimize` will return a result. + + Returns + ------- + res : _RichResult + An instance of `scipy._lib._util._RichResult` with the following + attributes. (The descriptions are written as though the values will be + scalars; however, if `func` returns an array, the outputs will be + arrays of the same shape.) + + success : bool + ``True`` when the algorithm terminated successfully (status ``0``). + status : int + An integer representing the exit status of the algorithm. + ``0`` : The algorithm converged to the specified tolerances. + ``-1`` : The algorithm encountered an invalid bracket. + ``-2`` : The maximum number of iterations was reached. + ``-3`` : A non-finite value was encountered. + ``-4`` : Iteration was terminated by `callback`. + ``1`` : The algorithm is proceeding normally (in `callback` only). + x : float + The minimizer of the function, if the algorithm terminated + successfully. + fun : float + The value of `func` evaluated at `x`. + nfev : int + The number of points at which `func` was evaluated. + nit : int + The number of iterations of the algorithm that were performed. + xl, xm, xr : float + The final three-point bracket. + fl, fm, fr : float + The function value at the bracket points. + + Notes + ----- + Implemented based on Chandrupatla's original paper [1]_. + + If ``x1 < x2 < x3`` are the points of the bracket and ``f1 > f2 <= f3`` + are the values of ``func`` at those points, then the algorithm is + considered to have converged when ``x3 - x1 <= abs(x2)*xrtol + xatol`` + or ``(f1 - 2*f2 + f3)/2 <= abs(f2)*frtol + fatol``. Note that first of + these differs from the termination conditions described in [1]_. The + default values of `xrtol` is the square root of the precision of the + appropriate dtype, and ``xatol=fatol = frtol`` is the smallest normal + number of the appropriate dtype. + + References + ---------- + .. [1] Chandrupatla, Tirupathi R. (1998). + "An efficient quadratic fit-sectioning algorithm for minimization + without derivatives". + Computer Methods in Applied Mechanics and Engineering, 152 (1-2), + 211-217. https://doi.org/10.1016/S0045-7825(97)00190-4 + + See Also + -------- + golden, brent, bounded + + Examples + -------- + >>> from scipy.optimize._chandrupatla import _chandrupatla_minimize + >>> def f(x, args=1): + ... 
return (x - args)**2 + >>> res = _chandrupatla_minimize(f, -5, 0, 5) + >>> res.x + 1.0 + >>> c = [1, 1.5, 2] + >>> res = _chandrupatla_minimize(f, -5, 0, 5, args=(c,)) + >>> res.x + array([1. , 1.5, 2. ]) + """ + res = _chandrupatla_iv(func, args, xatol, xrtol, + fatol, frtol, maxiter, callback) + func, args, xatol, xrtol, fatol, frtol, maxiter, callback = res + + # Initialization + xs = (x1, x2, x3) + temp = eim._initialize(func, xs, args) + func, xs, fs, args, shape, dtype = temp # line split for PEP8 + x1, x2, x3 = xs + f1, f2, f3 = fs + phi = dtype.type(0.5 + 0.5*5**0.5) # golden ratio + status = np.full_like(x1, eim._EINPROGRESS, dtype=int) # in progress + nit, nfev = 0, 3 # three function evaluations performed above + fatol = np.finfo(dtype).tiny if fatol is None else fatol + frtol = np.finfo(dtype).tiny if frtol is None else frtol + xatol = np.finfo(dtype).tiny if xatol is None else xatol + xrtol = np.sqrt(np.finfo(dtype).eps) if xrtol is None else xrtol + + # Ensure that x1 < x2 < x3 initially. + xs, fs = np.vstack((x1, x2, x3)), np.vstack((f1, f2, f3)) + i = np.argsort(xs, axis=0) + x1, x2, x3 = np.take_along_axis(xs, i, axis=0) + f1, f2, f3 = np.take_along_axis(fs, i, axis=0) + q0 = x3.copy() # "At the start, q0 is set at x3..." ([1] after (7)) + + work = _RichResult(x1=x1, f1=f1, x2=x2, f2=f2, x3=x3, f3=f3, phi=phi, + xatol=xatol, xrtol=xrtol, fatol=fatol, frtol=frtol, + nit=nit, nfev=nfev, status=status, q0=q0, args=args) + res_work_pairs = [('status', 'status'), + ('x', 'x2'), ('fun', 'f2'), + ('nit', 'nit'), ('nfev', 'nfev'), + ('xl', 'x1'), ('xm', 'x2'), ('xr', 'x3'), + ('fl', 'f1'), ('fm', 'f2'), ('fr', 'f3')] + + def pre_func_eval(work): + # `_check_termination` is called first -> `x3 - x2 > x2 - x1` + # But let's calculate a few terms that we'll reuse + x21 = work.x2 - work.x1 + x32 = work.x3 - work.x2 + + # [1] Section 3. "The quadratic minimum point Q1 is calculated using + # the relations developed in the previous section." [1] Section 2 (5/6) + A = x21 * (work.f3 - work.f2) + B = x32 * (work.f1 - work.f2) + C = A / (A + B) + # q1 = C * (work.x1 + work.x2) / 2 + (1 - C) * (work.x2 + work.x3) / 2 + q1 = 0.5 * (C*(work.x1 - work.x3) + work.x2 + work.x3) # much faster + # this is an array, so multiplying by 0.5 does not change dtype + + # "If Q1 and Q0 are sufficiently close... Q1 is accepted if it is + # sufficiently away from the inside point x2" + i = abs(q1 - work.q0) < 0.5 * abs(x21) # [1] (7) + xi = q1[i] + # Later, after (9), "If the point Q1 is in a +/- xtol neighborhood of + # x2, the new point is chosen in the larger interval at a distance + # tol away from x2." + # See also QBASIC code after "Accept Ql adjust if close to X2". + j = abs(q1[i] - work.x2[i]) <= work.xtol[i] + xi[j] = work.x2[i][j] + np.sign(x32[i][j]) * work.xtol[i][j] + + # "If condition (7) is not satisfied, golden sectioning of the larger + # interval is carried out to introduce the new point." + # (For simplicity, we go ahead and calculate it for all points, but we + # change the elements for which the condition was satisfied.) + x = work.x2 + (2 - work.phi) * x32 + x[i] = xi + + # "We define Q0 as the value of Q1 at the previous iteration." + work.q0 = q1 + return x + + def post_func_eval(x, f, work): + # Standard logic for updating a three-point bracket based on a new + # point. In QBASIC code, see "IF SGN(X-X2) = SGN(X3-X2) THEN...". + # There is an awful lot of data copying going on here; this would + # probably benefit from code optimization or implementation in Pythran. 
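+        # In words: when the new point ``x`` falls on the ``x3`` side of
+        # ``x2``, it either replaces ``x3`` (if its function value exceeds
+        # ``f2``) or becomes the new middle point, with the old ``x2``
+        # shifting to ``x1``. When ``x`` falls on the ``x1`` side, the
+        # update is the mirror image.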
+ i = np.sign(x - work.x2) == np.sign(work.x3 - work.x2) + xi, x1i, x2i, x3i = x[i], work.x1[i], work.x2[i], work.x3[i], + fi, f1i, f2i, f3i = f[i], work.f1[i], work.f2[i], work.f3[i] + j = fi > f2i + x3i[j], f3i[j] = xi[j], fi[j] + j = ~j + x1i[j], f1i[j], x2i[j], f2i[j] = x2i[j], f2i[j], xi[j], fi[j] + + ni = ~i + xni, x1ni, x2ni, x3ni = x[ni], work.x1[ni], work.x2[ni], work.x3[ni], + fni, f1ni, f2ni, f3ni = f[ni], work.f1[ni], work.f2[ni], work.f3[ni] + j = fni > f2ni + x1ni[j], f1ni[j] = xni[j], fni[j] + j = ~j + x3ni[j], f3ni[j], x2ni[j], f2ni[j] = x2ni[j], f2ni[j], xni[j], fni[j] + + work.x1[i], work.x2[i], work.x3[i] = x1i, x2i, x3i + work.f1[i], work.f2[i], work.f3[i] = f1i, f2i, f3i + work.x1[ni], work.x2[ni], work.x3[ni] = x1ni, x2ni, x3ni, + work.f1[ni], work.f2[ni], work.f3[ni] = f1ni, f2ni, f3ni + + def check_termination(work): + # Check for all terminal conditions and record statuses. + stop = np.zeros_like(work.x1, dtype=bool) # termination condition met + + # Bracket is invalid; stop and don't return minimizer/minimum + i = ((work.f2 > work.f1) | (work.f2 > work.f3)) + work.x2[i], work.f2[i] = np.nan, np.nan + stop[i], work.status[i] = True, eim._ESIGNERR + + # Non-finite values; stop and don't return minimizer/minimum + finite = np.isfinite(work.x1+work.x2+work.x3+work.f1+work.f2+work.f3) + i = ~(finite | stop) + work.x2[i], work.f2[i] = np.nan, np.nan + stop[i], work.status[i] = True, eim._EVALUEERR + + # [1] Section 3 "Points 1 and 3 are interchanged if necessary to make + # the (x2, x3) the larger interval." + # Note: I had used np.choose; this is much faster. This would be a good + # place to save e.g. `work.x3 - work.x2` for reuse, but I tried and + # didn't notice a speed boost, so let's keep it simple. + i = abs(work.x3 - work.x2) < abs(work.x2 - work.x1) + temp = work.x1[i] + work.x1[i] = work.x3[i] + work.x3[i] = temp + temp = work.f1[i] + work.f1[i] = work.f3[i] + work.f3[i] = temp + + # [1] Section 3 (bottom of page 212) + # "We set a tolerance value xtol..." + work.xtol = abs(work.x2) * work.xrtol + work.xatol # [1] (8) + # "The convergence based on interval is achieved when..." + # Note: Equality allowed in case of `xtol=0` + i = abs(work.x3 - work.x2) <= 2 * work.xtol # [1] (9) + + # "We define ftol using..." + ftol = abs(work.f2) * work.frtol + work.fatol # [1] (10) + # "The convergence based on function values is achieved when..." + # Note 1: modify in place to incorporate tolerance on function value. 
+ # Note 2: factor of 2 is not in the text; see QBASIC start of DO loop + i |= (work.f1 - 2 * work.f2 + work.f3) <= 2*ftol # [1] (11) + i &= ~stop + stop[i], work.status[i] = True, eim._ECONVERGED + + return stop + + def post_termination_check(work): + pass + + def customize_result(res, shape): + xl, xr, fl, fr = res['xl'], res['xr'], res['fl'], res['fr'] + i = res['xl'] < res['xr'] + res['xl'] = np.choose(i, (xr, xl)) + res['xr'] = np.choose(i, (xl, xr)) + res['fl'] = np.choose(i, (fr, fl)) + res['fr'] = np.choose(i, (fl, fr)) + return shape + + return eim._loop(work, callback, shape, maxiter, func, args, dtype, + pre_func_eval, post_func_eval, check_termination, + post_termination_check, customize_result, res_work_pairs) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_cobyla.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_cobyla.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..c770500851262233d46715a6a6f9f630b24e4b87 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_cobyla.cpython-310-x86_64-linux-gnu.so differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_cobyla_py.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_cobyla_py.py new file mode 100644 index 0000000000000000000000000000000000000000..9007fe38a06a91fe456e64d74f4c0e37800f0607 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_cobyla_py.py @@ -0,0 +1,316 @@ +""" +Interface to Constrained Optimization By Linear Approximation + +Functions +--------- +.. autosummary:: + :toctree: generated/ + + fmin_cobyla + +""" + +import functools +from threading import RLock + +import numpy as np +from scipy.optimize import _cobyla as cobyla +from ._optimize import (OptimizeResult, _check_unknown_options, + _prepare_scalar_function) +try: + from itertools import izip +except ImportError: + izip = zip + +__all__ = ['fmin_cobyla'] + +# Workaround as _cobyla.minimize is not threadsafe +# due to an unknown f2py bug and can segfault, +# see gh-9658. +_module_lock = RLock() +def synchronized(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + with _module_lock: + return func(*args, **kwargs) + return wrapper + +@synchronized +def fmin_cobyla(func, x0, cons, args=(), consargs=None, rhobeg=1.0, + rhoend=1e-4, maxfun=1000, disp=None, catol=2e-4, + *, callback=None): + """ + Minimize a function using the Constrained Optimization By Linear + Approximation (COBYLA) method. This method wraps a FORTRAN + implementation of the algorithm. + + Parameters + ---------- + func : callable + Function to minimize. In the form func(x, \\*args). + x0 : ndarray + Initial guess. + cons : sequence + Constraint functions; must all be ``>=0`` (a single function + if only 1 constraint). Each function takes the parameters `x` + as its first argument, and it can return either a single number or + an array or list of numbers. + args : tuple, optional + Extra arguments to pass to function. + consargs : tuple, optional + Extra arguments to pass to constraint functions (default of None means + use same extra arguments as those passed to func). + Use ``()`` for no extra arguments. + rhobeg : float, optional + Reasonable initial changes to the variables. + rhoend : float, optional + Final accuracy in the optimization (not precisely guaranteed). This + is a lower bound on the size of the trust region. 
+ disp : {0, 1, 2, 3}, optional + Controls the frequency of output; 0 implies no output. + maxfun : int, optional + Maximum number of function evaluations. + catol : float, optional + Absolute tolerance for constraint violations. + callback : callable, optional + Called after each iteration, as ``callback(x)``, where ``x`` is the + current parameter vector. + + Returns + ------- + x : ndarray + The argument that minimises `f`. + + See also + -------- + minimize: Interface to minimization algorithms for multivariate + functions. See the 'COBYLA' `method` in particular. + + Notes + ----- + This algorithm is based on linear approximations to the objective + function and each constraint. We briefly describe the algorithm. + + Suppose the function is being minimized over k variables. At the + jth iteration the algorithm has k+1 points v_1, ..., v_(k+1), + an approximate solution x_j, and a radius RHO_j. + (i.e., linear plus a constant) approximations to the objective + function and constraint functions such that their function values + agree with the linear approximation on the k+1 points v_1,.., v_(k+1). + This gives a linear program to solve (where the linear approximations + of the constraint functions are constrained to be non-negative). + + However, the linear approximations are likely only good + approximations near the current simplex, so the linear program is + given the further requirement that the solution, which + will become x_(j+1), must be within RHO_j from x_j. RHO_j only + decreases, never increases. The initial RHO_j is rhobeg and the + final RHO_j is rhoend. In this way COBYLA's iterations behave + like a trust region algorithm. + + Additionally, the linear program may be inconsistent, or the + approximation may give poor improvement. For details about + how these issues are resolved, as well as how the points v_i are + updated, refer to the source code or the references below. + + + References + ---------- + Powell M.J.D. (1994), "A direct search optimization method that models + the objective and constraint functions by linear interpolation.", in + Advances in Optimization and Numerical Analysis, eds. S. Gomez and + J-P Hennart, Kluwer Academic (Dordrecht), pp. 51-67 + + Powell M.J.D. (1998), "Direct search algorithms for optimization + calculations", Acta Numerica 7, 287-336 + + Powell M.J.D. (2007), "A view of algorithms for optimization without + derivatives", Cambridge University Technical Report DAMTP 2007/NA03 + + + Examples + -------- + Minimize the objective function f(x,y) = x*y subject + to the constraints x**2 + y**2 < 1 and y > 0:: + + >>> def objective(x): + ... return x[0]*x[1] + ... + >>> def constr1(x): + ... return 1 - (x[0]**2 + x[1]**2) + ... + >>> def constr2(x): + ... return x[1] + ... + >>> from scipy.optimize import fmin_cobyla + >>> fmin_cobyla(objective, [0.0, 0.1], [constr1, constr2], rhoend=1e-7) + array([-0.70710685, 0.70710671]) + + The exact solution is (-sqrt(2)/2, sqrt(2)/2). + + + + """ + err = "cons must be a sequence of callable functions or a single"\ + " callable function." 
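+    # Normalize `cons`: a bare callable is wrapped in a list below so the
+    # subsequent loop can treat single and multiple constraints uniformly.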
+ try: + len(cons) + except TypeError as e: + if callable(cons): + cons = [cons] + else: + raise TypeError(err) from e + else: + for thisfunc in cons: + if not callable(thisfunc): + raise TypeError(err) + + if consargs is None: + consargs = args + + # build constraints + con = tuple({'type': 'ineq', 'fun': c, 'args': consargs} for c in cons) + + # options + opts = {'rhobeg': rhobeg, + 'tol': rhoend, + 'disp': disp, + 'maxiter': maxfun, + 'catol': catol, + 'callback': callback} + + sol = _minimize_cobyla(func, x0, args, constraints=con, + **opts) + if disp and not sol['success']: + print(f"COBYLA failed to find a solution: {sol.message}") + return sol['x'] + + +@synchronized +def _minimize_cobyla(fun, x0, args=(), constraints=(), + rhobeg=1.0, tol=1e-4, maxiter=1000, + disp=False, catol=2e-4, callback=None, bounds=None, + **unknown_options): + """ + Minimize a scalar function of one or more variables using the + Constrained Optimization BY Linear Approximation (COBYLA) algorithm. + + Options + ------- + rhobeg : float + Reasonable initial changes to the variables. + tol : float + Final accuracy in the optimization (not precisely guaranteed). + This is a lower bound on the size of the trust region. + disp : bool + Set to True to print convergence messages. If False, + `verbosity` is ignored as set to 0. + maxiter : int + Maximum number of function evaluations. + catol : float + Tolerance (absolute) for constraint violations + + """ + _check_unknown_options(unknown_options) + maxfun = maxiter + rhoend = tol + iprint = int(bool(disp)) + + # check constraints + if isinstance(constraints, dict): + constraints = (constraints, ) + + if bounds: + i_lb = np.isfinite(bounds.lb) + if np.any(i_lb): + def lb_constraint(x, *args, **kwargs): + return x[i_lb] - bounds.lb[i_lb] + + constraints.append({'type': 'ineq', 'fun': lb_constraint}) + + i_ub = np.isfinite(bounds.ub) + if np.any(i_ub): + def ub_constraint(x): + return bounds.ub[i_ub] - x[i_ub] + + constraints.append({'type': 'ineq', 'fun': ub_constraint}) + + for ic, con in enumerate(constraints): + # check type + try: + ctype = con['type'].lower() + except KeyError as e: + raise KeyError('Constraint %d has no type defined.' % ic) from e + except TypeError as e: + raise TypeError('Constraints must be defined using a ' + 'dictionary.') from e + except AttributeError as e: + raise TypeError("Constraint's type must be a string.") from e + else: + if ctype != 'ineq': + raise ValueError("Constraints of type '%s' not handled by " + "COBYLA." % con['type']) + + # check function + if 'fun' not in con: + raise KeyError('Constraint %d has no function defined.' 
% ic) + + # check extra arguments + if 'args' not in con: + con['args'] = () + + # m is the total number of constraint values + # it takes into account that some constraints may be vector-valued + cons_lengths = [] + for c in constraints: + f = c['fun'](x0, *c['args']) + try: + cons_length = len(f) + except TypeError: + cons_length = 1 + cons_lengths.append(cons_length) + m = sum(cons_lengths) + + # create the ScalarFunction, cobyla doesn't require derivative function + def _jac(x, *args): + return None + + sf = _prepare_scalar_function(fun, x0, args=args, jac=_jac) + + def calcfc(x, con): + f = sf.fun(x) + i = 0 + for size, c in izip(cons_lengths, constraints): + con[i: i + size] = c['fun'](x, *c['args']) + i += size + return f + + def wrapped_callback(x): + if callback is not None: + callback(np.copy(x)) + + info = np.zeros(4, np.float64) + xopt, info = cobyla.minimize(calcfc, m=m, x=np.copy(x0), rhobeg=rhobeg, + rhoend=rhoend, iprint=iprint, maxfun=maxfun, + dinfo=info, callback=wrapped_callback) + + if info[3] > catol: + # Check constraint violation + info[0] = 4 + + return OptimizeResult(x=xopt, + status=int(info[0]), + success=info[0] == 1, + message={1: 'Optimization terminated successfully.', + 2: 'Maximum number of function evaluations ' + 'has been exceeded.', + 3: 'Rounding errors are becoming damaging ' + 'in COBYLA subroutine.', + 4: 'Did not converge to a solution ' + 'satisfying the constraints. See ' + '`maxcv` for magnitude of violation.', + 5: 'NaN result encountered.' + }.get(info[0], 'Unknown exit status.'), + nfev=int(info[1]), + fun=info[2], + maxcv=info[3]) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_constraints.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_constraints.py new file mode 100644 index 0000000000000000000000000000000000000000..1c7ff5e170b2eb518bc6be0c667ac9f89a073dcf --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_constraints.py @@ -0,0 +1,590 @@ +"""Constraints definition for minimize.""" +import numpy as np +from ._hessian_update_strategy import BFGS +from ._differentiable_functions import ( + VectorFunction, LinearVectorFunction, IdentityVectorFunction) +from ._optimize import OptimizeWarning +from warnings import warn, catch_warnings, simplefilter, filterwarnings +from scipy.sparse import issparse + + +def _arr_to_scalar(x): + # If x is a numpy array, return x.item(). This will + # fail if the array has more than one element. + return x.item() if isinstance(x, np.ndarray) else x + + +class NonlinearConstraint: + """Nonlinear constraint on the variables. + + The constraint has the general inequality form:: + + lb <= fun(x) <= ub + + Here the vector of independent variables x is passed as ndarray of shape + (n,) and ``fun`` returns a vector with m components. + + It is possible to use equal bounds to represent an equality constraint or + infinite bounds to represent a one-sided constraint. + + Parameters + ---------- + fun : callable + The function defining the constraint. + The signature is ``fun(x) -> array_like, shape (m,)``. + lb, ub : array_like + Lower and upper bounds on the constraint. Each array must have the + shape (m,) or be a scalar, in the latter case a bound will be the same + for all components of the constraint. Use ``np.inf`` with an + appropriate sign to specify a one-sided constraint. + Set components of `lb` and `ub` equal to represent an equality + constraint. 
Note that you can mix constraints of different types:
+        interval, one-sided or equality, by setting different components of
+        `lb` and `ub` as necessary.
+    jac : {callable, '2-point', '3-point', 'cs'}, optional
+        Method of computing the Jacobian matrix (an m-by-n matrix,
+        where element (i, j) is the partial derivative of f[i] with
+        respect to x[j]). The keywords {'2-point', '3-point',
+        'cs'} select a finite difference scheme for the numerical estimation.
+        A callable must have the following signature:
+        ``jac(x) -> {ndarray, sparse matrix}, shape (m, n)``.
+        Default is '2-point'.
+    hess : {callable, '2-point', '3-point', 'cs', HessianUpdateStrategy, None}, optional
+        Method for computing the Hessian matrix. The keywords
+        {'2-point', '3-point', 'cs'} select a finite difference scheme for
+        numerical estimation. Alternatively, objects implementing
+        `HessianUpdateStrategy` interface can be used to approximate the
+        Hessian. Currently available implementations are:
+
+        - `BFGS` (default option)
+        - `SR1`
+
+        A callable must return the Hessian matrix of ``dot(fun, v)`` and
+        must have the following signature:
+        ``hess(x, v) -> {LinearOperator, sparse matrix, array_like}, shape (n, n)``.
+        Here ``v`` is ndarray with shape (m,) containing Lagrange multipliers.
+    keep_feasible : array_like of bool, optional
+        Whether to keep the constraint components feasible throughout
+        iterations. A single value sets this property for all components.
+        Default is False. Has no effect for equality constraints.
+    finite_diff_rel_step: None or array_like, optional
+        Relative step size for the finite difference approximation. Default is
+        None, which will select a reasonable value automatically depending
+        on a finite difference scheme.
+    finite_diff_jac_sparsity: {None, array_like, sparse matrix}, optional
+        Defines the sparsity structure of the Jacobian matrix for finite
+        difference estimation, its shape must be (m, n). If the Jacobian has
+        only few non-zero elements in *each* row, providing the sparsity
+        structure will greatly speed up the computations. A zero entry means
+        that a corresponding element in the Jacobian is identically zero.
+        If provided, forces the use of 'lsmr' trust-region solver.
+        If None (default) then dense differencing will be used.
+
+    Notes
+    -----
+    Finite difference schemes {'2-point', '3-point', 'cs'} may be used for
+    approximating either the Jacobian or the Hessian. We, however, do not allow
+    its use for approximating both simultaneously. Hence whenever the Jacobian
+    is estimated via finite-differences, we require the Hessian to be estimated
+    using one of the quasi-Newton strategies.
+
+    The scheme 'cs' is potentially the most accurate, but requires the function
+    to correctly handle complex inputs and be analytically continuable to the
+    complex plane. The scheme '3-point' is more accurate than '2-point' but
+    requires twice as many operations.
+ + Examples + -------- + Constrain ``x[0] < sin(x[1]) + 1.9`` + + >>> from scipy.optimize import NonlinearConstraint + >>> import numpy as np + >>> con = lambda x: x[0] - np.sin(x[1]) + >>> nlc = NonlinearConstraint(con, -np.inf, 1.9) + + """ + def __init__(self, fun, lb, ub, jac='2-point', hess=BFGS(), + keep_feasible=False, finite_diff_rel_step=None, + finite_diff_jac_sparsity=None): + self.fun = fun + self.lb = lb + self.ub = ub + self.finite_diff_rel_step = finite_diff_rel_step + self.finite_diff_jac_sparsity = finite_diff_jac_sparsity + self.jac = jac + self.hess = hess + self.keep_feasible = keep_feasible + + +class LinearConstraint: + """Linear constraint on the variables. + + The constraint has the general inequality form:: + + lb <= A.dot(x) <= ub + + Here the vector of independent variables x is passed as ndarray of shape + (n,) and the matrix A has shape (m, n). + + It is possible to use equal bounds to represent an equality constraint or + infinite bounds to represent a one-sided constraint. + + Parameters + ---------- + A : {array_like, sparse matrix}, shape (m, n) + Matrix defining the constraint. + lb, ub : dense array_like, optional + Lower and upper limits on the constraint. Each array must have the + shape (m,) or be a scalar, in the latter case a bound will be the same + for all components of the constraint. Use ``np.inf`` with an + appropriate sign to specify a one-sided constraint. + Set components of `lb` and `ub` equal to represent an equality + constraint. Note that you can mix constraints of different types: + interval, one-sided or equality, by setting different components of + `lb` and `ub` as necessary. Defaults to ``lb = -np.inf`` + and ``ub = np.inf`` (no limits). + keep_feasible : dense array_like of bool, optional + Whether to keep the constraint components feasible throughout + iterations. A single value set this property for all components. + Default is False. Has no effect for equality constraints. + """ + def _input_validation(self): + if self.A.ndim != 2: + message = "`A` must have exactly two dimensions." + raise ValueError(message) + + try: + shape = self.A.shape[0:1] + self.lb = np.broadcast_to(self.lb, shape) + self.ub = np.broadcast_to(self.ub, shape) + self.keep_feasible = np.broadcast_to(self.keep_feasible, shape) + except ValueError: + message = ("`lb`, `ub`, and `keep_feasible` must be broadcastable " + "to shape `A.shape[0:1]`") + raise ValueError(message) + + def __init__(self, A, lb=-np.inf, ub=np.inf, keep_feasible=False): + if not issparse(A): + # In some cases, if the constraint is not valid, this emits a + # VisibleDeprecationWarning about ragged nested sequences + # before eventually causing an error. `scipy.optimize.milp` would + # prefer that this just error out immediately so it can handle it + # rather than concerning the user. 
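+            # For example, a ragged ``A`` such as ``[[1, 2], [3]]`` cannot be
+            # converted to a 2-D float array; with ``simplefilter("error")``
+            # below, the conversion raises right away rather than emitting the
+            # warning and failing later inside the solver.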
+ with catch_warnings(): + simplefilter("error") + self.A = np.atleast_2d(A).astype(np.float64) + else: + self.A = A + if issparse(lb) or issparse(ub): + raise ValueError("Constraint limits must be dense arrays.") + self.lb = np.atleast_1d(lb).astype(np.float64) + self.ub = np.atleast_1d(ub).astype(np.float64) + + if issparse(keep_feasible): + raise ValueError("`keep_feasible` must be a dense array.") + self.keep_feasible = np.atleast_1d(keep_feasible).astype(bool) + self._input_validation() + + def residual(self, x): + """ + Calculate the residual between the constraint function and the limits + + For a linear constraint of the form:: + + lb <= A@x <= ub + + the lower and upper residuals between ``A@x`` and the limits are values + ``sl`` and ``sb`` such that:: + + lb + sl == A@x == ub - sb + + When all elements of ``sl`` and ``sb`` are positive, all elements of + the constraint are satisfied; a negative element in ``sl`` or ``sb`` + indicates that the corresponding element of the constraint is not + satisfied. + + Parameters + ---------- + x: array_like + Vector of independent variables + + Returns + ------- + sl, sb : array-like + The lower and upper residuals + """ + return self.A@x - self.lb, self.ub - self.A@x + + +class Bounds: + """Bounds constraint on the variables. + + The constraint has the general inequality form:: + + lb <= x <= ub + + It is possible to use equal bounds to represent an equality constraint or + infinite bounds to represent a one-sided constraint. + + Parameters + ---------- + lb, ub : dense array_like, optional + Lower and upper bounds on independent variables. `lb`, `ub`, and + `keep_feasible` must be the same shape or broadcastable. + Set components of `lb` and `ub` equal + to fix a variable. Use ``np.inf`` with an appropriate sign to disable + bounds on all or some variables. Note that you can mix constraints of + different types: interval, one-sided or equality, by setting different + components of `lb` and `ub` as necessary. Defaults to ``lb = -np.inf`` + and ``ub = np.inf`` (no bounds). + keep_feasible : dense array_like of bool, optional + Whether to keep the constraint components feasible throughout + iterations. Must be broadcastable with `lb` and `ub`. + Default is False. Has no effect for equality constraints. + """ + def _input_validation(self): + try: + res = np.broadcast_arrays(self.lb, self.ub, self.keep_feasible) + self.lb, self.ub, self.keep_feasible = res + except ValueError: + message = "`lb`, `ub`, and `keep_feasible` must be broadcastable." 
+            raise ValueError(message)
+
+    def __init__(self, lb=-np.inf, ub=np.inf, keep_feasible=False):
+        if issparse(lb) or issparse(ub):
+            raise ValueError("Lower and upper bounds must be dense arrays.")
+        self.lb = np.atleast_1d(lb)
+        self.ub = np.atleast_1d(ub)
+
+        if issparse(keep_feasible):
+            raise ValueError("`keep_feasible` must be a dense array.")
+        self.keep_feasible = np.atleast_1d(keep_feasible).astype(bool)
+        self._input_validation()
+
+    def __repr__(self):
+        start = f"{type(self).__name__}({self.lb!r}, {self.ub!r}"
+        if np.any(self.keep_feasible):
+            end = f", keep_feasible={self.keep_feasible!r})"
+        else:
+            end = ")"
+        return start + end
+
+    def residual(self, x):
+        """Calculate the residual (slack) between the input and the bounds
+
+        For a bound constraint of the form::
+
+            lb <= x <= ub
+
+        the lower and upper residuals between `x` and the bounds are values
+        ``sl`` and ``sb`` such that::
+
+            lb + sl == x == ub - sb
+
+        When all elements of ``sl`` and ``sb`` are positive, all elements of
+        ``x`` lie within the bounds; a negative element in ``sl`` or ``sb``
+        indicates that the corresponding element of ``x`` is out of bounds.
+
+        Parameters
+        ----------
+        x: array_like
+            Vector of independent variables
+
+        Returns
+        -------
+        sl, sb : array-like
+            The lower and upper residuals
+        """
+        return x - self.lb, self.ub - x
+
+
+class PreparedConstraint:
+    """Constraint prepared from a user defined constraint.
+
+    On creation it will check whether a constraint definition is valid and
+    the initial point is feasible. If created successfully, it will contain
+    the attributes listed below.
+
+    Parameters
+    ----------
+    constraint : {NonlinearConstraint, LinearConstraint, Bounds}
+        Constraint to check and prepare.
+    x0 : array_like
+        Initial vector of independent variables.
+    sparse_jacobian : bool or None, optional
+        If bool, then the Jacobian of the constraint will be converted
+        to the corresponding format if necessary. If None (default), such
+        conversion is not made.
+    finite_diff_bounds : 2-tuple, optional
+        Lower and upper bounds on the independent variables for the finite
+        difference approximation, if applicable. Defaults to no bounds.
+
+    Attributes
+    ----------
+    fun : {VectorFunction, LinearVectorFunction, IdentityVectorFunction}
+        Function defining the constraint wrapped by one of the convenience
+        classes.
+    bounds : 2-tuple
+        Contains lower and upper bounds for the constraints --- lb and ub.
+        These are converted to ndarray and have a size equal to the number of
+        the constraints.
+    keep_feasible : ndarray
+        Array indicating which components must be kept feasible with a size
+        equal to the number of the constraints.
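+
+    Examples
+    --------
+    A minimal illustrative sketch; the numerical output is shown only for a
+    simple `Bounds` constraint and will differ for other constraint types:
+
+    >>> import numpy as np
+    >>> from scipy.optimize._constraints import Bounds, PreparedConstraint
+    >>> pc = PreparedConstraint(Bounds(0, 1), x0=np.array([0.5]))
+    >>> pc.violation([2.0])  # upper bound exceeded by 1
+    array([1.])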
+ """ + def __init__(self, constraint, x0, sparse_jacobian=None, + finite_diff_bounds=(-np.inf, np.inf)): + if isinstance(constraint, NonlinearConstraint): + fun = VectorFunction(constraint.fun, x0, + constraint.jac, constraint.hess, + constraint.finite_diff_rel_step, + constraint.finite_diff_jac_sparsity, + finite_diff_bounds, sparse_jacobian) + elif isinstance(constraint, LinearConstraint): + fun = LinearVectorFunction(constraint.A, x0, sparse_jacobian) + elif isinstance(constraint, Bounds): + fun = IdentityVectorFunction(x0, sparse_jacobian) + else: + raise ValueError("`constraint` of an unknown type is passed.") + + m = fun.m + + lb = np.asarray(constraint.lb, dtype=float) + ub = np.asarray(constraint.ub, dtype=float) + keep_feasible = np.asarray(constraint.keep_feasible, dtype=bool) + + lb = np.broadcast_to(lb, m) + ub = np.broadcast_to(ub, m) + keep_feasible = np.broadcast_to(keep_feasible, m) + + if keep_feasible.shape != (m,): + raise ValueError("`keep_feasible` has a wrong shape.") + + mask = keep_feasible & (lb != ub) + f0 = fun.f + if np.any(f0[mask] < lb[mask]) or np.any(f0[mask] > ub[mask]): + raise ValueError("`x0` is infeasible with respect to some " + "inequality constraint with `keep_feasible` " + "set to True.") + + self.fun = fun + self.bounds = (lb, ub) + self.keep_feasible = keep_feasible + + def violation(self, x): + """How much the constraint is exceeded by. + + Parameters + ---------- + x : array-like + Vector of independent variables + + Returns + ------- + excess : array-like + How much the constraint is exceeded by, for each of the + constraints specified by `PreparedConstraint.fun`. + """ + with catch_warnings(): + # Ignore the following warning, it's not important when + # figuring out total violation + # UserWarning: delta_grad == 0.0. Check if the approximated + # function is linear + filterwarnings("ignore", "delta_grad", UserWarning) + ev = self.fun.fun(np.asarray(x)) + + excess_lb = np.maximum(self.bounds[0] - ev, 0) + excess_ub = np.maximum(ev - self.bounds[1], 0) + + return excess_lb + excess_ub + + +def new_bounds_to_old(lb, ub, n): + """Convert the new bounds representation to the old one. + + The new representation is a tuple (lb, ub) and the old one is a list + containing n tuples, ith containing lower and upper bound on a ith + variable. + If any of the entries in lb/ub are -np.inf/np.inf they are replaced by + None. + """ + lb = np.broadcast_to(lb, n) + ub = np.broadcast_to(ub, n) + + lb = [float(x) if x > -np.inf else None for x in lb] + ub = [float(x) if x < np.inf else None for x in ub] + + return list(zip(lb, ub)) + + +def old_bound_to_new(bounds): + """Convert the old bounds representation to the new one. + + The new representation is a tuple (lb, ub) and the old one is a list + containing n tuples, ith containing lower and upper bound on a ith + variable. + If any of the entries in lb/ub are None they are replaced by + -np.inf/np.inf. + """ + lb, ub = zip(*bounds) + + # Convert occurrences of None to -inf or inf, and replace occurrences of + # any numpy array x with x.item(). Then wrap the results in numpy arrays. 
+ lb = np.array([float(_arr_to_scalar(x)) if x is not None else -np.inf + for x in lb]) + ub = np.array([float(_arr_to_scalar(x)) if x is not None else np.inf + for x in ub]) + + return lb, ub + + +def strict_bounds(lb, ub, keep_feasible, n_vars): + """Remove bounds which are not asked to be kept feasible.""" + strict_lb = np.resize(lb, n_vars).astype(float) + strict_ub = np.resize(ub, n_vars).astype(float) + keep_feasible = np.resize(keep_feasible, n_vars) + strict_lb[~keep_feasible] = -np.inf + strict_ub[~keep_feasible] = np.inf + return strict_lb, strict_ub + + +def new_constraint_to_old(con, x0): + """ + Converts new-style constraint objects to old-style constraint dictionaries. + """ + if isinstance(con, NonlinearConstraint): + if (con.finite_diff_jac_sparsity is not None or + con.finite_diff_rel_step is not None or + not isinstance(con.hess, BFGS) or # misses user specified BFGS + con.keep_feasible): + warn("Constraint options `finite_diff_jac_sparsity`, " + "`finite_diff_rel_step`, `keep_feasible`, and `hess`" + "are ignored by this method.", + OptimizeWarning, stacklevel=3) + + fun = con.fun + if callable(con.jac): + jac = con.jac + else: + jac = None + + else: # LinearConstraint + if np.any(con.keep_feasible): + warn("Constraint option `keep_feasible` is ignored by this method.", + OptimizeWarning, stacklevel=3) + + A = con.A + if issparse(A): + A = A.toarray() + def fun(x): + return np.dot(A, x) + def jac(x): + return A + + # FIXME: when bugs in VectorFunction/LinearVectorFunction are worked out, + # use pcon.fun.fun and pcon.fun.jac. Until then, get fun/jac above. + pcon = PreparedConstraint(con, x0) + lb, ub = pcon.bounds + + i_eq = lb == ub + i_bound_below = np.logical_xor(lb != -np.inf, i_eq) + i_bound_above = np.logical_xor(ub != np.inf, i_eq) + i_unbounded = np.logical_and(lb == -np.inf, ub == np.inf) + + if np.any(i_unbounded): + warn("At least one constraint is unbounded above and below. Such " + "constraints are ignored.", + OptimizeWarning, stacklevel=3) + + ceq = [] + if np.any(i_eq): + def f_eq(x): + y = np.array(fun(x)).flatten() + return y[i_eq] - lb[i_eq] + ceq = [{"type": "eq", "fun": f_eq}] + + if jac is not None: + def j_eq(x): + dy = jac(x) + if issparse(dy): + dy = dy.toarray() + dy = np.atleast_2d(dy) + return dy[i_eq, :] + ceq[0]["jac"] = j_eq + + cineq = [] + n_bound_below = np.sum(i_bound_below) + n_bound_above = np.sum(i_bound_above) + if n_bound_below + n_bound_above: + def f_ineq(x): + y = np.zeros(n_bound_below + n_bound_above) + y_all = np.array(fun(x)).flatten() + y[:n_bound_below] = y_all[i_bound_below] - lb[i_bound_below] + y[n_bound_below:] = -(y_all[i_bound_above] - ub[i_bound_above]) + return y + cineq = [{"type": "ineq", "fun": f_ineq}] + + if jac is not None: + def j_ineq(x): + dy = np.zeros((n_bound_below + n_bound_above, len(x0))) + dy_all = jac(x) + if issparse(dy_all): + dy_all = dy_all.toarray() + dy_all = np.atleast_2d(dy_all) + dy[:n_bound_below, :] = dy_all[i_bound_below] + dy[n_bound_below:, :] = -dy_all[i_bound_above] + return dy + cineq[0]["jac"] = j_ineq + + old_constraints = ceq + cineq + + if len(old_constraints) > 1: + warn("Equality and inequality constraints are specified in the same " + "element of the constraint list. For efficient use with this " + "method, equality and inequality constraints should be specified " + "in separate elements of the constraint list. 
", + OptimizeWarning, stacklevel=3) + return old_constraints + + +def old_constraint_to_new(ic, con): + """ + Converts old-style constraint dictionaries to new-style constraint objects. + """ + # check type + try: + ctype = con['type'].lower() + except KeyError as e: + raise KeyError('Constraint %d has no type defined.' % ic) from e + except TypeError as e: + raise TypeError( + 'Constraints must be a sequence of dictionaries.' + ) from e + except AttributeError as e: + raise TypeError("Constraint's type must be a string.") from e + else: + if ctype not in ['eq', 'ineq']: + raise ValueError("Unknown constraint type '%s'." % con['type']) + if 'fun' not in con: + raise ValueError('Constraint %d has no function defined.' % ic) + + lb = 0 + if ctype == 'eq': + ub = 0 + else: + ub = np.inf + + jac = '2-point' + if 'args' in con: + args = con['args'] + def fun(x): + return con["fun"](x, *args) + if 'jac' in con: + def jac(x): + return con["jac"](x, *args) + else: + fun = con['fun'] + if 'jac' in con: + jac = con['jac'] + + return NonlinearConstraint(fun, lb, ub, jac) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_differentiate.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_differentiate.py new file mode 100644 index 0000000000000000000000000000000000000000..0b59a4fdd9924857d9191b5532387ff085a466a5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_differentiate.py @@ -0,0 +1,669 @@ +# mypy: disable-error-code="attr-defined" +import numpy as np +import scipy._lib._elementwise_iterative_method as eim +from scipy._lib._util import _RichResult + +_EERRORINCREASE = -1 # used in _differentiate + +def _differentiate_iv(func, x, args, atol, rtol, maxiter, order, initial_step, + step_factor, step_direction, preserve_shape, callback): + # Input validation for `_differentiate` + + if not callable(func): + raise ValueError('`func` must be callable.') + + # x has more complex IV that is taken care of during initialization + x = np.asarray(x) + dtype = x.dtype if np.issubdtype(x.dtype, np.inexact) else np.float64 + + if not np.iterable(args): + args = (args,) + + if atol is None: + atol = np.finfo(dtype).tiny + + if rtol is None: + rtol = np.sqrt(np.finfo(dtype).eps) + + message = 'Tolerances and step parameters must be non-negative scalars.' + tols = np.asarray([atol, rtol, initial_step, step_factor]) + if (not np.issubdtype(tols.dtype, np.number) + or np.any(tols < 0) + or tols.shape != (4,)): + raise ValueError(message) + initial_step, step_factor = tols[2:].astype(dtype) + + maxiter_int = int(maxiter) + if maxiter != maxiter_int or maxiter <= 0: + raise ValueError('`maxiter` must be a positive integer.') + + order_int = int(order) + if order_int != order or order <= 0: + raise ValueError('`order` must be a positive integer.') + + step_direction = np.sign(step_direction).astype(dtype) + x, step_direction = np.broadcast_arrays(x, step_direction) + x, step_direction = x[()], step_direction[()] + + message = '`preserve_shape` must be True or False.' 
+ if preserve_shape not in {True, False}: + raise ValueError(message) + + if callback is not None and not callable(callback): + raise ValueError('`callback` must be callable.') + + return (func, x, args, atol, rtol, maxiter_int, order_int, initial_step, + step_factor, step_direction, preserve_shape, callback) + + +def _differentiate(func, x, *, args=(), atol=None, rtol=None, maxiter=10, + order=8, initial_step=0.5, step_factor=2.0, + step_direction=0, preserve_shape=False, callback=None): + """Evaluate the derivative of an elementwise scalar function numerically. + + Parameters + ---------- + func : callable + The function whose derivative is desired. The signature must be:: + + func(x: ndarray, *fargs) -> ndarray + + where each element of ``x`` is a finite real and ``fargs`` is a tuple, + which may contain an arbitrary number of arrays that are broadcastable + with `x`. ``func`` must be an elementwise function: each element + ``func(x)[i]`` must equal ``func(x[i])`` for all indices ``i``. + x : array_like + Abscissae at which to evaluate the derivative. + args : tuple, optional + Additional positional arguments to be passed to `func`. Must be arrays + broadcastable with `x`. If the callable to be differentiated requires + arguments that are not broadcastable with `x`, wrap that callable with + `func`. See Examples. + atol, rtol : float, optional + Absolute and relative tolerances for the stopping condition: iteration + will stop when ``res.error < atol + rtol * abs(res.df)``. The default + `atol` is the smallest normal number of the appropriate dtype, and + the default `rtol` is the square root of the precision of the + appropriate dtype. + order : int, default: 8 + The (positive integer) order of the finite difference formula to be + used. Odd integers will be rounded up to the next even integer. + initial_step : float, default: 0.5 + The (absolute) initial step size for the finite difference derivative + approximation. + step_factor : float, default: 2.0 + The factor by which the step size is *reduced* in each iteration; i.e. + the step size in iteration 1 is ``initial_step/step_factor``. If + ``step_factor < 1``, subsequent steps will be greater than the initial + step; this may be useful if steps smaller than some threshold are + undesirable (e.g. due to subtractive cancellation error). + maxiter : int, default: 10 + The maximum number of iterations of the algorithm to perform. See + notes. + step_direction : array_like + An array representing the direction of the finite difference steps (for + use when `x` lies near to the boundary of the domain of the function.) + Must be broadcastable with `x` and all `args`. + Where 0 (default), central differences are used; where negative (e.g. + -1), steps are non-positive; and where positive (e.g. 1), all steps are + non-negative. + preserve_shape : bool, default: False + In the following, "arguments of `func`" refers to the array ``x`` and + any arrays within ``fargs``. Let ``shape`` be the broadcasted shape + of `x` and all elements of `args` (which is conceptually + distinct from ``fargs`` passed into `f`). + + - When ``preserve_shape=False`` (default), `f` must accept arguments + of *any* broadcastable shapes. + + - When ``preserve_shape=True``, `f` must accept arguments of shape + ``shape`` *or* ``shape + (n,)``, where ``(n,)`` is the number of + abscissae at which the function is being evaluated. + + In either case, for each scalar element ``xi`` within `x`, the array + returned by `f` must include the scalar ``f(xi)`` at the same index. 
+ Consequently, the shape of the output is always the shape of the input + ``x``. + + See Examples. + callback : callable, optional + An optional user-supplied function to be called before the first + iteration and after each iteration. + Called as ``callback(res)``, where ``res`` is a ``_RichResult`` + similar to that returned by `_differentiate` (but containing the + current iterate's values of all variables). If `callback` raises a + ``StopIteration``, the algorithm will terminate immediately and + `_differentiate` will return a result. + + Returns + ------- + res : _RichResult + An instance of `scipy._lib._util._RichResult` with the following + attributes. (The descriptions are written as though the values will be + scalars; however, if `func` returns an array, the outputs will be + arrays of the same shape.) + + success : bool + ``True`` when the algorithm terminated successfully (status ``0``). + status : int + An integer representing the exit status of the algorithm. + ``0`` : The algorithm converged to the specified tolerances. + ``-1`` : The error estimate increased, so iteration was terminated. + ``-2`` : The maximum number of iterations was reached. + ``-3`` : A non-finite value was encountered. + ``-4`` : Iteration was terminated by `callback`. + ``1`` : The algorithm is proceeding normally (in `callback` only). + df : float + The derivative of `func` at `x`, if the algorithm terminated + successfully. + error : float + An estimate of the error: the magnitude of the difference between + the current estimate of the derivative and the estimate in the + previous iteration. + nit : int + The number of iterations performed. + nfev : int + The number of points at which `func` was evaluated. + x : float + The value at which the derivative of `func` was evaluated + (after broadcasting with `args` and `step_direction`). + + Notes + ----- + The implementation was inspired by jacobi [1]_, numdifftools [2]_, and + DERIVEST [3]_, but the implementation follows the theory of Taylor series + more straightforwardly (and arguably naively so). + In the first iteration, the derivative is estimated using a finite + difference formula of order `order` with maximum step size `initial_step`. + Each subsequent iteration, the maximum step size is reduced by + `step_factor`, and the derivative is estimated again until a termination + condition is reached. The error estimate is the magnitude of the difference + between the current derivative approximation and that of the previous + iteration. + + The stencils of the finite difference formulae are designed such that + abscissae are "nested": after `func` is evaluated at ``order + 1`` + points in the first iteration, `func` is evaluated at only two new points + in each subsequent iteration; ``order - 1`` previously evaluated function + values required by the finite difference formula are reused, and two + function values (evaluations at the points furthest from `x`) are unused. + + Step sizes are absolute. When the step size is small relative to the + magnitude of `x`, precision is lost; for example, if `x` is ``1e20``, the + default initial step size of ``0.5`` cannot be resolved. Accordingly, + consider using larger initial step sizes for large magnitudes of `x`. + + The default tolerances are challenging to satisfy at points where the + true derivative is exactly zero. If the derivative may be exactly zero, + consider specifying an absolute tolerance (e.g. ``atol=1e-16``) to + improve convergence. 
+ + References + ---------- + [1]_ Hans Dembinski (@HDembinski). jacobi. + https://github.com/HDembinski/jacobi + [2]_ Per A. Brodtkorb and John D'Errico. numdifftools. + https://numdifftools.readthedocs.io/en/latest/ + [3]_ John D'Errico. DERIVEST: Adaptive Robust Numerical Differentiation. + https://www.mathworks.com/matlabcentral/fileexchange/13490-adaptive-robust-numerical-differentiation + [4]_ Numerical Differentition. Wikipedia. + https://en.wikipedia.org/wiki/Numerical_differentiation + + Examples + -------- + Evaluate the derivative of ``np.exp`` at several points ``x``. + + >>> import numpy as np + >>> from scipy.optimize._differentiate import _differentiate + >>> f = np.exp + >>> df = np.exp # true derivative + >>> x = np.linspace(1, 2, 5) + >>> res = _differentiate(f, x) + >>> res.df # approximation of the derivative + array([2.71828183, 3.49034296, 4.48168907, 5.75460268, 7.3890561 ]) + >>> res.error # estimate of the error + array( + [7.12940817e-12, 9.16688947e-12, 1.17594823e-11, 1.50972568e-11, 1.93942640e-11] + ) + >>> abs(res.df - df(x)) # true error + array( + [3.06421555e-14, 3.01980663e-14, 5.06261699e-14, 6.30606678e-14, 8.34887715e-14] + ) + + Show the convergence of the approximation as the step size is reduced. + Each iteration, the step size is reduced by `step_factor`, so for + sufficiently small initial step, each iteration reduces the error by a + factor of ``1/step_factor**order`` until finite precision arithmetic + inhibits further improvement. + + >>> iter = list(range(1, 12)) # maximum iterations + >>> hfac = 2 # step size reduction per iteration + >>> hdir = [-1, 0, 1] # compare left-, central-, and right- steps + >>> order = 4 # order of differentiation formula + >>> x = 1 + >>> ref = df(x) + >>> errors = [] # true error + >>> for i in iter: + ... res = _differentiate(f, x, maxiter=i, step_factor=hfac, + ... step_direction=hdir, order=order, + ... atol=0, rtol=0) # prevent early termination + ... errors.append(abs(res.df - ref)) + >>> errors = np.array(errors) + >>> plt.semilogy(iter, errors[:, 0], label='left differences') + >>> plt.semilogy(iter, errors[:, 1], label='central differences') + >>> plt.semilogy(iter, errors[:, 2], label='right differences') + >>> plt.xlabel('iteration') + >>> plt.ylabel('error') + >>> plt.legend() + >>> plt.show() + >>> (errors[1, 1] / errors[0, 1], 1 / hfac**order) + (0.06215223140159822, 0.0625) + + The implementation is vectorized over `x`, `step_direction`, and `args`. + The function is evaluated once before the first iteration to perform input + validation and standardization, and once per iteration thereafter. + + >>> def f(x, p): + ... print('here') + ... f.nit += 1 + ... return x**p + >>> f.nit = 0 + >>> def df(x, p): + ... return p*x**(p-1) + >>> x = np.arange(1, 5) + >>> p = np.arange(1, 6).reshape((-1, 1)) + >>> hdir = np.arange(-1, 2).reshape((-1, 1, 1)) + >>> res = _differentiate(f, x, args=(p,), step_direction=hdir, maxiter=1) + >>> np.allclose(res.df, df(x, p)) + True + >>> res.df.shape + (3, 5, 4) + >>> f.nit + 2 + + By default, `preserve_shape` is False, and therefore the callable + `f` may be called with arrays of any broadcastable shapes. + For example: + + >>> shapes = [] + >>> def f(x, c): + ... shape = np.broadcast_shapes(x.shape, c.shape) + ... shapes.append(shape) + ... 
return np.sin(c*x) + >>> + >>> c = [1, 5, 10, 20] + >>> res = _differentiate(f, 0, args=(c,)) + >>> shapes + [(4,), (4, 8), (4, 2), (3, 2), (2, 2), (1, 2)] + + To understand where these shapes are coming from - and to better + understand how `_differentiate` computes accurate results - note that + higher values of ``c`` correspond with higher frequency sinusoids. + The higher frequency sinusoids make the function's derivative change + faster, so more function evaluations are required to achieve the target + accuracy: + + >>> res.nfev + array([11, 13, 15, 17]) + + The initial ``shape``, ``(4,)``, corresponds with evaluating the + function at a single abscissa and all four frequencies; this is used + for input validation and to determine the size and dtype of the arrays + that store results. The next shape corresponds with evaluating the + function at an initial grid of abscissae and all four frequencies. + Successive calls to the function evaluate the function at two more + abscissae, increasing the effective order of the approximation by two. + However, in later function evaluations, the function is evaluated at + fewer frequencies because the corresponding derivative has already + converged to the required tolerance. This saves function evaluations to + improve performance, but it requires the function to accept arguments of + any shape. + + "Vector-valued" functions are unlikely to satisfy this requirement. + For example, consider + + >>> def f(x): + ... return [x, np.sin(3*x), x+np.sin(10*x), np.sin(20*x)*(x-1)**2] + + This integrand is not compatible with `_differentiate` as written; for instance, + the shape of the output will not be the same as the shape of ``x``. Such a + function *could* be converted to a compatible form with the introduction of + additional parameters, but this would be inconvenient. In such cases, + a simpler solution would be to use `preserve_shape`. + + >>> shapes = [] + >>> def f(x): + ... shapes.append(x.shape) + ... x0, x1, x2, x3 = x + ... return [x0, np.sin(3*x1), x2+np.sin(10*x2), np.sin(20*x3)*(x3-1)**2] + >>> + >>> x = np.zeros(4) + >>> res = _differentiate(f, x, preserve_shape=True) + >>> shapes + [(4,), (4, 8), (4, 2), (4, 2), (4, 2), (4, 2)] + + Here, the shape of ``x`` is ``(4,)``. With ``preserve_shape=True``, the + function may be called with argument ``x`` of shape ``(4,)`` or ``(4, n)``, + and this is what we observe. + + """ + # TODO (followup): + # - investigate behavior at saddle points + # - array initial_step / step_factor? + # - multivariate functions? + + res = _differentiate_iv(func, x, args, atol, rtol, maxiter, order, initial_step, + step_factor, step_direction, preserve_shape, callback) + (func, x, args, atol, rtol, maxiter, order, + h0, fac, hdir, preserve_shape, callback) = res + + # Initialization + # Since f(x) (no step) is not needed for central differences, it may be + # possible to eliminate this function evaluation. However, it's useful for + # input validation and standardization, and everything else is designed to + # reduce function calls, so let's keep it simple. + temp = eim._initialize(func, (x,), args, preserve_shape=preserve_shape) + func, xs, fs, args, shape, dtype = temp + x, f = xs[0], fs[0] + df = np.full_like(f, np.nan) + # Ideally we'd broadcast the shape of `hdir` in `_elementwise_algo_init`, but + # it's simpler to do it here than to generalize `_elementwise_algo_init` further. + # `hdir` and `x` are already broadcasted in `_differentiate_iv`, so we know + # that `hdir` can be broadcasted to the final shape. 
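+    # For example, with ``x.shape == (3, 4)`` and a scalar `step_direction`,
+    # `hdir` is broadcast to shape ``(3, 4)`` and then flattened to length 12,
+    # matching the flattened working copy of `x`.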
+ hdir = np.broadcast_to(hdir, shape).flatten() + + status = np.full_like(x, eim._EINPROGRESS, dtype=int) # in progress + nit, nfev = 0, 1 # one function evaluations performed above + # Boolean indices of left, central, right, and (all) one-sided steps + il = hdir < 0 + ic = hdir == 0 + ir = hdir > 0 + io = il | ir + + # Most of these attributes are reasonably obvious, but: + # - `fs` holds all the function values of all active `x`. The zeroth + # axis corresponds with active points `x`, the first axis corresponds + # with the different steps (in the order described in + # `_differentiate_weights`). + # - `terms` (which could probably use a better name) is half the `order`, + # which is always even. + work = _RichResult(x=x, df=df, fs=f[:, np.newaxis], error=np.nan, h=h0, + df_last=np.nan, error_last=np.nan, h0=h0, fac=fac, + atol=atol, rtol=rtol, nit=nit, nfev=nfev, + status=status, dtype=dtype, terms=(order+1)//2, + hdir=hdir, il=il, ic=ic, ir=ir, io=io) + # This is the correspondence between terms in the `work` object and the + # final result. In this case, the mapping is trivial. Note that `success` + # is prepended automatically. + res_work_pairs = [('status', 'status'), ('df', 'df'), ('error', 'error'), + ('nit', 'nit'), ('nfev', 'nfev'), ('x', 'x')] + + def pre_func_eval(work): + """Determine the abscissae at which the function needs to be evaluated. + + See `_differentiate_weights` for a description of the stencil (pattern + of the abscissae). + + In the first iteration, there is only one stored function value in + `work.fs`, `f(x)`, so we need to evaluate at `order` new points. In + subsequent iterations, we evaluate at two new points. Note that + `work.x` is always flattened into a 1D array after broadcasting with + all `args`, so we add a new axis at the end and evaluate all point + in one call to the function. + + For improvement: + - Consider measuring the step size actually taken, since `(x + h) - x` + is not identically equal to `h` with floating point arithmetic. + - Adjust the step size automatically if `x` is too big to resolve the + step. + - We could probably save some work if there are no central difference + steps or no one-sided steps. + """ + n = work.terms # half the order + h = work.h # step size + c = work.fac # step reduction factor + d = c**0.5 # square root of step reduction factor (one-sided stencil) + # Note - no need to be careful about dtypes until we allocate `x_eval` + + if work.nit == 0: + hc = h / c**np.arange(n) + hc = np.concatenate((-hc[::-1], hc)) + else: + hc = np.asarray([-h, h]) / c**(n-1) + + if work.nit == 0: + hr = h / d**np.arange(2*n) + else: + hr = np.asarray([h, h/d]) / c**(n-1) + + n_new = 2*n if work.nit == 0 else 2 # number of new abscissae + x_eval = np.zeros((len(work.hdir), n_new), dtype=work.dtype) + il, ic, ir = work.il, work.ic, work.ir + x_eval[ir] = work.x[ir, np.newaxis] + hr + x_eval[ic] = work.x[ic, np.newaxis] + hc + x_eval[il] = work.x[il, np.newaxis] - hr + return x_eval + + def post_func_eval(x, f, work): + """ Estimate the derivative and error from the function evaluations + + As in `pre_func_eval`: in the first iteration, there is only one stored + function value in `work.fs`, `f(x)`, so we need to add the `order` new + points. In subsequent iterations, we add two new points. The tricky + part is getting the order to match that of the weights, which is + described in `_differentiate_weights`. 
+
+        For improvement:
+        - Change the order of the weights (and steps in `pre_func_eval`) to
+          simplify `work_fc` concatenation and eliminate `fc` concatenation.
+        - It would be simple to do one-step Richardson extrapolation with `df`
+          and `df_last` to increase the order of the estimate and/or improve
+          the error estimate.
+        - Process the function evaluations in a more numerically favorable
+          way. For instance, combining the pairs of central difference evals
+          into a second-order approximation and using Richardson extrapolation
+          to produce a higher order approximation seemed to retain accuracy up
+          to very high order.
+        - Alternatively, we could use `polyfit` like Jacobi. An advantage of
+          fitting polynomial to more points than necessary is improved noise
+          tolerance.
+        """
+        n = work.terms
+        n_new = n if work.nit == 0 else 1
+        il, ic, io = work.il, work.ic, work.io
+
+        # Central difference
+        # `work_fc` is *all* the points at which the function has been evaluated
+        # `fc` is the points we're using *this iteration* to produce the estimate
+        work_fc = (f[ic, :n_new], work.fs[ic, :], f[ic, -n_new:])
+        work_fc = np.concatenate(work_fc, axis=-1)
+        if work.nit == 0:
+            fc = work_fc
+        else:
+            fc = (work_fc[:, :n], work_fc[:, n:n+1], work_fc[:, -n:])
+            fc = np.concatenate(fc, axis=-1)
+
+        # One-sided difference
+        work_fo = np.concatenate((work.fs[io, :], f[io, :]), axis=-1)
+        if work.nit == 0:
+            fo = work_fo
+        else:
+            fo = np.concatenate((work_fo[:, 0:1], work_fo[:, -2*n:]), axis=-1)
+
+        work.fs = np.zeros((len(ic), work.fs.shape[-1] + 2*n_new))
+        work.fs[ic] = work_fc
+        work.fs[io] = work_fo
+
+        wc, wo = _differentiate_weights(work, n)
+        work.df_last = work.df.copy()
+        work.df[ic] = fc @ wc / work.h
+        work.df[io] = fo @ wo / work.h
+        work.df[il] *= -1
+
+        work.h /= work.fac
+        work.error_last = work.error
+        # Simple error estimate - the difference in derivative estimates between
+        # this iteration and the last. This is typically conservative because if
+        # convergence has begun, the true error is much closer to the difference
+        # between the current estimate and the *next* error estimate. However,
+        # we could use Richardson extrapolation to produce an error estimate that
+        # is one order higher, and take the difference between that and
+        # `work.df` (which would just be a constant factor that depends on `fac`.)
+        work.error = abs(work.df - work.df_last)
+
+    def check_termination(work):
+        """Terminate due to convergence, non-finite values, or error increase"""
+        stop = np.zeros_like(work.df).astype(bool)
+
+        i = work.error < work.atol + work.rtol*abs(work.df)
+        work.status[i] = eim._ECONVERGED
+        stop[i] = True
+
+        if work.nit > 0:
+            i = ~((np.isfinite(work.x) & np.isfinite(work.df)) | stop)
+            work.df[i], work.status[i] = np.nan, eim._EVALUEERR
+            stop[i] = True
+
+        # With infinite precision, there is a step size below which
+        # all smaller step sizes will reduce the error. But in floating point
+        # arithmetic, catastrophic cancellation will begin to cause the error
+        # to increase again. This heuristic tries to avoid step sizes that are
+        # too small. There may be more theoretically sound approaches for
+        # detecting a step size that minimizes the total error, but this
+        # heuristic seems simple and effective.
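+        # Concretely: an element is flagged only if its error estimate grew by
+        # more than a factor of 10 relative to the previous iteration and it
+        # has not already been stopped for another reason.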
+ i = (work.error > work.error_last*10) & ~stop + work.status[i] = _EERRORINCREASE + stop[i] = True + + return stop + + def post_termination_check(work): + return + + def customize_result(res, shape): + return shape + + return eim._loop(work, callback, shape, maxiter, func, args, dtype, + pre_func_eval, post_func_eval, check_termination, + post_termination_check, customize_result, res_work_pairs, + preserve_shape) + + +def _differentiate_weights(work, n): + # This produces the weights of the finite difference formula for a given + # stencil. In experiments, use of a second-order central difference formula + # with Richardson extrapolation was more accurate numerically, but it was + # more complicated, and it would have become even more complicated when + # adding support for one-sided differences. However, now that all the + # function evaluation values are stored, they can be processed in whatever + # way is desired to produce the derivative estimate. We leave alternative + # approaches to future work. To be more self-contained, here is the theory + # for deriving the weights below. + # + # Recall that the Taylor expansion of a univariate, scalar-values function + # about a point `x` may be expressed as: + # f(x + h) = f(x) + f'(x)*h + f''(x)/2!*h**2 + O(h**3) + # Suppose we evaluate f(x), f(x+h), and f(x-h). We have: + # f(x) = f(x) + # f(x + h) = f(x) + f'(x)*h + f''(x)/2!*h**2 + O(h**3) + # f(x - h) = f(x) - f'(x)*h + f''(x)/2!*h**2 + O(h**3) + # We can solve for weights `wi` such that: + # w1*f(x) = w1*(f(x)) + # + w2*f(x + h) = w2*(f(x) + f'(x)*h + f''(x)/2!*h**2) + O(h**3) + # + w3*f(x - h) = w3*(f(x) - f'(x)*h + f''(x)/2!*h**2) + O(h**3) + # = 0 + f'(x)*h + 0 + O(h**3) + # Then + # f'(x) ~ (w1*f(x) + w2*f(x+h) + w3*f(x-h))/h + # is a finite difference derivative approximation with error O(h**2), + # and so it is said to be a "second-order" approximation. Under certain + # conditions (e.g. well-behaved function, `h` sufficiently small), the + # error in the approximation will decrease with h**2; that is, if `h` is + # reduced by a factor of 2, the error is reduced by a factor of 4. + # + # By default, we use eighth-order formulae. Our central-difference formula + # uses abscissae: + # x-h/c**3, x-h/c**2, x-h/c, x-h, x, x+h, x+h/c, x+h/c**2, x+h/c**3 + # where `c` is the step factor. (Typically, the step factor is greater than + # one, so the outermost points - as written above - are actually closest to + # `x`.) This "stencil" is chosen so that each iteration, the step can be + # reduced by the factor `c`, and most of the function evaluations can be + # reused with the new step size. For example, in the next iteration, we + # will have: + # x-h/c**4, x-h/c**3, x-h/c**2, x-h/c, x, x+h/c, x+h/c**2, x+h/c**3, x+h/c**4 + # We do not reuse `x-h` and `x+h` for the new derivative estimate. + # While this would increase the order of the formula and thus the + # theoretical convergence rate, it is also less stable numerically. + # (As noted above, there are other ways of processing the values that are + # more stable. Thus, even now we store `f(x-h)` and `f(x+h)` in `work.fs` + # to simplify future development of this sort of improvement.) + # + # The (right) one-sided formula is produced similarly using abscissae + # x, x+h, x+h/d, x+h/d**2, ..., x+h/d**6, x+h/d**7, x+h/d**7 + # where `d` is the square root of `c`. (The left one-sided formula simply + # uses -h.) 
When the step size is reduced by factor `c = d**2`, we have + # abscissae: + # x, x+h/d**2, x+h/d**3..., x+h/d**8, x+h/d**9, x+h/d**9 + # `d` is chosen as the square root of `c` so that the rate of the step-size + # reduction is the same per iteration as in the central difference case. + # Note that because the central difference formulas are inherently of even + # order, for simplicity, we use only even-order formulas for one-sided + # differences, too. + + # It's possible for the user to specify `fac` in, say, double precision but + # `x` and `args` in single precision. `fac` gets converted to single + # precision, but we should always use double precision for the intermediate + # calculations here to avoid additional error in the weights. + fac = work.fac.astype(np.float64) + + # Note that if the user switches back to floating point precision with + # `x` and `args`, then `fac` will not necessarily equal the (lower + # precision) cached `_differentiate_weights.fac`, and the weights will + # need to be recalculated. This could be fixed, but it's late, and of + # low consequence. + if fac != _differentiate_weights.fac: + _differentiate_weights.central = [] + _differentiate_weights.right = [] + _differentiate_weights.fac = fac + + if len(_differentiate_weights.central) != 2*n + 1: + # Central difference weights. Consider refactoring this; it could + # probably be more compact. + i = np.arange(-n, n + 1) + p = np.abs(i) - 1. # center point has power `p` -1, but sign `s` is 0 + s = np.sign(i) + + h = s / fac ** p + A = np.vander(h, increasing=True).T + b = np.zeros(2*n + 1) + b[1] = 1 + weights = np.linalg.solve(A, b) + + # Enforce identities to improve accuracy + weights[n] = 0 + for i in range(n): + weights[-i-1] = -weights[i] + + # Cache the weights. We only need to calculate them once unless + # the step factor changes. + _differentiate_weights.central = weights + + # One-sided difference weights. The left one-sided weights (with + # negative steps) are simply the negative of the right one-sided + # weights, so no need to compute them separately. + i = np.arange(2*n + 1) + p = i - 1. 
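+        # (``i = 0`` corresponds to the central point ``x`` itself: its sign
+        # ``s[0] = 0`` makes the computed offset ``h[0]`` below exactly zero.)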
+ s = np.sign(i) + + h = s / np.sqrt(fac) ** p + A = np.vander(h, increasing=True).T + b = np.zeros(2 * n + 1) + b[1] = 1 + weights = np.linalg.solve(A, b) + + _differentiate_weights.right = weights + + return (_differentiate_weights.central.astype(work.dtype, copy=False), + _differentiate_weights.right.astype(work.dtype, copy=False)) +_differentiate_weights.central = [] +_differentiate_weights.right = [] +_differentiate_weights.fac = None diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/__init__.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bb55ebc32e88a05c2cd96f42e122cdf30710ca7b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/_highs_constants.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/_highs_constants.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..5b4aa4a483ef8bab2a33e8f2dac7875b508d26f5 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/_highs_constants.cpython-310-x86_64-linux-gnu.so differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HConst.pxd b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HConst.pxd new file mode 100644 index 0000000000000000000000000000000000000000..503d9e74a2636d2ee192491214102b84b3c67277 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HConst.pxd @@ -0,0 +1,106 @@ +# cython: language_level=3 + +from libcpp cimport bool +from libcpp.string cimport string + +cdef extern from "HConst.h" nogil: + + const int HIGHS_CONST_I_INF "kHighsIInf" + const double HIGHS_CONST_INF "kHighsInf" + const double kHighsTiny + const double kHighsZero + const int kHighsThreadLimit + + cdef enum HighsDebugLevel: + HighsDebugLevel_kHighsDebugLevelNone "kHighsDebugLevelNone" = 0 + HighsDebugLevel_kHighsDebugLevelCheap "kHighsDebugLevelCheap" + HighsDebugLevel_kHighsDebugLevelCostly "kHighsDebugLevelCostly" + HighsDebugLevel_kHighsDebugLevelExpensive "kHighsDebugLevelExpensive" + HighsDebugLevel_kHighsDebugLevelMin "kHighsDebugLevelMin" = HighsDebugLevel_kHighsDebugLevelNone + HighsDebugLevel_kHighsDebugLevelMax "kHighsDebugLevelMax" = HighsDebugLevel_kHighsDebugLevelExpensive + + ctypedef enum HighsModelStatus: + HighsModelStatusNOTSET "HighsModelStatus::kNotset" = 0 + HighsModelStatusLOAD_ERROR "HighsModelStatus::kLoadError" + HighsModelStatusMODEL_ERROR "HighsModelStatus::kModelError" + HighsModelStatusPRESOLVE_ERROR "HighsModelStatus::kPresolveError" + HighsModelStatusSOLVE_ERROR "HighsModelStatus::kSolveError" + HighsModelStatusPOSTSOLVE_ERROR "HighsModelStatus::kPostsolveError" + HighsModelStatusMODEL_EMPTY "HighsModelStatus::kModelEmpty" + HighsModelStatusOPTIMAL "HighsModelStatus::kOptimal" + HighsModelStatusINFEASIBLE "HighsModelStatus::kInfeasible" + HighsModelStatus_UNBOUNDED_OR_INFEASIBLE 
"HighsModelStatus::kUnboundedOrInfeasible" + HighsModelStatusUNBOUNDED "HighsModelStatus::kUnbounded" + HighsModelStatusREACHED_DUAL_OBJECTIVE_VALUE_UPPER_BOUND "HighsModelStatus::kObjectiveBound" + HighsModelStatusREACHED_OBJECTIVE_TARGET "HighsModelStatus::kObjectiveTarget" + HighsModelStatusREACHED_TIME_LIMIT "HighsModelStatus::kTimeLimit" + HighsModelStatusREACHED_ITERATION_LIMIT "HighsModelStatus::kIterationLimit" + HighsModelStatusUNKNOWN "HighsModelStatus::kUnknown" + HighsModelStatusHIGHS_MODEL_STATUS_MIN "HighsModelStatus::kMin" = HighsModelStatusNOTSET + HighsModelStatusHIGHS_MODEL_STATUS_MAX "HighsModelStatus::kMax" = HighsModelStatusUNKNOWN + + cdef enum HighsBasisStatus: + HighsBasisStatusLOWER "HighsBasisStatus::kLower" = 0, # (slack) variable is at its lower bound [including fixed variables] + HighsBasisStatusBASIC "HighsBasisStatus::kBasic" # (slack) variable is basic + HighsBasisStatusUPPER "HighsBasisStatus::kUpper" # (slack) variable is at its upper bound + HighsBasisStatusZERO "HighsBasisStatus::kZero" # free variable is non-basic and set to zero + HighsBasisStatusNONBASIC "HighsBasisStatus::kNonbasic" # nonbasic with no specific bound information - useful for users and postsolve + + cdef enum SolverOption: + SOLVER_OPTION_SIMPLEX "SolverOption::SOLVER_OPTION_SIMPLEX" = -1 + SOLVER_OPTION_CHOOSE "SolverOption::SOLVER_OPTION_CHOOSE" + SOLVER_OPTION_IPM "SolverOption::SOLVER_OPTION_IPM" + + cdef enum PrimalDualStatus: + PrimalDualStatusSTATUS_NOT_SET "PrimalDualStatus::STATUS_NOT_SET" = -1 + PrimalDualStatusSTATUS_MIN "PrimalDualStatus::STATUS_MIN" = PrimalDualStatusSTATUS_NOT_SET + PrimalDualStatusSTATUS_NO_SOLUTION "PrimalDualStatus::STATUS_NO_SOLUTION" + PrimalDualStatusSTATUS_UNKNOWN "PrimalDualStatus::STATUS_UNKNOWN" + PrimalDualStatusSTATUS_INFEASIBLE_POINT "PrimalDualStatus::STATUS_INFEASIBLE_POINT" + PrimalDualStatusSTATUS_FEASIBLE_POINT "PrimalDualStatus::STATUS_FEASIBLE_POINT" + PrimalDualStatusSTATUS_MAX "PrimalDualStatus::STATUS_MAX" = PrimalDualStatusSTATUS_FEASIBLE_POINT + + cdef enum HighsOptionType: + HighsOptionTypeBOOL "HighsOptionType::kBool" = 0 + HighsOptionTypeINT "HighsOptionType::kInt" + HighsOptionTypeDOUBLE "HighsOptionType::kDouble" + HighsOptionTypeSTRING "HighsOptionType::kString" + + # workaround for lack of enum class support in Cython < 3.x + # cdef enum class ObjSense(int): + # ObjSenseMINIMIZE "ObjSense::kMinimize" = 1 + # ObjSenseMAXIMIZE "ObjSense::kMaximize" = -1 + + cdef cppclass ObjSense: + pass + + cdef ObjSense ObjSenseMINIMIZE "ObjSense::kMinimize" + cdef ObjSense ObjSenseMAXIMIZE "ObjSense::kMaximize" + + # cdef enum class MatrixFormat(int): + # MatrixFormatkColwise "MatrixFormat::kColwise" = 1 + # MatrixFormatkRowwise "MatrixFormat::kRowwise" + # MatrixFormatkRowwisePartitioned "MatrixFormat::kRowwisePartitioned" + + cdef cppclass MatrixFormat: + pass + + cdef MatrixFormat MatrixFormatkColwise "MatrixFormat::kColwise" + cdef MatrixFormat MatrixFormatkRowwise "MatrixFormat::kRowwise" + cdef MatrixFormat MatrixFormatkRowwisePartitioned "MatrixFormat::kRowwisePartitioned" + + # cdef enum class HighsVarType(int): + # kContinuous "HighsVarType::kContinuous" + # kInteger "HighsVarType::kInteger" + # kSemiContinuous "HighsVarType::kSemiContinuous" + # kSemiInteger "HighsVarType::kSemiInteger" + # kImplicitInteger "HighsVarType::kImplicitInteger" + + cdef cppclass HighsVarType: + pass + + cdef HighsVarType kContinuous "HighsVarType::kContinuous" + cdef HighsVarType kInteger "HighsVarType::kInteger" + cdef HighsVarType kSemiContinuous 
"HighsVarType::kSemiContinuous" + cdef HighsVarType kSemiInteger "HighsVarType::kSemiInteger" + cdef HighsVarType kImplicitInteger "HighsVarType::kImplicitInteger" diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/Highs.pxd b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/Highs.pxd new file mode 100644 index 0000000000000000000000000000000000000000..7139908d034127430b81f667548a055404ac033a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/Highs.pxd @@ -0,0 +1,56 @@ +# cython: language_level=3 + +from libc.stdio cimport FILE + +from libcpp cimport bool +from libcpp.string cimport string + +from .HighsStatus cimport HighsStatus +from .HighsOptions cimport HighsOptions +from .HighsInfo cimport HighsInfo +from .HighsLp cimport ( + HighsLp, + HighsSolution, + HighsBasis, + ObjSense, +) +from .HConst cimport HighsModelStatus + +cdef extern from "Highs.h": + # From HiGHS/src/Highs.h + cdef cppclass Highs: + HighsStatus passHighsOptions(const HighsOptions& options) + HighsStatus passModel(const HighsLp& lp) + HighsStatus run() + HighsStatus setHighsLogfile(FILE* logfile) + HighsStatus setHighsOutput(FILE* output) + HighsStatus writeHighsOptions(const string filename, const bool report_only_non_default_values = true) + + # split up for cython below + #const HighsModelStatus& getModelStatus(const bool scaled_model = False) const + const HighsModelStatus & getModelStatus() const + + const HighsInfo& getHighsInfo "getInfo" () const + string modelStatusToString(const HighsModelStatus model_status) const + #HighsStatus getHighsInfoValue(const string& info, int& value) + HighsStatus getHighsInfoValue(const string& info, double& value) const + const HighsOptions& getHighsOptions() const + + const HighsLp& getLp() const + + HighsStatus writeSolution(const string filename, const bool pretty) const + + HighsStatus setBasis() + const HighsSolution& getSolution() const + const HighsBasis& getBasis() const + + bool changeObjectiveSense(const ObjSense sense) + + HighsStatus setHighsOptionValueBool "setOptionValue" (const string & option, const bool value) + HighsStatus setHighsOptionValueInt "setOptionValue" (const string & option, const int value) + HighsStatus setHighsOptionValueStr "setOptionValue" (const string & option, const string & value) + HighsStatus setHighsOptionValueDbl "setOptionValue" (const string & option, const double value) + + string primalDualStatusToString(const int primal_dual_status) + + void resetGlobalScheduler(bool blocking) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsIO.pxd b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsIO.pxd new file mode 100644 index 0000000000000000000000000000000000000000..82b80ae643f10be9c0e40c43f8ff0693c649052c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsIO.pxd @@ -0,0 +1,20 @@ +# cython: language_level=3 + + +cdef extern from "HighsIO.h" nogil: + # workaround for lack of enum class support in Cython < 3.x + # cdef enum class HighsLogType(int): + # kInfo "HighsLogType::kInfo" = 1 + # kDetailed "HighsLogType::kDetailed" + # kVerbose "HighsLogType::kVerbose" + # kWarning "HighsLogType::kWarning" + # kError "HighsLogType::kError" + + cdef cppclass HighsLogType: + pass + + cdef HighsLogType kInfo "HighsLogType::kInfo" + cdef HighsLogType kDetailed "HighsLogType::kDetailed" + cdef HighsLogType kVerbose 
"HighsLogType::kVerbose" + cdef HighsLogType kWarning "HighsLogType::kWarning" + cdef HighsLogType kError "HighsLogType::kError" diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsInfo.pxd b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsInfo.pxd new file mode 100644 index 0000000000000000000000000000000000000000..789b510898967499b1f04129b742cf505f4af75a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsInfo.pxd @@ -0,0 +1,22 @@ +# cython: language_level=3 + +cdef extern from "HighsInfo.h" nogil: + # From HiGHS/src/lp_data/HighsInfo.h + cdef cppclass HighsInfo: + # Inherited from HighsInfoStruct: + int mip_node_count + int simplex_iteration_count + int ipm_iteration_count + int crossover_iteration_count + int primal_solution_status + int dual_solution_status + int basis_validity + double objective_function_value + double mip_dual_bound + double mip_gap + int num_primal_infeasibilities + double max_primal_infeasibility + double sum_primal_infeasibilities + int num_dual_infeasibilities + double max_dual_infeasibility + double sum_dual_infeasibilities diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsLp.pxd b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsLp.pxd new file mode 100644 index 0000000000000000000000000000000000000000..0944f083743f1c34847c3060278f5b7c40869251 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsLp.pxd @@ -0,0 +1,46 @@ +# cython: language_level=3 + +from libcpp cimport bool +from libcpp.string cimport string +from libcpp.vector cimport vector + +from .HConst cimport HighsBasisStatus, ObjSense, HighsVarType +from .HighsSparseMatrix cimport HighsSparseMatrix + + +cdef extern from "HighsLp.h" nogil: + # From HiGHS/src/lp_data/HighsLp.h + cdef cppclass HighsLp: + int num_col_ + int num_row_ + + vector[double] col_cost_ + vector[double] col_lower_ + vector[double] col_upper_ + vector[double] row_lower_ + vector[double] row_upper_ + + HighsSparseMatrix a_matrix_ + + ObjSense sense_ + double offset_ + + string model_name_ + + vector[string] row_names_ + vector[string] col_names_ + + vector[HighsVarType] integrality_ + + bool isMip() const + + cdef cppclass HighsSolution: + vector[double] col_value + vector[double] col_dual + vector[double] row_value + vector[double] row_dual + + cdef cppclass HighsBasis: + bool valid_ + vector[HighsBasisStatus] col_status + vector[HighsBasisStatus] row_status diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsLpUtils.pxd b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsLpUtils.pxd new file mode 100644 index 0000000000000000000000000000000000000000..18ede36c146acb395754fef33e888c7434ed307f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsLpUtils.pxd @@ -0,0 +1,9 @@ +# cython: language_level=3 + +from .HighsStatus cimport HighsStatus +from .HighsLp cimport HighsLp +from .HighsOptions cimport HighsOptions + +cdef extern from "HighsLpUtils.h" nogil: + # From HiGHS/src/lp_data/HighsLpUtils.h + HighsStatus assessLp(HighsLp& lp, const HighsOptions& options) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsModelUtils.pxd b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsModelUtils.pxd new file mode 100644 index 
0000000000000000000000000000000000000000..4fccc2e80046d0cee3011eaef7510802b226a85e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsModelUtils.pxd @@ -0,0 +1,10 @@ +# cython: language_level=3 + +from libcpp.string cimport string + +from .HConst cimport HighsModelStatus + +cdef extern from "HighsModelUtils.h" nogil: + # From HiGHS/src/lp_data/HighsModelUtils.h + string utilHighsModelStatusToString(const HighsModelStatus model_status) + string utilBasisStatusToString(const int primal_dual_status) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsOptions.pxd b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsOptions.pxd new file mode 100644 index 0000000000000000000000000000000000000000..920c10c19e30cad9229ce98bfa1e73970feb9f1e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsOptions.pxd @@ -0,0 +1,110 @@ +# cython: language_level=3 + +from libc.stdio cimport FILE + +from libcpp cimport bool +from libcpp.string cimport string +from libcpp.vector cimport vector + +from .HConst cimport HighsOptionType + +cdef extern from "HighsOptions.h" nogil: + + cdef cppclass OptionRecord: + HighsOptionType type + string name + string description + bool advanced + + cdef cppclass OptionRecordBool(OptionRecord): + bool* value + bool default_value + + cdef cppclass OptionRecordInt(OptionRecord): + int* value + int lower_bound + int default_value + int upper_bound + + cdef cppclass OptionRecordDouble(OptionRecord): + double* value + double lower_bound + double default_value + double upper_bound + + cdef cppclass OptionRecordString(OptionRecord): + string* value + string default_value + + cdef cppclass HighsOptions: + # From HighsOptionsStruct: + + # Options read from the command line + string model_file + string presolve + string solver + string parallel + double time_limit + string options_file + + # Options read from the file + double infinite_cost + double infinite_bound + double small_matrix_value + double large_matrix_value + double primal_feasibility_tolerance + double dual_feasibility_tolerance + double ipm_optimality_tolerance + double dual_objective_value_upper_bound + int highs_debug_level + int simplex_strategy + int simplex_scale_strategy + int simplex_crash_strategy + int simplex_dual_edge_weight_strategy + int simplex_primal_edge_weight_strategy + int simplex_iteration_limit + int simplex_update_limit + int ipm_iteration_limit + int highs_min_threads + int highs_max_threads + int message_level + string solution_file + bool write_solution_to_file + bool write_solution_pretty + + # Advanced options + bool run_crossover + bool mps_parser_type_free + int keep_n_rows + int allowed_simplex_matrix_scale_factor + int allowed_simplex_cost_scale_factor + int simplex_dualise_strategy + int simplex_permute_strategy + int dual_simplex_cleanup_strategy + int simplex_price_strategy + int dual_chuzc_sort_strategy + bool simplex_initial_condition_check + double simplex_initial_condition_tolerance + double dual_steepest_edge_weight_log_error_threshhold + double dual_simplex_cost_perturbation_multiplier + double start_crossover_tolerance + bool less_infeasible_DSE_check + bool less_infeasible_DSE_choose_row + bool use_original_HFactor_logic + + # Options for MIP solver + int mip_max_nodes + int mip_report_level + + # Switch for MIP solver + bool mip + + # Options for HighsPrintMessage and HighsLogMessage + FILE* logfile + FILE* output + int 
message_level + string solution_file + bool write_solution_to_file + bool write_solution_pretty + + vector[OptionRecord*] records diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsRuntimeOptions.pxd b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsRuntimeOptions.pxd new file mode 100644 index 0000000000000000000000000000000000000000..3e227b7a44f797469bab9ad8521c7c0273eca7d2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsRuntimeOptions.pxd @@ -0,0 +1,9 @@ +# cython: language_level=3 + +from libcpp cimport bool + +from .HighsOptions cimport HighsOptions + +cdef extern from "HighsRuntimeOptions.h" nogil: + # From HiGHS/src/lp_data/HighsRuntimeOptions.h + bool loadOptions(int argc, char** argv, HighsOptions& options) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsStatus.pxd b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsStatus.pxd new file mode 100644 index 0000000000000000000000000000000000000000..b47813b5d3917c3734476980e26b532dfc37aac4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsStatus.pxd @@ -0,0 +1,12 @@ +# cython: language_level=3 + +from libcpp.string cimport string + +cdef extern from "HighsStatus.h" nogil: + ctypedef enum HighsStatus: + HighsStatusError "HighsStatus::kError" = -1 + HighsStatusOK "HighsStatus::kOk" = 0 + HighsStatusWarning "HighsStatus::kWarning" = 1 + + + string highsStatusToString(HighsStatus status) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/SimplexConst.pxd b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/SimplexConst.pxd new file mode 100644 index 0000000000000000000000000000000000000000..77e7b96320d6fab81009e6a80e784c722e036b4f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/SimplexConst.pxd @@ -0,0 +1,95 @@ +# cython: language_level=3 + +from libcpp cimport bool + +cdef extern from "SimplexConst.h" nogil: + + cdef enum SimplexAlgorithm: + PRIMAL "SimplexAlgorithm::kPrimal" = 0 + DUAL "SimplexAlgorithm::kDual" + + cdef enum SimplexStrategy: + SIMPLEX_STRATEGY_MIN "SimplexStrategy::kSimplexStrategyMin" = 0 + SIMPLEX_STRATEGY_CHOOSE "SimplexStrategy::kSimplexStrategyChoose" = SIMPLEX_STRATEGY_MIN + SIMPLEX_STRATEGY_DUAL "SimplexStrategy::kSimplexStrategyDual" + SIMPLEX_STRATEGY_DUAL_PLAIN "SimplexStrategy::kSimplexStrategyDualPlain" = SIMPLEX_STRATEGY_DUAL + SIMPLEX_STRATEGY_DUAL_TASKS "SimplexStrategy::kSimplexStrategyDualTasks" + SIMPLEX_STRATEGY_DUAL_MULTI "SimplexStrategy::kSimplexStrategyDualMulti" + SIMPLEX_STRATEGY_PRIMAL "SimplexStrategy::kSimplexStrategyPrimal" + SIMPLEX_STRATEGY_MAX "SimplexStrategy::kSimplexStrategyMax" = SIMPLEX_STRATEGY_PRIMAL + SIMPLEX_STRATEGY_NUM "SimplexStrategy::kSimplexStrategyNum" + + cdef enum SimplexCrashStrategy: + SIMPLEX_CRASH_STRATEGY_MIN "SimplexCrashStrategy::kSimplexCrashStrategyMin" = 0 + SIMPLEX_CRASH_STRATEGY_OFF "SimplexCrashStrategy::kSimplexCrashStrategyOff" = SIMPLEX_CRASH_STRATEGY_MIN + SIMPLEX_CRASH_STRATEGY_LTSSF_K "SimplexCrashStrategy::kSimplexCrashStrategyLtssfK" + SIMPLEX_CRASH_STRATEGY_LTSSF "SimplexCrashStrategy::kSimplexCrashStrategyLtssf" = SIMPLEX_CRASH_STRATEGY_LTSSF_K + SIMPLEX_CRASH_STRATEGY_BIXBY "SimplexCrashStrategy::kSimplexCrashStrategyBixby" + SIMPLEX_CRASH_STRATEGY_LTSSF_PRI 
"SimplexCrashStrategy::kSimplexCrashStrategyLtssfPri" + SIMPLEX_CRASH_STRATEGY_LTSF_K "SimplexCrashStrategy::kSimplexCrashStrategyLtsfK" + SIMPLEX_CRASH_STRATEGY_LTSF_PRI "SimplexCrashStrategy::kSimplexCrashStrategyLtsfPri" + SIMPLEX_CRASH_STRATEGY_LTSF "SimplexCrashStrategy::kSimplexCrashStrategyLtsf" + SIMPLEX_CRASH_STRATEGY_BIXBY_NO_NONZERO_COL_COSTS "SimplexCrashStrategy::kSimplexCrashStrategyBixbyNoNonzeroColCosts" + SIMPLEX_CRASH_STRATEGY_BASIC "SimplexCrashStrategy::kSimplexCrashStrategyBasic" + SIMPLEX_CRASH_STRATEGY_TEST_SING "SimplexCrashStrategy::kSimplexCrashStrategyTestSing" + SIMPLEX_CRASH_STRATEGY_MAX "SimplexCrashStrategy::kSimplexCrashStrategyMax" = SIMPLEX_CRASH_STRATEGY_TEST_SING + + cdef enum SimplexEdgeWeightStrategy: + SIMPLEX_EDGE_WEIGHT_STRATEGY_MIN "SimplexEdgeWeightStrategy::kSimplexEdgeWeightStrategyMin" = -1 + SIMPLEX_EDGE_WEIGHT_STRATEGY_CHOOSE "SimplexEdgeWeightStrategy::kSimplexEdgeWeightStrategyChoose" = SIMPLEX_EDGE_WEIGHT_STRATEGY_MIN + SIMPLEX_EDGE_WEIGHT_STRATEGY_DANTZIG "SimplexEdgeWeightStrategy::kSimplexEdgeWeightStrategyDantzig" + SIMPLEX_EDGE_WEIGHT_STRATEGY_DEVEX "SimplexEdgeWeightStrategy::kSimplexEdgeWeightStrategyDevex" + SIMPLEX_EDGE_WEIGHT_STRATEGY_STEEPEST_EDGE "SimplexEdgeWeightStrategy::kSimplexEdgeWeightStrategySteepestEdge" + SIMPLEX_EDGE_WEIGHT_STRATEGY_STEEPEST_EDGE_UNIT_INITIAL "SimplexEdgeWeightStrategy::kSimplexEdgeWeightStrategySteepestEdgeUnitInitial" + SIMPLEX_EDGE_WEIGHT_STRATEGY_MAX "SimplexEdgeWeightStrategy::kSimplexEdgeWeightStrategyMax" = SIMPLEX_EDGE_WEIGHT_STRATEGY_STEEPEST_EDGE_UNIT_INITIAL + + cdef enum SimplexPriceStrategy: + SIMPLEX_PRICE_STRATEGY_MIN = 0 + SIMPLEX_PRICE_STRATEGY_COL = SIMPLEX_PRICE_STRATEGY_MIN + SIMPLEX_PRICE_STRATEGY_ROW + SIMPLEX_PRICE_STRATEGY_ROW_SWITCH + SIMPLEX_PRICE_STRATEGY_ROW_SWITCH_COL_SWITCH + SIMPLEX_PRICE_STRATEGY_MAX = SIMPLEX_PRICE_STRATEGY_ROW_SWITCH_COL_SWITCH + + cdef enum SimplexDualChuzcStrategy: + SIMPLEX_DUAL_CHUZC_STRATEGY_MIN = 0 + SIMPLEX_DUAL_CHUZC_STRATEGY_CHOOSE = SIMPLEX_DUAL_CHUZC_STRATEGY_MIN + SIMPLEX_DUAL_CHUZC_STRATEGY_QUAD + SIMPLEX_DUAL_CHUZC_STRATEGY_HEAP + SIMPLEX_DUAL_CHUZC_STRATEGY_BOTH + SIMPLEX_DUAL_CHUZC_STRATEGY_MAX = SIMPLEX_DUAL_CHUZC_STRATEGY_BOTH + + cdef enum InvertHint: + INVERT_HINT_NO = 0 + INVERT_HINT_UPDATE_LIMIT_REACHED + INVERT_HINT_SYNTHETIC_CLOCK_SAYS_INVERT + INVERT_HINT_POSSIBLY_OPTIMAL + INVERT_HINT_POSSIBLY_PRIMAL_UNBOUNDED + INVERT_HINT_POSSIBLY_DUAL_UNBOUNDED + INVERT_HINT_POSSIBLY_SINGULAR_BASIS + INVERT_HINT_PRIMAL_INFEASIBLE_IN_PRIMAL_SIMPLEX + INVERT_HINT_CHOOSE_COLUMN_FAIL + INVERT_HINT_Count + + cdef enum DualEdgeWeightMode: + DANTZIG "DualEdgeWeightMode::DANTZIG" = 0 + DEVEX "DualEdgeWeightMode::DEVEX" + STEEPEST_EDGE "DualEdgeWeightMode::STEEPEST_EDGE" + Count "DualEdgeWeightMode::Count" + + cdef enum PriceMode: + ROW "PriceMode::ROW" = 0 + COL "PriceMode::COL" + + const int PARALLEL_THREADS_DEFAULT + const int DUAL_TASKS_MIN_THREADS + const int DUAL_MULTI_MIN_THREADS + + const bool invert_if_row_out_negative + + const int NONBASIC_FLAG_TRUE + const int NONBASIC_FLAG_FALSE + + const int NONBASIC_MOVE_UP + const int NONBASIC_MOVE_DN + const int NONBASIC_MOVE_ZE diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/highs_c_api.pxd b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/highs_c_api.pxd new file mode 100644 index 0000000000000000000000000000000000000000..b7097caf30bcd298bd11f11fd8911f841eefbdde --- /dev/null +++ 
b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/highs_c_api.pxd @@ -0,0 +1,7 @@ +# cython: language_level=3 + +cdef extern from "highs_c_api.h" nogil: + int Highs_passLp(void* highs, int numcol, int numrow, int numnz, + double* colcost, double* collower, double* colupper, + double* rowlower, double* rowupper, + int* astart, int* aindex, double* avalue) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lbfgsb.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lbfgsb.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..e28140caec1a7873b565c4d2128f7aa80c51c946 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lbfgsb.cpython-310-x86_64-linux-gnu.so differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lbfgsb_py.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lbfgsb_py.py new file mode 100644 index 0000000000000000000000000000000000000000..42ad9038ef0ce4c29b0cf22c5c9d2a1c029827c3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lbfgsb_py.py @@ -0,0 +1,543 @@ +""" +Functions +--------- +.. autosummary:: + :toctree: generated/ + + fmin_l_bfgs_b + +""" + +## License for the Python wrapper +## ============================== + +## Copyright (c) 2004 David M. Cooke + +## Permission is hereby granted, free of charge, to any person obtaining a +## copy of this software and associated documentation files (the "Software"), +## to deal in the Software without restriction, including without limitation +## the rights to use, copy, modify, merge, publish, distribute, sublicense, +## and/or sell copies of the Software, and to permit persons to whom the +## Software is furnished to do so, subject to the following conditions: + +## The above copyright notice and this permission notice shall be included in +## all copies or substantial portions of the Software. + +## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +## FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +## DEALINGS IN THE SOFTWARE. + +## Modifications by Travis Oliphant and Enthought, Inc. for inclusion in SciPy + +import numpy as np +from numpy import array, asarray, float64, zeros +from . import _lbfgsb +from ._optimize import (MemoizeJac, OptimizeResult, _call_callback_maybe_halt, + _wrap_callback, _check_unknown_options, + _prepare_scalar_function) +from ._constraints import old_bound_to_new + +from scipy.sparse.linalg import LinearOperator + +__all__ = ['fmin_l_bfgs_b', 'LbfgsInvHessProduct'] + + +def fmin_l_bfgs_b(func, x0, fprime=None, args=(), + approx_grad=0, + bounds=None, m=10, factr=1e7, pgtol=1e-5, + epsilon=1e-8, + iprint=-1, maxfun=15000, maxiter=15000, disp=None, + callback=None, maxls=20): + """ + Minimize a function func using the L-BFGS-B algorithm. + + Parameters + ---------- + func : callable f(x,*args) + Function to minimize. + x0 : ndarray + Initial guess. + fprime : callable fprime(x,*args), optional + The gradient of `func`. 
If None, then `func` returns the function + value and the gradient (``f, g = func(x, *args)``), unless + `approx_grad` is True in which case `func` returns only ``f``. + args : sequence, optional + Arguments to pass to `func` and `fprime`. + approx_grad : bool, optional + Whether to approximate the gradient numerically (in which case + `func` returns only the function value). + bounds : list, optional + ``(min, max)`` pairs for each element in ``x``, defining + the bounds on that parameter. Use None or +-inf for one of ``min`` or + ``max`` when there is no bound in that direction. + m : int, optional + The maximum number of variable metric corrections + used to define the limited memory matrix. (The limited memory BFGS + method does not store the full hessian but uses this many terms in an + approximation to it.) + factr : float, optional + The iteration stops when + ``(f^k - f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= factr * eps``, + where ``eps`` is the machine precision, which is automatically + generated by the code. Typical values for `factr` are: 1e12 for + low accuracy; 1e7 for moderate accuracy; 10.0 for extremely + high accuracy. See Notes for relationship to `ftol`, which is exposed + (instead of `factr`) by the `scipy.optimize.minimize` interface to + L-BFGS-B. + pgtol : float, optional + The iteration will stop when + ``max{|proj g_i | i = 1, ..., n} <= pgtol`` + where ``proj g_i`` is the i-th component of the projected gradient. + epsilon : float, optional + Step size used when `approx_grad` is True, for numerically + calculating the gradient + iprint : int, optional + Controls the frequency of output. ``iprint < 0`` means no output; + ``iprint = 0`` print only one line at the last iteration; + ``0 < iprint < 99`` print also f and ``|proj g|`` every iprint iterations; + ``iprint = 99`` print details of every iteration except n-vectors; + ``iprint = 100`` print also the changes of active set and final x; + ``iprint > 100`` print details of every iteration including x and g. + disp : int, optional + If zero, then no output. If a positive number, then this over-rides + `iprint` (i.e., `iprint` gets the value of `disp`). + maxfun : int, optional + Maximum number of function evaluations. Note that this function + may violate the limit because of evaluating gradients by numerical + differentiation. + maxiter : int, optional + Maximum number of iterations. + callback : callable, optional + Called after each iteration, as ``callback(xk)``, where ``xk`` is the + current parameter vector. + maxls : int, optional + Maximum number of line search steps (per iteration). Default is 20. + + Returns + ------- + x : array_like + Estimated position of the minimum. + f : float + Value of `func` at the minimum. + d : dict + Information dictionary. + + * d['warnflag'] is + + - 0 if converged, + - 1 if too many function evaluations or too many iterations, + - 2 if stopped for another reason, given in d['task'] + + * d['grad'] is the gradient at the minimum (should be 0 ish) + * d['funcalls'] is the number of function calls made. + * d['nit'] is the number of iterations. + + See also + -------- + minimize: Interface to minimization algorithms for multivariate + functions. See the 'L-BFGS-B' `method` in particular. Note that the + `ftol` option is made available via that interface, while `factr` is + provided via this interface, where `factr` is the factor multiplying + the default machine floating-point precision to arrive at `ftol`: + ``ftol = factr * numpy.finfo(float).eps``. 
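    For orientation only (a small illustrative computation, not part of the
    SciPy API; it merely restates the conversion above)::

        import numpy as np

        factr = 1e7                            # the moderate-accuracy default used here
        ftol = factr * np.finfo(float).eps     # about 2.22e-9 with IEEE-754 doubles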
+ + Notes + ----- + License of L-BFGS-B (FORTRAN code): + + The version included here (in fortran code) is 3.0 + (released April 25, 2011). It was written by Ciyou Zhu, Richard Byrd, + and Jorge Nocedal. It carries the following + condition for use: + + This software is freely available, but we expect that all publications + describing work using this software, or all commercial products using it, + quote at least one of the references given below. This software is released + under the BSD License. + + References + ---------- + * R. H. Byrd, P. Lu and J. Nocedal. A Limited Memory Algorithm for Bound + Constrained Optimization, (1995), SIAM Journal on Scientific and + Statistical Computing, 16, 5, pp. 1190-1208. + * C. Zhu, R. H. Byrd and J. Nocedal. L-BFGS-B: Algorithm 778: L-BFGS-B, + FORTRAN routines for large scale bound constrained optimization (1997), + ACM Transactions on Mathematical Software, 23, 4, pp. 550 - 560. + * J.L. Morales and J. Nocedal. L-BFGS-B: Remark on Algorithm 778: L-BFGS-B, + FORTRAN routines for large scale bound constrained optimization (2011), + ACM Transactions on Mathematical Software, 38, 1. + + Examples + -------- + Solve a linear regression problem via `fmin_l_bfgs_b`. To do this, first we define + an objective function ``f(m, b) = (y - y_model)**2``, where `y` describes the + observations and `y_model` the prediction of the linear model as + ``y_model = m*x + b``. The bounds for the parameters, ``m`` and ``b``, are arbitrarily + chosen as ``(0,5)`` and ``(5,10)`` for this example. + + >>> import numpy as np + >>> from scipy.optimize import fmin_l_bfgs_b + >>> X = np.arange(0, 10, 1) + >>> M = 2 + >>> B = 3 + >>> Y = M * X + B + >>> def func(parameters, *args): + ... x = args[0] + ... y = args[1] + ... m, b = parameters + ... y_model = m*x + b + ... error = sum(np.power((y - y_model), 2)) + ... return error + + >>> initial_values = np.array([0.0, 1.0]) + + >>> x_opt, f_opt, info = fmin_l_bfgs_b(func, x0=initial_values, args=(X, Y), + ... approx_grad=True) + >>> x_opt, f_opt + array([1.99999999, 3.00000006]), 1.7746231151323805e-14 # may vary + + The optimized parameters in ``x_opt`` agree with the ground truth parameters + ``m`` and ``b``. Next, let us perform a bound constrained optimization using the `bounds` + parameter. + + >>> bounds = [(0, 5), (5, 10)] + >>> x_opt, f_opt, info = fmin_l_bfgs_b(func, x0=initial_values, args=(X, Y), + ...
approx_grad=True, bounds=bounds) + >>> x_opt, f_opt + array([1.65990508, 5.31649385]), 15.721334516453945 # may vary + """ + # handle fprime/approx_grad + if approx_grad: + fun = func + jac = None + elif fprime is None: + fun = MemoizeJac(func) + jac = fun.derivative + else: + fun = func + jac = fprime + + # build options + callback = _wrap_callback(callback) + opts = {'disp': disp, + 'iprint': iprint, + 'maxcor': m, + 'ftol': factr * np.finfo(float).eps, + 'gtol': pgtol, + 'eps': epsilon, + 'maxfun': maxfun, + 'maxiter': maxiter, + 'callback': callback, + 'maxls': maxls} + + res = _minimize_lbfgsb(fun, x0, args=args, jac=jac, bounds=bounds, + **opts) + d = {'grad': res['jac'], + 'task': res['message'], + 'funcalls': res['nfev'], + 'nit': res['nit'], + 'warnflag': res['status']} + f = res['fun'] + x = res['x'] + + return x, f, d + + +def _minimize_lbfgsb(fun, x0, args=(), jac=None, bounds=None, + disp=None, maxcor=10, ftol=2.2204460492503131e-09, + gtol=1e-5, eps=1e-8, maxfun=15000, maxiter=15000, + iprint=-1, callback=None, maxls=20, + finite_diff_rel_step=None, **unknown_options): + """ + Minimize a scalar function of one or more variables using the L-BFGS-B + algorithm. + + Options + ------- + disp : None or int + If `disp is None` (the default), then the supplied version of `iprint` + is used. If `disp is not None`, then it overrides the supplied version + of `iprint` with the behaviour you outlined. + maxcor : int + The maximum number of variable metric corrections used to + define the limited memory matrix. (The limited memory BFGS + method does not store the full hessian but uses this many terms + in an approximation to it.) + ftol : float + The iteration stops when ``(f^k - + f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= ftol``. + gtol : float + The iteration will stop when ``max{|proj g_i | i = 1, ..., n} + <= gtol`` where ``proj g_i`` is the i-th component of the + projected gradient. + eps : float or ndarray + If `jac is None` the absolute step size used for numerical + approximation of the jacobian via forward differences. + maxfun : int + Maximum number of function evaluations. Note that this function + may violate the limit because of evaluating gradients by numerical + differentiation. + maxiter : int + Maximum number of iterations. + iprint : int, optional + Controls the frequency of output. ``iprint < 0`` means no output; + ``iprint = 0`` print only one line at the last iteration; + ``0 < iprint < 99`` print also f and ``|proj g|`` every iprint iterations; + ``iprint = 99`` print details of every iteration except n-vectors; + ``iprint = 100`` print also the changes of active set and final x; + ``iprint > 100`` print details of every iteration including x and g. + maxls : int, optional + Maximum number of line search steps (per iteration). Default is 20. + finite_diff_rel_step : None or array_like, optional + If `jac in ['2-point', '3-point', 'cs']` the relative step size to + use for numerical approximation of the jacobian. The absolute step + size is computed as ``h = rel_step * sign(x) * max(1, abs(x))``, + possibly adjusted to fit into the bounds. For ``method='3-point'`` + the sign of `h` is ignored. If None (default) then step is selected + automatically. + + Notes + ----- + The option `ftol` is exposed via the `scipy.optimize.minimize` interface, + but calling `scipy.optimize.fmin_l_bfgs_b` directly exposes `factr`. The + relationship between the two is ``ftol = factr * numpy.finfo(float).eps``. 
+ I.e., `factr` multiplies the default machine floating-point precision to + arrive at `ftol`. + + """ + _check_unknown_options(unknown_options) + m = maxcor + pgtol = gtol + factr = ftol / np.finfo(float).eps + + x0 = asarray(x0).ravel() + n, = x0.shape + + # historically old-style bounds were/are expected by lbfgsb. + # That's still the case but we'll deal with new-style from here on, + # it's easier + if bounds is None: + pass + elif len(bounds) != n: + raise ValueError('length of x0 != length of bounds') + else: + bounds = np.array(old_bound_to_new(bounds)) + + # check bounds + if (bounds[0] > bounds[1]).any(): + raise ValueError( + "LBFGSB - one of the lower bounds is greater than an upper bound." + ) + + # initial vector must lie within the bounds. Otherwise ScalarFunction and + # approx_derivative will cause problems + x0 = np.clip(x0, bounds[0], bounds[1]) + + if disp is not None: + if disp == 0: + iprint = -1 + else: + iprint = disp + + # _prepare_scalar_function can use bounds=None to represent no bounds + sf = _prepare_scalar_function(fun, x0, jac=jac, args=args, epsilon=eps, + bounds=bounds, + finite_diff_rel_step=finite_diff_rel_step) + + func_and_grad = sf.fun_and_grad + + fortran_int = _lbfgsb.types.intvar.dtype + + nbd = zeros(n, fortran_int) + low_bnd = zeros(n, float64) + upper_bnd = zeros(n, float64) + bounds_map = {(-np.inf, np.inf): 0, + (1, np.inf): 1, + (1, 1): 2, + (-np.inf, 1): 3} + + if bounds is not None: + for i in range(0, n): + l, u = bounds[0, i], bounds[1, i] + if not np.isinf(l): + low_bnd[i] = l + l = 1 + if not np.isinf(u): + upper_bnd[i] = u + u = 1 + nbd[i] = bounds_map[l, u] + + if not maxls > 0: + raise ValueError('maxls must be positive.') + + x = array(x0, float64) + f = array(0.0, float64) + g = zeros((n,), float64) + wa = zeros(2*m*n + 5*n + 11*m*m + 8*m, float64) + iwa = zeros(3*n, fortran_int) + task = zeros(1, 'S60') + csave = zeros(1, 'S60') + lsave = zeros(4, fortran_int) + isave = zeros(44, fortran_int) + dsave = zeros(29, float64) + + task[:] = 'START' + + n_iterations = 0 + + while 1: + # g may become float32 if a user provides a function that calculates + # the Jacobian in float32 (see gh-18730). The underlying Fortran code + # expects float64, so upcast it + g = g.astype(np.float64) + # x, f, g, wa, iwa, task, csave, lsave, isave, dsave = \ + _lbfgsb.setulb(m, x, low_bnd, upper_bnd, nbd, f, g, factr, + pgtol, wa, iwa, task, iprint, csave, lsave, + isave, dsave, maxls) + task_str = task.tobytes() + if task_str.startswith(b'FG'): + # The minimization routine wants f and g at the current x. + # Note that interruptions due to maxfun are postponed + # until the completion of the current minimization iteration. + # Overwrite f and g: + f, g = func_and_grad(x) + elif task_str.startswith(b'NEW_X'): + # new iteration + n_iterations += 1 + + intermediate_result = OptimizeResult(x=x, fun=f) + if _call_callback_maybe_halt(callback, intermediate_result): + task[:] = 'STOP: CALLBACK REQUESTED HALT' + if n_iterations >= maxiter: + task[:] = 'STOP: TOTAL NO. of ITERATIONS REACHED LIMIT' + elif sf.nfev > maxfun: + task[:] = ('STOP: TOTAL NO. of f AND g EVALUATIONS ' + 'EXCEEDS LIMIT') + else: + break + + task_str = task.tobytes().strip(b'\x00').strip() + if task_str.startswith(b'CONV'): + warnflag = 0 + elif sf.nfev > maxfun or n_iterations >= maxiter: + warnflag = 1 + else: + warnflag = 2 + + # These two portions of the workspace are described in the mainlb + # subroutine in lbfgsb.f. See line 363. 
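+    # (Added clarification, inferred from the slicing below rather than from
+    # lbfgsb.f itself: the first m*n entries of `wa` hold the saved correction
+    # steps `s` and the next m*n entries hold the saved gradient differences
+    # `y`; only the most recent `n_bfgs_updates` rows are meaningful, and they
+    # seed the `LbfgsInvHessProduct` returned as ``hess_inv``.)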
+ s = wa[0: m*n].reshape(m, n) + y = wa[m*n: 2*m*n].reshape(m, n) + + # See lbfgsb.f line 160 for this portion of the workspace. + # isave(31) = the total number of BFGS updates prior the current iteration; + n_bfgs_updates = isave[30] + + n_corrs = min(n_bfgs_updates, maxcor) + hess_inv = LbfgsInvHessProduct(s[:n_corrs], y[:n_corrs]) + + task_str = task_str.decode() + return OptimizeResult(fun=f, jac=g, nfev=sf.nfev, + njev=sf.ngev, + nit=n_iterations, status=warnflag, message=task_str, + x=x, success=(warnflag == 0), hess_inv=hess_inv) + + +class LbfgsInvHessProduct(LinearOperator): + """Linear operator for the L-BFGS approximate inverse Hessian. + + This operator computes the product of a vector with the approximate inverse + of the Hessian of the objective function, using the L-BFGS limited + memory approximation to the inverse Hessian, accumulated during the + optimization. + + Objects of this class implement the ``scipy.sparse.linalg.LinearOperator`` + interface. + + Parameters + ---------- + sk : array_like, shape=(n_corr, n) + Array of `n_corr` most recent updates to the solution vector. + (See [1]). + yk : array_like, shape=(n_corr, n) + Array of `n_corr` most recent updates to the gradient. (See [1]). + + References + ---------- + .. [1] Nocedal, Jorge. "Updating quasi-Newton matrices with limited + storage." Mathematics of computation 35.151 (1980): 773-782. + + """ + + def __init__(self, sk, yk): + """Construct the operator.""" + if sk.shape != yk.shape or sk.ndim != 2: + raise ValueError('sk and yk must have matching shape, (n_corrs, n)') + n_corrs, n = sk.shape + + super().__init__(dtype=np.float64, shape=(n, n)) + + self.sk = sk + self.yk = yk + self.n_corrs = n_corrs + self.rho = 1 / np.einsum('ij,ij->i', sk, yk) + + def _matvec(self, x): + """Efficient matrix-vector multiply with the BFGS matrices. + + This calculation is described in Section (4) of [1]. + + Parameters + ---------- + x : ndarray + An array with shape (n,) or (n,1). + + Returns + ------- + y : ndarray + The matrix-vector product + + """ + s, y, n_corrs, rho = self.sk, self.yk, self.n_corrs, self.rho + q = np.array(x, dtype=self.dtype, copy=True) + if q.ndim == 2 and q.shape[1] == 1: + q = q.reshape(-1) + + alpha = np.empty(n_corrs) + + for i in range(n_corrs-1, -1, -1): + alpha[i] = rho[i] * np.dot(s[i], q) + q = q - alpha[i]*y[i] + + r = q + for i in range(n_corrs): + beta = rho[i] * np.dot(y[i], r) + r = r + s[i] * (alpha[i] - beta) + + return r + + def todense(self): + """Return a dense array representation of this operator. + + Returns + ------- + arr : ndarray, shape=(n, n) + An array with the same shape and containing + the same data represented by this `LinearOperator`. + + """ + s, y, n_corrs, rho = self.sk, self.yk, self.n_corrs, self.rho + I = np.eye(*self.shape, dtype=self.dtype) + Hk = I + + for i in range(n_corrs): + A1 = I - s[i][:, np.newaxis] * y[i][np.newaxis, :] * rho[i] + A2 = I - y[i][:, np.newaxis] * s[i][np.newaxis, :] * rho[i] + + Hk = np.dot(A1, np.dot(Hk, A2)) + (rho[i] * s[i][:, np.newaxis] * + s[i][np.newaxis, :]) + return Hk diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_linesearch.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_linesearch.py new file mode 100644 index 0000000000000000000000000000000000000000..bf6038d2b95b13f7a2f42558a0e68023fb127352 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_linesearch.py @@ -0,0 +1,897 @@ +""" +Functions +--------- +.. 
autosummary:: + :toctree: generated/ + + line_search_armijo + line_search_wolfe1 + line_search_wolfe2 + scalar_search_wolfe1 + scalar_search_wolfe2 + +""" +from warnings import warn + +from scipy.optimize import _minpack2 as minpack2 # noqa: F401 +from ._dcsrch import DCSRCH +import numpy as np + +__all__ = ['LineSearchWarning', 'line_search_wolfe1', 'line_search_wolfe2', + 'scalar_search_wolfe1', 'scalar_search_wolfe2', + 'line_search_armijo'] + +class LineSearchWarning(RuntimeWarning): + pass + + +def _check_c1_c2(c1, c2): + if not (0 < c1 < c2 < 1): + raise ValueError("'c1' and 'c2' do not satisfy" + "'0 < c1 < c2 < 1'.") + + +#------------------------------------------------------------------------------ +# Minpack's Wolfe line and scalar searches +#------------------------------------------------------------------------------ + +def line_search_wolfe1(f, fprime, xk, pk, gfk=None, + old_fval=None, old_old_fval=None, + args=(), c1=1e-4, c2=0.9, amax=50, amin=1e-8, + xtol=1e-14): + """ + As `scalar_search_wolfe1` but do a line search to direction `pk` + + Parameters + ---------- + f : callable + Function `f(x)` + fprime : callable + Gradient of `f` + xk : array_like + Current point + pk : array_like + Search direction + gfk : array_like, optional + Gradient of `f` at point `xk` + old_fval : float, optional + Value of `f` at point `xk` + old_old_fval : float, optional + Value of `f` at point preceding `xk` + + The rest of the parameters are the same as for `scalar_search_wolfe1`. + + Returns + ------- + stp, f_count, g_count, fval, old_fval + As in `line_search_wolfe1` + gval : array + Gradient of `f` at the final point + + Notes + ----- + Parameters `c1` and `c2` must satisfy ``0 < c1 < c2 < 1``. + + """ + if gfk is None: + gfk = fprime(xk, *args) + + gval = [gfk] + gc = [0] + fc = [0] + + def phi(s): + fc[0] += 1 + return f(xk + s*pk, *args) + + def derphi(s): + gval[0] = fprime(xk + s*pk, *args) + gc[0] += 1 + return np.dot(gval[0], pk) + + derphi0 = np.dot(gfk, pk) + + stp, fval, old_fval = scalar_search_wolfe1( + phi, derphi, old_fval, old_old_fval, derphi0, + c1=c1, c2=c2, amax=amax, amin=amin, xtol=xtol) + + return stp, fc[0], gc[0], fval, old_fval, gval[0] + + +def scalar_search_wolfe1(phi, derphi, phi0=None, old_phi0=None, derphi0=None, + c1=1e-4, c2=0.9, + amax=50, amin=1e-8, xtol=1e-14): + """ + Scalar function search for alpha that satisfies strong Wolfe conditions + + alpha > 0 is assumed to be a descent direction. + + Parameters + ---------- + phi : callable phi(alpha) + Function at point `alpha` + derphi : callable phi'(alpha) + Objective function derivative. Returns a scalar. + phi0 : float, optional + Value of phi at 0 + old_phi0 : float, optional + Value of phi at previous point + derphi0 : float, optional + Value derphi at 0 + c1 : float, optional + Parameter for Armijo condition rule. + c2 : float, optional + Parameter for curvature condition rule. + amax, amin : float, optional + Maximum and minimum step size + xtol : float, optional + Relative tolerance for an acceptable step. + + Returns + ------- + alpha : float + Step size, or None if no suitable step was found + phi : float + Value of `phi` at the new point `alpha` + phi0 : float + Value of `phi` at `alpha=0` + + Notes + ----- + Uses routine DCSRCH from MINPACK. + + Parameters `c1` and `c2` must satisfy ``0 < c1 < c2 < 1`` as described in [1]_. + + References + ---------- + + .. [1] Nocedal, J., & Wright, S. J. (2006). Numerical optimization. + In Springer Series in Operations Research and Financial Engineering. 
+ (Springer Series in Operations Research and Financial Engineering). + Springer Nature. + + """ + _check_c1_c2(c1, c2) + + if phi0 is None: + phi0 = phi(0.) + if derphi0 is None: + derphi0 = derphi(0.) + + if old_phi0 is not None and derphi0 != 0: + alpha1 = min(1.0, 1.01*2*(phi0 - old_phi0)/derphi0) + if alpha1 < 0: + alpha1 = 1.0 + else: + alpha1 = 1.0 + + maxiter = 100 + + dcsrch = DCSRCH(phi, derphi, c1, c2, xtol, amin, amax) + stp, phi1, phi0, task = dcsrch( + alpha1, phi0=phi0, derphi0=derphi0, maxiter=maxiter + ) + + return stp, phi1, phi0 + + +line_search = line_search_wolfe1 + + +#------------------------------------------------------------------------------ +# Pure-Python Wolfe line and scalar searches +#------------------------------------------------------------------------------ + +# Note: `line_search_wolfe2` is the public `scipy.optimize.line_search` + +def line_search_wolfe2(f, myfprime, xk, pk, gfk=None, old_fval=None, + old_old_fval=None, args=(), c1=1e-4, c2=0.9, amax=None, + extra_condition=None, maxiter=10): + """Find alpha that satisfies strong Wolfe conditions. + + Parameters + ---------- + f : callable f(x,*args) + Objective function. + myfprime : callable f'(x,*args) + Objective function gradient. + xk : ndarray + Starting point. + pk : ndarray + Search direction. The search direction must be a descent direction + for the algorithm to converge. + gfk : ndarray, optional + Gradient value for x=xk (xk being the current parameter + estimate). Will be recomputed if omitted. + old_fval : float, optional + Function value for x=xk. Will be recomputed if omitted. + old_old_fval : float, optional + Function value for the point preceding x=xk. + args : tuple, optional + Additional arguments passed to objective function. + c1 : float, optional + Parameter for Armijo condition rule. + c2 : float, optional + Parameter for curvature condition rule. + amax : float, optional + Maximum step size + extra_condition : callable, optional + A callable of the form ``extra_condition(alpha, x, f, g)`` + returning a boolean. Arguments are the proposed step ``alpha`` + and the corresponding ``x``, ``f`` and ``g`` values. The line search + accepts the value of ``alpha`` only if this + callable returns ``True``. If the callable returns ``False`` + for the step length, the algorithm will continue with + new iterates. The callable is only called for iterates + satisfying the strong Wolfe conditions. + maxiter : int, optional + Maximum number of iterations to perform. + + Returns + ------- + alpha : float or None + Alpha for which ``x_new = x0 + alpha * pk``, + or None if the line search algorithm did not converge. + fc : int + Number of function evaluations made. + gc : int + Number of gradient evaluations made. + new_fval : float or None + New function value ``f(x_new)=f(x0+alpha*pk)``, + or None if the line search algorithm did not converge. + old_fval : float + Old function value ``f(x0)``. + new_slope : float or None + The local slope along the search direction at the + new value ````, + or None if the line search algorithm did not converge. + + + Notes + ----- + Uses the line search algorithm to enforce strong Wolfe + conditions. See Wright and Nocedal, 'Numerical Optimization', + 1999, pp. 59-61. + + The search direction `pk` must be a descent direction (e.g. + ``-myfprime(xk)``) to find a step length that satisfies the strong Wolfe + conditions. If the search direction is not a descent direction (e.g. + ``myfprime(xk)``), then `alpha`, `new_fval`, and `new_slope` will be None. 
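    As a reading aid (an illustrative sketch, not part of the SciPy API), the
    two strong Wolfe conditions enforced by this search can be written as a
    small predicate in terms of ``phi(a) = f(xk + a*pk)`` and its slope::

        def satisfies_strong_wolfe(phi, derphi, alpha, c1=1e-4, c2=0.9):
            # sufficient decrease (Armijo) plus the strong curvature condition
            phi0, derphi0 = phi(0.0), derphi(0.0)
            armijo = phi(alpha) <= phi0 + c1 * alpha * derphi0
            curvature = abs(derphi(alpha)) <= c2 * abs(derphi0)
            return armijo and curvature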
+ + Examples + -------- + >>> import numpy as np + >>> from scipy.optimize import line_search + + A objective function and its gradient are defined. + + >>> def obj_func(x): + ... return (x[0])**2+(x[1])**2 + >>> def obj_grad(x): + ... return [2*x[0], 2*x[1]] + + We can find alpha that satisfies strong Wolfe conditions. + + >>> start_point = np.array([1.8, 1.7]) + >>> search_gradient = np.array([-1.0, -1.0]) + >>> line_search(obj_func, obj_grad, start_point, search_gradient) + (1.0, 2, 1, 1.1300000000000001, 6.13, [1.6, 1.4]) + + """ + fc = [0] + gc = [0] + gval = [None] + gval_alpha = [None] + + def phi(alpha): + fc[0] += 1 + return f(xk + alpha * pk, *args) + + fprime = myfprime + + def derphi(alpha): + gc[0] += 1 + gval[0] = fprime(xk + alpha * pk, *args) # store for later use + gval_alpha[0] = alpha + return np.dot(gval[0], pk) + + if gfk is None: + gfk = fprime(xk, *args) + derphi0 = np.dot(gfk, pk) + + if extra_condition is not None: + # Add the current gradient as argument, to avoid needless + # re-evaluation + def extra_condition2(alpha, phi): + if gval_alpha[0] != alpha: + derphi(alpha) + x = xk + alpha * pk + return extra_condition(alpha, x, phi, gval[0]) + else: + extra_condition2 = None + + alpha_star, phi_star, old_fval, derphi_star = scalar_search_wolfe2( + phi, derphi, old_fval, old_old_fval, derphi0, c1, c2, amax, + extra_condition2, maxiter=maxiter) + + if derphi_star is None: + warn('The line search algorithm did not converge', + LineSearchWarning, stacklevel=2) + else: + # derphi_star is a number (derphi) -- so use the most recently + # calculated gradient used in computing it derphi = gfk*pk + # this is the gradient at the next step no need to compute it + # again in the outer loop. + derphi_star = gval[0] + + return alpha_star, fc[0], gc[0], phi_star, old_fval, derphi_star + + +def scalar_search_wolfe2(phi, derphi, phi0=None, + old_phi0=None, derphi0=None, + c1=1e-4, c2=0.9, amax=None, + extra_condition=None, maxiter=10): + """Find alpha that satisfies strong Wolfe conditions. + + alpha > 0 is assumed to be a descent direction. + + Parameters + ---------- + phi : callable phi(alpha) + Objective scalar function. + derphi : callable phi'(alpha) + Objective function derivative. Returns a scalar. + phi0 : float, optional + Value of phi at 0. + old_phi0 : float, optional + Value of phi at previous point. + derphi0 : float, optional + Value of derphi at 0 + c1 : float, optional + Parameter for Armijo condition rule. + c2 : float, optional + Parameter for curvature condition rule. + amax : float, optional + Maximum step size. + extra_condition : callable, optional + A callable of the form ``extra_condition(alpha, phi_value)`` + returning a boolean. The line search accepts the value + of ``alpha`` only if this callable returns ``True``. + If the callable returns ``False`` for the step length, + the algorithm will continue with new iterates. + The callable is only called for iterates satisfying + the strong Wolfe conditions. + maxiter : int, optional + Maximum number of iterations to perform. + + Returns + ------- + alpha_star : float or None + Best alpha, or None if the line search algorithm did not converge. + phi_star : float + phi at alpha_star. + phi0 : float + phi at 0. + derphi_star : float or None + derphi at alpha_star, or None if the line search algorithm + did not converge. + + Notes + ----- + Uses the line search algorithm to enforce strong Wolfe + conditions. See Wright and Nocedal, 'Numerical Optimization', + 1999, pp. 59-61. 
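    Examples
    --------
    A minimal illustration (values traced from the implementation below) on the
    quadratic ``phi(a) = (a - 0.5)**2``, whose unconstrained minimizer already
    satisfies the strong Wolfe conditions:

    >>> from scipy.optimize._linesearch import scalar_search_wolfe2
    >>> phi = lambda a: (a - 0.5)**2
    >>> derphi = lambda a: 2.0 * (a - 0.5)
    >>> alpha, phi_a, phi0, derphi_a = scalar_search_wolfe2(phi, derphi)
    >>> alpha, phi_a
    (0.5, 0.0)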
+ + """ + _check_c1_c2(c1, c2) + + if phi0 is None: + phi0 = phi(0.) + + if derphi0 is None: + derphi0 = derphi(0.) + + alpha0 = 0 + if old_phi0 is not None and derphi0 != 0: + alpha1 = min(1.0, 1.01*2*(phi0 - old_phi0)/derphi0) + else: + alpha1 = 1.0 + + if alpha1 < 0: + alpha1 = 1.0 + + if amax is not None: + alpha1 = min(alpha1, amax) + + phi_a1 = phi(alpha1) + #derphi_a1 = derphi(alpha1) evaluated below + + phi_a0 = phi0 + derphi_a0 = derphi0 + + if extra_condition is None: + def extra_condition(alpha, phi): + return True + + for i in range(maxiter): + if alpha1 == 0 or (amax is not None and alpha0 > amax): + # alpha1 == 0: This shouldn't happen. Perhaps the increment has + # slipped below machine precision? + alpha_star = None + phi_star = phi0 + phi0 = old_phi0 + derphi_star = None + + if alpha1 == 0: + msg = 'Rounding errors prevent the line search from converging' + else: + msg = "The line search algorithm could not find a solution " + \ + "less than or equal to amax: %s" % amax + + warn(msg, LineSearchWarning, stacklevel=2) + break + + not_first_iteration = i > 0 + if (phi_a1 > phi0 + c1 * alpha1 * derphi0) or \ + ((phi_a1 >= phi_a0) and not_first_iteration): + alpha_star, phi_star, derphi_star = \ + _zoom(alpha0, alpha1, phi_a0, + phi_a1, derphi_a0, phi, derphi, + phi0, derphi0, c1, c2, extra_condition) + break + + derphi_a1 = derphi(alpha1) + if (abs(derphi_a1) <= -c2*derphi0): + if extra_condition(alpha1, phi_a1): + alpha_star = alpha1 + phi_star = phi_a1 + derphi_star = derphi_a1 + break + + if (derphi_a1 >= 0): + alpha_star, phi_star, derphi_star = \ + _zoom(alpha1, alpha0, phi_a1, + phi_a0, derphi_a1, phi, derphi, + phi0, derphi0, c1, c2, extra_condition) + break + + alpha2 = 2 * alpha1 # increase by factor of two on each iteration + if amax is not None: + alpha2 = min(alpha2, amax) + alpha0 = alpha1 + alpha1 = alpha2 + phi_a0 = phi_a1 + phi_a1 = phi(alpha1) + derphi_a0 = derphi_a1 + + else: + # stopping test maxiter reached + alpha_star = alpha1 + phi_star = phi_a1 + derphi_star = None + warn('The line search algorithm did not converge', + LineSearchWarning, stacklevel=2) + + return alpha_star, phi_star, phi0, derphi_star + + +def _cubicmin(a, fa, fpa, b, fb, c, fc): + """ + Finds the minimizer for a cubic polynomial that goes through the + points (a,fa), (b,fb), and (c,fc) with derivative at a of fpa. + + If no minimizer can be found, return None. + + """ + # f(x) = A *(x-a)^3 + B*(x-a)^2 + C*(x-a) + D + + with np.errstate(divide='raise', over='raise', invalid='raise'): + try: + C = fpa + db = b - a + dc = c - a + denom = (db * dc) ** 2 * (db - dc) + d1 = np.empty((2, 2)) + d1[0, 0] = dc ** 2 + d1[0, 1] = -db ** 2 + d1[1, 0] = -dc ** 3 + d1[1, 1] = db ** 3 + [A, B] = np.dot(d1, np.asarray([fb - fa - C * db, + fc - fa - C * dc]).flatten()) + A /= denom + B /= denom + radical = B * B - 3 * A * C + xmin = a + (-B + np.sqrt(radical)) / (3 * A) + except ArithmeticError: + return None + if not np.isfinite(xmin): + return None + return xmin + + +def _quadmin(a, fa, fpa, b, fb): + """ + Finds the minimizer for a quadratic polynomial that goes through + the points (a,fa), (b,fb) with derivative at a of fpa. 
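    Spelled out from the fit used in the implementation (an explanatory note
    derived from the code below, not from a separate reference): writing
    ``f(x) = B*(x - a)**2 + C*(x - a) + D``, the data give ``D = fa`` and
    ``C = fpa`` from the value and slope at ``a``, and
    ``B = (fb - D - C*(b - a)) / (b - a)**2`` from the value at ``b``; setting
    the derivative to zero gives the trial point ``xmin = a - C / (2*B)``.
    None is returned if the arithmetic fails or the result is not finite.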
+ + """ + # f(x) = B*(x-a)^2 + C*(x-a) + D + with np.errstate(divide='raise', over='raise', invalid='raise'): + try: + D = fa + C = fpa + db = b - a * 1.0 + B = (fb - D - C * db) / (db * db) + xmin = a - C / (2.0 * B) + except ArithmeticError: + return None + if not np.isfinite(xmin): + return None + return xmin + + +def _zoom(a_lo, a_hi, phi_lo, phi_hi, derphi_lo, + phi, derphi, phi0, derphi0, c1, c2, extra_condition): + """Zoom stage of approximate linesearch satisfying strong Wolfe conditions. + + Part of the optimization algorithm in `scalar_search_wolfe2`. + + Notes + ----- + Implements Algorithm 3.6 (zoom) in Wright and Nocedal, + 'Numerical Optimization', 1999, pp. 61. + + """ + + maxiter = 10 + i = 0 + delta1 = 0.2 # cubic interpolant check + delta2 = 0.1 # quadratic interpolant check + phi_rec = phi0 + a_rec = 0 + while True: + # interpolate to find a trial step length between a_lo and + # a_hi Need to choose interpolation here. Use cubic + # interpolation and then if the result is within delta * + # dalpha or outside of the interval bounded by a_lo or a_hi + # then use quadratic interpolation, if the result is still too + # close, then use bisection + + dalpha = a_hi - a_lo + if dalpha < 0: + a, b = a_hi, a_lo + else: + a, b = a_lo, a_hi + + # minimizer of cubic interpolant + # (uses phi_lo, derphi_lo, phi_hi, and the most recent value of phi) + # + # if the result is too close to the end points (or out of the + # interval), then use quadratic interpolation with phi_lo, + # derphi_lo and phi_hi if the result is still too close to the + # end points (or out of the interval) then use bisection + + if (i > 0): + cchk = delta1 * dalpha + a_j = _cubicmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi, + a_rec, phi_rec) + if (i == 0) or (a_j is None) or (a_j > b - cchk) or (a_j < a + cchk): + qchk = delta2 * dalpha + a_j = _quadmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi) + if (a_j is None) or (a_j > b-qchk) or (a_j < a+qchk): + a_j = a_lo + 0.5*dalpha + + # Check new value of a_j + + phi_aj = phi(a_j) + if (phi_aj > phi0 + c1*a_j*derphi0) or (phi_aj >= phi_lo): + phi_rec = phi_hi + a_rec = a_hi + a_hi = a_j + phi_hi = phi_aj + else: + derphi_aj = derphi(a_j) + if abs(derphi_aj) <= -c2*derphi0 and extra_condition(a_j, phi_aj): + a_star = a_j + val_star = phi_aj + valprime_star = derphi_aj + break + if derphi_aj*(a_hi - a_lo) >= 0: + phi_rec = phi_hi + a_rec = a_hi + a_hi = a_lo + phi_hi = phi_lo + else: + phi_rec = phi_lo + a_rec = a_lo + a_lo = a_j + phi_lo = phi_aj + derphi_lo = derphi_aj + i += 1 + if (i > maxiter): + # Failed to find a conforming step size + a_star = None + val_star = None + valprime_star = None + break + return a_star, val_star, valprime_star + + +#------------------------------------------------------------------------------ +# Armijo line and scalar searches +#------------------------------------------------------------------------------ + +def line_search_armijo(f, xk, pk, gfk, old_fval, args=(), c1=1e-4, alpha0=1): + """Minimize over alpha, the function ``f(xk+alpha pk)``. + + Parameters + ---------- + f : callable + Function to be minimized. + xk : array_like + Current point. + pk : array_like + Search direction. + gfk : array_like + Gradient of `f` at point `xk`. + old_fval : float + Value of `f` at point `xk`. + args : tuple, optional + Optional arguments. + c1 : float, optional + Value to control stopping criterion. + alpha0 : scalar, optional + Value of `alpha` at start of the optimization. 
+ + Returns + ------- + alpha + f_count + f_val_at_alpha + + Notes + ----- + Uses the interpolation algorithm (Armijo backtracking) as suggested by + Wright and Nocedal in 'Numerical Optimization', 1999, pp. 56-57 + + """ + xk = np.atleast_1d(xk) + fc = [0] + + def phi(alpha1): + fc[0] += 1 + return f(xk + alpha1*pk, *args) + + if old_fval is None: + phi0 = phi(0.) + else: + phi0 = old_fval # compute f(xk) -- done in past loop + + derphi0 = np.dot(gfk, pk) + alpha, phi1 = scalar_search_armijo(phi, phi0, derphi0, c1=c1, + alpha0=alpha0) + return alpha, fc[0], phi1 + + +def line_search_BFGS(f, xk, pk, gfk, old_fval, args=(), c1=1e-4, alpha0=1): + """ + Compatibility wrapper for `line_search_armijo` + """ + r = line_search_armijo(f, xk, pk, gfk, old_fval, args=args, c1=c1, + alpha0=alpha0) + return r[0], r[1], 0, r[2] + + +def scalar_search_armijo(phi, phi0, derphi0, c1=1e-4, alpha0=1, amin=0): + """Minimize over alpha, the function ``phi(alpha)``. + + Uses the interpolation algorithm (Armijo backtracking) as suggested by + Wright and Nocedal in 'Numerical Optimization', 1999, pp. 56-57 + + alpha > 0 is assumed to be a descent direction. + + Returns + ------- + alpha + phi1 + + """ + phi_a0 = phi(alpha0) + if phi_a0 <= phi0 + c1*alpha0*derphi0: + return alpha0, phi_a0 + + # Otherwise, compute the minimizer of a quadratic interpolant: + + alpha1 = -(derphi0) * alpha0**2 / 2.0 / (phi_a0 - phi0 - derphi0 * alpha0) + phi_a1 = phi(alpha1) + + if (phi_a1 <= phi0 + c1*alpha1*derphi0): + return alpha1, phi_a1 + + # Otherwise, loop with cubic interpolation until we find an alpha which + # satisfies the first Wolfe condition (since we are backtracking, we will + # assume that the value of alpha is not too small and satisfies the second + # condition. + + while alpha1 > amin: # we are assuming alpha>0 is a descent direction + factor = alpha0**2 * alpha1**2 * (alpha1-alpha0) + a = alpha0**2 * (phi_a1 - phi0 - derphi0*alpha1) - \ + alpha1**2 * (phi_a0 - phi0 - derphi0*alpha0) + a = a / factor + b = -alpha0**3 * (phi_a1 - phi0 - derphi0*alpha1) + \ + alpha1**3 * (phi_a0 - phi0 - derphi0*alpha0) + b = b / factor + + alpha2 = (-b + np.sqrt(abs(b**2 - 3 * a * derphi0))) / (3.0*a) + phi_a2 = phi(alpha2) + + if (phi_a2 <= phi0 + c1*alpha2*derphi0): + return alpha2, phi_a2 + + if (alpha1 - alpha2) > alpha1 / 2.0 or (1 - alpha2/alpha1) < 0.96: + alpha2 = alpha1 / 2.0 + + alpha0 = alpha1 + alpha1 = alpha2 + phi_a0 = phi_a1 + phi_a1 = phi_a2 + + # Failed to find a suitable step length + return None, phi_a1 + + +#------------------------------------------------------------------------------ +# Non-monotone line search for DF-SANE +#------------------------------------------------------------------------------ + +def _nonmonotone_line_search_cruz(f, x_k, d, prev_fs, eta, + gamma=1e-4, tau_min=0.1, tau_max=0.5): + """ + Nonmonotone backtracking line search as described in [1]_ + + Parameters + ---------- + f : callable + Function returning a tuple ``(f, F)`` where ``f`` is the value + of a merit function and ``F`` the residual. + x_k : ndarray + Initial position. + d : ndarray + Search direction. + prev_fs : float + List of previous merit function values. Should have ``len(prev_fs) <= M`` + where ``M`` is the nonmonotonicity window parameter. 
+ eta : float + Allowed merit function increase, see [1]_ + gamma, tau_min, tau_max : float, optional + Search parameters, see [1]_ + + Returns + ------- + alpha : float + Step length + xp : ndarray + Next position + fp : float + Merit function value at next position + Fp : ndarray + Residual at next position + + References + ---------- + [1] "Spectral residual method without gradient information for solving + large-scale nonlinear systems of equations." W. La Cruz, + J.M. Martinez, M. Raydan. Math. Comp. **75**, 1429 (2006). + + """ + f_k = prev_fs[-1] + f_bar = max(prev_fs) + + alpha_p = 1 + alpha_m = 1 + alpha = 1 + + while True: + xp = x_k + alpha_p * d + fp, Fp = f(xp) + + if fp <= f_bar + eta - gamma * alpha_p**2 * f_k: + alpha = alpha_p + break + + alpha_tp = alpha_p**2 * f_k / (fp + (2*alpha_p - 1)*f_k) + + xp = x_k - alpha_m * d + fp, Fp = f(xp) + + if fp <= f_bar + eta - gamma * alpha_m**2 * f_k: + alpha = -alpha_m + break + + alpha_tm = alpha_m**2 * f_k / (fp + (2*alpha_m - 1)*f_k) + + alpha_p = np.clip(alpha_tp, tau_min * alpha_p, tau_max * alpha_p) + alpha_m = np.clip(alpha_tm, tau_min * alpha_m, tau_max * alpha_m) + + return alpha, xp, fp, Fp + + +def _nonmonotone_line_search_cheng(f, x_k, d, f_k, C, Q, eta, + gamma=1e-4, tau_min=0.1, tau_max=0.5, + nu=0.85): + """ + Nonmonotone line search from [1] + + Parameters + ---------- + f : callable + Function returning a tuple ``(f, F)`` where ``f`` is the value + of a merit function and ``F`` the residual. + x_k : ndarray + Initial position. + d : ndarray + Search direction. + f_k : float + Initial merit function value. + C, Q : float + Control parameters. On the first iteration, give values + Q=1.0, C=f_k + eta : float + Allowed merit function increase, see [1]_ + nu, gamma, tau_min, tau_max : float, optional + Search parameters, see [1]_ + + Returns + ------- + alpha : float + Step length + xp : ndarray + Next position + fp : float + Merit function value at next position + Fp : ndarray + Residual at next position + C : float + New value for the control parameter C + Q : float + New value for the control parameter Q + + References + ---------- + .. [1] W. Cheng & D.-H. Li, ''A derivative-free nonmonotone line + search and its application to the spectral residual + method'', IMA J. Numer. Anal. 29, 814 (2009). + + """ + alpha_p = 1 + alpha_m = 1 + alpha = 1 + + while True: + xp = x_k + alpha_p * d + fp, Fp = f(xp) + + if fp <= C + eta - gamma * alpha_p**2 * f_k: + alpha = alpha_p + break + + alpha_tp = alpha_p**2 * f_k / (fp + (2*alpha_p - 1)*f_k) + + xp = x_k - alpha_m * d + fp, Fp = f(xp) + + if fp <= C + eta - gamma * alpha_m**2 * f_k: + alpha = -alpha_m + break + + alpha_tm = alpha_m**2 * f_k / (fp + (2*alpha_m - 1)*f_k) + + alpha_p = np.clip(alpha_tp, tau_min * alpha_p, tau_max * alpha_p) + alpha_m = np.clip(alpha_tm, tau_min * alpha_m, tau_max * alpha_m) + + # Update C and Q + Q_next = nu * Q + 1 + C = (nu * Q * (C + eta) + fp) / Q_next + Q = Q_next + + return alpha, xp, fp, Fp, C, Q diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_linprog.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_linprog.py new file mode 100644 index 0000000000000000000000000000000000000000..5deb51bd455857e9c3767bb13157bb9ab8a86a34 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_linprog.py @@ -0,0 +1,714 @@ +""" +A top-level linear programming interface. + +.. versionadded:: 0.15.0 + +Functions +--------- +.. 
autosummary:: + :toctree: generated/ + + linprog + linprog_verbose_callback + linprog_terse_callback + +""" + +import numpy as np + +from ._optimize import OptimizeResult, OptimizeWarning +from warnings import warn +from ._linprog_highs import _linprog_highs +from ._linprog_ip import _linprog_ip +from ._linprog_simplex import _linprog_simplex +from ._linprog_rs import _linprog_rs +from ._linprog_doc import (_linprog_highs_doc, _linprog_ip_doc, # noqa: F401 + _linprog_rs_doc, _linprog_simplex_doc, + _linprog_highs_ipm_doc, _linprog_highs_ds_doc) +from ._linprog_util import ( + _parse_linprog, _presolve, _get_Abc, _LPProblem, _autoscale, + _postsolve, _check_result, _display_summary) +from copy import deepcopy + +__all__ = ['linprog', 'linprog_verbose_callback', 'linprog_terse_callback'] + +__docformat__ = "restructuredtext en" + +LINPROG_METHODS = [ + 'simplex', 'revised simplex', 'interior-point', 'highs', 'highs-ds', 'highs-ipm' +] + + +def linprog_verbose_callback(res): + """ + A sample callback function demonstrating the linprog callback interface. + This callback produces detailed output to sys.stdout before each iteration + and after the final iteration of the simplex algorithm. + + Parameters + ---------- + res : A `scipy.optimize.OptimizeResult` consisting of the following fields: + + x : 1-D array + The independent variable vector which optimizes the linear + programming problem. + fun : float + Value of the objective function. + success : bool + True if the algorithm succeeded in finding an optimal solution. + slack : 1-D array + The values of the slack variables. Each slack variable corresponds + to an inequality constraint. If the slack is zero, then the + corresponding constraint is active. + con : 1-D array + The (nominally zero) residuals of the equality constraints, that is, + ``b - A_eq @ x`` + phase : int + The phase of the optimization being executed. In phase 1 a basic + feasible solution is sought and the T has an additional row + representing an alternate objective function. + status : int + An integer representing the exit status of the optimization:: + + 0 : Optimization terminated successfully + 1 : Iteration limit reached + 2 : Problem appears to be infeasible + 3 : Problem appears to be unbounded + 4 : Serious numerical difficulties encountered + + nit : int + The number of iterations performed. + message : str + A string descriptor of the exit status of the optimization. + """ + x = res['x'] + fun = res['fun'] + phase = res['phase'] + status = res['status'] + nit = res['nit'] + message = res['message'] + complete = res['complete'] + + saved_printoptions = np.get_printoptions() + np.set_printoptions(linewidth=500, + formatter={'float': lambda x: f"{x: 12.4f}"}) + if status: + print('--------- Simplex Early Exit -------\n') + print(f'The simplex method exited early with status {status:d}') + print(message) + elif complete: + print('--------- Simplex Complete --------\n') + print(f'Iterations required: {nit}') + else: + print(f'--------- Iteration {nit:d} ---------\n') + + if nit > 0: + if phase == 1: + print('Current Pseudo-Objective Value:') + else: + print('Current Objective Value:') + print('f = ', fun) + print() + print('Current Solution Vector:') + print('x = ', x) + print() + + np.set_printoptions(**saved_printoptions) + + +def linprog_terse_callback(res): + """ + A sample callback function demonstrating the linprog callback interface. 
+ This callback produces brief output to sys.stdout before each iteration + and after the final iteration of the simplex algorithm. + + Parameters + ---------- + res : A `scipy.optimize.OptimizeResult` consisting of the following fields: + + x : 1-D array + The independent variable vector which optimizes the linear + programming problem. + fun : float + Value of the objective function. + success : bool + True if the algorithm succeeded in finding an optimal solution. + slack : 1-D array + The values of the slack variables. Each slack variable corresponds + to an inequality constraint. If the slack is zero, then the + corresponding constraint is active. + con : 1-D array + The (nominally zero) residuals of the equality constraints, that is, + ``b - A_eq @ x``. + phase : int + The phase of the optimization being executed. In phase 1 a basic + feasible solution is sought and the T has an additional row + representing an alternate objective function. + status : int + An integer representing the exit status of the optimization:: + + 0 : Optimization terminated successfully + 1 : Iteration limit reached + 2 : Problem appears to be infeasible + 3 : Problem appears to be unbounded + 4 : Serious numerical difficulties encountered + + nit : int + The number of iterations performed. + message : str + A string descriptor of the exit status of the optimization. + """ + nit = res['nit'] + x = res['x'] + + if nit == 0: + print("Iter: X:") + print(f"{nit: <5d} ", end="") + print(x) + + +def linprog(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, + bounds=(0, None), method='highs', callback=None, + options=None, x0=None, integrality=None): + r""" + Linear programming: minimize a linear objective function subject to linear + equality and inequality constraints. + + Linear programming solves problems of the following form: + + .. math:: + + \min_x \ & c^T x \\ + \mbox{such that} \ & A_{ub} x \leq b_{ub},\\ + & A_{eq} x = b_{eq},\\ + & l \leq x \leq u , + + where :math:`x` is a vector of decision variables; :math:`c`, + :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and + :math:`A_{ub}` and :math:`A_{eq}` are matrices. + + Alternatively, that's: + + - minimize :: + + c @ x + + - such that :: + + A_ub @ x <= b_ub + A_eq @ x == b_eq + lb <= x <= ub + + Note that by default ``lb = 0`` and ``ub = None``. Other bounds can be + specified with ``bounds``. + + Parameters + ---------- + c : 1-D array + The coefficients of the linear objective function to be minimized. + A_ub : 2-D array, optional + The inequality constraint matrix. Each row of ``A_ub`` specifies the + coefficients of a linear inequality constraint on ``x``. + b_ub : 1-D array, optional + The inequality constraint vector. Each element represents an + upper bound on the corresponding value of ``A_ub @ x``. + A_eq : 2-D array, optional + The equality constraint matrix. Each row of ``A_eq`` specifies the + coefficients of a linear equality constraint on ``x``. + b_eq : 1-D array, optional + The equality constraint vector. Each element of ``A_eq @ x`` must equal + the corresponding element of ``b_eq``. + bounds : sequence, optional + A sequence of ``(min, max)`` pairs for each element in ``x``, defining + the minimum and maximum values of that decision variable. + If a single tuple ``(min, max)`` is provided, then ``min`` and ``max`` + will serve as bounds for all decision variables. + Use ``None`` to indicate that there is no bound. 
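A short sketch of wiring up the callback interface demonstrated by the two sample callbacks above; the problem data is made up, and a legacy method is used because, as noted below, the HiGHS solvers do not accept callbacks.

from scipy.optimize import OptimizeResult, linprog

def my_callback(res: OptimizeResult) -> None:
    # Called once per iteration with the current iterate and bookkeeping fields.
    print(f"phase={res.phase} nit={res.nit} status={res.status} fun={res.fun:.6g}")

c = [-1, 4]
A_ub = [[-3, 1], [1, 2]]
b_ub = [6, 4]
bounds = [(None, None), (-3, None)]

# 'interior-point' is deprecated but still accepts a callback; linprog_terse_callback
# or linprog_verbose_callback could be passed here instead of my_callback.
res = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=bounds,
              method='interior-point', callback=my_callback)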
For instance, the + default bound ``(0, None)`` means that all decision variables are + non-negative, and the pair ``(None, None)`` means no bounds at all, + i.e. all variables are allowed to be any real. + method : str, optional + The algorithm used to solve the standard form problem. + :ref:`'highs' ` (default), + :ref:`'highs-ds' `, + :ref:`'highs-ipm' `, + :ref:`'interior-point' ` (legacy), + :ref:`'revised simplex' ` (legacy), + and + :ref:`'simplex' ` (legacy) are supported. + The legacy methods are deprecated and will be removed in SciPy 1.11.0. + callback : callable, optional + If a callback function is provided, it will be called at least once per + iteration of the algorithm. The callback function must accept a single + `scipy.optimize.OptimizeResult` consisting of the following fields: + + x : 1-D array + The current solution vector. + fun : float + The current value of the objective function ``c @ x``. + success : bool + ``True`` when the algorithm has completed successfully. + slack : 1-D array + The (nominally positive) values of the slack, + ``b_ub - A_ub @ x``. + con : 1-D array + The (nominally zero) residuals of the equality constraints, + ``b_eq - A_eq @ x``. + phase : int + The phase of the algorithm being executed. + status : int + An integer representing the status of the algorithm. + + ``0`` : Optimization proceeding nominally. + + ``1`` : Iteration limit reached. + + ``2`` : Problem appears to be infeasible. + + ``3`` : Problem appears to be unbounded. + + ``4`` : Numerical difficulties encountered. + + nit : int + The current iteration number. + message : str + A string descriptor of the algorithm status. + + Callback functions are not currently supported by the HiGHS methods. + + options : dict, optional + A dictionary of solver options. All methods accept the following + options: + + maxiter : int + Maximum number of iterations to perform. + Default: see method-specific documentation. + disp : bool + Set to ``True`` to print convergence messages. + Default: ``False``. + presolve : bool + Set to ``False`` to disable automatic presolve. + Default: ``True``. + + All methods except the HiGHS solvers also accept: + + tol : float + A tolerance which determines when a residual is "close enough" to + zero to be considered exactly zero. + autoscale : bool + Set to ``True`` to automatically perform equilibration. + Consider using this option if the numerical values in the + constraints are separated by several orders of magnitude. + Default: ``False``. + rr : bool + Set to ``False`` to disable automatic redundancy removal. + Default: ``True``. + rr_method : string + Method used to identify and remove redundant rows from the + equality constraint matrix after presolve. For problems with + dense input, the available methods for redundancy removal are: + + "SVD": + Repeatedly performs singular value decomposition on + the matrix, detecting redundant rows based on nonzeros + in the left singular vectors that correspond with + zero singular values. May be fast when the matrix is + nearly full rank. + "pivot": + Uses the algorithm presented in [5]_ to identify + redundant rows. + "ID": + Uses a randomized interpolative decomposition. + Identifies columns of the matrix transpose not used in + a full-rank interpolative decomposition of the matrix. + None: + Uses "svd" if the matrix is nearly full rank, that is, + the difference between the matrix rank and the number + of rows is less than five. If not, uses "pivot". 
The + behavior of this default is subject to change without + prior notice. + + Default: None. + For problems with sparse input, this option is ignored, and the + pivot-based algorithm presented in [5]_ is used. + + For method-specific options, see + :func:`show_options('linprog') `. + + x0 : 1-D array, optional + Guess values of the decision variables, which will be refined by + the optimization algorithm. This argument is currently used only by the + 'revised simplex' method, and can only be used if `x0` represents a + basic feasible solution. + + integrality : 1-D array or int, optional + Indicates the type of integrality constraint on each decision variable. + + ``0`` : Continuous variable; no integrality constraint. + + ``1`` : Integer variable; decision variable must be an integer + within `bounds`. + + ``2`` : Semi-continuous variable; decision variable must be within + `bounds` or take value ``0``. + + ``3`` : Semi-integer variable; decision variable must be an integer + within `bounds` or take value ``0``. + + By default, all variables are continuous. + + For mixed integrality constraints, supply an array of shape `c.shape`. + To infer a constraint on each decision variable from shorter inputs, + the argument will be broadcasted to `c.shape` using `np.broadcast_to`. + + This argument is currently used only by the ``'highs'`` method and + ignored otherwise. + + Returns + ------- + res : OptimizeResult + A :class:`scipy.optimize.OptimizeResult` consisting of the fields + below. Note that the return types of the fields may depend on whether + the optimization was successful, therefore it is recommended to check + `OptimizeResult.status` before relying on the other fields: + + x : 1-D array + The values of the decision variables that minimizes the + objective function while satisfying the constraints. + fun : float + The optimal value of the objective function ``c @ x``. + slack : 1-D array + The (nominally positive) values of the slack variables, + ``b_ub - A_ub @ x``. + con : 1-D array + The (nominally zero) residuals of the equality constraints, + ``b_eq - A_eq @ x``. + success : bool + ``True`` when the algorithm succeeds in finding an optimal + solution. + status : int + An integer representing the exit status of the algorithm. + + ``0`` : Optimization terminated successfully. + + ``1`` : Iteration limit reached. + + ``2`` : Problem appears to be infeasible. + + ``3`` : Problem appears to be unbounded. + + ``4`` : Numerical difficulties encountered. + + nit : int + The total number of iterations performed in all phases. + message : str + A string descriptor of the exit status of the algorithm. + + See Also + -------- + show_options : Additional options accepted by the solvers. + + Notes + ----- + This section describes the available solvers that can be selected by the + 'method' parameter. + + `'highs-ds'` and + `'highs-ipm'` are interfaces to the + HiGHS simplex and interior-point method solvers [13]_, respectively. + `'highs'` (default) chooses between + the two automatically. These are the fastest linear + programming solvers in SciPy, especially for large, sparse problems; + which of these two is faster is problem-dependent. + The other solvers (`'interior-point'`, `'revised simplex'`, and + `'simplex'`) are legacy methods and will be removed in SciPy 1.11.0. + + Method *highs-ds* is a wrapper of the C++ high performance dual + revised simplex implementation (HSOL) [13]_, [14]_. 
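As a sketch of the ``integrality`` argument described above (the data is illustrative; integer support requires ``method='highs'``):

from scipy.optimize import linprog

c = [-1, -2]                       # maximize x0 + 2*x1 by minimizing its negation
A_ub = [[1, 1]]
b_ub = [3.5]                       # x0 + x1 <= 3.5
bounds = [(0, None), (0, 2)]
integrality = [1, 0]               # x0 must be an integer, x1 remains continuous

res = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=bounds,
              integrality=integrality, method='highs')
print(res.x, res.fun)              # x0 comes back integral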
Method *highs-ipm* + is a wrapper of a C++ implementation of an **i**\ nterior-\ **p**\ oint + **m**\ ethod [13]_; it features a crossover routine, so it is as accurate + as a simplex solver. Method *highs* chooses between the two automatically. + For new code involving `linprog`, we recommend explicitly choosing one of + these three method values. + + .. versionadded:: 1.6.0 + + Method *interior-point* uses the primal-dual path following algorithm + as outlined in [4]_. This algorithm supports sparse constraint matrices and + is typically faster than the simplex methods, especially for large, sparse + problems. Note, however, that the solution returned may be slightly less + accurate than those of the simplex methods and will not, in general, + correspond with a vertex of the polytope defined by the constraints. + + .. versionadded:: 1.0.0 + + Method *revised simplex* uses the revised simplex method as described in + [9]_, except that a factorization [11]_ of the basis matrix, rather than + its inverse, is efficiently maintained and used to solve the linear systems + at each iteration of the algorithm. + + .. versionadded:: 1.3.0 + + Method *simplex* uses a traditional, full-tableau implementation of + Dantzig's simplex algorithm [1]_, [2]_ (*not* the + Nelder-Mead simplex). This algorithm is included for backwards + compatibility and educational purposes. + + .. versionadded:: 0.15.0 + + Before applying *interior-point*, *revised simplex*, or *simplex*, + a presolve procedure based on [8]_ attempts + to identify trivial infeasibilities, trivial unboundedness, and potential + problem simplifications. Specifically, it checks for: + + - rows of zeros in ``A_eq`` or ``A_ub``, representing trivial constraints; + - columns of zeros in ``A_eq`` `and` ``A_ub``, representing unconstrained + variables; + - column singletons in ``A_eq``, representing fixed variables; and + - column singletons in ``A_ub``, representing simple bounds. + + If presolve reveals that the problem is unbounded (e.g. an unconstrained + and unbounded variable has negative cost) or infeasible (e.g., a row of + zeros in ``A_eq`` corresponds with a nonzero in ``b_eq``), the solver + terminates with the appropriate status code. Note that presolve terminates + as soon as any sign of unboundedness is detected; consequently, a problem + may be reported as unbounded when in reality the problem is infeasible + (but infeasibility has not been detected yet). Therefore, if it is + important to know whether the problem is actually infeasible, solve the + problem again with option ``presolve=False``. + + If neither infeasibility nor unboundedness are detected in a single pass + of the presolve, bounds are tightened where possible and fixed + variables are removed from the problem. Then, linearly dependent rows + of the ``A_eq`` matrix are removed, (unless they represent an + infeasibility) to avoid numerical difficulties in the primary solve + routine. Note that rows that are nearly linearly dependent (within a + prescribed tolerance) may also be removed, which can change the optimal + solution in rare cases. If this is a concern, eliminate redundancy from + your problem formulation and run with option ``rr=False`` or + ``presolve=False``. 
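A minimal sketch of the re-solve pattern recommended above, with placeholder data; whether the first solve actually reports unboundedness depends on the problem, and ``rr=False`` could be passed the same way if nearly dependent rows are the concern.

from scipy.optimize import linprog

# Placeholder data; substitute the arrays from your own model.
c = [-1, -1]
A_ub = [[-1, 1]]
b_ub = [0]

res = linprog(c, A_ub=A_ub, b_ub=b_ub, method='interior-point')
if res.status == 3:   # reported as unbounded
    # Re-run with presolve disabled to check whether the problem is in fact
    # infeasible rather than unbounded, as discussed above.
    res = linprog(c, A_ub=A_ub, b_ub=b_ub, method='interior-point',
                  options={'presolve': False})
print(res.status, res.message)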
+ + Several potential improvements can be made here: additional presolve + checks outlined in [8]_ should be implemented, the presolve routine should + be run multiple times (until no further simplifications can be made), and + more of the efficiency improvements from [5]_ should be implemented in the + redundancy removal routines. + + After presolve, the problem is transformed to standard form by converting + the (tightened) simple bounds to upper bound constraints, introducing + non-negative slack variables for inequality constraints, and expressing + unbounded variables as the difference between two non-negative variables. + Optionally, the problem is automatically scaled via equilibration [12]_. + The selected algorithm solves the standard form problem, and a + postprocessing routine converts the result to a solution to the original + problem. + + References + ---------- + .. [1] Dantzig, George B., Linear programming and extensions. Rand + Corporation Research Study Princeton Univ. Press, Princeton, NJ, + 1963 + .. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to + Mathematical Programming", McGraw-Hill, Chapter 4. + .. [3] Bland, Robert G. New finite pivoting rules for the simplex method. + Mathematics of Operations Research (2), 1977: pp. 103-107. + .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point + optimizer for linear programming: an implementation of the + homogeneous algorithm." High performance optimization. Springer US, + 2000. 197-232. + .. [5] Andersen, Erling D. "Finding all linearly dependent rows in + large-scale linear programming." Optimization Methods and Software + 6.3 (1995): 219-227. + .. [6] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear + Programming based on Newton's Method." Unpublished Course Notes, + March 2004. Available 2/25/2017 at + https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf + .. [7] Fourer, Robert. "Solving Linear Programs by Interior-Point Methods." + Unpublished Course Notes, August 26, 2005. Available 2/25/2017 at + http://www.4er.org/CourseNotes/Book%20B/B-III.pdf + .. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear + programming." Mathematical Programming 71.2 (1995): 221-245. + .. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear + programming." Athena Scientific 1 (1997): 997. + .. [10] Andersen, Erling D., et al. Implementation of interior point + methods for large scale linear programming. HEC/Universite de + Geneve, 1996. + .. [11] Bartels, Richard H. "A stabilization of the simplex method." + Journal in Numerische Mathematik 16.5 (1971): 414-434. + .. [12] Tomlin, J. A. "On scaling linear programming problems." + Mathematical Programming Study 4 (1975): 146-166. + .. [13] Huangfu, Q., Galabova, I., Feldmeier, M., and Hall, J. A. J. + "HiGHS - high performance software for linear optimization." + https://highs.dev/ + .. [14] Huangfu, Q. and Hall, J. A. J. "Parallelizing the dual revised + simplex method." Mathematical Programming Computation, 10 (1), + 119-142, 2018. DOI: 10.1007/s12532-017-0130-5 + + Examples + -------- + Consider the following problem: + + .. math:: + + \min_{x_0, x_1} \ -x_0 + 4x_1 & \\ + \mbox{such that} \ -3x_0 + x_1 & \leq 6,\\ + -x_0 - 2x_1 & \geq -4,\\ + x_1 & \geq -3. + + The problem is not presented in the form accepted by `linprog`. 
This is + easily remedied by converting the "greater than" inequality + constraint to a "less than" inequality constraint by + multiplying both sides by a factor of :math:`-1`. Note also that the last + constraint is really the simple bound :math:`-3 \leq x_1 \leq \infty`. + Finally, since there are no bounds on :math:`x_0`, we must explicitly + specify the bounds :math:`-\infty \leq x_0 \leq \infty`, as the + default is for variables to be non-negative. After collecting coeffecients + into arrays and tuples, the input for this problem is: + + >>> from scipy.optimize import linprog + >>> c = [-1, 4] + >>> A = [[-3, 1], [1, 2]] + >>> b = [6, 4] + >>> x0_bounds = (None, None) + >>> x1_bounds = (-3, None) + >>> res = linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds]) + >>> res.fun + -22.0 + >>> res.x + array([10., -3.]) + >>> res.message + 'Optimization terminated successfully. (HiGHS Status 7: Optimal)' + + The marginals (AKA dual values / shadow prices / Lagrange multipliers) + and residuals (slacks) are also available. + + >>> res.ineqlin + residual: [ 3.900e+01 0.000e+00] + marginals: [-0.000e+00 -1.000e+00] + + For example, because the marginal associated with the second inequality + constraint is -1, we expect the optimal value of the objective function + to decrease by ``eps`` if we add a small amount ``eps`` to the right hand + side of the second inequality constraint: + + >>> eps = 0.05 + >>> b[1] += eps + >>> linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds]).fun + -22.05 + + Also, because the residual on the first inequality constraint is 39, we + can decrease the right hand side of the first constraint by 39 without + affecting the optimal solution. + + >>> b = [6, 4] # reset to original values + >>> b[0] -= 39 + >>> linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds]).fun + -22.0 + + """ + + meth = method.lower() + methods = {"highs", "highs-ds", "highs-ipm", + "simplex", "revised simplex", "interior-point"} + + if meth not in methods: + raise ValueError(f"Unknown solver '{method}'") + + if x0 is not None and meth != "revised simplex": + warning_message = "x0 is used only when method is 'revised simplex'. " + warn(warning_message, OptimizeWarning, stacklevel=2) + + if np.any(integrality) and not meth == "highs": + integrality = None + warning_message = ("Only `method='highs'` supports integer " + "constraints. Ignoring `integrality`.") + warn(warning_message, OptimizeWarning, stacklevel=2) + elif np.any(integrality): + integrality = np.broadcast_to(integrality, np.shape(c)) + + lp = _LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality) + lp, solver_options = _parse_linprog(lp, options, meth) + tol = solver_options.get('tol', 1e-9) + + # Give unmodified problem to HiGHS + if meth.startswith('highs'): + if callback is not None: + raise NotImplementedError("HiGHS solvers do not support the " + "callback interface.") + highs_solvers = {'highs-ipm': 'ipm', 'highs-ds': 'simplex', + 'highs': None} + + sol = _linprog_highs(lp, solver=highs_solvers[meth], + **solver_options) + sol['status'], sol['message'] = ( + _check_result(sol['x'], sol['fun'], sol['status'], sol['slack'], + sol['con'], lp.bounds, tol, sol['message'], + integrality)) + sol['success'] = sol['status'] == 0 + return OptimizeResult(sol) + + warn(f"`method='{meth}'` is deprecated and will be removed in SciPy " + "1.11.0. Please use one of the HiGHS solvers (e.g. 
" + "`method='highs'`) in new code.", DeprecationWarning, stacklevel=2) + + iteration = 0 + complete = False # will become True if solved in presolve + undo = [] + + # Keep the original arrays to calculate slack/residuals for original + # problem. + lp_o = deepcopy(lp) + + # Solve trivial problem, eliminate variables, tighten bounds, etc. + rr_method = solver_options.pop('rr_method', None) # need to pop these; + rr = solver_options.pop('rr', True) # they're not passed to methods + c0 = 0 # we might get a constant term in the objective + if solver_options.pop('presolve', True): + (lp, c0, x, undo, complete, status, message) = _presolve(lp, rr, + rr_method, + tol) + + C, b_scale = 1, 1 # for trivial unscaling if autoscale is not used + postsolve_args = (lp_o._replace(bounds=lp.bounds), undo, C, b_scale) + + if not complete: + A, b, c, c0, x0 = _get_Abc(lp, c0) + if solver_options.pop('autoscale', False): + A, b, c, x0, C, b_scale = _autoscale(A, b, c, x0) + postsolve_args = postsolve_args[:-2] + (C, b_scale) + + if meth == 'simplex': + x, status, message, iteration = _linprog_simplex( + c, c0=c0, A=A, b=b, callback=callback, + postsolve_args=postsolve_args, **solver_options) + elif meth == 'interior-point': + x, status, message, iteration = _linprog_ip( + c, c0=c0, A=A, b=b, callback=callback, + postsolve_args=postsolve_args, **solver_options) + elif meth == 'revised simplex': + x, status, message, iteration = _linprog_rs( + c, c0=c0, A=A, b=b, x0=x0, callback=callback, + postsolve_args=postsolve_args, **solver_options) + + # Eliminate artificial variables, re-introduce presolved variables, etc. + disp = solver_options.get('disp', False) + + x, fun, slack, con = _postsolve(x, postsolve_args, complete) + + status, message = _check_result(x, fun, status, slack, con, lp_o.bounds, + tol, message, integrality) + + if disp: + _display_summary(message, status, fun, iteration) + + sol = { + 'x': x, + 'fun': fun, + 'slack': slack, + 'con': con, + 'status': status, + 'message': message, + 'nit': iteration, + 'success': status == 0} + + return OptimizeResult(sol) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_linprog_doc.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_linprog_doc.py new file mode 100644 index 0000000000000000000000000000000000000000..56c914134bdef816c8eb00eb4ab011475cd5b4ea --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_linprog_doc.py @@ -0,0 +1,1434 @@ +""" +Created on Sat Aug 22 19:49:17 2020 + +@author: matth +""" + + +def _linprog_highs_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, + bounds=None, method='highs', callback=None, + maxiter=None, disp=False, presolve=True, + time_limit=None, + dual_feasibility_tolerance=None, + primal_feasibility_tolerance=None, + ipm_optimality_tolerance=None, + simplex_dual_edge_weight_strategy=None, + mip_rel_gap=None, + **unknown_options): + r""" + Linear programming: minimize a linear objective function subject to linear + equality and inequality constraints using one of the HiGHS solvers. + + Linear programming solves problems of the following form: + + .. math:: + + \min_x \ & c^T x \\ + \mbox{such that} \ & A_{ub} x \leq b_{ub},\\ + & A_{eq} x = b_{eq},\\ + & l \leq x \leq u , + + where :math:`x` is a vector of decision variables; :math:`c`, + :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and + :math:`A_{ub}` and :math:`A_{eq}` are matrices. 
+ + Alternatively, that's: + + minimize:: + + c @ x + + such that:: + + A_ub @ x <= b_ub + A_eq @ x == b_eq + lb <= x <= ub + + Note that by default ``lb = 0`` and ``ub = None`` unless specified with + ``bounds``. + + Parameters + ---------- + c : 1-D array + The coefficients of the linear objective function to be minimized. + A_ub : 2-D array, optional + The inequality constraint matrix. Each row of ``A_ub`` specifies the + coefficients of a linear inequality constraint on ``x``. + b_ub : 1-D array, optional + The inequality constraint vector. Each element represents an + upper bound on the corresponding value of ``A_ub @ x``. + A_eq : 2-D array, optional + The equality constraint matrix. Each row of ``A_eq`` specifies the + coefficients of a linear equality constraint on ``x``. + b_eq : 1-D array, optional + The equality constraint vector. Each element of ``A_eq @ x`` must equal + the corresponding element of ``b_eq``. + bounds : sequence, optional + A sequence of ``(min, max)`` pairs for each element in ``x``, defining + the minimum and maximum values of that decision variable. Use ``None`` + to indicate that there is no bound. By default, bounds are + ``(0, None)`` (all decision variables are non-negative). + If a single tuple ``(min, max)`` is provided, then ``min`` and + ``max`` will serve as bounds for all decision variables. + method : str + + This is the method-specific documentation for 'highs', which chooses + automatically between + :ref:`'highs-ds' ` and + :ref:`'highs-ipm' `. + :ref:`'interior-point' ` (default), + :ref:`'revised simplex' `, and + :ref:`'simplex' ` (legacy) + are also available. + integrality : 1-D array or int, optional + Indicates the type of integrality constraint on each decision variable. + + ``0`` : Continuous variable; no integrality constraint. + + ``1`` : Integer variable; decision variable must be an integer + within `bounds`. + + ``2`` : Semi-continuous variable; decision variable must be within + `bounds` or take value ``0``. + + ``3`` : Semi-integer variable; decision variable must be an integer + within `bounds` or take value ``0``. + + By default, all variables are continuous. + + For mixed integrality constraints, supply an array of shape `c.shape`. + To infer a constraint on each decision variable from shorter inputs, + the argument will be broadcasted to `c.shape` using `np.broadcast_to`. + + This argument is currently used only by the ``'highs'`` method and + ignored otherwise. + + Options + ------- + maxiter : int + The maximum number of iterations to perform in either phase. + For :ref:`'highs-ipm' `, this does not + include the number of crossover iterations. Default is the largest + possible value for an ``int`` on the platform. + disp : bool (default: ``False``) + Set to ``True`` if indicators of optimization status are to be + printed to the console during optimization. + presolve : bool (default: ``True``) + Presolve attempts to identify trivial infeasibilities, + identify trivial unboundedness, and simplify the problem before + sending it to the main solver. It is generally recommended + to keep the default setting ``True``; set to ``False`` if + presolve is to be disabled. + time_limit : float + The maximum time in seconds allotted to solve the problem; + default is the largest possible value for a ``double`` on the + platform. + dual_feasibility_tolerance : double (default: 1e-07) + Dual feasibility tolerance for + :ref:`'highs-ds' `. 
+ The minimum of this and ``primal_feasibility_tolerance`` + is used for the feasibility tolerance of + :ref:`'highs-ipm' `. + primal_feasibility_tolerance : double (default: 1e-07) + Primal feasibility tolerance for + :ref:`'highs-ds' `. + The minimum of this and ``dual_feasibility_tolerance`` + is used for the feasibility tolerance of + :ref:`'highs-ipm' `. + ipm_optimality_tolerance : double (default: ``1e-08``) + Optimality tolerance for + :ref:`'highs-ipm' `. + Minimum allowable value is 1e-12. + simplex_dual_edge_weight_strategy : str (default: None) + Strategy for simplex dual edge weights. The default, ``None``, + automatically selects one of the following. + + ``'dantzig'`` uses Dantzig's original strategy of choosing the most + negative reduced cost. + + ``'devex'`` uses the strategy described in [15]_. + + ``steepest`` uses the exact steepest edge strategy as described in + [16]_. + + ``'steepest-devex'`` begins with the exact steepest edge strategy + until the computation is too costly or inexact and then switches to + the devex method. + + Currently, ``None`` always selects ``'steepest-devex'``, but this + may change as new options become available. + mip_rel_gap : double (default: None) + Termination criterion for MIP solver: solver will terminate when the + gap between the primal objective value and the dual objective bound, + scaled by the primal objective value, is <= mip_rel_gap. + unknown_options : dict + Optional arguments not used by this particular solver. If + ``unknown_options`` is non-empty, a warning is issued listing + all unused options. + + Returns + ------- + res : OptimizeResult + A :class:`scipy.optimize.OptimizeResult` consisting of the fields: + + x : 1D array + The values of the decision variables that minimizes the + objective function while satisfying the constraints. + fun : float + The optimal value of the objective function ``c @ x``. + slack : 1D array + The (nominally positive) values of the slack, + ``b_ub - A_ub @ x``. + con : 1D array + The (nominally zero) residuals of the equality constraints, + ``b_eq - A_eq @ x``. + success : bool + ``True`` when the algorithm succeeds in finding an optimal + solution. + status : int + An integer representing the exit status of the algorithm. + + ``0`` : Optimization terminated successfully. + + ``1`` : Iteration or time limit reached. + + ``2`` : Problem appears to be infeasible. + + ``3`` : Problem appears to be unbounded. + + ``4`` : The HiGHS solver ran into a problem. + + message : str + A string descriptor of the exit status of the algorithm. + nit : int + The total number of iterations performed. + For the HiGHS simplex method, this includes iterations in all + phases. For the HiGHS interior-point method, this does not include + crossover iterations. + crossover_nit : int + The number of primal/dual pushes performed during the + crossover routine for the HiGHS interior-point method. + This is ``0`` for the HiGHS simplex method. + ineqlin : OptimizeResult + Solution and sensitivity information corresponding to the + inequality constraints, `b_ub`. A dictionary consisting of the + fields: + + residual : np.ndnarray + The (nominally positive) values of the slack variables, + ``b_ub - A_ub @ x``. This quantity is also commonly + referred to as "slack". + + marginals : np.ndarray + The sensitivity (partial derivative) of the objective + function with respect to the right-hand side of the + inequality constraints, `b_ub`. 
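A short sketch of passing the HiGHS options documented above through ``linprog``'s ``options`` dictionary; the data and option values are illustrative.

from scipy.optimize import linprog

c = [1, 2]
A_ub = [[-1, -1]]
b_ub = [-1]                         # x0 + x1 >= 1 rewritten as -x0 - x1 <= -1

res = linprog(c, A_ub=A_ub, b_ub=b_ub, method='highs',
              options={'time_limit': 10.0,                    # seconds
                       'primal_feasibility_tolerance': 1e-8,
                       'dual_feasibility_tolerance': 1e-8,
                       'presolve': True})
print(res.status, res.fun)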
+ + eqlin : OptimizeResult + Solution and sensitivity information corresponding to the + equality constraints, `b_eq`. A dictionary consisting of the + fields: + + residual : np.ndarray + The (nominally zero) residuals of the equality constraints, + ``b_eq - A_eq @ x``. + + marginals : np.ndarray + The sensitivity (partial derivative) of the objective + function with respect to the right-hand side of the + equality constraints, `b_eq`. + + lower, upper : OptimizeResult + Solution and sensitivity information corresponding to the + lower and upper bounds on decision variables, `bounds`. + + residual : np.ndarray + The (nominally positive) values of the quantity + ``x - lb`` (lower) or ``ub - x`` (upper). + + marginals : np.ndarray + The sensitivity (partial derivative) of the objective + function with respect to the lower and upper + `bounds`. + + Notes + ----- + + Method :ref:`'highs-ds' ` is a wrapper + of the C++ high performance dual revised simplex implementation (HSOL) + [13]_, [14]_. Method :ref:`'highs-ipm' ` + is a wrapper of a C++ implementation of an **i**\ nterior-\ **p**\ oint + **m**\ ethod [13]_; it features a crossover routine, so it is as accurate + as a simplex solver. Method :ref:`'highs' ` chooses + between the two automatically. For new code involving `linprog`, we + recommend explicitly choosing one of these three method values instead of + :ref:`'interior-point' ` (default), + :ref:`'revised simplex' `, and + :ref:`'simplex' ` (legacy). + + The result fields `ineqlin`, `eqlin`, `lower`, and `upper` all contain + `marginals`, or partial derivatives of the objective function with respect + to the right-hand side of each constraint. These partial derivatives are + also referred to as "Lagrange multipliers", "dual values", and + "shadow prices". The sign convention of `marginals` is opposite that + of Lagrange multipliers produced by many nonlinear solvers. + + References + ---------- + .. [13] Huangfu, Q., Galabova, I., Feldmeier, M., and Hall, J. A. J. + "HiGHS - high performance software for linear optimization." + https://highs.dev/ + .. [14] Huangfu, Q. and Hall, J. A. J. "Parallelizing the dual revised + simplex method." Mathematical Programming Computation, 10 (1), + 119-142, 2018. DOI: 10.1007/s12532-017-0130-5 + .. [15] Harris, Paula MJ. "Pivot selection methods of the Devex LP code." + Mathematical programming 5.1 (1973): 1-28. + .. [16] Goldfarb, Donald, and John Ker Reid. "A practicable steepest-edge + simplex algorithm." Mathematical Programming 12.1 (1977): 361-371. + """ + pass + + +def _linprog_highs_ds_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, + bounds=None, method='highs-ds', callback=None, + maxiter=None, disp=False, presolve=True, + time_limit=None, + dual_feasibility_tolerance=None, + primal_feasibility_tolerance=None, + simplex_dual_edge_weight_strategy=None, + **unknown_options): + r""" + Linear programming: minimize a linear objective function subject to linear + equality and inequality constraints using the HiGHS dual simplex solver. + + Linear programming solves problems of the following form: + + .. math:: + + \min_x \ & c^T x \\ + \mbox{such that} \ & A_{ub} x \leq b_{ub},\\ + & A_{eq} x = b_{eq},\\ + & l \leq x \leq u , + + where :math:`x` is a vector of decision variables; :math:`c`, + :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and + :math:`A_{ub}` and :math:`A_{eq}` are matrices. 
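To illustrate reading the sensitivity fields described above, a sketch with made-up data; the field names follow the documented result object.

from scipy.optimize import linprog

c = [2, 3]
A_eq = [[1, 1]]
b_eq = [4]                          # x0 + x1 == 4
res = linprog(c, A_eq=A_eq, b_eq=b_eq, bounds=[(0, None), (0, None)],
              method='highs')

print(res.eqlin.residual)           # nominally zero: b_eq - A_eq @ x
print(res.eqlin.marginals)          # sensitivity of res.fun to changes in b_eq
print(res.lower.marginals)          # sensitivities with respect to the lower bounds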
+ + Alternatively, that's: + + minimize:: + + c @ x + + such that:: + + A_ub @ x <= b_ub + A_eq @ x == b_eq + lb <= x <= ub + + Note that by default ``lb = 0`` and ``ub = None`` unless specified with + ``bounds``. + + Parameters + ---------- + c : 1-D array + The coefficients of the linear objective function to be minimized. + A_ub : 2-D array, optional + The inequality constraint matrix. Each row of ``A_ub`` specifies the + coefficients of a linear inequality constraint on ``x``. + b_ub : 1-D array, optional + The inequality constraint vector. Each element represents an + upper bound on the corresponding value of ``A_ub @ x``. + A_eq : 2-D array, optional + The equality constraint matrix. Each row of ``A_eq`` specifies the + coefficients of a linear equality constraint on ``x``. + b_eq : 1-D array, optional + The equality constraint vector. Each element of ``A_eq @ x`` must equal + the corresponding element of ``b_eq``. + bounds : sequence, optional + A sequence of ``(min, max)`` pairs for each element in ``x``, defining + the minimum and maximum values of that decision variable. Use ``None`` + to indicate that there is no bound. By default, bounds are + ``(0, None)`` (all decision variables are non-negative). + If a single tuple ``(min, max)`` is provided, then ``min`` and + ``max`` will serve as bounds for all decision variables. + method : str + + This is the method-specific documentation for 'highs-ds'. + :ref:`'highs' `, + :ref:`'highs-ipm' `, + :ref:`'interior-point' ` (default), + :ref:`'revised simplex' `, and + :ref:`'simplex' ` (legacy) + are also available. + + Options + ------- + maxiter : int + The maximum number of iterations to perform in either phase. + Default is the largest possible value for an ``int`` on the platform. + disp : bool (default: ``False``) + Set to ``True`` if indicators of optimization status are to be + printed to the console during optimization. + presolve : bool (default: ``True``) + Presolve attempts to identify trivial infeasibilities, + identify trivial unboundedness, and simplify the problem before + sending it to the main solver. It is generally recommended + to keep the default setting ``True``; set to ``False`` if + presolve is to be disabled. + time_limit : float + The maximum time in seconds allotted to solve the problem; + default is the largest possible value for a ``double`` on the + platform. + dual_feasibility_tolerance : double (default: 1e-07) + Dual feasibility tolerance for + :ref:`'highs-ds' `. + primal_feasibility_tolerance : double (default: 1e-07) + Primal feasibility tolerance for + :ref:`'highs-ds' `. + simplex_dual_edge_weight_strategy : str (default: None) + Strategy for simplex dual edge weights. The default, ``None``, + automatically selects one of the following. + + ``'dantzig'`` uses Dantzig's original strategy of choosing the most + negative reduced cost. + + ``'devex'`` uses the strategy described in [15]_. + + ``steepest`` uses the exact steepest edge strategy as described in + [16]_. + + ``'steepest-devex'`` begins with the exact steepest edge strategy + until the computation is too costly or inexact and then switches to + the devex method. + + Currently, ``None`` always selects ``'steepest-devex'``, but this + may change as new options become available. + unknown_options : dict + Optional arguments not used by this particular solver. If + ``unknown_options`` is non-empty, a warning is issued listing + all unused options. 
+ + Returns + ------- + res : OptimizeResult + A :class:`scipy.optimize.OptimizeResult` consisting of the fields: + + x : 1D array + The values of the decision variables that minimizes the + objective function while satisfying the constraints. + fun : float + The optimal value of the objective function ``c @ x``. + slack : 1D array + The (nominally positive) values of the slack, + ``b_ub - A_ub @ x``. + con : 1D array + The (nominally zero) residuals of the equality constraints, + ``b_eq - A_eq @ x``. + success : bool + ``True`` when the algorithm succeeds in finding an optimal + solution. + status : int + An integer representing the exit status of the algorithm. + + ``0`` : Optimization terminated successfully. + + ``1`` : Iteration or time limit reached. + + ``2`` : Problem appears to be infeasible. + + ``3`` : Problem appears to be unbounded. + + ``4`` : The HiGHS solver ran into a problem. + + message : str + A string descriptor of the exit status of the algorithm. + nit : int + The total number of iterations performed. This includes iterations + in all phases. + crossover_nit : int + This is always ``0`` for the HiGHS simplex method. + For the HiGHS interior-point method, this is the number of + primal/dual pushes performed during the crossover routine. + ineqlin : OptimizeResult + Solution and sensitivity information corresponding to the + inequality constraints, `b_ub`. A dictionary consisting of the + fields: + + residual : np.ndnarray + The (nominally positive) values of the slack variables, + ``b_ub - A_ub @ x``. This quantity is also commonly + referred to as "slack". + + marginals : np.ndarray + The sensitivity (partial derivative) of the objective + function with respect to the right-hand side of the + inequality constraints, `b_ub`. + + eqlin : OptimizeResult + Solution and sensitivity information corresponding to the + equality constraints, `b_eq`. A dictionary consisting of the + fields: + + residual : np.ndarray + The (nominally zero) residuals of the equality constraints, + ``b_eq - A_eq @ x``. + + marginals : np.ndarray + The sensitivity (partial derivative) of the objective + function with respect to the right-hand side of the + equality constraints, `b_eq`. + + lower, upper : OptimizeResult + Solution and sensitivity information corresponding to the + lower and upper bounds on decision variables, `bounds`. + + residual : np.ndarray + The (nominally positive) values of the quantity + ``x - lb`` (lower) or ``ub - x`` (upper). + + marginals : np.ndarray + The sensitivity (partial derivative) of the objective + function with respect to the lower and upper + `bounds`. + + Notes + ----- + + Method :ref:`'highs-ds' ` is a wrapper + of the C++ high performance dual revised simplex implementation (HSOL) + [13]_, [14]_. Method :ref:`'highs-ipm' ` + is a wrapper of a C++ implementation of an **i**\ nterior-\ **p**\ oint + **m**\ ethod [13]_; it features a crossover routine, so it is as accurate + as a simplex solver. Method :ref:`'highs' ` chooses + between the two automatically. For new code involving `linprog`, we + recommend explicitly choosing one of these three method values instead of + :ref:`'interior-point' ` (default), + :ref:`'revised simplex' `, and + :ref:`'simplex' ` (legacy). + + The result fields `ineqlin`, `eqlin`, `lower`, and `upper` all contain + `marginals`, or partial derivatives of the objective function with respect + to the right-hand side of each constraint. 
These partial derivatives are + also referred to as "Lagrange multipliers", "dual values", and + "shadow prices". The sign convention of `marginals` is opposite that + of Lagrange multipliers produced by many nonlinear solvers. + + References + ---------- + .. [13] Huangfu, Q., Galabova, I., Feldmeier, M., and Hall, J. A. J. + "HiGHS - high performance software for linear optimization." + https://highs.dev/ + .. [14] Huangfu, Q. and Hall, J. A. J. "Parallelizing the dual revised + simplex method." Mathematical Programming Computation, 10 (1), + 119-142, 2018. DOI: 10.1007/s12532-017-0130-5 + .. [15] Harris, Paula MJ. "Pivot selection methods of the Devex LP code." + Mathematical programming 5.1 (1973): 1-28. + .. [16] Goldfarb, Donald, and John Ker Reid. "A practicable steepest-edge + simplex algorithm." Mathematical Programming 12.1 (1977): 361-371. + """ + pass + + +def _linprog_highs_ipm_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, + bounds=None, method='highs-ipm', callback=None, + maxiter=None, disp=False, presolve=True, + time_limit=None, + dual_feasibility_tolerance=None, + primal_feasibility_tolerance=None, + ipm_optimality_tolerance=None, + **unknown_options): + r""" + Linear programming: minimize a linear objective function subject to linear + equality and inequality constraints using the HiGHS interior point solver. + + Linear programming solves problems of the following form: + + .. math:: + + \min_x \ & c^T x \\ + \mbox{such that} \ & A_{ub} x \leq b_{ub},\\ + & A_{eq} x = b_{eq},\\ + & l \leq x \leq u , + + where :math:`x` is a vector of decision variables; :math:`c`, + :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and + :math:`A_{ub}` and :math:`A_{eq}` are matrices. + + Alternatively, that's: + + minimize:: + + c @ x + + such that:: + + A_ub @ x <= b_ub + A_eq @ x == b_eq + lb <= x <= ub + + Note that by default ``lb = 0`` and ``ub = None`` unless specified with + ``bounds``. + + Parameters + ---------- + c : 1-D array + The coefficients of the linear objective function to be minimized. + A_ub : 2-D array, optional + The inequality constraint matrix. Each row of ``A_ub`` specifies the + coefficients of a linear inequality constraint on ``x``. + b_ub : 1-D array, optional + The inequality constraint vector. Each element represents an + upper bound on the corresponding value of ``A_ub @ x``. + A_eq : 2-D array, optional + The equality constraint matrix. Each row of ``A_eq`` specifies the + coefficients of a linear equality constraint on ``x``. + b_eq : 1-D array, optional + The equality constraint vector. Each element of ``A_eq @ x`` must equal + the corresponding element of ``b_eq``. + bounds : sequence, optional + A sequence of ``(min, max)`` pairs for each element in ``x``, defining + the minimum and maximum values of that decision variable. Use ``None`` + to indicate that there is no bound. By default, bounds are + ``(0, None)`` (all decision variables are non-negative). + If a single tuple ``(min, max)`` is provided, then ``min`` and + ``max`` will serve as bounds for all decision variables. + method : str + + This is the method-specific documentation for 'highs-ipm'. + :ref:`'highs-ipm' `, + :ref:`'highs-ds' `, + :ref:`'interior-point' ` (default), + :ref:`'revised simplex' `, and + :ref:`'simplex' ` (legacy) + are also available. + + Options + ------- + maxiter : int + The maximum number of iterations to perform in either phase. + For :ref:`'highs-ipm' `, this does not + include the number of crossover iterations. 
Default is the largest + possible value for an ``int`` on the platform. + disp : bool (default: ``False``) + Set to ``True`` if indicators of optimization status are to be + printed to the console during optimization. + presolve : bool (default: ``True``) + Presolve attempts to identify trivial infeasibilities, + identify trivial unboundedness, and simplify the problem before + sending it to the main solver. It is generally recommended + to keep the default setting ``True``; set to ``False`` if + presolve is to be disabled. + time_limit : float + The maximum time in seconds allotted to solve the problem; + default is the largest possible value for a ``double`` on the + platform. + dual_feasibility_tolerance : double (default: 1e-07) + The minimum of this and ``primal_feasibility_tolerance`` + is used for the feasibility tolerance of + :ref:`'highs-ipm' `. + primal_feasibility_tolerance : double (default: 1e-07) + The minimum of this and ``dual_feasibility_tolerance`` + is used for the feasibility tolerance of + :ref:`'highs-ipm' `. + ipm_optimality_tolerance : double (default: ``1e-08``) + Optimality tolerance for + :ref:`'highs-ipm' `. + Minimum allowable value is 1e-12. + unknown_options : dict + Optional arguments not used by this particular solver. If + ``unknown_options`` is non-empty, a warning is issued listing + all unused options. + + Returns + ------- + res : OptimizeResult + A :class:`scipy.optimize.OptimizeResult` consisting of the fields: + + x : 1D array + The values of the decision variables that minimizes the + objective function while satisfying the constraints. + fun : float + The optimal value of the objective function ``c @ x``. + slack : 1D array + The (nominally positive) values of the slack, + ``b_ub - A_ub @ x``. + con : 1D array + The (nominally zero) residuals of the equality constraints, + ``b_eq - A_eq @ x``. + success : bool + ``True`` when the algorithm succeeds in finding an optimal + solution. + status : int + An integer representing the exit status of the algorithm. + + ``0`` : Optimization terminated successfully. + + ``1`` : Iteration or time limit reached. + + ``2`` : Problem appears to be infeasible. + + ``3`` : Problem appears to be unbounded. + + ``4`` : The HiGHS solver ran into a problem. + + message : str + A string descriptor of the exit status of the algorithm. + nit : int + The total number of iterations performed. + For the HiGHS interior-point method, this does not include + crossover iterations. + crossover_nit : int + The number of primal/dual pushes performed during the + crossover routine for the HiGHS interior-point method. + ineqlin : OptimizeResult + Solution and sensitivity information corresponding to the + inequality constraints, `b_ub`. A dictionary consisting of the + fields: + + residual : np.ndnarray + The (nominally positive) values of the slack variables, + ``b_ub - A_ub @ x``. This quantity is also commonly + referred to as "slack". + + marginals : np.ndarray + The sensitivity (partial derivative) of the objective + function with respect to the right-hand side of the + inequality constraints, `b_ub`. + + eqlin : OptimizeResult + Solution and sensitivity information corresponding to the + equality constraints, `b_eq`. A dictionary consisting of the + fields: + + residual : np.ndarray + The (nominally zero) residuals of the equality constraints, + ``b_eq - A_eq @ x``. 
+ + marginals : np.ndarray + The sensitivity (partial derivative) of the objective + function with respect to the right-hand side of the + equality constraints, `b_eq`. + + lower, upper : OptimizeResult + Solution and sensitivity information corresponding to the + lower and upper bounds on decision variables, `bounds`. + + residual : np.ndarray + The (nominally positive) values of the quantity + ``x - lb`` (lower) or ``ub - x`` (upper). + + marginals : np.ndarray + The sensitivity (partial derivative) of the objective + function with respect to the lower and upper + `bounds`. + + Notes + ----- + + Method :ref:`'highs-ipm' ` + is a wrapper of a C++ implementation of an **i**\ nterior-\ **p**\ oint + **m**\ ethod [13]_; it features a crossover routine, so it is as accurate + as a simplex solver. + Method :ref:`'highs-ds' ` is a wrapper + of the C++ high performance dual revised simplex implementation (HSOL) + [13]_, [14]_. Method :ref:`'highs' ` chooses + between the two automatically. For new code involving `linprog`, we + recommend explicitly choosing one of these three method values instead of + :ref:`'interior-point' ` (default), + :ref:`'revised simplex' `, and + :ref:`'simplex' ` (legacy). + + The result fields `ineqlin`, `eqlin`, `lower`, and `upper` all contain + `marginals`, or partial derivatives of the objective function with respect + to the right-hand side of each constraint. These partial derivatives are + also referred to as "Lagrange multipliers", "dual values", and + "shadow prices". The sign convention of `marginals` is opposite that + of Lagrange multipliers produced by many nonlinear solvers. + + References + ---------- + .. [13] Huangfu, Q., Galabova, I., Feldmeier, M., and Hall, J. A. J. + "HiGHS - high performance software for linear optimization." + https://highs.dev/ + .. [14] Huangfu, Q. and Hall, J. A. J. "Parallelizing the dual revised + simplex method." Mathematical Programming Computation, 10 (1), + 119-142, 2018. DOI: 10.1007/s12532-017-0130-5 + """ + pass + + +def _linprog_ip_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, + bounds=None, method='interior-point', callback=None, + maxiter=1000, disp=False, presolve=True, + tol=1e-8, autoscale=False, rr=True, + alpha0=.99995, beta=0.1, sparse=False, + lstsq=False, sym_pos=True, cholesky=True, pc=True, + ip=False, permc_spec='MMD_AT_PLUS_A', **unknown_options): + r""" + Linear programming: minimize a linear objective function subject to linear + equality and inequality constraints using the interior-point method of + [4]_. + + .. deprecated:: 1.9.0 + `method='interior-point'` will be removed in SciPy 1.11.0. + It is replaced by `method='highs'` because the latter is + faster and more robust. + + Linear programming solves problems of the following form: + + .. math:: + + \min_x \ & c^T x \\ + \mbox{such that} \ & A_{ub} x \leq b_{ub},\\ + & A_{eq} x = b_{eq},\\ + & l \leq x \leq u , + + where :math:`x` is a vector of decision variables; :math:`c`, + :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and + :math:`A_{ub}` and :math:`A_{eq}` are matrices. + + Alternatively, that's: + + minimize:: + + c @ x + + such that:: + + A_ub @ x <= b_ub + A_eq @ x == b_eq + lb <= x <= ub + + Note that by default ``lb = 0`` and ``ub = None`` unless specified with + ``bounds``. + + Parameters + ---------- + c : 1-D array + The coefficients of the linear objective function to be minimized. + A_ub : 2-D array, optional + The inequality constraint matrix. 
Each row of ``A_ub`` specifies the + coefficients of a linear inequality constraint on ``x``. + b_ub : 1-D array, optional + The inequality constraint vector. Each element represents an + upper bound on the corresponding value of ``A_ub @ x``. + A_eq : 2-D array, optional + The equality constraint matrix. Each row of ``A_eq`` specifies the + coefficients of a linear equality constraint on ``x``. + b_eq : 1-D array, optional + The equality constraint vector. Each element of ``A_eq @ x`` must equal + the corresponding element of ``b_eq``. + bounds : sequence, optional + A sequence of ``(min, max)`` pairs for each element in ``x``, defining + the minimum and maximum values of that decision variable. Use ``None`` + to indicate that there is no bound. By default, bounds are + ``(0, None)`` (all decision variables are non-negative). + If a single tuple ``(min, max)`` is provided, then ``min`` and + ``max`` will serve as bounds for all decision variables. + method : str + This is the method-specific documentation for 'interior-point'. + :ref:`'highs' `, + :ref:`'highs-ds' `, + :ref:`'highs-ipm' `, + :ref:`'revised simplex' `, and + :ref:`'simplex' ` (legacy) + are also available. + callback : callable, optional + Callback function to be executed once per iteration. + + Options + ------- + maxiter : int (default: 1000) + The maximum number of iterations of the algorithm. + disp : bool (default: False) + Set to ``True`` if indicators of optimization status are to be printed + to the console each iteration. + presolve : bool (default: True) + Presolve attempts to identify trivial infeasibilities, + identify trivial unboundedness, and simplify the problem before + sending it to the main solver. It is generally recommended + to keep the default setting ``True``; set to ``False`` if + presolve is to be disabled. + tol : float (default: 1e-8) + Termination tolerance to be used for all termination criteria; + see [4]_ Section 4.5. + autoscale : bool (default: False) + Set to ``True`` to automatically perform equilibration. + Consider using this option if the numerical values in the + constraints are separated by several orders of magnitude. + rr : bool (default: True) + Set to ``False`` to disable automatic redundancy removal. + alpha0 : float (default: 0.99995) + The maximal step size for Mehrota's predictor-corrector search + direction; see :math:`\beta_{3}` of [4]_ Table 8.1. + beta : float (default: 0.1) + The desired reduction of the path parameter :math:`\mu` (see [6]_) + when Mehrota's predictor-corrector is not in use (uncommon). + sparse : bool (default: False) + Set to ``True`` if the problem is to be treated as sparse after + presolve. If either ``A_eq`` or ``A_ub`` is a sparse matrix, + this option will automatically be set ``True``, and the problem + will be treated as sparse even during presolve. If your constraint + matrices contain mostly zeros and the problem is not very small (less + than about 100 constraints or variables), consider setting ``True`` + or providing ``A_eq`` and ``A_ub`` as sparse matrices. + lstsq : bool (default: ``False``) + Set to ``True`` if the problem is expected to be very poorly + conditioned. This should always be left ``False`` unless severe + numerical difficulties are encountered. Leave this at the default + unless you receive a warning message suggesting otherwise. + sym_pos : bool (default: True) + Leave ``True`` if the problem is expected to yield a well conditioned + symmetric positive definite normal equation matrix + (almost always). 
Leave this at the default unless you receive + a warning message suggesting otherwise. + cholesky : bool (default: True) + Set to ``True`` if the normal equations are to be solved by explicit + Cholesky decomposition followed by explicit forward/backward + substitution. This is typically faster for problems + that are numerically well-behaved. + pc : bool (default: True) + Leave ``True`` if the predictor-corrector method of Mehrota is to be + used. This is almost always (if not always) beneficial. + ip : bool (default: False) + Set to ``True`` if the improved initial point suggestion due to [4]_ + Section 4.3 is desired. Whether this is beneficial or not + depends on the problem. + permc_spec : str (default: 'MMD_AT_PLUS_A') + (Has effect only with ``sparse = True``, ``lstsq = False``, ``sym_pos = + True``, and no SuiteSparse.) + A matrix is factorized in each iteration of the algorithm. + This option specifies how to permute the columns of the matrix for + sparsity preservation. Acceptable values are: + + - ``NATURAL``: natural ordering. + - ``MMD_ATA``: minimum degree ordering on the structure of A^T A. + - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A. + - ``COLAMD``: approximate minimum degree column ordering. + + This option can impact the convergence of the + interior point algorithm; test different values to determine which + performs best for your problem. For more information, refer to + ``scipy.sparse.linalg.splu``. + unknown_options : dict + Optional arguments not used by this particular solver. If + `unknown_options` is non-empty a warning is issued listing all + unused options. + + Returns + ------- + res : OptimizeResult + A :class:`scipy.optimize.OptimizeResult` consisting of the fields: + + x : 1-D array + The values of the decision variables that minimizes the + objective function while satisfying the constraints. + fun : float + The optimal value of the objective function ``c @ x``. + slack : 1-D array + The (nominally positive) values of the slack variables, + ``b_ub - A_ub @ x``. + con : 1-D array + The (nominally zero) residuals of the equality constraints, + ``b_eq - A_eq @ x``. + success : bool + ``True`` when the algorithm succeeds in finding an optimal + solution. + status : int + An integer representing the exit status of the algorithm. + + ``0`` : Optimization terminated successfully. + + ``1`` : Iteration limit reached. + + ``2`` : Problem appears to be infeasible. + + ``3`` : Problem appears to be unbounded. + + ``4`` : Numerical difficulties encountered. + + message : str + A string descriptor of the exit status of the algorithm. + nit : int + The total number of iterations performed in all phases. + + + Notes + ----- + This method implements the algorithm outlined in [4]_ with ideas from [8]_ + and a structure inspired by the simpler methods of [6]_. + + The primal-dual path following method begins with initial 'guesses' of + the primal and dual variables of the standard form problem and iteratively + attempts to solve the (nonlinear) Karush-Kuhn-Tucker conditions for the + problem with a gradually reduced logarithmic barrier term added to the + objective. This particular implementation uses a homogeneous self-dual + formulation, which provides certificates of infeasibility or unboundedness + where applicable. + + The default initial point for the primal and dual variables is that + defined in [4]_ Section 4.4 Equation 8.22. 
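Whether that alternative starting point is used can be toggled through the ``ip`` option discussed just below. A hedged usage sketch (illustrative data only; the 'interior-point' method is deprecated in favour of 'highs' and removed in SciPy 1.11)::

    import numpy as np
    from scipy.optimize import linprog

    c = np.array([-1.0, 4.0])
    A_ub = np.array([[-3.0, 1.0], [1.0, 2.0]])
    b_ub = np.array([6.0, 4.0])

    # Default initial point versus the improved initial point (``ip=True``).
    res_default = linprog(c, A_ub=A_ub, b_ub=b_ub, method="interior-point")
    res_ip = linprog(c, A_ub=A_ub, b_ub=b_ub, method="interior-point",
                     options={"ip": True})
    print(res_default.nit, res_ip.nit)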
Optionally (by setting initial + point option ``ip=True``), an alternate (potentially improved) starting + point can be calculated according to the additional recommendations of + [4]_ Section 4.4. + + A search direction is calculated using the predictor-corrector method + (single correction) proposed by Mehrota and detailed in [4]_ Section 4.1. + (A potential improvement would be to implement the method of multiple + corrections described in [4]_ Section 4.2.) In practice, this is + accomplished by solving the normal equations, [4]_ Section 5.1 Equations + 8.31 and 8.32, derived from the Newton equations [4]_ Section 5 Equations + 8.25 (compare to [4]_ Section 4 Equations 8.6-8.8). The advantage of + solving the normal equations rather than 8.25 directly is that the + matrices involved are symmetric positive definite, so Cholesky + decomposition can be used rather than the more expensive LU factorization. + + With default options, the solver used to perform the factorization depends + on third-party software availability and the conditioning of the problem. + + For dense problems, solvers are tried in the following order: + + 1. ``scipy.linalg.cho_factor`` + + 2. ``scipy.linalg.solve`` with option ``sym_pos=True`` + + 3. ``scipy.linalg.solve`` with option ``sym_pos=False`` + + 4. ``scipy.linalg.lstsq`` + + For sparse problems: + + 1. ``sksparse.cholmod.cholesky`` (if scikit-sparse and SuiteSparse are + installed) + + 2. ``scipy.sparse.linalg.factorized`` (if scikit-umfpack and SuiteSparse + are installed) + + 3. ``scipy.sparse.linalg.splu`` (which uses SuperLU distributed with SciPy) + + 4. ``scipy.sparse.linalg.lsqr`` + + If the solver fails for any reason, successively more robust (but slower) + solvers are attempted in the order indicated. Attempting, failing, and + re-starting factorization can be time consuming, so if the problem is + numerically challenging, options can be set to bypass solvers that are + failing. Setting ``cholesky=False`` skips to solver 2, + ``sym_pos=False`` skips to solver 3, and ``lstsq=True`` skips + to solver 4 for both sparse and dense problems. + + Potential improvements for combatting issues associated with dense + columns in otherwise sparse problems are outlined in [4]_ Section 5.3 and + [10]_ Section 4.1-4.2; the latter also discusses the alleviation of + accuracy issues associated with the substitution approach to free + variables. + + After calculating the search direction, the maximum possible step size + that does not activate the non-negativity constraints is calculated, and + the smaller of this step size and unity is applied (as in [4]_ Section + 4.1.) [4]_ Section 4.3 suggests improvements for choosing the step size. + + The new point is tested according to the termination conditions of [4]_ + Section 4.5. The same tolerance, which can be set using the ``tol`` option, + is used for all checks. (A potential improvement would be to expose + the different tolerances to be set independently.) If optimality, + unboundedness, or infeasibility is detected, the solve procedure + terminates; otherwise it repeats. + + Whereas the top level ``linprog`` module expects a problem of form: + + Minimize:: + + c @ x + + Subject to:: + + A_ub @ x <= b_ub + A_eq @ x == b_eq + lb <= x <= ub + + where ``lb = 0`` and ``ub = None`` unless set in ``bounds``. The problem + is automatically converted to the form: + + Minimize:: + + c @ x + + Subject to:: + + A @ x == b + x >= 0 + + for solution. 
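The dense solver fallback order listed a few paragraphs above can be pictured with the following editorial sketch. ``solve_normal_equations`` is a hypothetical name, failures are detected here simply by catching ``LinAlgError``, and ``assume_a='pos'`` is the current spelling of the ``sym_pos=True`` route; the real selection logic in the interior-point code is structured differently::

    import numpy as np
    import scipy.linalg

    def solve_normal_equations(M, r):
        """Illustrative fallback chain: Cholesky -> sym. pos. solve -> solve -> lstsq."""
        try:
            # 1. Explicit Cholesky factorization (fastest, needs positive definiteness).
            c, low = scipy.linalg.cho_factor(M)
            return scipy.linalg.cho_solve((c, low), r)
        except np.linalg.LinAlgError:
            pass
        try:
            # 2. General solver assuming a symmetric positive definite matrix.
            return scipy.linalg.solve(M, r, assume_a="pos")
        except np.linalg.LinAlgError:
            try:
                # 3. General solver without that assumption.
                return scipy.linalg.solve(M, r)
            except np.linalg.LinAlgError:
                # 4. Least squares as the most robust (and slowest) last resort.
                return scipy.linalg.lstsq(M, r)[0]

    M = np.array([[4.0, 1.0], [1.0, 3.0]])
    print(solve_normal_equations(M, np.array([1.0, 2.0])))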
That is, the original problem contains equality, upper-bound + and variable constraints whereas the method specific solver requires + equality constraints and variable non-negativity. ``linprog`` converts the + original problem to standard form by converting the simple bounds to upper + bound constraints, introducing non-negative slack variables for inequality + constraints, and expressing unbounded variables as the difference between + two non-negative variables. The problem is converted back to the original + form before results are reported. + + References + ---------- + .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point + optimizer for linear programming: an implementation of the + homogeneous algorithm." High performance optimization. Springer US, + 2000. 197-232. + .. [6] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear + Programming based on Newton's Method." Unpublished Course Notes, + March 2004. Available 2/25/2017 at + https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf + .. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear + programming." Mathematical Programming 71.2 (1995): 221-245. + .. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear + programming." Athena Scientific 1 (1997): 997. + .. [10] Andersen, Erling D., et al. Implementation of interior point + methods for large scale linear programming. HEC/Universite de + Geneve, 1996. + """ + pass + + +def _linprog_rs_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, + bounds=None, method='interior-point', callback=None, + x0=None, maxiter=5000, disp=False, presolve=True, + tol=1e-12, autoscale=False, rr=True, maxupdate=10, + mast=False, pivot="mrc", **unknown_options): + r""" + Linear programming: minimize a linear objective function subject to linear + equality and inequality constraints using the revised simplex method. + + .. deprecated:: 1.9.0 + `method='revised simplex'` will be removed in SciPy 1.11.0. + It is replaced by `method='highs'` because the latter is + faster and more robust. + + Linear programming solves problems of the following form: + + .. math:: + + \min_x \ & c^T x \\ + \mbox{such that} \ & A_{ub} x \leq b_{ub},\\ + & A_{eq} x = b_{eq},\\ + & l \leq x \leq u , + + where :math:`x` is a vector of decision variables; :math:`c`, + :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and + :math:`A_{ub}` and :math:`A_{eq}` are matrices. + + Alternatively, that's: + + minimize:: + + c @ x + + such that:: + + A_ub @ x <= b_ub + A_eq @ x == b_eq + lb <= x <= ub + + Note that by default ``lb = 0`` and ``ub = None`` unless specified with + ``bounds``. + + Parameters + ---------- + c : 1-D array + The coefficients of the linear objective function to be minimized. + A_ub : 2-D array, optional + The inequality constraint matrix. Each row of ``A_ub`` specifies the + coefficients of a linear inequality constraint on ``x``. + b_ub : 1-D array, optional + The inequality constraint vector. Each element represents an + upper bound on the corresponding value of ``A_ub @ x``. + A_eq : 2-D array, optional + The equality constraint matrix. Each row of ``A_eq`` specifies the + coefficients of a linear equality constraint on ``x``. + b_eq : 1-D array, optional + The equality constraint vector. Each element of ``A_eq @ x`` must equal + the corresponding element of ``b_eq``. 
+ bounds : sequence, optional + A sequence of ``(min, max)`` pairs for each element in ``x``, defining + the minimum and maximum values of that decision variable. Use ``None`` + to indicate that there is no bound. By default, bounds are + ``(0, None)`` (all decision variables are non-negative). + If a single tuple ``(min, max)`` is provided, then ``min`` and + ``max`` will serve as bounds for all decision variables. + method : str + This is the method-specific documentation for 'revised simplex'. + :ref:`'highs' `, + :ref:`'highs-ds' `, + :ref:`'highs-ipm' `, + :ref:`'interior-point' ` (default), + and :ref:`'simplex' ` (legacy) + are also available. + callback : callable, optional + Callback function to be executed once per iteration. + x0 : 1-D array, optional + Guess values of the decision variables, which will be refined by + the optimization algorithm. This argument is currently used only by the + 'revised simplex' method, and can only be used if `x0` represents a + basic feasible solution. + + Options + ------- + maxiter : int (default: 5000) + The maximum number of iterations to perform in either phase. + disp : bool (default: False) + Set to ``True`` if indicators of optimization status are to be printed + to the console each iteration. + presolve : bool (default: True) + Presolve attempts to identify trivial infeasibilities, + identify trivial unboundedness, and simplify the problem before + sending it to the main solver. It is generally recommended + to keep the default setting ``True``; set to ``False`` if + presolve is to be disabled. + tol : float (default: 1e-12) + The tolerance which determines when a solution is "close enough" to + zero in Phase 1 to be considered a basic feasible solution or close + enough to positive to serve as an optimal solution. + autoscale : bool (default: False) + Set to ``True`` to automatically perform equilibration. + Consider using this option if the numerical values in the + constraints are separated by several orders of magnitude. + rr : bool (default: True) + Set to ``False`` to disable automatic redundancy removal. + maxupdate : int (default: 10) + The maximum number of updates performed on the LU factorization. + After this many updates is reached, the basis matrix is factorized + from scratch. + mast : bool (default: False) + Minimize Amortized Solve Time. If enabled, the average time to solve + a linear system using the basis factorization is measured. Typically, + the average solve time will decrease with each successive solve after + initial factorization, as factorization takes much more time than the + solve operation (and updates). Eventually, however, the updated + factorization becomes sufficiently complex that the average solve time + begins to increase. When this is detected, the basis is refactorized + from scratch. Enable this option to maximize speed at the risk of + nondeterministic behavior. Ignored if ``maxupdate`` is 0. + pivot : "mrc" or "bland" (default: "mrc") + Pivot rule: Minimum Reduced Cost ("mrc") or Bland's rule ("bland"). + Choose Bland's rule if iteration limit is reached and cycling is + suspected. + unknown_options : dict + Optional arguments not used by this particular solver. If + `unknown_options` is non-empty a warning is issued listing all + unused options. + + Returns + ------- + res : OptimizeResult + A :class:`scipy.optimize.OptimizeResult` consisting of the fields: + + x : 1-D array + The values of the decision variables that minimizes the + objective function while satisfying the constraints. 
+ fun : float + The optimal value of the objective function ``c @ x``. + slack : 1-D array + The (nominally positive) values of the slack variables, + ``b_ub - A_ub @ x``. + con : 1-D array + The (nominally zero) residuals of the equality constraints, + ``b_eq - A_eq @ x``. + success : bool + ``True`` when the algorithm succeeds in finding an optimal + solution. + status : int + An integer representing the exit status of the algorithm. + + ``0`` : Optimization terminated successfully. + + ``1`` : Iteration limit reached. + + ``2`` : Problem appears to be infeasible. + + ``3`` : Problem appears to be unbounded. + + ``4`` : Numerical difficulties encountered. + + ``5`` : Problem has no constraints; turn presolve on. + + ``6`` : Invalid guess provided. + + message : str + A string descriptor of the exit status of the algorithm. + nit : int + The total number of iterations performed in all phases. + + + Notes + ----- + Method *revised simplex* uses the revised simplex method as described in + [9]_, except that a factorization [11]_ of the basis matrix, rather than + its inverse, is efficiently maintained and used to solve the linear systems + at each iteration of the algorithm. + + References + ---------- + .. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear + programming." Athena Scientific 1 (1997): 997. + .. [11] Bartels, Richard H. "A stabilization of the simplex method." + Journal in Numerische Mathematik 16.5 (1971): 414-434. + """ + pass + + +def _linprog_simplex_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, + bounds=None, method='interior-point', callback=None, + maxiter=5000, disp=False, presolve=True, + tol=1e-12, autoscale=False, rr=True, bland=False, + **unknown_options): + r""" + Linear programming: minimize a linear objective function subject to linear + equality and inequality constraints using the tableau-based simplex method. + + .. deprecated:: 1.9.0 + `method='simplex'` will be removed in SciPy 1.11.0. + It is replaced by `method='highs'` because the latter is + faster and more robust. + + Linear programming solves problems of the following form: + + .. math:: + + \min_x \ & c^T x \\ + \mbox{such that} \ & A_{ub} x \leq b_{ub},\\ + & A_{eq} x = b_{eq},\\ + & l \leq x \leq u , + + where :math:`x` is a vector of decision variables; :math:`c`, + :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and + :math:`A_{ub}` and :math:`A_{eq}` are matrices. + + Alternatively, that's: + + minimize:: + + c @ x + + such that:: + + A_ub @ x <= b_ub + A_eq @ x == b_eq + lb <= x <= ub + + Note that by default ``lb = 0`` and ``ub = None`` unless specified with + ``bounds``. + + Parameters + ---------- + c : 1-D array + The coefficients of the linear objective function to be minimized. + A_ub : 2-D array, optional + The inequality constraint matrix. Each row of ``A_ub`` specifies the + coefficients of a linear inequality constraint on ``x``. + b_ub : 1-D array, optional + The inequality constraint vector. Each element represents an + upper bound on the corresponding value of ``A_ub @ x``. + A_eq : 2-D array, optional + The equality constraint matrix. Each row of ``A_eq`` specifies the + coefficients of a linear equality constraint on ``x``. + b_eq : 1-D array, optional + The equality constraint vector. Each element of ``A_eq @ x`` must equal + the corresponding element of ``b_eq``. + bounds : sequence, optional + A sequence of ``(min, max)`` pairs for each element in ``x``, defining + the minimum and maximum values of that decision variable. 
Use ``None`` + to indicate that there is no bound. By default, bounds are + ``(0, None)`` (all decision variables are non-negative). + If a single tuple ``(min, max)`` is provided, then ``min`` and + ``max`` will serve as bounds for all decision variables. + method : str + This is the method-specific documentation for 'simplex'. + :ref:`'highs' `, + :ref:`'highs-ds' `, + :ref:`'highs-ipm' `, + :ref:`'interior-point' ` (default), + and :ref:`'revised simplex' ` + are also available. + callback : callable, optional + Callback function to be executed once per iteration. + + Options + ------- + maxiter : int (default: 5000) + The maximum number of iterations to perform in either phase. + disp : bool (default: False) + Set to ``True`` if indicators of optimization status are to be printed + to the console each iteration. + presolve : bool (default: True) + Presolve attempts to identify trivial infeasibilities, + identify trivial unboundedness, and simplify the problem before + sending it to the main solver. It is generally recommended + to keep the default setting ``True``; set to ``False`` if + presolve is to be disabled. + tol : float (default: 1e-12) + The tolerance which determines when a solution is "close enough" to + zero in Phase 1 to be considered a basic feasible solution or close + enough to positive to serve as an optimal solution. + autoscale : bool (default: False) + Set to ``True`` to automatically perform equilibration. + Consider using this option if the numerical values in the + constraints are separated by several orders of magnitude. + rr : bool (default: True) + Set to ``False`` to disable automatic redundancy removal. + bland : bool + If True, use Bland's anti-cycling rule [3]_ to choose pivots to + prevent cycling. If False, choose pivots which should lead to a + converged solution more quickly. The latter method is subject to + cycling (non-convergence) in rare instances. + unknown_options : dict + Optional arguments not used by this particular solver. If + `unknown_options` is non-empty a warning is issued listing all + unused options. + + Returns + ------- + res : OptimizeResult + A :class:`scipy.optimize.OptimizeResult` consisting of the fields: + + x : 1-D array + The values of the decision variables that minimizes the + objective function while satisfying the constraints. + fun : float + The optimal value of the objective function ``c @ x``. + slack : 1-D array + The (nominally positive) values of the slack variables, + ``b_ub - A_ub @ x``. + con : 1-D array + The (nominally zero) residuals of the equality constraints, + ``b_eq - A_eq @ x``. + success : bool + ``True`` when the algorithm succeeds in finding an optimal + solution. + status : int + An integer representing the exit status of the algorithm. + + ``0`` : Optimization terminated successfully. + + ``1`` : Iteration limit reached. + + ``2`` : Problem appears to be infeasible. + + ``3`` : Problem appears to be unbounded. + + ``4`` : Numerical difficulties encountered. + + message : str + A string descriptor of the exit status of the algorithm. + nit : int + The total number of iterations performed in all phases. + + References + ---------- + .. [1] Dantzig, George B., Linear programming and extensions. Rand + Corporation Research Study Princeton Univ. Press, Princeton, NJ, + 1963 + .. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to + Mathematical Programming", McGraw-Hill, Chapter 4. + .. [3] Bland, Robert G. New finite pivoting rules for the simplex method. 
+ Mathematics of Operations Research (2), 1977: pp. 103-107. + """ + pass diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_linprog_highs.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_linprog_highs.py new file mode 100644 index 0000000000000000000000000000000000000000..eb07443bb255471e6e0ac487bd6749253bf5d133 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_linprog_highs.py @@ -0,0 +1,440 @@ +"""HiGHS Linear Optimization Methods + +Interface to HiGHS linear optimization software. +https://highs.dev/ + +.. versionadded:: 1.5.0 + +References +---------- +.. [1] Q. Huangfu and J.A.J. Hall. "Parallelizing the dual revised simplex + method." Mathematical Programming Computation, 10 (1), 119-142, + 2018. DOI: 10.1007/s12532-017-0130-5 + +""" + +import inspect +import numpy as np +from ._optimize import OptimizeWarning, OptimizeResult +from warnings import warn +from ._highs._highs_wrapper import _highs_wrapper +from ._highs._highs_constants import ( + CONST_INF, + MESSAGE_LEVEL_NONE, + HIGHS_OBJECTIVE_SENSE_MINIMIZE, + + MODEL_STATUS_NOTSET, + MODEL_STATUS_LOAD_ERROR, + MODEL_STATUS_MODEL_ERROR, + MODEL_STATUS_PRESOLVE_ERROR, + MODEL_STATUS_SOLVE_ERROR, + MODEL_STATUS_POSTSOLVE_ERROR, + MODEL_STATUS_MODEL_EMPTY, + MODEL_STATUS_OPTIMAL, + MODEL_STATUS_INFEASIBLE, + MODEL_STATUS_UNBOUNDED_OR_INFEASIBLE, + MODEL_STATUS_UNBOUNDED, + MODEL_STATUS_REACHED_DUAL_OBJECTIVE_VALUE_UPPER_BOUND + as MODEL_STATUS_RDOVUB, + MODEL_STATUS_REACHED_OBJECTIVE_TARGET, + MODEL_STATUS_REACHED_TIME_LIMIT, + MODEL_STATUS_REACHED_ITERATION_LIMIT, + + HIGHS_SIMPLEX_STRATEGY_DUAL, + + HIGHS_SIMPLEX_CRASH_STRATEGY_OFF, + + HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_CHOOSE, + HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_DANTZIG, + HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_DEVEX, + HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_STEEPEST_EDGE, +) +from scipy.sparse import csc_matrix, vstack, issparse + + +def _highs_to_scipy_status_message(highs_status, highs_message): + """Converts HiGHS status number/message to SciPy status number/message""" + + scipy_statuses_messages = { + None: (4, "HiGHS did not provide a status code. "), + MODEL_STATUS_NOTSET: (4, ""), + MODEL_STATUS_LOAD_ERROR: (4, ""), + MODEL_STATUS_MODEL_ERROR: (2, ""), + MODEL_STATUS_PRESOLVE_ERROR: (4, ""), + MODEL_STATUS_SOLVE_ERROR: (4, ""), + MODEL_STATUS_POSTSOLVE_ERROR: (4, ""), + MODEL_STATUS_MODEL_EMPTY: (4, ""), + MODEL_STATUS_RDOVUB: (4, ""), + MODEL_STATUS_REACHED_OBJECTIVE_TARGET: (4, ""), + MODEL_STATUS_OPTIMAL: (0, "Optimization terminated successfully. "), + MODEL_STATUS_REACHED_TIME_LIMIT: (1, "Time limit reached. "), + MODEL_STATUS_REACHED_ITERATION_LIMIT: (1, "Iteration limit reached. "), + MODEL_STATUS_INFEASIBLE: (2, "The problem is infeasible. "), + MODEL_STATUS_UNBOUNDED: (3, "The problem is unbounded. "), + MODEL_STATUS_UNBOUNDED_OR_INFEASIBLE: (4, "The problem is unbounded " + "or infeasible. ")} + unrecognized = (4, "The HiGHS status code was not recognized. 
") + scipy_status, scipy_message = ( + scipy_statuses_messages.get(highs_status, unrecognized)) + scipy_message = (f"{scipy_message}" + f"(HiGHS Status {highs_status}: {highs_message})") + return scipy_status, scipy_message + + +def _replace_inf(x): + # Replace `np.inf` with CONST_INF + infs = np.isinf(x) + with np.errstate(invalid="ignore"): + x[infs] = np.sign(x[infs])*CONST_INF + return x + + +def _convert_to_highs_enum(option, option_str, choices): + # If option is in the choices we can look it up, if not use + # the default value taken from function signature and warn: + try: + return choices[option.lower()] + except AttributeError: + return choices[option] + except KeyError: + sig = inspect.signature(_linprog_highs) + default_str = sig.parameters[option_str].default + warn(f"Option {option_str} is {option}, but only values in " + f"{set(choices.keys())} are allowed. Using default: " + f"{default_str}.", + OptimizeWarning, stacklevel=3) + return choices[default_str] + + +def _linprog_highs(lp, solver, time_limit=None, presolve=True, + disp=False, maxiter=None, + dual_feasibility_tolerance=None, + primal_feasibility_tolerance=None, + ipm_optimality_tolerance=None, + simplex_dual_edge_weight_strategy=None, + mip_rel_gap=None, + mip_max_nodes=None, + **unknown_options): + r""" + Solve the following linear programming problem using one of the HiGHS + solvers: + + User-facing documentation is in _linprog_doc.py. + + Parameters + ---------- + lp : _LPProblem + A ``scipy.optimize._linprog_util._LPProblem`` ``namedtuple``. + solver : "ipm" or "simplex" or None + Which HiGHS solver to use. If ``None``, "simplex" will be used. + + Options + ------- + maxiter : int + The maximum number of iterations to perform in either phase. For + ``solver='ipm'``, this does not include the number of crossover + iterations. Default is the largest possible value for an ``int`` + on the platform. + disp : bool + Set to ``True`` if indicators of optimization status are to be printed + to the console each iteration; default ``False``. + time_limit : float + The maximum time in seconds allotted to solve the problem; default is + the largest possible value for a ``double`` on the platform. + presolve : bool + Presolve attempts to identify trivial infeasibilities, + identify trivial unboundedness, and simplify the problem before + sending it to the main solver. It is generally recommended + to keep the default setting ``True``; set to ``False`` if presolve is + to be disabled. + dual_feasibility_tolerance : double + Dual feasibility tolerance. Default is 1e-07. + The minimum of this and ``primal_feasibility_tolerance`` + is used for the feasibility tolerance when ``solver='ipm'``. + primal_feasibility_tolerance : double + Primal feasibility tolerance. Default is 1e-07. + The minimum of this and ``dual_feasibility_tolerance`` + is used for the feasibility tolerance when ``solver='ipm'``. + ipm_optimality_tolerance : double + Optimality tolerance for ``solver='ipm'``. Default is 1e-08. + Minimum possible value is 1e-12 and must be smaller than the largest + possible value for a ``double`` on the platform. + simplex_dual_edge_weight_strategy : str (default: None) + Strategy for simplex dual edge weights. The default, ``None``, + automatically selects one of the following. + + ``'dantzig'`` uses Dantzig's original strategy of choosing the most + negative reduced cost. + + ``'devex'`` uses the strategy described in [15]_. + + ``steepest`` uses the exact steepest edge strategy as described in + [16]_. 
+ + ``'steepest-devex'`` begins with the exact steepest edge strategy + until the computation is too costly or inexact and then switches to + the devex method. + + Currently, using ``None`` always selects ``'steepest-devex'``, but this + may change as new options become available. + + mip_max_nodes : int + The maximum number of nodes allotted to solve the problem; default is + the largest possible value for a ``HighsInt`` on the platform. + Ignored if not using the MIP solver. + unknown_options : dict + Optional arguments not used by this particular solver. If + ``unknown_options`` is non-empty, a warning is issued listing all + unused options. + + Returns + ------- + sol : dict + A dictionary consisting of the fields: + + x : 1D array + The values of the decision variables that minimizes the + objective function while satisfying the constraints. + fun : float + The optimal value of the objective function ``c @ x``. + slack : 1D array + The (nominally positive) values of the slack, + ``b_ub - A_ub @ x``. + con : 1D array + The (nominally zero) residuals of the equality constraints, + ``b_eq - A_eq @ x``. + success : bool + ``True`` when the algorithm succeeds in finding an optimal + solution. + status : int + An integer representing the exit status of the algorithm. + + ``0`` : Optimization terminated successfully. + + ``1`` : Iteration or time limit reached. + + ``2`` : Problem appears to be infeasible. + + ``3`` : Problem appears to be unbounded. + + ``4`` : The HiGHS solver ran into a problem. + + message : str + A string descriptor of the exit status of the algorithm. + nit : int + The total number of iterations performed. + For ``solver='simplex'``, this includes iterations in all + phases. For ``solver='ipm'``, this does not include + crossover iterations. + crossover_nit : int + The number of primal/dual pushes performed during the + crossover routine for ``solver='ipm'``. This is ``0`` + for ``solver='simplex'``. + ineqlin : OptimizeResult + Solution and sensitivity information corresponding to the + inequality constraints, `b_ub`. A dictionary consisting of the + fields: + + residual : np.ndnarray + The (nominally positive) values of the slack variables, + ``b_ub - A_ub @ x``. This quantity is also commonly + referred to as "slack". + + marginals : np.ndarray + The sensitivity (partial derivative) of the objective + function with respect to the right-hand side of the + inequality constraints, `b_ub`. + + eqlin : OptimizeResult + Solution and sensitivity information corresponding to the + equality constraints, `b_eq`. A dictionary consisting of the + fields: + + residual : np.ndarray + The (nominally zero) residuals of the equality constraints, + ``b_eq - A_eq @ x``. + + marginals : np.ndarray + The sensitivity (partial derivative) of the objective + function with respect to the right-hand side of the + equality constraints, `b_eq`. + + lower, upper : OptimizeResult + Solution and sensitivity information corresponding to the + lower and upper bounds on decision variables, `bounds`. + + residual : np.ndarray + The (nominally positive) values of the quantity + ``x - lb`` (lower) or ``ub - x`` (upper). + + marginals : np.ndarray + The sensitivity (partial derivative) of the objective + function with respect to the lower and upper + `bounds`. + + mip_node_count : int + The number of subproblems or "nodes" solved by the MILP + solver. Only present when `integrality` is not `None`. + + mip_dual_bound : float + The MILP solver's final estimate of the lower bound on the + optimal solution. 
Only present when `integrality` is not + `None`. + + mip_gap : float + The difference between the final objective function value + and the final dual bound, scaled by the final objective + function value. Only present when `integrality` is not + `None`. + + Notes + ----- + The result fields `ineqlin`, `eqlin`, `lower`, and `upper` all contain + `marginals`, or partial derivatives of the objective function with respect + to the right-hand side of each constraint. These partial derivatives are + also referred to as "Lagrange multipliers", "dual values", and + "shadow prices". The sign convention of `marginals` is opposite that + of Lagrange multipliers produced by many nonlinear solvers. + + References + ---------- + .. [15] Harris, Paula MJ. "Pivot selection methods of the Devex LP code." + Mathematical programming 5.1 (1973): 1-28. + .. [16] Goldfarb, Donald, and John Ker Reid. "A practicable steepest-edge + simplex algorithm." Mathematical Programming 12.1 (1977): 361-371. + """ + if unknown_options: + message = (f"Unrecognized options detected: {unknown_options}. " + "These will be passed to HiGHS verbatim.") + warn(message, OptimizeWarning, stacklevel=3) + + # Map options to HiGHS enum values + simplex_dual_edge_weight_strategy_enum = _convert_to_highs_enum( + simplex_dual_edge_weight_strategy, + 'simplex_dual_edge_weight_strategy', + choices={'dantzig': HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_DANTZIG, + 'devex': HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_DEVEX, + 'steepest-devex': HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_CHOOSE, + 'steepest': + HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_STEEPEST_EDGE, + None: None}) + + c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality = lp + + lb, ub = bounds.T.copy() # separate bounds, copy->C-cntgs + # highs_wrapper solves LHS <= A*x <= RHS, not equality constraints + with np.errstate(invalid="ignore"): + lhs_ub = -np.ones_like(b_ub)*np.inf # LHS of UB constraints is -inf + rhs_ub = b_ub # RHS of UB constraints is b_ub + lhs_eq = b_eq # Equality constraint is inequality + rhs_eq = b_eq # constraint with LHS=RHS + lhs = np.concatenate((lhs_ub, lhs_eq)) + rhs = np.concatenate((rhs_ub, rhs_eq)) + + if issparse(A_ub) or issparse(A_eq): + A = vstack((A_ub, A_eq)) + else: + A = np.vstack((A_ub, A_eq)) + A = csc_matrix(A) + + options = { + 'presolve': presolve, + 'sense': HIGHS_OBJECTIVE_SENSE_MINIMIZE, + 'solver': solver, + 'time_limit': time_limit, + 'highs_debug_level': MESSAGE_LEVEL_NONE, + 'dual_feasibility_tolerance': dual_feasibility_tolerance, + 'ipm_optimality_tolerance': ipm_optimality_tolerance, + 'log_to_console': disp, + 'mip_max_nodes': mip_max_nodes, + 'output_flag': disp, + 'primal_feasibility_tolerance': primal_feasibility_tolerance, + 'simplex_dual_edge_weight_strategy': + simplex_dual_edge_weight_strategy_enum, + 'simplex_strategy': HIGHS_SIMPLEX_STRATEGY_DUAL, + 'simplex_crash_strategy': HIGHS_SIMPLEX_CRASH_STRATEGY_OFF, + 'ipm_iteration_limit': maxiter, + 'simplex_iteration_limit': maxiter, + 'mip_rel_gap': mip_rel_gap, + } + options.update(unknown_options) + + # np.inf doesn't work; use very large constant + rhs = _replace_inf(rhs) + lhs = _replace_inf(lhs) + lb = _replace_inf(lb) + ub = _replace_inf(ub) + + if integrality is None or np.sum(integrality) == 0: + integrality = np.empty(0) + else: + integrality = np.array(integrality) + + res = _highs_wrapper(c, A.indptr, A.indices, A.data, lhs, rhs, + lb, ub, integrality.astype(np.uint8), options) + + # HiGHS represents constraints as lhs/rhs, so + # Ax + s = b => Ax = b - s + # and we need to split up s by A_ub 
and A_eq + if 'slack' in res: + slack = res['slack'] + con = np.array(slack[len(b_ub):]) + slack = np.array(slack[:len(b_ub)]) + else: + slack, con = None, None + + # lagrange multipliers for equalities/inequalities and upper/lower bounds + if 'lambda' in res: + lamda = res['lambda'] + marg_ineqlin = np.array(lamda[:len(b_ub)]) + marg_eqlin = np.array(lamda[len(b_ub):]) + marg_upper = np.array(res['marg_bnds'][1, :]) + marg_lower = np.array(res['marg_bnds'][0, :]) + else: + marg_ineqlin, marg_eqlin = None, None + marg_upper, marg_lower = None, None + + # this needs to be updated if we start choosing the solver intelligently + + # Convert to scipy-style status and message + highs_status = res.get('status', None) + highs_message = res.get('message', None) + status, message = _highs_to_scipy_status_message(highs_status, + highs_message) + + x = np.array(res['x']) if 'x' in res else None + sol = {'x': x, + 'slack': slack, + 'con': con, + 'ineqlin': OptimizeResult({ + 'residual': slack, + 'marginals': marg_ineqlin, + }), + 'eqlin': OptimizeResult({ + 'residual': con, + 'marginals': marg_eqlin, + }), + 'lower': OptimizeResult({ + 'residual': None if x is None else x - lb, + 'marginals': marg_lower, + }), + 'upper': OptimizeResult({ + 'residual': None if x is None else ub - x, + 'marginals': marg_upper + }), + 'fun': res.get('fun'), + 'status': status, + 'success': res['status'] == MODEL_STATUS_OPTIMAL, + 'message': message, + 'nit': res.get('simplex_nit', 0) or res.get('ipm_nit', 0), + 'crossover_nit': res.get('crossover_nit'), + } + + if np.any(x) and integrality is not None: + sol.update({ + 'mip_node_count': res.get('mip_node_count', 0), + 'mip_dual_bound': res.get('mip_dual_bound', 0.0), + 'mip_gap': res.get('mip_gap', 0.0), + }) + + return sol diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_linprog_simplex.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_linprog_simplex.py new file mode 100644 index 0000000000000000000000000000000000000000..b13418c369864ca528efe76d9f45c07da2bcf680 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_linprog_simplex.py @@ -0,0 +1,661 @@ +"""Simplex method for linear programming + +The *simplex* method uses a traditional, full-tableau implementation of +Dantzig's simplex algorithm [1]_, [2]_ (*not* the Nelder-Mead simplex). +This algorithm is included for backwards compatibility and educational +purposes. + + .. versionadded:: 0.15.0 + +Warnings +-------- + +The simplex method may encounter numerical difficulties when pivot +values are close to the specified tolerance. If encountered try +remove any redundant constraints, change the pivot strategy to Bland's +rule or increase the tolerance value. + +Alternatively, more robust methods maybe be used. See +:ref:`'interior-point' ` and +:ref:`'revised simplex' `. + +References +---------- +.. [1] Dantzig, George B., Linear programming and extensions. Rand + Corporation Research Study Princeton Univ. Press, Princeton, NJ, + 1963 +.. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to + Mathematical Programming", McGraw-Hill, Chapter 4. +""" + +import numpy as np +from warnings import warn +from ._optimize import OptimizeResult, OptimizeWarning, _check_unknown_options +from ._linprog_util import _postsolve + + +def _pivot_col(T, tol=1e-9, bland=False): + """ + Given a linear programming simplex tableau, determine the column + of the variable to enter the basis. 
+ + Parameters + ---------- + T : 2-D array + A 2-D array representing the simplex tableau, T, corresponding to the + linear programming problem. It should have the form: + + [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]], + [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]], + . + . + . + [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]], + [c[0], c[1], ..., c[n_total], 0]] + + for a Phase 2 problem, or the form: + + [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]], + [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]], + . + . + . + [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]], + [c[0], c[1], ..., c[n_total], 0], + [c'[0], c'[1], ..., c'[n_total], 0]] + + for a Phase 1 problem (a problem in which a basic feasible solution is + sought prior to maximizing the actual objective. ``T`` is modified in + place by ``_solve_simplex``. + tol : float + Elements in the objective row larger than -tol will not be considered + for pivoting. Nominally this value is zero, but numerical issues + cause a tolerance about zero to be necessary. + bland : bool + If True, use Bland's rule for selection of the column (select the + first column with a negative coefficient in the objective row, + regardless of magnitude). + + Returns + ------- + status: bool + True if a suitable pivot column was found, otherwise False. + A return of False indicates that the linear programming simplex + algorithm is complete. + col: int + The index of the column of the pivot element. + If status is False, col will be returned as nan. + """ + ma = np.ma.masked_where(T[-1, :-1] >= -tol, T[-1, :-1], copy=False) + if ma.count() == 0: + return False, np.nan + if bland: + # ma.mask is sometimes 0d + return True, np.nonzero(np.logical_not(np.atleast_1d(ma.mask)))[0][0] + return True, np.ma.nonzero(ma == ma.min())[0][0] + + +def _pivot_row(T, basis, pivcol, phase, tol=1e-9, bland=False): + """ + Given a linear programming simplex tableau, determine the row for the + pivot operation. + + Parameters + ---------- + T : 2-D array + A 2-D array representing the simplex tableau, T, corresponding to the + linear programming problem. It should have the form: + + [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]], + [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]], + . + . + . + [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]], + [c[0], c[1], ..., c[n_total], 0]] + + for a Phase 2 problem, or the form: + + [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]], + [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]], + . + . + . + [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]], + [c[0], c[1], ..., c[n_total], 0], + [c'[0], c'[1], ..., c'[n_total], 0]] + + for a Phase 1 problem (a Problem in which a basic feasible solution is + sought prior to maximizing the actual objective. ``T`` is modified in + place by ``_solve_simplex``. + basis : array + A list of the current basic variables. + pivcol : int + The index of the pivot column. + phase : int + The phase of the simplex algorithm (1 or 2). + tol : float + Elements in the pivot column smaller than tol will not be considered + for pivoting. Nominally this value is zero, but numerical issues + cause a tolerance about zero to be necessary. + bland : bool + If True, use Bland's rule for selection of the row (if more than one + row can be used, choose the one with the lowest variable index). + + Returns + ------- + status: bool + True if a suitable pivot row was found, otherwise False. A return + of False indicates that the linear programming problem is unbounded. + row: int + The index of the row of the pivot element. 
If status is False, row + will be returned as nan. + """ + if phase == 1: + k = 2 + else: + k = 1 + ma = np.ma.masked_where(T[:-k, pivcol] <= tol, T[:-k, pivcol], copy=False) + if ma.count() == 0: + return False, np.nan + mb = np.ma.masked_where(T[:-k, pivcol] <= tol, T[:-k, -1], copy=False) + q = mb / ma + min_rows = np.ma.nonzero(q == q.min())[0] + if bland: + return True, min_rows[np.argmin(np.take(basis, min_rows))] + return True, min_rows[0] + + +def _apply_pivot(T, basis, pivrow, pivcol, tol=1e-9): + """ + Pivot the simplex tableau inplace on the element given by (pivrow, pivol). + The entering variable corresponds to the column given by pivcol forcing + the variable basis[pivrow] to leave the basis. + + Parameters + ---------- + T : 2-D array + A 2-D array representing the simplex tableau, T, corresponding to the + linear programming problem. It should have the form: + + [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]], + [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]], + . + . + . + [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]], + [c[0], c[1], ..., c[n_total], 0]] + + for a Phase 2 problem, or the form: + + [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]], + [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]], + . + . + . + [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]], + [c[0], c[1], ..., c[n_total], 0], + [c'[0], c'[1], ..., c'[n_total], 0]] + + for a Phase 1 problem (a problem in which a basic feasible solution is + sought prior to maximizing the actual objective. ``T`` is modified in + place by ``_solve_simplex``. + basis : 1-D array + An array of the indices of the basic variables, such that basis[i] + contains the column corresponding to the basic variable for row i. + Basis is modified in place by _apply_pivot. + pivrow : int + Row index of the pivot. + pivcol : int + Column index of the pivot. + """ + basis[pivrow] = pivcol + pivval = T[pivrow, pivcol] + T[pivrow] = T[pivrow] / pivval + for irow in range(T.shape[0]): + if irow != pivrow: + T[irow] = T[irow] - T[pivrow] * T[irow, pivcol] + + # The selected pivot should never lead to a pivot value less than the tol. + if np.isclose(pivval, tol, atol=0, rtol=1e4): + message = ( + f"The pivot operation produces a pivot value of:{pivval: .1e}, " + "which is only slightly greater than the specified " + f"tolerance{tol: .1e}. This may lead to issues regarding the " + "numerical stability of the simplex method. " + "Removing redundant constraints, changing the pivot strategy " + "via Bland's rule or increasing the tolerance may " + "help reduce the issue.") + warn(message, OptimizeWarning, stacklevel=5) + + +def _solve_simplex(T, n, basis, callback, postsolve_args, + maxiter=1000, tol=1e-9, phase=2, bland=False, nit0=0, + ): + """ + Solve a linear programming problem in "standard form" using the Simplex + Method. Linear Programming is intended to solve the following problem form: + + Minimize:: + + c @ x + + Subject to:: + + A @ x == b + x >= 0 + + Parameters + ---------- + T : 2-D array + A 2-D array representing the simplex tableau, T, corresponding to the + linear programming problem. It should have the form: + + [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]], + [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]], + . + . + . + [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]], + [c[0], c[1], ..., c[n_total], 0]] + + for a Phase 2 problem, or the form: + + [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]], + [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]], + . + . + . 
+ [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]], + [c[0], c[1], ..., c[n_total], 0], + [c'[0], c'[1], ..., c'[n_total], 0]] + + for a Phase 1 problem (a problem in which a basic feasible solution is + sought prior to maximizing the actual objective. ``T`` is modified in + place by ``_solve_simplex``. + n : int + The number of true variables in the problem. + basis : 1-D array + An array of the indices of the basic variables, such that basis[i] + contains the column corresponding to the basic variable for row i. + Basis is modified in place by _solve_simplex + callback : callable, optional + If a callback function is provided, it will be called within each + iteration of the algorithm. The callback must accept a + `scipy.optimize.OptimizeResult` consisting of the following fields: + + x : 1-D array + Current solution vector + fun : float + Current value of the objective function + success : bool + True only when a phase has completed successfully. This + will be False for most iterations. + slack : 1-D array + The values of the slack variables. Each slack variable + corresponds to an inequality constraint. If the slack is zero, + the corresponding constraint is active. + con : 1-D array + The (nominally zero) residuals of the equality constraints, + that is, ``b - A_eq @ x`` + phase : int + The phase of the optimization being executed. In phase 1 a basic + feasible solution is sought and the T has an additional row + representing an alternate objective function. + status : int + An integer representing the exit status of the optimization:: + + 0 : Optimization terminated successfully + 1 : Iteration limit reached + 2 : Problem appears to be infeasible + 3 : Problem appears to be unbounded + 4 : Serious numerical difficulties encountered + + nit : int + The number of iterations performed. + message : str + A string descriptor of the exit status of the optimization. + postsolve_args : tuple + Data needed by _postsolve to convert the solution to the standard-form + problem into the solution to the original problem. + maxiter : int + The maximum number of iterations to perform before aborting the + optimization. + tol : float + The tolerance which determines when a solution is "close enough" to + zero in Phase 1 to be considered a basic feasible solution or close + enough to positive to serve as an optimal solution. + phase : int + The phase of the optimization being executed. In phase 1 a basic + feasible solution is sought and the T has an additional row + representing an alternate objective function. + bland : bool + If True, choose pivots using Bland's rule [3]_. In problems which + fail to converge due to cycling, using Bland's rule can provide + convergence at the expense of a less optimal path about the simplex. + nit0 : int + The initial iteration number used to keep an accurate iteration total + in a two-phase problem. + + Returns + ------- + nit : int + The number of iterations. Used to keep an accurate iteration total + in the two-phase problem. 
+ status : int + An integer representing the exit status of the optimization:: + + 0 : Optimization terminated successfully + 1 : Iteration limit reached + 2 : Problem appears to be infeasible + 3 : Problem appears to be unbounded + 4 : Serious numerical difficulties encountered + + """ + nit = nit0 + status = 0 + message = '' + complete = False + + if phase == 1: + m = T.shape[1]-2 + elif phase == 2: + m = T.shape[1]-1 + else: + raise ValueError("Argument 'phase' to _solve_simplex must be 1 or 2") + + if phase == 2: + # Check if any artificial variables are still in the basis. + # If yes, check if any coefficients from this row and a column + # corresponding to one of the non-artificial variable is non-zero. + # If found, pivot at this term. If not, start phase 2. + # Do this for all artificial variables in the basis. + # Ref: "An Introduction to Linear Programming and Game Theory" + # by Paul R. Thie, Gerard E. Keough, 3rd Ed, + # Chapter 3.7 Redundant Systems (pag 102) + for pivrow in [row for row in range(basis.size) + if basis[row] > T.shape[1] - 2]: + non_zero_row = [col for col in range(T.shape[1] - 1) + if abs(T[pivrow, col]) > tol] + if len(non_zero_row) > 0: + pivcol = non_zero_row[0] + _apply_pivot(T, basis, pivrow, pivcol, tol) + nit += 1 + + if len(basis[:m]) == 0: + solution = np.empty(T.shape[1] - 1, dtype=np.float64) + else: + solution = np.empty(max(T.shape[1] - 1, max(basis[:m]) + 1), + dtype=np.float64) + + while not complete: + # Find the pivot column + pivcol_found, pivcol = _pivot_col(T, tol, bland) + if not pivcol_found: + pivcol = np.nan + pivrow = np.nan + status = 0 + complete = True + else: + # Find the pivot row + pivrow_found, pivrow = _pivot_row(T, basis, pivcol, phase, tol, bland) + if not pivrow_found: + status = 3 + complete = True + + if callback is not None: + solution[:] = 0 + solution[basis[:n]] = T[:n, -1] + x = solution[:m] + x, fun, slack, con = _postsolve( + x, postsolve_args + ) + res = OptimizeResult({ + 'x': x, + 'fun': fun, + 'slack': slack, + 'con': con, + 'status': status, + 'message': message, + 'nit': nit, + 'success': status == 0 and complete, + 'phase': phase, + 'complete': complete, + }) + callback(res) + + if not complete: + if nit >= maxiter: + # Iteration limit exceeded + status = 1 + complete = True + else: + _apply_pivot(T, basis, pivrow, pivcol, tol) + nit += 1 + return nit, status + + +def _linprog_simplex(c, c0, A, b, callback, postsolve_args, + maxiter=1000, tol=1e-9, disp=False, bland=False, + **unknown_options): + """ + Minimize a linear objective function subject to linear equality and + non-negativity constraints using the two phase simplex method. + Linear programming is intended to solve problems of the following form: + + Minimize:: + + c @ x + + Subject to:: + + A @ x == b + x >= 0 + + User-facing documentation is in _linprog_doc.py. + + Parameters + ---------- + c : 1-D array + Coefficients of the linear objective function to be minimized. + c0 : float + Constant term in objective function due to fixed (and eliminated) + variables. (Purely for display.) + A : 2-D array + 2-D array such that ``A @ x``, gives the values of the equality + constraints at ``x``. + b : 1-D array + 1-D array of values representing the right hand side of each equality + constraint (row) in ``A``. + callback : callable, optional + If a callback function is provided, it will be called within each + iteration of the algorithm. 
The callback function must accept a single + `scipy.optimize.OptimizeResult` consisting of the following fields: + + x : 1-D array + Current solution vector + fun : float + Current value of the objective function + success : bool + True when an algorithm has completed successfully. + slack : 1-D array + The values of the slack variables. Each slack variable + corresponds to an inequality constraint. If the slack is zero, + the corresponding constraint is active. + con : 1-D array + The (nominally zero) residuals of the equality constraints, + that is, ``b - A_eq @ x`` + phase : int + The phase of the algorithm being executed. + status : int + An integer representing the status of the optimization:: + + 0 : Algorithm proceeding nominally + 1 : Iteration limit reached + 2 : Problem appears to be infeasible + 3 : Problem appears to be unbounded + 4 : Serious numerical difficulties encountered + nit : int + The number of iterations performed. + message : str + A string descriptor of the exit status of the optimization. + postsolve_args : tuple + Data needed by _postsolve to convert the solution to the standard-form + problem into the solution to the original problem. + + Options + ------- + maxiter : int + The maximum number of iterations to perform. + disp : bool + If True, print exit status message to sys.stdout + tol : float + The tolerance which determines when a solution is "close enough" to + zero in Phase 1 to be considered a basic feasible solution or close + enough to positive to serve as an optimal solution. + bland : bool + If True, use Bland's anti-cycling rule [3]_ to choose pivots to + prevent cycling. If False, choose pivots which should lead to a + converged solution more quickly. The latter method is subject to + cycling (non-convergence) in rare instances. + unknown_options : dict + Optional arguments not used by this particular solver. If + `unknown_options` is non-empty a warning is issued listing all + unused options. + + Returns + ------- + x : 1-D array + Solution vector. + status : int + An integer representing the exit status of the optimization:: + + 0 : Optimization terminated successfully + 1 : Iteration limit reached + 2 : Problem appears to be infeasible + 3 : Problem appears to be unbounded + 4 : Serious numerical difficulties encountered + + message : str + A string descriptor of the exit status of the optimization. + iteration : int + The number of iterations taken to solve the problem. + + References + ---------- + .. [1] Dantzig, George B., Linear programming and extensions. Rand + Corporation Research Study Princeton Univ. Press, Princeton, NJ, + 1963 + .. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to + Mathematical Programming", McGraw-Hill, Chapter 4. + .. [3] Bland, Robert G. New finite pivoting rules for the simplex method. + Mathematics of Operations Research (2), 1977: pp. 103-107. + + + Notes + ----- + The expected problem formulation differs between the top level ``linprog`` + module and the method specific solvers. The method specific solvers expect a + problem in standard form: + + Minimize:: + + c @ x + + Subject to:: + + A @ x == b + x >= 0 + + Whereas the top level ``linprog`` module expects a problem of form: + + Minimize:: + + c @ x + + Subject to:: + + A_ub @ x <= b_ub + A_eq @ x == b_eq + lb <= x <= ub + + where ``lb = 0`` and ``ub = None`` unless set in ``bounds``. 
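The reduction can be illustrated by the following editorial sketch, which handles inequality rows only (bounds and free variables, discussed next, are handled by the real ``_linprog_util`` conversion): each inequality gains a non-negative slack variable so that only equality constraints and ``x >= 0`` remain::

    import numpy as np

    def to_standard_form(c, A_ub, b_ub):
        """Append one slack variable per inequality: A_ub @ x + s == b_ub, s >= 0."""
        m, n = A_ub.shape
        A = np.hstack([A_ub, np.eye(m)])           # [A_ub | I]
        c_std = np.concatenate([c, np.zeros(m)])   # slack variables cost nothing
        return c_std, A, b_ub

    c = np.array([-1.0, 4.0])
    A_ub = np.array([[-3.0, 1.0], [1.0, 2.0]])
    b_ub = np.array([6.0, 4.0])
    c_std, A, b = to_standard_form(c, A_ub, b_ub)
    # The reduced problem reads: minimize c_std @ z subject to A @ z == b, z >= 0.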
+ + The original problem contains equality, upper-bound and variable constraints + whereas the method specific solver requires equality constraints and + variable non-negativity. + + ``linprog`` module converts the original problem to standard form by + converting the simple bounds to upper bound constraints, introducing + non-negative slack variables for inequality constraints, and expressing + unbounded variables as the difference between two non-negative variables. + """ + _check_unknown_options(unknown_options) + + status = 0 + messages = {0: "Optimization terminated successfully.", + 1: "Iteration limit reached.", + 2: "Optimization failed. Unable to find a feasible" + " starting point.", + 3: "Optimization failed. The problem appears to be unbounded.", + 4: "Optimization failed. Singular matrix encountered."} + + n, m = A.shape + + # All constraints must have b >= 0. + is_negative_constraint = np.less(b, 0) + A[is_negative_constraint] *= -1 + b[is_negative_constraint] *= -1 + + # As all constraints are equality constraints the artificial variables + # will also be basic variables. + av = np.arange(n) + m + basis = av.copy() + + # Format the phase one tableau by adding artificial variables and stacking + # the constraints, the objective row and pseudo-objective row. + row_constraints = np.hstack((A, np.eye(n), b[:, np.newaxis])) + row_objective = np.hstack((c, np.zeros(n), c0)) + row_pseudo_objective = -row_constraints.sum(axis=0) + row_pseudo_objective[av] = 0 + T = np.vstack((row_constraints, row_objective, row_pseudo_objective)) + + nit1, status = _solve_simplex(T, n, basis, callback=callback, + postsolve_args=postsolve_args, + maxiter=maxiter, tol=tol, phase=1, + bland=bland + ) + # if pseudo objective is zero, remove the last row from the tableau and + # proceed to phase 2 + nit2 = nit1 + if abs(T[-1, -1]) < tol: + # Remove the pseudo-objective row from the tableau + T = T[:-1, :] + # Remove the artificial variable columns from the tableau + T = np.delete(T, av, 1) + else: + # Failure to find a feasible starting point + status = 2 + messages[status] = ( + "Phase 1 of the simplex method failed to find a feasible " + "solution. The pseudo-objective function evaluates to {0:.1e} " + "which exceeds the required tolerance of {1} for a solution to be " + "considered 'close enough' to zero to be a basic solution. " + "Consider increasing the tolerance to be greater than {0:.1e}. 
" + "If this tolerance is unacceptably large the problem may be " + "infeasible.".format(abs(T[-1, -1]), tol) + ) + + if status == 0: + # Phase 2 + nit2, status = _solve_simplex(T, n, basis, callback=callback, + postsolve_args=postsolve_args, + maxiter=maxiter, tol=tol, phase=2, + bland=bland, nit0=nit1 + ) + + solution = np.zeros(n + m) + solution[basis[:n]] = T[:n, -1] + x = solution[:m] + + return x, status, messages[status], int(nit2) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_linprog_util.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_linprog_util.py new file mode 100644 index 0000000000000000000000000000000000000000..3d25cee4d9ce6b1c5a40cc474d97ec13474ebafc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_linprog_util.py @@ -0,0 +1,1522 @@ +""" +Method agnostic utility functions for linear programming +""" + +import numpy as np +import scipy.sparse as sps +from warnings import warn +from ._optimize import OptimizeWarning +from scipy.optimize._remove_redundancy import ( + _remove_redundancy_svd, _remove_redundancy_pivot_sparse, + _remove_redundancy_pivot_dense, _remove_redundancy_id + ) +from collections import namedtuple + +_LPProblem = namedtuple('_LPProblem', + 'c A_ub b_ub A_eq b_eq bounds x0 integrality') +_LPProblem.__new__.__defaults__ = (None,) * 7 # make c the only required arg +_LPProblem.__doc__ = \ + """ Represents a linear-programming problem. + + Attributes + ---------- + c : 1D array + The coefficients of the linear objective function to be minimized. + A_ub : 2D array, optional + The inequality constraint matrix. Each row of ``A_ub`` specifies the + coefficients of a linear inequality constraint on ``x``. + b_ub : 1D array, optional + The inequality constraint vector. Each element represents an + upper bound on the corresponding value of ``A_ub @ x``. + A_eq : 2D array, optional + The equality constraint matrix. Each row of ``A_eq`` specifies the + coefficients of a linear equality constraint on ``x``. + b_eq : 1D array, optional + The equality constraint vector. Each element of ``A_eq @ x`` must equal + the corresponding element of ``b_eq``. + bounds : various valid formats, optional + The bounds of ``x``, as ``min`` and ``max`` pairs. + If bounds are specified for all N variables separately, valid formats + are: + * a 2D array (N x 2); + * a sequence of N sequences, each with 2 values. + If all variables have the same bounds, the bounds can be specified as + a 1-D or 2-D array or sequence with 2 scalar values. + If all variables have a lower bound of 0 and no upper bound, the bounds + parameter can be omitted (or given as None). + Absent lower and/or upper bounds can be specified as -numpy.inf (no + lower bound), numpy.inf (no upper bound) or None (both). + x0 : 1D array, optional + Guess values of the decision variables, which will be refined by + the optimization algorithm. This argument is currently used only by the + 'revised simplex' method, and can only be used if `x0` represents a + basic feasible solution. + integrality : 1-D array or int, optional + Indicates the type of integrality constraint on each decision variable. + + ``0`` : Continuous variable; no integrality constraint. + + ``1`` : Integer variable; decision variable must be an integer + within `bounds`. + + ``2`` : Semi-continuous variable; decision variable must be within + `bounds` or take value ``0``. + + ``3`` : Semi-integer variable; decision variable must be an integer + within `bounds` or take value ``0``. 
+ + By default, all variables are continuous. + + For mixed integrality constraints, supply an array of shape `c.shape`. + To infer a constraint on each decision variable from shorter inputs, + the argument will be broadcasted to `c.shape` using `np.broadcast_to`. + + This argument is currently used only by the ``'highs'`` method and + ignored otherwise. + + Notes + ----- + This namedtuple supports 2 ways of initialization: + >>> lp1 = _LPProblem(c=[-1, 4], A_ub=[[-3, 1], [1, 2]], b_ub=[6, 4]) + >>> lp2 = _LPProblem([-1, 4], [[-3, 1], [1, 2]], [6, 4]) + + Note that only ``c`` is a required argument here, whereas all other arguments + ``A_ub``, ``b_ub``, ``A_eq``, ``b_eq``, ``bounds``, ``x0`` are optional with + default values of None. + For example, ``A_eq`` and ``b_eq`` can be set without ``A_ub`` or ``b_ub``: + >>> lp3 = _LPProblem(c=[-1, 4], A_eq=[[2, 1]], b_eq=[10]) + """ + + +def _check_sparse_inputs(options, meth, A_ub, A_eq): + """ + Check the provided ``A_ub`` and ``A_eq`` matrices conform to the specified + optional sparsity variables. + + Parameters + ---------- + A_ub : 2-D array, optional + 2-D array such that ``A_ub @ x`` gives the values of the upper-bound + inequality constraints at ``x``. + A_eq : 2-D array, optional + 2-D array such that ``A_eq @ x`` gives the values of the equality + constraints at ``x``. + options : dict + A dictionary of solver options. All methods accept the following + generic options: + + maxiter : int + Maximum number of iterations to perform. + disp : bool + Set to True to print convergence messages. + + For method-specific options, see :func:`show_options('linprog')`. + method : str, optional + The algorithm used to solve the standard form problem. + + Returns + ------- + A_ub : 2-D array, optional + 2-D array such that ``A_ub @ x`` gives the values of the upper-bound + inequality constraints at ``x``. + A_eq : 2-D array, optional + 2-D array such that ``A_eq @ x`` gives the values of the equality + constraints at ``x``. + options : dict + A dictionary of solver options. All methods accept the following + generic options: + + maxiter : int + Maximum number of iterations to perform. + disp : bool + Set to True to print convergence messages. + + For method-specific options, see :func:`show_options('linprog')`. + """ + # This is an undocumented option for unit testing sparse presolve + _sparse_presolve = options.pop('_sparse_presolve', False) + if _sparse_presolve and A_eq is not None: + A_eq = sps.coo_matrix(A_eq) + if _sparse_presolve and A_ub is not None: + A_ub = sps.coo_matrix(A_ub) + + sparse_constraint = sps.issparse(A_eq) or sps.issparse(A_ub) + + preferred_methods = {"highs", "highs-ds", "highs-ipm"} + dense_methods = {"simplex", "revised simplex"} + if meth in dense_methods and sparse_constraint: + raise ValueError(f"Method '{meth}' does not support sparse " + "constraint matrices. Please consider using one of " + f"{preferred_methods}.") + + sparse = options.get('sparse', False) + if not sparse and sparse_constraint and meth == 'interior-point': + options['sparse'] = True + warn("Sparse constraint matrix detected; setting 'sparse':True.", + OptimizeWarning, stacklevel=4) + return options, A_ub, A_eq + + +def _format_A_constraints(A, n_x, sparse_lhs=False): + """Format the left hand side of the constraints to a 2-D array + + Parameters + ---------- + A : 2-D array + 2-D array such that ``A @ x`` gives the values of the upper-bound + (in)equality constraints at ``x``. + n_x : int + The number of variables in the linear programming problem. 
+ sparse_lhs : bool + Whether either of `A_ub` or `A_eq` are sparse. If true return a + coo_matrix instead of a numpy array. + + Returns + ------- + np.ndarray or sparse.coo_matrix + 2-D array such that ``A @ x`` gives the values of the upper-bound + (in)equality constraints at ``x``. + + """ + if sparse_lhs: + return sps.coo_matrix( + (0, n_x) if A is None else A, dtype=float, copy=True + ) + elif A is None: + return np.zeros((0, n_x), dtype=float) + else: + return np.array(A, dtype=float, copy=True) + + +def _format_b_constraints(b): + """Format the upper bounds of the constraints to a 1-D array + + Parameters + ---------- + b : 1-D array + 1-D array of values representing the upper-bound of each (in)equality + constraint (row) in ``A``. + + Returns + ------- + 1-D np.array + 1-D array of values representing the upper-bound of each (in)equality + constraint (row) in ``A``. + + """ + if b is None: + return np.array([], dtype=float) + b = np.array(b, dtype=float, copy=True).squeeze() + return b if b.size != 1 else b.reshape(-1) + + +def _clean_inputs(lp): + """ + Given user inputs for a linear programming problem, return the + objective vector, upper bound constraints, equality constraints, + and simple bounds in a preferred format. + + Parameters + ---------- + lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields: + + c : 1D array + The coefficients of the linear objective function to be minimized. + A_ub : 2D array, optional + The inequality constraint matrix. Each row of ``A_ub`` specifies the + coefficients of a linear inequality constraint on ``x``. + b_ub : 1D array, optional + The inequality constraint vector. Each element represents an + upper bound on the corresponding value of ``A_ub @ x``. + A_eq : 2D array, optional + The equality constraint matrix. Each row of ``A_eq`` specifies the + coefficients of a linear equality constraint on ``x``. + b_eq : 1D array, optional + The equality constraint vector. Each element of ``A_eq @ x`` must equal + the corresponding element of ``b_eq``. + bounds : various valid formats, optional + The bounds of ``x``, as ``min`` and ``max`` pairs. + If bounds are specified for all N variables separately, valid formats are: + * a 2D array (2 x N or N x 2); + * a sequence of N sequences, each with 2 values. + If all variables have the same bounds, a single pair of values can + be specified. Valid formats are: + * a sequence with 2 scalar values; + * a sequence with a single element containing 2 scalar values. + If all variables have a lower bound of 0 and no upper bound, the bounds + parameter can be omitted (or given as None). + x0 : 1D array, optional + Guess values of the decision variables, which will be refined by + the optimization algorithm. This argument is currently used only by the + 'revised simplex' method, and can only be used if `x0` represents a + basic feasible solution. + + Returns + ------- + lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields: + + c : 1D array + The coefficients of the linear objective function to be minimized. + A_ub : 2D array, optional + The inequality constraint matrix. Each row of ``A_ub`` specifies the + coefficients of a linear inequality constraint on ``x``. + b_ub : 1D array, optional + The inequality constraint vector. Each element represents an + upper bound on the corresponding value of ``A_ub @ x``. + A_eq : 2D array, optional + The equality constraint matrix. 
Each row of ``A_eq`` specifies the + coefficients of a linear equality constraint on ``x``. + b_eq : 1D array, optional + The equality constraint vector. Each element of ``A_eq @ x`` must equal + the corresponding element of ``b_eq``. + bounds : 2D array + The bounds of ``x``, as ``min`` and ``max`` pairs, one for each of the N + elements of ``x``. The N x 2 array contains lower bounds in the first + column and upper bounds in the 2nd. Unbounded variables have lower + bound -np.inf and/or upper bound np.inf. + x0 : 1D array, optional + Guess values of the decision variables, which will be refined by + the optimization algorithm. This argument is currently used only by the + 'revised simplex' method, and can only be used if `x0` represents a + basic feasible solution. + + """ + c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality = lp + + if c is None: + raise TypeError + + try: + c = np.array(c, dtype=np.float64, copy=True).squeeze() + except ValueError as e: + raise TypeError( + "Invalid input for linprog: c must be a 1-D array of numerical " + "coefficients") from e + else: + # If c is a single value, convert it to a 1-D array. + if c.size == 1: + c = c.reshape(-1) + + n_x = len(c) + if n_x == 0 or len(c.shape) != 1: + raise ValueError( + "Invalid input for linprog: c must be a 1-D array and must " + "not have more than one non-singleton dimension") + if not np.isfinite(c).all(): + raise ValueError( + "Invalid input for linprog: c must not contain values " + "inf, nan, or None") + + sparse_lhs = sps.issparse(A_eq) or sps.issparse(A_ub) + try: + A_ub = _format_A_constraints(A_ub, n_x, sparse_lhs=sparse_lhs) + except ValueError as e: + raise TypeError( + "Invalid input for linprog: A_ub must be a 2-D array " + "of numerical values") from e + else: + n_ub = A_ub.shape[0] + if len(A_ub.shape) != 2 or A_ub.shape[1] != n_x: + raise ValueError( + "Invalid input for linprog: A_ub must have exactly two " + "dimensions, and the number of columns in A_ub must be " + "equal to the size of c") + if (sps.issparse(A_ub) and not np.isfinite(A_ub.data).all() + or not sps.issparse(A_ub) and not np.isfinite(A_ub).all()): + raise ValueError( + "Invalid input for linprog: A_ub must not contain values " + "inf, nan, or None") + + try: + b_ub = _format_b_constraints(b_ub) + except ValueError as e: + raise TypeError( + "Invalid input for linprog: b_ub must be a 1-D array of " + "numerical values, each representing the upper bound of an " + "inequality constraint (row) in A_ub") from e + else: + if b_ub.shape != (n_ub,): + raise ValueError( + "Invalid input for linprog: b_ub must be a 1-D array; b_ub " + "must not have more than one non-singleton dimension and " + "the number of rows in A_ub must equal the number of values " + "in b_ub") + if not np.isfinite(b_ub).all(): + raise ValueError( + "Invalid input for linprog: b_ub must not contain values " + "inf, nan, or None") + + try: + A_eq = _format_A_constraints(A_eq, n_x, sparse_lhs=sparse_lhs) + except ValueError as e: + raise TypeError( + "Invalid input for linprog: A_eq must be a 2-D array " + "of numerical values") from e + else: + n_eq = A_eq.shape[0] + if len(A_eq.shape) != 2 or A_eq.shape[1] != n_x: + raise ValueError( + "Invalid input for linprog: A_eq must have exactly two " + "dimensions, and the number of columns in A_eq must be " + "equal to the size of c") + + if (sps.issparse(A_eq) and not np.isfinite(A_eq.data).all() + or not sps.issparse(A_eq) and not np.isfinite(A_eq).all()): + raise ValueError( + "Invalid input for linprog: A_eq must not contain 
values " + "inf, nan, or None") + + try: + b_eq = _format_b_constraints(b_eq) + except ValueError as e: + raise TypeError( + "Invalid input for linprog: b_eq must be a dense, 1-D array of " + "numerical values, each representing the right hand side of an " + "equality constraint (row) in A_eq") from e + else: + if b_eq.shape != (n_eq,): + raise ValueError( + "Invalid input for linprog: b_eq must be a 1-D array; b_eq " + "must not have more than one non-singleton dimension and " + "the number of rows in A_eq must equal the number of values " + "in b_eq") + if not np.isfinite(b_eq).all(): + raise ValueError( + "Invalid input for linprog: b_eq must not contain values " + "inf, nan, or None") + + # x0 gives a (optional) starting solution to the solver. If x0 is None, + # skip the checks. Initial solution will be generated automatically. + if x0 is not None: + try: + x0 = np.array(x0, dtype=float, copy=True).squeeze() + except ValueError as e: + raise TypeError( + "Invalid input for linprog: x0 must be a 1-D array of " + "numerical coefficients") from e + if x0.ndim == 0: + x0 = x0.reshape(-1) + if len(x0) == 0 or x0.ndim != 1: + raise ValueError( + "Invalid input for linprog: x0 should be a 1-D array; it " + "must not have more than one non-singleton dimension") + if not x0.size == c.size: + raise ValueError( + "Invalid input for linprog: x0 and c should contain the " + "same number of elements") + if not np.isfinite(x0).all(): + raise ValueError( + "Invalid input for linprog: x0 must not contain values " + "inf, nan, or None") + + # Bounds can be one of these formats: + # (1) a 2-D array or sequence, with shape N x 2 + # (2) a 1-D or 2-D sequence or array with 2 scalars + # (3) None (or an empty sequence or array) + # Unspecified bounds can be represented by None or (-)np.inf. + # All formats are converted into a N x 2 np.array with (-)np.inf where + # bounds are unspecified. + + # Prepare clean bounds array + bounds_clean = np.zeros((n_x, 2), dtype=float) + + # Convert to a numpy array. + # np.array(..,dtype=float) raises an error if dimensions are inconsistent + # or if there are invalid data types in bounds. Just add a linprog prefix + # to the error and re-raise. + # Creating at least a 2-D array simplifies the cases to distinguish below. 
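+    # A few illustrative conversions (assuming, for the last example, n_x == 2):
+    #   bounds=None             -> every row of the result is [0., inf]
+    #   bounds=(1, None)        -> every row is [1., inf]   (None -> nan -> inf)
+    #   bounds=[(0, 5), (2, 3)] -> [[0., 5.], [2., 3.]]     (already N x 2, kept as is)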
+ if bounds is None or np.array_equal(bounds, []) or np.array_equal(bounds, [[]]): + bounds = (0, np.inf) + try: + bounds_conv = np.atleast_2d(np.array(bounds, dtype=float)) + except ValueError as e: + raise ValueError( + "Invalid input for linprog: unable to interpret bounds, " + "check values and dimensions: " + e.args[0]) from e + except TypeError as e: + raise TypeError( + "Invalid input for linprog: unable to interpret bounds, " + "check values and dimensions: " + e.args[0]) from e + + # Check bounds options + bsh = bounds_conv.shape + if len(bsh) > 2: + # Do not try to handle multidimensional bounds input + raise ValueError( + "Invalid input for linprog: provide a 2-D array for bounds, " + f"not a {len(bsh):d}-D array.") + elif np.all(bsh == (n_x, 2)): + # Regular N x 2 array + bounds_clean = bounds_conv + elif (np.all(bsh == (2, 1)) or np.all(bsh == (1, 2))): + # 2 values: interpret as overall lower and upper bound + bounds_flat = bounds_conv.flatten() + bounds_clean[:, 0] = bounds_flat[0] + bounds_clean[:, 1] = bounds_flat[1] + elif np.all(bsh == (2, n_x)): + # Reject a 2 x N array + raise ValueError( + f"Invalid input for linprog: provide a {n_x:d} x 2 array for bounds, " + f"not a 2 x {n_x:d} array.") + else: + raise ValueError( + "Invalid input for linprog: unable to interpret bounds with this " + f"dimension tuple: {bsh}.") + + # The process above creates nan-s where the input specified None + # Convert the nan-s in the 1st column to -np.inf and in the 2nd column + # to np.inf + i_none = np.isnan(bounds_clean[:, 0]) + bounds_clean[i_none, 0] = -np.inf + i_none = np.isnan(bounds_clean[:, 1]) + bounds_clean[i_none, 1] = np.inf + + return _LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds_clean, x0, integrality) + + +def _presolve(lp, rr, rr_method, tol=1e-9): + """ + Given inputs for a linear programming problem in preferred format, + presolve the problem: identify trivial infeasibilities, redundancies, + and unboundedness, tighten bounds where possible, and eliminate fixed + variables. + + Parameters + ---------- + lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields: + + c : 1D array + The coefficients of the linear objective function to be minimized. + A_ub : 2D array, optional + The inequality constraint matrix. Each row of ``A_ub`` specifies the + coefficients of a linear inequality constraint on ``x``. + b_ub : 1D array, optional + The inequality constraint vector. Each element represents an + upper bound on the corresponding value of ``A_ub @ x``. + A_eq : 2D array, optional + The equality constraint matrix. Each row of ``A_eq`` specifies the + coefficients of a linear equality constraint on ``x``. + b_eq : 1D array, optional + The equality constraint vector. Each element of ``A_eq @ x`` must equal + the corresponding element of ``b_eq``. + bounds : 2D array + The bounds of ``x``, as ``min`` and ``max`` pairs, one for each of the N + elements of ``x``. The N x 2 array contains lower bounds in the first + column and upper bounds in the 2nd. Unbounded variables have lower + bound -np.inf and/or upper bound np.inf. + x0 : 1D array, optional + Guess values of the decision variables, which will be refined by + the optimization algorithm. This argument is currently used only by the + 'revised simplex' method, and can only be used if `x0` represents a + basic feasible solution. + + rr : bool + If ``True`` attempts to eliminate any redundant rows in ``A_eq``. 
+ Set False if ``A_eq`` is known to be of full row rank, or if you are + looking for a potential speedup (at the expense of reliability). + rr_method : string + Method used to identify and remove redundant rows from the + equality constraint matrix after presolve. + tol : float + The tolerance which determines when a solution is "close enough" to + zero in Phase 1 to be considered a basic feasible solution or close + enough to positive to serve as an optimal solution. + + Returns + ------- + lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields: + + c : 1D array + The coefficients of the linear objective function to be minimized. + A_ub : 2D array, optional + The inequality constraint matrix. Each row of ``A_ub`` specifies the + coefficients of a linear inequality constraint on ``x``. + b_ub : 1D array, optional + The inequality constraint vector. Each element represents an + upper bound on the corresponding value of ``A_ub @ x``. + A_eq : 2D array, optional + The equality constraint matrix. Each row of ``A_eq`` specifies the + coefficients of a linear equality constraint on ``x``. + b_eq : 1D array, optional + The equality constraint vector. Each element of ``A_eq @ x`` must equal + the corresponding element of ``b_eq``. + bounds : 2D array + The bounds of ``x``, as ``min`` and ``max`` pairs, possibly tightened. + x0 : 1D array, optional + Guess values of the decision variables, which will be refined by + the optimization algorithm. This argument is currently used only by the + 'revised simplex' method, and can only be used if `x0` represents a + basic feasible solution. + + c0 : 1D array + Constant term in objective function due to fixed (and eliminated) + variables. + x : 1D array + Solution vector (when the solution is trivial and can be determined + in presolve) + revstack: list of functions + the functions in the list reverse the operations of _presolve() + the function signature is x_org = f(x_mod), where x_mod is the result + of a presolve step and x_org the value at the start of the step + (currently, the revstack contains only one function) + complete: bool + Whether the solution is complete (solved or determined to be infeasible + or unbounded in presolve) + status : int + An integer representing the exit status of the optimization:: + + 0 : Optimization terminated successfully + 1 : Iteration limit reached + 2 : Problem appears to be infeasible + 3 : Problem appears to be unbounded + 4 : Serious numerical difficulties encountered + + message : str + A string descriptor of the exit status of the optimization. + + References + ---------- + .. [5] Andersen, Erling D. "Finding all linearly dependent rows in + large-scale linear programming." Optimization Methods and Software + 6.3 (1995): 219-227. + .. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear + programming." Mathematical Programming 71.2 (1995): 221-245. + + """ + # ideas from Reference [5] by Andersen and Andersen + # however, unlike the reference, this is performed before converting + # problem to standard form + # There are a few advantages: + # * artificial variables have not been added, so matrices are smaller + # * bounds have not been converted to constraints yet. (It is better to + # do that after presolve because presolve may adjust the simple bounds.) 
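+    # Illustrative examples of the reductions applied below:
+    #   * a zero row of A_eq paired with a nonzero b_eq entry -> trivially infeasible
+    #   * an equality row such as 2*x[1] == 6                 -> fixes x[1] = 3; row dropped
+    #   * a column of zeros in both A_eq and A_ub             -> the variable is set to
+    #     whichever bound its cost favours (unbounded if that bound is infinite)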
+ # There are many improvements that can be made, namely: + # * implement remaining checks from [5] + # * loop presolve until no additional changes are made + # * implement additional efficiency improvements in redundancy removal [2] + + c, A_ub, b_ub, A_eq, b_eq, bounds, x0, _ = lp + + revstack = [] # record of variables eliminated from problem + # constant term in cost function may be added if variables are eliminated + c0 = 0 + complete = False # complete is True if detected infeasible/unbounded + x = np.zeros(c.shape) # this is solution vector if completed in presolve + + status = 0 # all OK unless determined otherwise + message = "" + + # Lower and upper bounds. Copy to prevent feedback. + lb = bounds[:, 0].copy() + ub = bounds[:, 1].copy() + + m_eq, n = A_eq.shape + m_ub, n = A_ub.shape + + if (rr_method is not None + and rr_method.lower() not in {"svd", "pivot", "id"}): + message = ("'" + str(rr_method) + "' is not a valid option " + "for redundancy removal. Valid options are 'SVD', " + "'pivot', and 'ID'.") + raise ValueError(message) + + if sps.issparse(A_eq): + A_eq = A_eq.tocsr() + A_ub = A_ub.tocsr() + + def where(A): + return A.nonzero() + + vstack = sps.vstack + else: + where = np.where + vstack = np.vstack + + # upper bounds > lower bounds + if np.any(ub < lb) or np.any(lb == np.inf) or np.any(ub == -np.inf): + status = 2 + message = ("The problem is (trivially) infeasible since one " + "or more upper bounds are smaller than the corresponding " + "lower bounds, a lower bound is np.inf or an upper bound " + "is -np.inf.") + complete = True + return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0), + c0, x, revstack, complete, status, message) + + # zero row in equality constraints + zero_row = np.array(np.sum(A_eq != 0, axis=1) == 0).flatten() + if np.any(zero_row): + if np.any( + np.logical_and( + zero_row, + np.abs(b_eq) > tol)): # test_zero_row_1 + # infeasible if RHS is not zero + status = 2 + message = ("The problem is (trivially) infeasible due to a row " + "of zeros in the equality constraint matrix with a " + "nonzero corresponding constraint value.") + complete = True + return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0), + c0, x, revstack, complete, status, message) + else: # test_zero_row_2 + # if RHS is zero, we can eliminate this equation entirely + A_eq = A_eq[np.logical_not(zero_row), :] + b_eq = b_eq[np.logical_not(zero_row)] + + # zero row in inequality constraints + zero_row = np.array(np.sum(A_ub != 0, axis=1) == 0).flatten() + if np.any(zero_row): + if np.any(np.logical_and(zero_row, b_ub < -tol)): # test_zero_row_1 + # infeasible if RHS is less than zero (because LHS is zero) + status = 2 + message = ("The problem is (trivially) infeasible due to a row " + "of zeros in the equality constraint matrix with a " + "nonzero corresponding constraint value.") + complete = True + return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0), + c0, x, revstack, complete, status, message) + else: # test_zero_row_2 + # if LHS is >= 0, we can eliminate this constraint entirely + A_ub = A_ub[np.logical_not(zero_row), :] + b_ub = b_ub[np.logical_not(zero_row)] + + # zero column in (both) constraints + # this indicates that a variable isn't constrained and can be removed + A = vstack((A_eq, A_ub)) + if A.shape[0] > 0: + zero_col = np.array(np.sum(A != 0, axis=0) == 0).flatten() + # variable will be at upper or lower bound, depending on objective + x[np.logical_and(zero_col, c < 0)] = ub[ + np.logical_and(zero_col, c < 0)] + x[np.logical_and(zero_col, c > 0)] = lb[ + 
np.logical_and(zero_col, c > 0)] + if np.any(np.isinf(x)): # if an unconstrained variable has no bound + status = 3 + message = ("If feasible, the problem is (trivially) unbounded " + "due to a zero column in the constraint matrices. If " + "you wish to check whether the problem is infeasible, " + "turn presolve off.") + complete = True + return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0), + c0, x, revstack, complete, status, message) + # variables will equal upper/lower bounds will be removed later + lb[np.logical_and(zero_col, c < 0)] = ub[ + np.logical_and(zero_col, c < 0)] + ub[np.logical_and(zero_col, c > 0)] = lb[ + np.logical_and(zero_col, c > 0)] + + # row singleton in equality constraints + # this fixes a variable and removes the constraint + singleton_row = np.array(np.sum(A_eq != 0, axis=1) == 1).flatten() + rows = where(singleton_row)[0] + cols = where(A_eq[rows, :])[1] + if len(rows) > 0: + for row, col in zip(rows, cols): + val = b_eq[row] / A_eq[row, col] + if not lb[col] - tol <= val <= ub[col] + tol: + # infeasible if fixed value is not within bounds + status = 2 + message = ("The problem is (trivially) infeasible because a " + "singleton row in the equality constraints is " + "inconsistent with the bounds.") + complete = True + return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0), + c0, x, revstack, complete, status, message) + else: + # sets upper and lower bounds at that fixed value - variable + # will be removed later + lb[col] = val + ub[col] = val + A_eq = A_eq[np.logical_not(singleton_row), :] + b_eq = b_eq[np.logical_not(singleton_row)] + + # row singleton in inequality constraints + # this indicates a simple bound and the constraint can be removed + # simple bounds may be adjusted here + # After all of the simple bound information is combined here, get_Abc will + # turn the simple bounds into constraints + singleton_row = np.array(np.sum(A_ub != 0, axis=1) == 1).flatten() + cols = where(A_ub[singleton_row, :])[1] + rows = where(singleton_row)[0] + if len(rows) > 0: + for row, col in zip(rows, cols): + val = b_ub[row] / A_ub[row, col] + if A_ub[row, col] > 0: # upper bound + if val < lb[col] - tol: # infeasible + complete = True + elif val < ub[col]: # new upper bound + ub[col] = val + else: # lower bound + if val > ub[col] + tol: # infeasible + complete = True + elif val > lb[col]: # new lower bound + lb[col] = val + if complete: + status = 2 + message = ("The problem is (trivially) infeasible because a " + "singleton row in the upper bound constraints is " + "inconsistent with the bounds.") + return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0), + c0, x, revstack, complete, status, message) + A_ub = A_ub[np.logical_not(singleton_row), :] + b_ub = b_ub[np.logical_not(singleton_row)] + + # identical bounds indicate that variable can be removed + i_f = np.abs(lb - ub) < tol # indices of "fixed" variables + i_nf = np.logical_not(i_f) # indices of "not fixed" variables + + # test_bounds_equal_but_infeasible + if np.all(i_f): # if bounds define solution, check for consistency + residual = b_eq - A_eq.dot(lb) + slack = b_ub - A_ub.dot(lb) + if ((A_ub.size > 0 and np.any(slack < 0)) or + (A_eq.size > 0 and not np.allclose(residual, 0))): + status = 2 + message = ("The problem is (trivially) infeasible because the " + "bounds fix all variables to values inconsistent with " + "the constraints") + complete = True + return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0), + c0, x, revstack, complete, status, message) + + ub_mod = ub + lb_mod = lb + if 
np.any(i_f): + c0 += c[i_f].dot(lb[i_f]) + b_eq = b_eq - A_eq[:, i_f].dot(lb[i_f]) + b_ub = b_ub - A_ub[:, i_f].dot(lb[i_f]) + c = c[i_nf] + x_undo = lb[i_f] # not x[i_f], x is just zeroes + x = x[i_nf] + # user guess x0 stays separate from presolve solution x + if x0 is not None: + x0 = x0[i_nf] + A_eq = A_eq[:, i_nf] + A_ub = A_ub[:, i_nf] + # modify bounds + lb_mod = lb[i_nf] + ub_mod = ub[i_nf] + + def rev(x_mod): + # Function to restore x: insert x_undo into x_mod. + # When elements have been removed at positions k1, k2, k3, ... + # then these must be replaced at (after) positions k1-1, k2-2, + # k3-3, ... in the modified array to recreate the original + i = np.flatnonzero(i_f) + # Number of variables to restore + N = len(i) + index_offset = np.arange(N) + # Create insert indices + insert_indices = i - index_offset + x_rev = np.insert(x_mod.astype(float), insert_indices, x_undo) + return x_rev + + # Use revstack as a list of functions, currently just this one. + revstack.append(rev) + + # no constraints indicates that problem is trivial + if A_eq.size == 0 and A_ub.size == 0: + b_eq = np.array([]) + b_ub = np.array([]) + # test_empty_constraint_1 + if c.size == 0: + status = 0 + message = ("The solution was determined in presolve as there are " + "no non-trivial constraints.") + elif (np.any(np.logical_and(c < 0, ub_mod == np.inf)) or + np.any(np.logical_and(c > 0, lb_mod == -np.inf))): + # test_no_constraints() + # test_unbounded_no_nontrivial_constraints_1 + # test_unbounded_no_nontrivial_constraints_2 + status = 3 + message = ("The problem is (trivially) unbounded " + "because there are no non-trivial constraints and " + "a) at least one decision variable is unbounded " + "above and its corresponding cost is negative, or " + "b) at least one decision variable is unbounded below " + "and its corresponding cost is positive. ") + else: # test_empty_constraint_2 + status = 0 + message = ("The solution was determined in presolve as there are " + "no non-trivial constraints.") + complete = True + x[c < 0] = ub_mod[c < 0] + x[c > 0] = lb_mod[c > 0] + # where c is zero, set x to a finite bound or zero + x_zero_c = ub_mod[c == 0] + x_zero_c[np.isinf(x_zero_c)] = ub_mod[c == 0][np.isinf(x_zero_c)] + x_zero_c[np.isinf(x_zero_c)] = 0 + x[c == 0] = x_zero_c + # if this is not the last step of presolve, should convert bounds back + # to array and return here + + # Convert modified lb and ub back into N x 2 bounds + bounds = np.hstack((lb_mod[:, np.newaxis], ub_mod[:, np.newaxis])) + + # remove redundant (linearly dependent) rows from equality constraints + n_rows_A = A_eq.shape[0] + redundancy_warning = ("A_eq does not appear to be of full row rank. To " + "improve performance, check the problem formulation " + "for redundant equality constraints.") + if (sps.issparse(A_eq)): + if rr and A_eq.size > 0: # TODO: Fast sparse rank check? + rr_res = _remove_redundancy_pivot_sparse(A_eq, b_eq) + A_eq, b_eq, status, message = rr_res + if A_eq.shape[0] < n_rows_A: + warn(redundancy_warning, OptimizeWarning, stacklevel=1) + if status != 0: + complete = True + return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0), + c0, x, revstack, complete, status, message) + + # This is a wild guess for which redundancy removal algorithm will be + # faster. More testing would be good. 
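+    # Reading of the heuristic below: if at most `small_nullspace` equality rows
+    # appear linearly dependent, the SVD-based routine is tried first; with more
+    # dependent rows, or if the SVD routine fails (status 4), the dense
+    # pivot-based routine is used instead.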
+ small_nullspace = 5 + if rr and A_eq.size > 0: + try: # TODO: use results of first SVD in _remove_redundancy_svd + rank = np.linalg.matrix_rank(A_eq) + # oh well, we'll have to go with _remove_redundancy_pivot_dense + except Exception: + rank = 0 + if rr and A_eq.size > 0 and rank < A_eq.shape[0]: + warn(redundancy_warning, OptimizeWarning, stacklevel=3) + dim_row_nullspace = A_eq.shape[0]-rank + if rr_method is None: + if dim_row_nullspace <= small_nullspace: + rr_res = _remove_redundancy_svd(A_eq, b_eq) + A_eq, b_eq, status, message = rr_res + if dim_row_nullspace > small_nullspace or status == 4: + rr_res = _remove_redundancy_pivot_dense(A_eq, b_eq) + A_eq, b_eq, status, message = rr_res + + else: + rr_method = rr_method.lower() + if rr_method == "svd": + rr_res = _remove_redundancy_svd(A_eq, b_eq) + A_eq, b_eq, status, message = rr_res + elif rr_method == "pivot": + rr_res = _remove_redundancy_pivot_dense(A_eq, b_eq) + A_eq, b_eq, status, message = rr_res + elif rr_method == "id": + rr_res = _remove_redundancy_id(A_eq, b_eq, rank) + A_eq, b_eq, status, message = rr_res + else: # shouldn't get here; option validity checked above + pass + if A_eq.shape[0] < rank: + message = ("Due to numerical issues, redundant equality " + "constraints could not be removed automatically. " + "Try providing your constraint matrices as sparse " + "matrices to activate sparse presolve, try turning " + "off redundancy removal, or try turning off presolve " + "altogether.") + status = 4 + if status != 0: + complete = True + return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0), + c0, x, revstack, complete, status, message) + + +def _parse_linprog(lp, options, meth): + """ + Parse the provided linear programming problem + + ``_parse_linprog`` employs two main steps ``_check_sparse_inputs`` and + ``_clean_inputs``. ``_check_sparse_inputs`` checks for sparsity in the + provided constraints (``A_ub`` and ``A_eq) and if these match the provided + sparsity optional values. + + ``_clean inputs`` checks of the provided inputs. If no violations are + identified the objective vector, upper bound constraints, equality + constraints, and simple bounds are returned in the expected format. + + Parameters + ---------- + lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields: + + c : 1D array + The coefficients of the linear objective function to be minimized. + A_ub : 2D array, optional + The inequality constraint matrix. Each row of ``A_ub`` specifies the + coefficients of a linear inequality constraint on ``x``. + b_ub : 1D array, optional + The inequality constraint vector. Each element represents an + upper bound on the corresponding value of ``A_ub @ x``. + A_eq : 2D array, optional + The equality constraint matrix. Each row of ``A_eq`` specifies the + coefficients of a linear equality constraint on ``x``. + b_eq : 1D array, optional + The equality constraint vector. Each element of ``A_eq @ x`` must equal + the corresponding element of ``b_eq``. + bounds : various valid formats, optional + The bounds of ``x``, as ``min`` and ``max`` pairs. + If bounds are specified for all N variables separately, valid formats are: + * a 2D array (2 x N or N x 2); + * a sequence of N sequences, each with 2 values. + If all variables have the same bounds, a single pair of values can + be specified. Valid formats are: + * a sequence with 2 scalar values; + * a sequence with a single element containing 2 scalar values. 
+ If all variables have a lower bound of 0 and no upper bound, the bounds + parameter can be omitted (or given as None). + x0 : 1D array, optional + Guess values of the decision variables, which will be refined by + the optimization algorithm. This argument is currently used only by the + 'revised simplex' method, and can only be used if `x0` represents a + basic feasible solution. + + options : dict + A dictionary of solver options. All methods accept the following + generic options: + + maxiter : int + Maximum number of iterations to perform. + disp : bool + Set to True to print convergence messages. + + For method-specific options, see :func:`show_options('linprog')`. + + Returns + ------- + lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields: + + c : 1D array + The coefficients of the linear objective function to be minimized. + A_ub : 2D array, optional + The inequality constraint matrix. Each row of ``A_ub`` specifies the + coefficients of a linear inequality constraint on ``x``. + b_ub : 1D array, optional + The inequality constraint vector. Each element represents an + upper bound on the corresponding value of ``A_ub @ x``. + A_eq : 2D array, optional + The equality constraint matrix. Each row of ``A_eq`` specifies the + coefficients of a linear equality constraint on ``x``. + b_eq : 1D array, optional + The equality constraint vector. Each element of ``A_eq @ x`` must equal + the corresponding element of ``b_eq``. + bounds : 2D array + The bounds of ``x``, as ``min`` and ``max`` pairs, one for each of the N + elements of ``x``. The N x 2 array contains lower bounds in the first + column and upper bounds in the 2nd. Unbounded variables have lower + bound -np.inf and/or upper bound np.inf. + x0 : 1D array, optional + Guess values of the decision variables, which will be refined by + the optimization algorithm. This argument is currently used only by the + 'revised simplex' method, and can only be used if `x0` represents a + basic feasible solution. + + options : dict, optional + A dictionary of solver options. All methods accept the following + generic options: + + maxiter : int + Maximum number of iterations to perform. + disp : bool + Set to True to print convergence messages. + + For method-specific options, see :func:`show_options('linprog')`. + + """ + if options is None: + options = {} + + solver_options = {k: v for k, v in options.items()} + solver_options, A_ub, A_eq = _check_sparse_inputs(solver_options, meth, + lp.A_ub, lp.A_eq) + # Convert lists to numpy arrays, etc... + lp = _clean_inputs(lp._replace(A_ub=A_ub, A_eq=A_eq)) + return lp, solver_options + + +def _get_Abc(lp, c0): + """ + Given a linear programming problem of the form: + + Minimize:: + + c @ x + + Subject to:: + + A_ub @ x <= b_ub + A_eq @ x == b_eq + lb <= x <= ub + + where ``lb = 0`` and ``ub = None`` unless set in ``bounds``. + + Return the problem in standard form: + + Minimize:: + + c @ x + + Subject to:: + + A @ x == b + x >= 0 + + by adding slack variables and making variable substitutions as necessary. + + Parameters + ---------- + lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields: + + c : 1D array + The coefficients of the linear objective function to be minimized. + A_ub : 2D array, optional + The inequality constraint matrix. Each row of ``A_ub`` specifies the + coefficients of a linear inequality constraint on ``x``. + b_ub : 1D array, optional + The inequality constraint vector. 
Each element represents an + upper bound on the corresponding value of ``A_ub @ x``. + A_eq : 2D array, optional + The equality constraint matrix. Each row of ``A_eq`` specifies the + coefficients of a linear equality constraint on ``x``. + b_eq : 1D array, optional + The equality constraint vector. Each element of ``A_eq @ x`` must equal + the corresponding element of ``b_eq``. + bounds : 2D array + The bounds of ``x``, lower bounds in the 1st column, upper + bounds in the 2nd column. The bounds are possibly tightened + by the presolve procedure. + x0 : 1D array, optional + Guess values of the decision variables, which will be refined by + the optimization algorithm. This argument is currently used only by the + 'revised simplex' method, and can only be used if `x0` represents a + basic feasible solution. + + c0 : float + Constant term in objective function due to fixed (and eliminated) + variables. + + Returns + ------- + A : 2-D array + 2-D array such that ``A`` @ ``x``, gives the values of the equality + constraints at ``x``. + b : 1-D array + 1-D array of values representing the RHS of each equality constraint + (row) in A (for standard form problem). + c : 1-D array + Coefficients of the linear objective function to be minimized (for + standard form problem). + c0 : float + Constant term in objective function due to fixed (and eliminated) + variables. + x0 : 1-D array + Starting values of the independent variables, which will be refined by + the optimization algorithm + + References + ---------- + .. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear + programming." Athena Scientific 1 (1997): 997. + + """ + c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality = lp + + if sps.issparse(A_eq): + sparse = True + A_eq = sps.csr_matrix(A_eq) + A_ub = sps.csr_matrix(A_ub) + + def hstack(blocks): + return sps.hstack(blocks, format="csr") + + def vstack(blocks): + return sps.vstack(blocks, format="csr") + + zeros = sps.csr_matrix + eye = sps.eye + else: + sparse = False + hstack = np.hstack + vstack = np.vstack + zeros = np.zeros + eye = np.eye + + # Variables lbs and ubs (see below) may be changed, which feeds back into + # bounds, so copy. + bounds = np.array(bounds, copy=True) + + # modify problem such that all variables have only non-negativity bounds + lbs = bounds[:, 0] + ubs = bounds[:, 1] + m_ub, n_ub = A_ub.shape + + lb_none = np.equal(lbs, -np.inf) + ub_none = np.equal(ubs, np.inf) + lb_some = np.logical_not(lb_none) + ub_some = np.logical_not(ub_none) + + # unbounded below: substitute xi = -xi' (unbounded above) + # if -inf <= xi <= ub, then -ub <= -xi <= inf, so swap and invert bounds + l_nolb_someub = np.logical_and(lb_none, ub_some) + i_nolb = np.nonzero(l_nolb_someub)[0] + lbs[l_nolb_someub], ubs[l_nolb_someub] = ( + -ubs[l_nolb_someub], -lbs[l_nolb_someub]) + lb_none = np.equal(lbs, -np.inf) + ub_none = np.equal(ubs, np.inf) + lb_some = np.logical_not(lb_none) + ub_some = np.logical_not(ub_none) + c[i_nolb] *= -1 + if x0 is not None: + x0[i_nolb] *= -1 + if len(i_nolb) > 0: + if A_ub.shape[0] > 0: # sometimes needed for sparse arrays... 
weird + A_ub[:, i_nolb] *= -1 + if A_eq.shape[0] > 0: + A_eq[:, i_nolb] *= -1 + + # upper bound: add inequality constraint + i_newub, = ub_some.nonzero() + ub_newub = ubs[ub_some] + n_bounds = len(i_newub) + if n_bounds > 0: + shape = (n_bounds, A_ub.shape[1]) + if sparse: + idxs = (np.arange(n_bounds), i_newub) + A_ub = vstack((A_ub, sps.csr_matrix((np.ones(n_bounds), idxs), + shape=shape))) + else: + A_ub = vstack((A_ub, np.zeros(shape))) + A_ub[np.arange(m_ub, A_ub.shape[0]), i_newub] = 1 + b_ub = np.concatenate((b_ub, np.zeros(n_bounds))) + b_ub[m_ub:] = ub_newub + + A1 = vstack((A_ub, A_eq)) + b = np.concatenate((b_ub, b_eq)) + c = np.concatenate((c, np.zeros((A_ub.shape[0],)))) + if x0 is not None: + x0 = np.concatenate((x0, np.zeros((A_ub.shape[0],)))) + # unbounded: substitute xi = xi+ + xi- + l_free = np.logical_and(lb_none, ub_none) + i_free = np.nonzero(l_free)[0] + n_free = len(i_free) + c = np.concatenate((c, np.zeros(n_free))) + if x0 is not None: + x0 = np.concatenate((x0, np.zeros(n_free))) + A1 = hstack((A1[:, :n_ub], -A1[:, i_free])) + c[n_ub:n_ub+n_free] = -c[i_free] + if x0 is not None: + i_free_neg = x0[i_free] < 0 + x0[np.arange(n_ub, A1.shape[1])[i_free_neg]] = -x0[i_free[i_free_neg]] + x0[i_free[i_free_neg]] = 0 + + # add slack variables + A2 = vstack([eye(A_ub.shape[0]), zeros((A_eq.shape[0], A_ub.shape[0]))]) + + A = hstack([A1, A2]) + + # lower bound: substitute xi = xi' + lb + # now there is a constant term in objective + i_shift = np.nonzero(lb_some)[0] + lb_shift = lbs[lb_some].astype(float) + c0 += np.sum(lb_shift * c[i_shift]) + if sparse: + b = b.reshape(-1, 1) + A = A.tocsc() + b -= (A[:, i_shift] * sps.diags(lb_shift)).sum(axis=1) + b = b.ravel() + else: + b -= (A[:, i_shift] * lb_shift).sum(axis=1) + if x0 is not None: + x0[i_shift] -= lb_shift + + return A, b, c, c0, x0 + + +def _round_to_power_of_two(x): + """ + Round elements of the array to the nearest power of two. + """ + return 2**np.around(np.log2(x)) + + +def _autoscale(A, b, c, x0): + """ + Scales the problem according to equilibration from [12]. + Also normalizes the right hand side vector by its maximum element. + """ + m, n = A.shape + + C = 1 + R = 1 + + if A.size > 0: + + R = np.max(np.abs(A), axis=1) + if sps.issparse(A): + R = R.toarray().flatten() + R[R == 0] = 1 + R = 1/_round_to_power_of_two(R) + A = sps.diags(R)*A if sps.issparse(A) else A*R.reshape(m, 1) + b = b*R + + C = np.max(np.abs(A), axis=0) + if sps.issparse(A): + C = C.toarray().flatten() + C[C == 0] = 1 + C = 1/_round_to_power_of_two(C) + A = A*sps.diags(C) if sps.issparse(A) else A*C + c = c*C + + b_scale = np.max(np.abs(b)) if b.size > 0 else 1 + if b_scale == 0: + b_scale = 1. + b = b/b_scale + + if x0 is not None: + x0 = x0/b_scale*(1/C) + return A, b, c, x0, C, b_scale + + +def _unscale(x, C, b_scale): + """ + Converts solution to _autoscale problem -> solution to original problem. + """ + + try: + n = len(C) + # fails if sparse or scalar; that's OK. + # this is only needed for original simplex (never sparse) + except TypeError: + n = len(x) + + return x[:n]*b_scale*C + + +def _display_summary(message, status, fun, iteration): + """ + Print the termination summary of the linear program + + Parameters + ---------- + message : str + A string descriptor of the exit status of the optimization. 
+ status : int + An integer representing the exit status of the optimization:: + + 0 : Optimization terminated successfully + 1 : Iteration limit reached + 2 : Problem appears to be infeasible + 3 : Problem appears to be unbounded + 4 : Serious numerical difficulties encountered + + fun : float + Value of the objective function. + iteration : iteration + The number of iterations performed. + """ + print(message) + if status in (0, 1): + print(f" Current function value: {fun: <12.6f}") + print(f" Iterations: {iteration:d}") + + +def _postsolve(x, postsolve_args, complete=False): + """ + Given solution x to presolved, standard form linear program x, add + fixed variables back into the problem and undo the variable substitutions + to get solution to original linear program. Also, calculate the objective + function value, slack in original upper bound constraints, and residuals + in original equality constraints. + + Parameters + ---------- + x : 1-D array + Solution vector to the standard-form problem. + postsolve_args : tuple + Data needed by _postsolve to convert the solution to the standard-form + problem into the solution to the original problem, including: + + lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields: + + c : 1D array + The coefficients of the linear objective function to be minimized. + A_ub : 2D array, optional + The inequality constraint matrix. Each row of ``A_ub`` specifies the + coefficients of a linear inequality constraint on ``x``. + b_ub : 1D array, optional + The inequality constraint vector. Each element represents an + upper bound on the corresponding value of ``A_ub @ x``. + A_eq : 2D array, optional + The equality constraint matrix. Each row of ``A_eq`` specifies the + coefficients of a linear equality constraint on ``x``. + b_eq : 1D array, optional + The equality constraint vector. Each element of ``A_eq @ x`` must equal + the corresponding element of ``b_eq``. + bounds : 2D array + The bounds of ``x``, lower bounds in the 1st column, upper + bounds in the 2nd column. The bounds are possibly tightened + by the presolve procedure. + x0 : 1D array, optional + Guess values of the decision variables, which will be refined by + the optimization algorithm. This argument is currently used only by the + 'revised simplex' method, and can only be used if `x0` represents a + basic feasible solution. 
+ + revstack: list of functions + the functions in the list reverse the operations of _presolve() + the function signature is x_org = f(x_mod), where x_mod is the result + of a presolve step and x_org the value at the start of the step + complete : bool + Whether the solution is was determined in presolve (``True`` if so) + + Returns + ------- + x : 1-D array + Solution vector to original linear programming problem + fun: float + optimal objective value for original problem + slack : 1-D array + The (non-negative) slack in the upper bound constraints, that is, + ``b_ub - A_ub @ x`` + con : 1-D array + The (nominally zero) residuals of the equality constraints, that is, + ``b - A_eq @ x`` + """ + # note that all the inputs are the ORIGINAL, unmodified versions + # no rows, columns have been removed + + c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality = postsolve_args[0] + revstack, C, b_scale = postsolve_args[1:] + + x = _unscale(x, C, b_scale) + + # Undo variable substitutions of _get_Abc() + # if "complete", problem was solved in presolve; don't do anything here + n_x = bounds.shape[0] + if not complete and bounds is not None: # bounds are never none, probably + n_unbounded = 0 + for i, bi in enumerate(bounds): + lbi = bi[0] + ubi = bi[1] + if lbi == -np.inf and ubi == np.inf: + n_unbounded += 1 + x[i] = x[i] - x[n_x + n_unbounded - 1] + else: + if lbi == -np.inf: + x[i] = ubi - x[i] + else: + x[i] += lbi + # all the rest of the variables were artificial + x = x[:n_x] + + # If there were variables removed from the problem, add them back into the + # solution vector + # Apply the functions in revstack (reverse direction) + for rev in reversed(revstack): + x = rev(x) + + fun = x.dot(c) + slack = b_ub - A_ub.dot(x) # report slack for ORIGINAL UB constraints + # report residuals of ORIGINAL EQ constraints + con = b_eq - A_eq.dot(x) + + return x, fun, slack, con + + +def _check_result(x, fun, status, slack, con, bounds, tol, message, + integrality): + """ + Check the validity of the provided solution. + + A valid (optimal) solution satisfies all bounds, all slack variables are + negative and all equality constraint residuals are strictly non-zero. + Further, the lower-bounds, upper-bounds, slack and residuals contain + no nan values. + + Parameters + ---------- + x : 1-D array + Solution vector to original linear programming problem + fun: float + optimal objective value for original problem + status : int + An integer representing the exit status of the optimization:: + + 0 : Optimization terminated successfully + 1 : Iteration limit reached + 2 : Problem appears to be infeasible + 3 : Problem appears to be unbounded + 4 : Serious numerical difficulties encountered + + slack : 1-D array + The (non-negative) slack in the upper bound constraints, that is, + ``b_ub - A_ub @ x`` + con : 1-D array + The (nominally zero) residuals of the equality constraints, that is, + ``b - A_eq @ x`` + bounds : 2D array + The bounds on the original variables ``x`` + message : str + A string descriptor of the exit status of the optimization. + tol : float + Termination tolerance; see [1]_ Section 4.5. + + Returns + ------- + status : int + An integer representing the exit status of the optimization:: + + 0 : Optimization terminated successfully + 1 : Iteration limit reached + 2 : Problem appears to be infeasible + 3 : Problem appears to be unbounded + 4 : Serious numerical difficulties encountered + + message : str + A string descriptor of the exit status of the optimization. 
+ """ + # Somewhat arbitrary + tol = np.sqrt(tol) * 10 + + if x is None: + # HiGHS does not provide x if infeasible/unbounded + if status == 0: # Observed with HiGHS Simplex Primal + status = 4 + message = ("The solver did not provide a solution nor did it " + "report a failure. Please submit a bug report.") + return status, message + + contains_nans = ( + np.isnan(x).any() + or np.isnan(fun) + or np.isnan(slack).any() + or np.isnan(con).any() + ) + + if contains_nans: + is_feasible = False + else: + if integrality is None: + integrality = 0 + valid_bounds = (x >= bounds[:, 0] - tol) & (x <= bounds[:, 1] + tol) + # When integrality is 2 or 3, x must be within bounds OR take value 0 + valid_bounds |= (integrality > 1) & np.isclose(x, 0, atol=tol) + invalid_bounds = not np.all(valid_bounds) + + invalid_slack = status != 3 and (slack < -tol).any() + invalid_con = status != 3 and (np.abs(con) > tol).any() + is_feasible = not (invalid_bounds or invalid_slack or invalid_con) + + if status == 0 and not is_feasible: + status = 4 + message = ("The solution does not satisfy the constraints within the " + "required tolerance of " + f"{tol:.2E}" + ", yet " + "no errors were raised and there is no certificate of " + "infeasibility or unboundedness. Check whether " + "the slack and constraint residuals are acceptable; " + "if not, consider enabling presolve, adjusting the " + "tolerance option(s), and/or using a different method. " + "Please consider submitting a bug report.") + elif status == 2 and is_feasible: + # Occurs if the simplex method exits after phase one with a very + # nearly basic feasible solution. Postsolving can make the solution + # basic, however, this solution is NOT optimal + status = 4 + message = ("The solution is feasible, but the solver did not report " + "that the solution was optimal. Please try a different " + "method.") + + return status, message diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_minimize.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_minimize.py new file mode 100644 index 0000000000000000000000000000000000000000..862308ac5432e36a14ab1bc30c3b200713050b8b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_minimize.py @@ -0,0 +1,1094 @@ +""" +Unified interfaces to minimization algorithms. + +Functions +--------- +- minimize : minimization of a function of several variables. +- minimize_scalar : minimization of a function of one variable. 
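+
+A minimal, illustrative call on a smooth unconstrained problem (so the default
+method selection applies) might look like:
+
+>>> import numpy as np
+>>> from scipy.optimize import minimize
+>>> res = minimize(lambda x: np.sum((x - 3.0)**2), x0=np.zeros(2))
+>>> bool(res.success)
+True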
+""" + +__all__ = ['minimize', 'minimize_scalar'] + + +from warnings import warn + +import numpy as np + +# unconstrained minimization +from ._optimize import (_minimize_neldermead, _minimize_powell, _minimize_cg, + _minimize_bfgs, _minimize_newtoncg, + _minimize_scalar_brent, _minimize_scalar_bounded, + _minimize_scalar_golden, MemoizeJac, OptimizeResult, + _wrap_callback, _recover_from_bracket_error) +from ._trustregion_dogleg import _minimize_dogleg +from ._trustregion_ncg import _minimize_trust_ncg +from ._trustregion_krylov import _minimize_trust_krylov +from ._trustregion_exact import _minimize_trustregion_exact +from ._trustregion_constr import _minimize_trustregion_constr + +# constrained minimization +from ._lbfgsb_py import _minimize_lbfgsb +from ._tnc import _minimize_tnc +from ._cobyla_py import _minimize_cobyla +from ._slsqp_py import _minimize_slsqp +from ._constraints import (old_bound_to_new, new_bounds_to_old, + old_constraint_to_new, new_constraint_to_old, + NonlinearConstraint, LinearConstraint, Bounds, + PreparedConstraint) +from ._differentiable_functions import FD_METHODS + +MINIMIZE_METHODS = ['nelder-mead', 'powell', 'cg', 'bfgs', 'newton-cg', + 'l-bfgs-b', 'tnc', 'cobyla', 'slsqp', 'trust-constr', + 'dogleg', 'trust-ncg', 'trust-exact', 'trust-krylov'] + +# These methods support the new callback interface (passed an OptimizeResult) +MINIMIZE_METHODS_NEW_CB = ['nelder-mead', 'powell', 'cg', 'bfgs', 'newton-cg', + 'l-bfgs-b', 'trust-constr', 'dogleg', 'trust-ncg', + 'trust-exact', 'trust-krylov'] + +MINIMIZE_SCALAR_METHODS = ['brent', 'bounded', 'golden'] + +def minimize(fun, x0, args=(), method=None, jac=None, hess=None, + hessp=None, bounds=None, constraints=(), tol=None, + callback=None, options=None): + """Minimization of scalar function of one or more variables. + + Parameters + ---------- + fun : callable + The objective function to be minimized. + + ``fun(x, *args) -> float`` + + where ``x`` is a 1-D array with shape (n,) and ``args`` + is a tuple of the fixed parameters needed to completely + specify the function. + x0 : ndarray, shape (n,) + Initial guess. Array of real elements of size (n,), + where ``n`` is the number of independent variables. + args : tuple, optional + Extra arguments passed to the objective function and its + derivatives (`fun`, `jac` and `hess` functions). + method : str or callable, optional + Type of solver. Should be one of + + - 'Nelder-Mead' :ref:`(see here) ` + - 'Powell' :ref:`(see here) ` + - 'CG' :ref:`(see here) ` + - 'BFGS' :ref:`(see here) ` + - 'Newton-CG' :ref:`(see here) ` + - 'L-BFGS-B' :ref:`(see here) ` + - 'TNC' :ref:`(see here) ` + - 'COBYLA' :ref:`(see here) ` + - 'SLSQP' :ref:`(see here) ` + - 'trust-constr':ref:`(see here) ` + - 'dogleg' :ref:`(see here) ` + - 'trust-ncg' :ref:`(see here) ` + - 'trust-exact' :ref:`(see here) ` + - 'trust-krylov' :ref:`(see here) ` + - custom - a callable object, see below for description. + + If not given, chosen to be one of ``BFGS``, ``L-BFGS-B``, ``SLSQP``, + depending on whether or not the problem has constraints or bounds. + jac : {callable, '2-point', '3-point', 'cs', bool}, optional + Method for computing the gradient vector. Only for CG, BFGS, + Newton-CG, L-BFGS-B, TNC, SLSQP, dogleg, trust-ncg, trust-krylov, + trust-exact and trust-constr. + If it is a callable, it should be a function that returns the gradient + vector: + + ``jac(x, *args) -> array_like, shape (n,)`` + + where ``x`` is an array with shape (n,) and ``args`` is a tuple with + the fixed parameters. 
If `jac` is a Boolean and is True, `fun` is + assumed to return a tuple ``(f, g)`` containing the objective + function and the gradient. + Methods 'Newton-CG', 'trust-ncg', 'dogleg', 'trust-exact', and + 'trust-krylov' require that either a callable be supplied, or that + `fun` return the objective and gradient. + If None or False, the gradient will be estimated using 2-point finite + difference estimation with an absolute step size. + Alternatively, the keywords {'2-point', '3-point', 'cs'} can be used + to select a finite difference scheme for numerical estimation of the + gradient with a relative step size. These finite difference schemes + obey any specified `bounds`. + hess : {callable, '2-point', '3-point', 'cs', HessianUpdateStrategy}, optional + Method for computing the Hessian matrix. Only for Newton-CG, dogleg, + trust-ncg, trust-krylov, trust-exact and trust-constr. + If it is callable, it should return the Hessian matrix: + + ``hess(x, *args) -> {LinearOperator, spmatrix, array}, (n, n)`` + + where ``x`` is a (n,) ndarray and ``args`` is a tuple with the fixed + parameters. + The keywords {'2-point', '3-point', 'cs'} can also be used to select + a finite difference scheme for numerical estimation of the hessian. + Alternatively, objects implementing the `HessianUpdateStrategy` + interface can be used to approximate the Hessian. Available + quasi-Newton methods implementing this interface are: + + - `BFGS`; + - `SR1`. + + Not all of the options are available for each of the methods; for + availability refer to the notes. + hessp : callable, optional + Hessian of objective function times an arbitrary vector p. Only for + Newton-CG, trust-ncg, trust-krylov, trust-constr. + Only one of `hessp` or `hess` needs to be given. If `hess` is + provided, then `hessp` will be ignored. `hessp` must compute the + Hessian times an arbitrary vector: + + ``hessp(x, p, *args) -> ndarray shape (n,)`` + + where ``x`` is a (n,) ndarray, ``p`` is an arbitrary vector with + dimension (n,) and ``args`` is a tuple with the fixed + parameters. + bounds : sequence or `Bounds`, optional + Bounds on variables for Nelder-Mead, L-BFGS-B, TNC, SLSQP, Powell, + trust-constr, and COBYLA methods. There are two ways to specify the + bounds: + + 1. Instance of `Bounds` class. + 2. Sequence of ``(min, max)`` pairs for each element in `x`. None + is used to specify no bound. + + constraints : {Constraint, dict} or List of {Constraint, dict}, optional + Constraints definition. Only for COBYLA, SLSQP and trust-constr. + + Constraints for 'trust-constr' are defined as a single object or a + list of objects specifying constraints to the optimization problem. + Available constraints are: + + - `LinearConstraint` + - `NonlinearConstraint` + + Constraints for COBYLA, SLSQP are defined as a list of dictionaries. + Each dictionary with fields: + + type : str + Constraint type: 'eq' for equality, 'ineq' for inequality. + fun : callable + The function defining the constraint. + jac : callable, optional + The Jacobian of `fun` (only for SLSQP). + args : sequence, optional + Extra arguments to be passed to the function and Jacobian. + + Equality constraint means that the constraint function result is to + be zero whereas inequality means that it is to be non-negative. + Note that COBYLA only supports inequality constraints. + tol : float, optional + Tolerance for termination. When `tol` is specified, the selected + minimization algorithm sets some relevant solver-specific tolerance(s) + equal to `tol`. 
For detailed control, use solver-specific + options. + options : dict, optional + A dictionary of solver options. All methods except `TNC` accept the + following generic options: + + maxiter : int + Maximum number of iterations to perform. Depending on the + method each iteration may use several function evaluations. + + For `TNC` use `maxfun` instead of `maxiter`. + disp : bool + Set to True to print convergence messages. + + For method-specific options, see :func:`show_options()`. + callback : callable, optional + A callable called after each iteration. + + All methods except TNC, SLSQP, and COBYLA support a callable with + the signature: + + ``callback(intermediate_result: OptimizeResult)`` + + where ``intermediate_result`` is a keyword parameter containing an + `OptimizeResult` with attributes ``x`` and ``fun``, the present values + of the parameter vector and objective function. Note that the name + of the parameter must be ``intermediate_result`` for the callback + to be passed an `OptimizeResult`. These methods will also terminate if + the callback raises ``StopIteration``. + + All methods except trust-constr (also) support a signature like: + + ``callback(xk)`` + + where ``xk`` is the current parameter vector. + + Introspection is used to determine which of the signatures above to + invoke. + + Returns + ------- + res : OptimizeResult + The optimization result represented as a ``OptimizeResult`` object. + Important attributes are: ``x`` the solution array, ``success`` a + Boolean flag indicating if the optimizer exited successfully and + ``message`` which describes the cause of the termination. See + `OptimizeResult` for a description of other attributes. + + See also + -------- + minimize_scalar : Interface to minimization algorithms for scalar + univariate functions + show_options : Additional options accepted by the solvers + + Notes + ----- + This section describes the available solvers that can be selected by the + 'method' parameter. The default method is *BFGS*. + + **Unconstrained minimization** + + Method :ref:`CG ` uses a nonlinear conjugate + gradient algorithm by Polak and Ribiere, a variant of the + Fletcher-Reeves method described in [5]_ pp. 120-122. Only the + first derivatives are used. + + Method :ref:`BFGS ` uses the quasi-Newton + method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) [5]_ + pp. 136. It uses the first derivatives only. BFGS has proven good + performance even for non-smooth optimizations. This method also + returns an approximation of the Hessian inverse, stored as + `hess_inv` in the OptimizeResult object. + + Method :ref:`Newton-CG ` uses a + Newton-CG algorithm [5]_ pp. 168 (also known as the truncated + Newton method). It uses a CG method to compute the search + direction. See also the *TNC* method for a box-constrained + minimization with a similar algorithm. Suitable for large-scale + problems. + + Method :ref:`dogleg ` uses the dog-leg + trust-region algorithm [5]_ for unconstrained minimization. This + algorithm requires the gradient and Hessian; furthermore the + Hessian is required to be positive definite. + + Method :ref:`trust-ncg ` uses the + Newton conjugate gradient trust-region algorithm [5]_ for + unconstrained minimization. This algorithm requires the gradient + and either the Hessian or a function that computes the product of + the Hessian with a given vector. Suitable for large-scale problems. + + Method :ref:`trust-krylov ` uses + the Newton GLTR trust-region algorithm [14]_, [15]_ for unconstrained + minimization.
This algorithm requires the gradient + and either the Hessian or a function that computes the product of + the Hessian with a given vector. Suitable for large-scale problems. + On indefinite problems it usually requires fewer iterations than the + `trust-ncg` method and is recommended for medium and large-scale problems. + + Method :ref:`trust-exact ` + is a trust-region method for unconstrained minimization in which + quadratic subproblems are solved almost exactly [13]_. This + algorithm requires the gradient and the Hessian (which is + *not* required to be positive definite). It is, in many + situations, the Newton method that converges in fewer iterations + and is the most recommended for small and medium-size problems. + + **Bound-Constrained minimization** + + Method :ref:`Nelder-Mead ` uses the + Simplex algorithm [1]_, [2]_. This algorithm is robust in many + applications. However, if numerical computation of derivatives can be + trusted, other algorithms using the first and/or second derivatives + information might be preferred for their better performance in + general. + + Method :ref:`L-BFGS-B ` uses the L-BFGS-B + algorithm [6]_, [7]_ for bound constrained minimization. + + Method :ref:`Powell ` is a modification + of Powell's method [3]_, [4]_ which is a conjugate direction + method. It performs sequential one-dimensional minimizations along + each vector of the directions set (`direc` field in `options` and + `info`), which is updated at each iteration of the main + minimization loop. The function need not be differentiable, and no + derivatives are taken. If bounds are not provided, then an + unbounded line search will be used. If bounds are provided and + the initial guess is within the bounds, then every function + evaluation throughout the minimization procedure will be within + the bounds. If bounds are provided, the initial guess is outside + the bounds, and `direc` is full rank (default has full rank), then + some function evaluations during the first iteration may be + outside the bounds, but every function evaluation after the first + iteration will be within the bounds. If `direc` is not full rank, + then some parameters may not be optimized and the solution is not + guaranteed to be within the bounds. + + Method :ref:`TNC ` uses a truncated Newton + algorithm [5]_, [8]_ to minimize a function with variables subject + to bounds. This algorithm uses gradient information; it is also + called Newton Conjugate-Gradient. It differs from the *Newton-CG* + method described above as it wraps a C implementation and allows + each variable to be given upper and lower bounds. + + **Constrained Minimization** + + Method :ref:`COBYLA ` uses the + Constrained Optimization BY Linear Approximation (COBYLA) method + [9]_, [10]_, [11]_. The algorithm is based on linear + approximations to the objective function and each constraint. The + constraint functions 'fun' may return either a single number + or an array or list of numbers. The + method wraps a FORTRAN implementation of the algorithm. + + Method :ref:`SLSQP ` uses Sequential + Least SQuares Programming to minimize a function of several + variables with any combination of bounds, equality and inequality + constraints. The method wraps the SLSQP Optimization subroutine + originally implemented by Dieter Kraft [12]_. Note that the + wrapper handles infinite values in bounds by converting them into + large floating values. + + Method :ref:`trust-constr ` is a + trust-region algorithm for constrained optimization.
It switches + between two implementations depending on the problem definition. + It is the most versatile constrained minimization algorithm + implemented in SciPy and the most appropriate for large-scale problems. + For equality constrained problems it is an implementation of Byrd-Omojokun + Trust-Region SQP method described in [17]_ and in [5]_, p. 549. When + inequality constraints are imposed as well, it switches to the trust-region + interior point method described in [16]_. This interior point algorithm, + in turn, solves inequality constraints by introducing slack variables + and solving a sequence of equality-constrained barrier problems + for progressively smaller values of the barrier parameter. + The previously described equality constrained SQP method is + used to solve the subproblems with increasing levels of accuracy + as the iterate gets closer to a solution. + + **Finite-Difference Options** + + For Method :ref:`trust-constr ` + the gradient and the Hessian may be approximated using + three finite-difference schemes: {'2-point', '3-point', 'cs'}. + The scheme 'cs' is, potentially, the most accurate but it + requires the function to correctly handle complex inputs and to + be differentiable in the complex plane. The scheme '3-point' is more + accurate than '2-point' but requires twice as many operations. If the + gradient is estimated via finite-differences the Hessian must be + estimated using one of the quasi-Newton strategies. + + **Method specific options for the** `hess` **keyword** + + +--------------+------+----------+-------------------------+-----+ + | method/Hess | None | callable | '2-point/'3-point'/'cs' | HUS | + +==============+======+==========+=========================+=====+ + | Newton-CG | x | (n, n) | x | x | + | | | LO | | | + +--------------+------+----------+-------------------------+-----+ + | dogleg | | (n, n) | | | + +--------------+------+----------+-------------------------+-----+ + | trust-ncg | | (n, n) | x | x | + +--------------+------+----------+-------------------------+-----+ + | trust-krylov | | (n, n) | x | x | + +--------------+------+----------+-------------------------+-----+ + | trust-exact | | (n, n) | | | + +--------------+------+----------+-------------------------+-----+ + | trust-constr | x | (n, n) | x | x | + | | | LO | | | + | | | sp | | | + +--------------+------+----------+-------------------------+-----+ + + where LO=LinearOperator, sp=Sparse matrix, HUS=HessianUpdateStrategy + + **Custom minimizers** + + It may be useful to pass a custom minimization method, for example + when using a frontend to this method such as `scipy.optimize.basinhopping` + or a different library. You can simply pass a callable as the ``method`` + parameter. + + The callable is called as ``method(fun, x0, args, **kwargs, **options)`` + where ``kwargs`` corresponds to any other parameters passed to `minimize` + (such as `callback`, `hess`, etc.), except the `options` dict, which has + its contents also passed as `method` parameters pair by pair. Also, if + `jac` has been passed as a bool type, `jac` and `fun` are mangled so that + `fun` returns just the function values and `jac` is converted to a function + returning the Jacobian. The method shall return an `OptimizeResult` + object. + + The provided `method` callable must be able to accept (and possibly ignore) + arbitrary parameters; the set of parameters accepted by `minimize` may + expand in future versions and then these parameters will be passed to + the method. 
You can find an example in the scipy.optimize tutorial. + + References + ---------- + .. [1] Nelder, J A, and R Mead. 1965. A Simplex Method for Function + Minimization. The Computer Journal 7: 308-13. + .. [2] Wright M H. 1996. Direct search methods: Once scorned, now + respectable, in Numerical Analysis 1995: Proceedings of the 1995 + Dundee Biennial Conference in Numerical Analysis (Eds. D F + Griffiths and G A Watson). Addison Wesley Longman, Harlow, UK. + 191-208. + .. [3] Powell, M J D. 1964. An efficient method for finding the minimum of + a function of several variables without calculating derivatives. The + Computer Journal 7: 155-162. + .. [4] Press W, S A Teukolsky, W T Vetterling and B P Flannery. + Numerical Recipes (any edition), Cambridge University Press. + .. [5] Nocedal, J, and S J Wright. 2006. Numerical Optimization. + Springer New York. + .. [6] Byrd, R H and P Lu and J. Nocedal. 1995. A Limited Memory + Algorithm for Bound Constrained Optimization. SIAM Journal on + Scientific and Statistical Computing 16 (5): 1190-1208. + .. [7] Zhu, C and R H Byrd and J Nocedal. 1997. L-BFGS-B: Algorithm + 778: L-BFGS-B, FORTRAN routines for large scale bound constrained + optimization. ACM Transactions on Mathematical Software 23 (4): + 550-560. + .. [8] Nash, S G. Newton-Type Minimization Via the Lanczos Method. + 1984. SIAM Journal of Numerical Analysis 21: 770-778. + .. [9] Powell, M J D. A direct search optimization method that models + the objective and constraint functions by linear interpolation. + 1994. Advances in Optimization and Numerical Analysis, eds. S. Gomez + and J-P Hennart, Kluwer Academic (Dordrecht), 51-67. + .. [10] Powell M J D. Direct search algorithms for optimization + calculations. 1998. Acta Numerica 7: 287-336. + .. [11] Powell M J D. A view of algorithms for optimization without + derivatives. 2007.Cambridge University Technical Report DAMTP + 2007/NA03 + .. [12] Kraft, D. A software package for sequential quadratic + programming. 1988. Tech. Rep. DFVLR-FB 88-28, DLR German Aerospace + Center -- Institute for Flight Mechanics, Koln, Germany. + .. [13] Conn, A. R., Gould, N. I., and Toint, P. L. + Trust region methods. 2000. Siam. pp. 169-200. + .. [14] F. Lenders, C. Kirches, A. Potschka: "trlib: A vector-free + implementation of the GLTR method for iterative solution of + the trust region problem", :arxiv:`1611.04718` + .. [15] N. Gould, S. Lucidi, M. Roma, P. Toint: "Solving the + Trust-Region Subproblem using the Lanczos Method", + SIAM J. Optim., 9(2), 504--525, (1999). + .. [16] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal. 1999. + An interior point algorithm for large-scale nonlinear programming. + SIAM Journal on Optimization 9.4: 877-900. + .. [17] Lalee, Marucha, Jorge Nocedal, and Todd Plantega. 1998. On the + implementation of an algorithm for large-scale equality constrained + optimization. SIAM Journal on Optimization 8.3: 682-706. + + Examples + -------- + Let us consider the problem of minimizing the Rosenbrock function. This + function (and its respective derivatives) is implemented in `rosen` + (resp. `rosen_der`, `rosen_hess`) in the `scipy.optimize`. 
+ + >>> from scipy.optimize import minimize, rosen, rosen_der + + A simple application of the *Nelder-Mead* method is: + + >>> x0 = [1.3, 0.7, 0.8, 1.9, 1.2] + >>> res = minimize(rosen, x0, method='Nelder-Mead', tol=1e-6) + >>> res.x + array([ 1., 1., 1., 1., 1.]) + + Now using the *BFGS* algorithm, using the first derivative and a few + options: + + >>> res = minimize(rosen, x0, method='BFGS', jac=rosen_der, + ... options={'gtol': 1e-6, 'disp': True}) + Optimization terminated successfully. + Current function value: 0.000000 + Iterations: 26 + Function evaluations: 31 + Gradient evaluations: 31 + >>> res.x + array([ 1., 1., 1., 1., 1.]) + >>> print(res.message) + Optimization terminated successfully. + >>> res.hess_inv + array([ + [ 0.00749589, 0.01255155, 0.02396251, 0.04750988, 0.09495377], # may vary + [ 0.01255155, 0.02510441, 0.04794055, 0.09502834, 0.18996269], + [ 0.02396251, 0.04794055, 0.09631614, 0.19092151, 0.38165151], + [ 0.04750988, 0.09502834, 0.19092151, 0.38341252, 0.7664427 ], + [ 0.09495377, 0.18996269, 0.38165151, 0.7664427, 1.53713523] + ]) + + + Next, consider a minimization problem with several constraints (namely + Example 16.4 from [5]_). The objective function is: + + >>> fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2 + + There are three constraints defined as: + + >>> cons = ({'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2}, + ... {'type': 'ineq', 'fun': lambda x: -x[0] - 2 * x[1] + 6}, + ... {'type': 'ineq', 'fun': lambda x: -x[0] + 2 * x[1] + 2}) + + And variables must be positive, hence the following bounds: + + >>> bnds = ((0, None), (0, None)) + + The optimization problem is solved using the SLSQP method as: + + >>> res = minimize(fun, (2, 0), method='SLSQP', bounds=bnds, + ... constraints=cons) + + It should converge to the theoretical solution (1.4 ,1.7). + + """ + x0 = np.atleast_1d(np.asarray(x0)) + + if x0.ndim != 1: + raise ValueError("'x0' must only have one dimension.") + + if x0.dtype.kind in np.typecodes["AllInteger"]: + x0 = np.asarray(x0, dtype=float) + + if not isinstance(args, tuple): + args = (args,) + + if method is None: + # Select automatically + if constraints: + method = 'SLSQP' + elif bounds is not None: + method = 'L-BFGS-B' + else: + method = 'BFGS' + + if callable(method): + meth = "_custom" + else: + meth = method.lower() + + if options is None: + options = {} + # check if optional parameters are supported by the selected method + # - jac + if meth in ('nelder-mead', 'powell', 'cobyla') and bool(jac): + warn('Method %s does not use gradient information (jac).' % method, + RuntimeWarning, stacklevel=2) + # - hess + if meth not in ('newton-cg', 'dogleg', 'trust-ncg', 'trust-constr', + 'trust-krylov', 'trust-exact', '_custom') and hess is not None: + warn('Method %s does not use Hessian information (hess).' % method, + RuntimeWarning, stacklevel=2) + # - hessp + if meth not in ('newton-cg', 'trust-ncg', 'trust-constr', + 'trust-krylov', '_custom') \ + and hessp is not None: + warn('Method %s does not use Hessian-vector product ' + 'information (hessp).' % method, + RuntimeWarning, stacklevel=2) + # - constraints or bounds + if (meth not in ('cobyla', 'slsqp', 'trust-constr', '_custom') and + np.any(constraints)): + warn('Method %s cannot handle constraints.' % method, + RuntimeWarning, stacklevel=2) + if meth not in ('nelder-mead', 'powell', 'l-bfgs-b', 'cobyla', 'slsqp', + 'tnc', 'trust-constr', '_custom') and bounds is not None: + warn('Method %s cannot handle bounds.' 
% method, + RuntimeWarning, stacklevel=2) + # - return_all + if (meth in ('l-bfgs-b', 'tnc', 'cobyla', 'slsqp') and + options.get('return_all', False)): + warn('Method %s does not support the return_all option.' % method, + RuntimeWarning, stacklevel=2) + + # check gradient vector + if callable(jac): + pass + elif jac is True: + # fun returns func and grad + fun = MemoizeJac(fun) + jac = fun.derivative + elif (jac in FD_METHODS and + meth in ['trust-constr', 'bfgs', 'cg', 'l-bfgs-b', 'tnc', 'slsqp']): + # finite differences with relative step + pass + elif meth in ['trust-constr']: + # default jac calculation for this method + jac = '2-point' + elif jac is None or bool(jac) is False: + # this will cause e.g. LBFGS to use forward difference, absolute step + jac = None + else: + # default if jac option is not understood + jac = None + + # set default tolerances + if tol is not None: + options = dict(options) + if meth == 'nelder-mead': + options.setdefault('xatol', tol) + options.setdefault('fatol', tol) + if meth in ('newton-cg', 'powell', 'tnc'): + options.setdefault('xtol', tol) + if meth in ('powell', 'l-bfgs-b', 'tnc', 'slsqp'): + options.setdefault('ftol', tol) + if meth in ('bfgs', 'cg', 'l-bfgs-b', 'tnc', 'dogleg', + 'trust-ncg', 'trust-exact', 'trust-krylov'): + options.setdefault('gtol', tol) + if meth in ('cobyla', '_custom'): + options.setdefault('tol', tol) + if meth == 'trust-constr': + options.setdefault('xtol', tol) + options.setdefault('gtol', tol) + options.setdefault('barrier_tol', tol) + + if meth == '_custom': + # custom method called before bounds and constraints are 'standardised' + # custom method should be able to accept whatever bounds/constraints + # are provided to it. + return method(fun, x0, args=args, jac=jac, hess=hess, hessp=hessp, + bounds=bounds, constraints=constraints, + callback=callback, **options) + + constraints = standardize_constraints(constraints, x0, meth) + + remove_vars = False + if bounds is not None: + # convert to new-style bounds so we only have to consider one case + bounds = standardize_bounds(bounds, x0, 'new') + bounds = _validate_bounds(bounds, x0, meth) + + if meth in {"tnc", "slsqp", "l-bfgs-b"}: + # These methods can't take the finite-difference derivatives they + # need when a variable is fixed by the bounds. To avoid this issue, + # remove fixed variables from the problem. + # NOTE: if this list is expanded, then be sure to update the + # accompanying tests and test_optimize.eb_data. Consider also if + # default OptimizeResult will need updating. 
+ + # determine whether any variables are fixed + i_fixed = (bounds.lb == bounds.ub) + + if np.all(i_fixed): + # all the parameters are fixed, a minimizer is not able to do + # anything + return _optimize_result_for_equal_bounds( + fun, bounds, meth, args=args, constraints=constraints + ) + + # determine whether finite differences are needed for any grad/jac + fd_needed = (not callable(jac)) + for con in constraints: + if not callable(con.get('jac', None)): + fd_needed = True + + # If finite differences are ever used, remove all fixed variables + # Always remove fixed variables for TNC; see gh-14565 + remove_vars = i_fixed.any() and (fd_needed or meth == "tnc") + if remove_vars: + x_fixed = (bounds.lb)[i_fixed] + x0 = x0[~i_fixed] + bounds = _remove_from_bounds(bounds, i_fixed) + fun = _remove_from_func(fun, i_fixed, x_fixed) + if callable(callback): + callback = _remove_from_func(callback, i_fixed, x_fixed) + if callable(jac): + jac = _remove_from_func(jac, i_fixed, x_fixed, remove=1) + + # make a copy of the constraints so the user's version doesn't + # get changed. (Shallow copy is ok) + constraints = [con.copy() for con in constraints] + for con in constraints: # yes, guaranteed to be a list + con['fun'] = _remove_from_func(con['fun'], i_fixed, + x_fixed, min_dim=1, + remove=0) + if callable(con.get('jac', None)): + con['jac'] = _remove_from_func(con['jac'], i_fixed, + x_fixed, min_dim=2, + remove=1) + bounds = standardize_bounds(bounds, x0, meth) + + callback = _wrap_callback(callback, meth) + + if meth == 'nelder-mead': + res = _minimize_neldermead(fun, x0, args, callback, bounds=bounds, + **options) + elif meth == 'powell': + res = _minimize_powell(fun, x0, args, callback, bounds, **options) + elif meth == 'cg': + res = _minimize_cg(fun, x0, args, jac, callback, **options) + elif meth == 'bfgs': + res = _minimize_bfgs(fun, x0, args, jac, callback, **options) + elif meth == 'newton-cg': + res = _minimize_newtoncg(fun, x0, args, jac, hess, hessp, callback, + **options) + elif meth == 'l-bfgs-b': + res = _minimize_lbfgsb(fun, x0, args, jac, bounds, + callback=callback, **options) + elif meth == 'tnc': + res = _minimize_tnc(fun, x0, args, jac, bounds, callback=callback, + **options) + elif meth == 'cobyla': + res = _minimize_cobyla(fun, x0, args, constraints, callback=callback, + bounds=bounds, **options) + elif meth == 'slsqp': + res = _minimize_slsqp(fun, x0, args, jac, bounds, + constraints, callback=callback, **options) + elif meth == 'trust-constr': + res = _minimize_trustregion_constr(fun, x0, args, jac, hess, hessp, + bounds, constraints, + callback=callback, **options) + elif meth == 'dogleg': + res = _minimize_dogleg(fun, x0, args, jac, hess, + callback=callback, **options) + elif meth == 'trust-ncg': + res = _minimize_trust_ncg(fun, x0, args, jac, hess, hessp, + callback=callback, **options) + elif meth == 'trust-krylov': + res = _minimize_trust_krylov(fun, x0, args, jac, hess, hessp, + callback=callback, **options) + elif meth == 'trust-exact': + res = _minimize_trustregion_exact(fun, x0, args, jac, hess, + callback=callback, **options) + else: + raise ValueError('Unknown solver %s' % method) + + if remove_vars: + res.x = _add_to_array(res.x, i_fixed, x_fixed) + res.jac = _add_to_array(res.jac, i_fixed, np.nan) + if "hess_inv" in res: + res.hess_inv = None # unknown + + if getattr(callback, 'stop_iteration', False): + res.success = False + res.status = 99 + res.message = "`callback` raised `StopIteration`." 
+ + return res + + +def minimize_scalar(fun, bracket=None, bounds=None, args=(), + method=None, tol=None, options=None): + """Local minimization of scalar function of one variable. + + Parameters + ---------- + fun : callable + Objective function. + Scalar function, must return a scalar. + bracket : sequence, optional + For methods 'brent' and 'golden', `bracket` defines the bracketing + interval and is required. + Either a triple ``(xa, xb, xc)`` satisfying ``xa < xb < xc`` and + ``func(xb) < func(xa) and func(xb) < func(xc)``, or a pair + ``(xa, xb)`` to be used as initial points for a downhill bracket search + (see `scipy.optimize.bracket`). + The minimizer ``res.x`` will not necessarily satisfy + ``xa <= res.x <= xb``. + bounds : sequence, optional + For method 'bounded', `bounds` is mandatory and must have two finite + items corresponding to the optimization bounds. + args : tuple, optional + Extra arguments passed to the objective function. + method : str or callable, optional + Type of solver. Should be one of: + + - :ref:`Brent ` + - :ref:`Bounded ` + - :ref:`Golden ` + - custom - a callable object (added in version 0.14.0), see below + + Default is "Bounded" if bounds are provided and "Brent" otherwise. + See the 'Notes' section for details of each solver. + + tol : float, optional + Tolerance for termination. For detailed control, use solver-specific + options. + options : dict, optional + A dictionary of solver options. + + maxiter : int + Maximum number of iterations to perform. + disp : bool + Set to True to print convergence messages. + + See :func:`show_options()` for solver-specific options. + + Returns + ------- + res : OptimizeResult + The optimization result represented as a ``OptimizeResult`` object. + Important attributes are: ``x`` the solution array, ``success`` a + Boolean flag indicating if the optimizer exited successfully and + ``message`` which describes the cause of the termination. See + `OptimizeResult` for a description of other attributes. + + See also + -------- + minimize : Interface to minimization algorithms for scalar multivariate + functions + show_options : Additional options accepted by the solvers + + Notes + ----- + This section describes the available solvers that can be selected by the + 'method' parameter. The default method is the ``"Bounded"`` Brent method if + `bounds` are passed and unbounded ``"Brent"`` otherwise. + + Method :ref:`Brent ` uses Brent's + algorithm [1]_ to find a local minimum. The algorithm uses inverse + parabolic interpolation when possible to speed up convergence of + the golden section method. + + Method :ref:`Golden ` uses the + golden section search technique [1]_. It uses analog of the bisection + method to decrease the bracketed interval. It is usually + preferable to use the *Brent* method. + + Method :ref:`Bounded ` can + perform bounded minimization [2]_ [3]_. It uses the Brent method to find a + local minimum in the interval x1 < xopt < x2. + + Note that the Brent and Golden methods do not guarantee success unless a + valid ``bracket`` triple is provided. If a three-point bracket cannot be + found, consider `scipy.optimize.minimize`. Also, all methods are intended + only for local minimization. When the function of interest has more than + one local minimum, consider :ref:`global_optimization`. + + **Custom minimizers** + + It may be useful to pass a custom minimization method, for example + when using some library frontend to minimize_scalar. You can simply + pass a callable as the ``method`` parameter. 
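    A minimal sketch of such a callable (a hypothetical ``grid_method`` that
    is not part of SciPy), assuming only the calling convention described in
    the next paragraph and returning an `OptimizeResult`:

    >>> import numpy as np
    >>> from scipy.optimize import minimize_scalar, OptimizeResult
    >>> def grid_method(fun, args=(), bracket=None, bounds=None, **options):
    ...     # purely illustrative: coarse grid search over `bounds`
    ...     xs = np.linspace(bounds[0], bounds[1], options.get("num", 1001))
    ...     fs = np.array([fun(x, *args) for x in xs])
    ...     i = np.argmin(fs)
    ...     return OptimizeResult(x=xs[i], fun=fs[i], success=True, nfev=xs.size)
    >>> res = minimize_scalar(lambda x: (x - 2)**2, bounds=(0, 5),
    ...                       method=grid_method)
    >>> bool(abs(res.x - 2) < 1e-2)
    True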
+ + The callable is called as ``method(fun, args, **kwargs, **options)`` + where ``kwargs`` corresponds to any other parameters passed to `minimize` + (such as `bracket`, `tol`, etc.), except the `options` dict, which has + its contents also passed as `method` parameters pair by pair. The method + shall return an `OptimizeResult` object. + + The provided `method` callable must be able to accept (and possibly ignore) + arbitrary parameters; the set of parameters accepted by `minimize` may + expand in future versions and then these parameters will be passed to + the method. You can find an example in the scipy.optimize tutorial. + + .. versionadded:: 0.11.0 + + References + ---------- + .. [1] Press, W., S.A. Teukolsky, W.T. Vetterling, and B.P. Flannery. + Numerical Recipes in C. Cambridge University Press. + .. [2] Forsythe, G.E., M. A. Malcolm, and C. B. Moler. "Computer Methods + for Mathematical Computations." Prentice-Hall Series in Automatic + Computation 259 (1977). + .. [3] Brent, Richard P. Algorithms for Minimization Without Derivatives. + Courier Corporation, 2013. + + Examples + -------- + Consider the problem of minimizing the following function. + + >>> def f(x): + ... return (x - 2) * x * (x + 2)**2 + + Using the *Brent* method, we find the local minimum as: + + >>> from scipy.optimize import minimize_scalar + >>> res = minimize_scalar(f) + >>> res.fun + -9.9149495908 + + The minimizer is: + + >>> res.x + 1.28077640403 + + Using the *Bounded* method, we find a local minimum with specified + bounds as: + + >>> res = minimize_scalar(f, bounds=(-3, -1), method='bounded') + >>> res.fun # minimum + 3.28365179850e-13 + >>> res.x # minimizer + -2.0000002026 + + """ + if not isinstance(args, tuple): + args = (args,) + + if callable(method): + meth = "_custom" + elif method is None: + meth = 'brent' if bounds is None else 'bounded' + else: + meth = method.lower() + if options is None: + options = {} + + if bounds is not None and meth in {'brent', 'golden'}: + message = f"Use of `bounds` is incompatible with 'method={method}'." + raise ValueError(message) + + if tol is not None: + options = dict(options) + if meth == 'bounded' and 'xatol' not in options: + warn("Method 'bounded' does not support relative tolerance in x; " + "defaulting to absolute tolerance.", + RuntimeWarning, stacklevel=2) + options['xatol'] = tol + elif meth == '_custom': + options.setdefault('tol', tol) + else: + options.setdefault('xtol', tol) + + # replace boolean "disp" option, if specified, by an integer value. + disp = options.get('disp') + if isinstance(disp, bool): + options['disp'] = 2 * int(disp) + + if meth == '_custom': + res = method(fun, args=args, bracket=bracket, bounds=bounds, **options) + elif meth == 'brent': + res = _recover_from_bracket_error(_minimize_scalar_brent, + fun, bracket, args, **options) + elif meth == 'bounded': + if bounds is None: + raise ValueError('The `bounds` parameter is mandatory for ' + 'method `bounded`.') + res = _minimize_scalar_bounded(fun, bounds, args, **options) + elif meth == 'golden': + res = _recover_from_bracket_error(_minimize_scalar_golden, + fun, bracket, args, **options) + else: + raise ValueError('Unknown solver %s' % method) + + # gh-16196 reported inconsistencies in the output shape of `res.x`. While + # fixing this, future-proof it for when the function is vectorized: + # the shape of `res.x` should match that of `res.fun`. 
+ res.fun = np.asarray(res.fun)[()] + res.x = np.reshape(res.x, res.fun.shape)[()] + return res + + +def _remove_from_bounds(bounds, i_fixed): + """Removes fixed variables from a `Bounds` instance""" + lb = bounds.lb[~i_fixed] + ub = bounds.ub[~i_fixed] + return Bounds(lb, ub) # don't mutate original Bounds object + + +def _remove_from_func(fun_in, i_fixed, x_fixed, min_dim=None, remove=0): + """Wraps a function such that fixed variables need not be passed in""" + def fun_out(x_in, *args, **kwargs): + x_out = np.zeros_like(i_fixed, dtype=x_in.dtype) + x_out[i_fixed] = x_fixed + x_out[~i_fixed] = x_in + y_out = fun_in(x_out, *args, **kwargs) + y_out = np.array(y_out) + + if min_dim == 1: + y_out = np.atleast_1d(y_out) + elif min_dim == 2: + y_out = np.atleast_2d(y_out) + + if remove == 1: + y_out = y_out[..., ~i_fixed] + elif remove == 2: + y_out = y_out[~i_fixed, ~i_fixed] + + return y_out + return fun_out + + +def _add_to_array(x_in, i_fixed, x_fixed): + """Adds fixed variables back to an array""" + i_free = ~i_fixed + if x_in.ndim == 2: + i_free = i_free[:, None] @ i_free[None, :] + x_out = np.zeros_like(i_free, dtype=x_in.dtype) + x_out[~i_free] = x_fixed + x_out[i_free] = x_in.ravel() + return x_out + + +def _validate_bounds(bounds, x0, meth): + """Check that bounds are valid.""" + + msg = "An upper bound is less than the corresponding lower bound." + if np.any(bounds.ub < bounds.lb): + raise ValueError(msg) + + msg = "The number of bounds is not compatible with the length of `x0`." + try: + bounds.lb = np.broadcast_to(bounds.lb, x0.shape) + bounds.ub = np.broadcast_to(bounds.ub, x0.shape) + except Exception as e: + raise ValueError(msg) from e + + return bounds + +def standardize_bounds(bounds, x0, meth): + """Converts bounds to the form required by the solver.""" + if meth in {'trust-constr', 'powell', 'nelder-mead', 'cobyla', 'new'}: + if not isinstance(bounds, Bounds): + lb, ub = old_bound_to_new(bounds) + bounds = Bounds(lb, ub) + elif meth in ('l-bfgs-b', 'tnc', 'slsqp', 'old'): + if isinstance(bounds, Bounds): + bounds = new_bounds_to_old(bounds.lb, bounds.ub, x0.shape[0]) + return bounds + + +def standardize_constraints(constraints, x0, meth): + """Converts constraints to the form required by the solver.""" + all_constraint_types = (NonlinearConstraint, LinearConstraint, dict) + new_constraint_types = all_constraint_types[:-1] + if constraints is None: + constraints = [] + elif isinstance(constraints, all_constraint_types): + constraints = [constraints] + else: + constraints = list(constraints) # ensure it's a mutable sequence + + if meth in ['trust-constr', 'new']: + for i, con in enumerate(constraints): + if not isinstance(con, new_constraint_types): + constraints[i] = old_constraint_to_new(i, con) + else: + # iterate over copy, changing original + for i, con in enumerate(list(constraints)): + if isinstance(con, new_constraint_types): + old_constraints = new_constraint_to_old(con, x0) + constraints[i] = old_constraints[0] + constraints.extend(old_constraints[1:]) # appends 1 if present + + return constraints + + +def _optimize_result_for_equal_bounds( + fun, bounds, method, args=(), constraints=() +): + """ + Provides a default OptimizeResult for when a bounded minimization method + has (lb == ub).all(). + + Parameters + ---------- + fun: callable + bounds: Bounds + method: str + constraints: Constraint + """ + success = True + message = 'All independent variables were fixed by bounds.' 
+ + # bounds is new-style + x0 = bounds.lb + + if constraints: + message = ("All independent variables were fixed by bounds at values" + " that satisfy the constraints.") + constraints = standardize_constraints(constraints, x0, 'new') + + maxcv = 0 + for c in constraints: + pc = PreparedConstraint(c, x0) + violation = pc.violation(x0) + if np.sum(violation): + maxcv = max(maxcv, np.max(violation)) + success = False + message = (f"All independent variables were fixed by bounds, but " + f"the independent variables do not satisfy the " + f"constraints exactly. (Maximum violation: {maxcv}).") + + return OptimizeResult( + x=x0, fun=fun(x0, *args), success=success, message=message, nfev=1, + njev=0, nhev=0, + ) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_minpack.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_minpack.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..93044f647967768bbbe09b1b5e3ee71cea88668b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_minpack.cpython-310-x86_64-linux-gnu.so differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_minpack_py.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_minpack_py.py new file mode 100644 index 0000000000000000000000000000000000000000..afb2087ac25d0e8c34d81860f6dd26b14895340d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_minpack_py.py @@ -0,0 +1,1157 @@ +import warnings +from . import _minpack + +import numpy as np +from numpy import (atleast_1d, triu, shape, transpose, zeros, prod, greater, + asarray, inf, + finfo, inexact, issubdtype, dtype) +from scipy import linalg +from scipy.linalg import svd, cholesky, solve_triangular, LinAlgError +from scipy._lib._util import _asarray_validated, _lazywhere, _contains_nan +from scipy._lib._util import getfullargspec_no_self as _getfullargspec +from ._optimize import OptimizeResult, _check_unknown_options, OptimizeWarning +from ._lsq import least_squares +# from ._lsq.common import make_strictly_feasible +from ._lsq.least_squares import prepare_bounds +from scipy.optimize._minimize import Bounds + +# deprecated imports to be removed in SciPy 1.13.0 +from numpy import dot, eye, take # noqa: F401 +from numpy.linalg import inv # noqa: F401 + +error = _minpack.error + +__all__ = ['fsolve', 'leastsq', 'fixed_point', 'curve_fit'] + + +def _check_func(checker, argname, thefunc, x0, args, numinputs, + output_shape=None): + res = atleast_1d(thefunc(*((x0[:numinputs],) + args))) + if (output_shape is not None) and (shape(res) != output_shape): + if (output_shape[0] != 1): + if len(output_shape) > 1: + if output_shape[1] == 1: + return shape(res) + msg = f"{checker}: there is a mismatch between the input and output " \ + f"shape of the '{argname}' argument" + func_name = getattr(thefunc, '__name__', None) + if func_name: + msg += " '%s'." % func_name + else: + msg += "." + msg += f'Shape should be {output_shape} but it is {shape(res)}.' + raise TypeError(msg) + if issubdtype(res.dtype, inexact): + dt = res.dtype + else: + dt = dtype(float) + return shape(res), dt + + +def fsolve(func, x0, args=(), fprime=None, full_output=0, + col_deriv=0, xtol=1.49012e-8, maxfev=0, band=None, + epsfcn=None, factor=100, diag=None): + """ + Find the roots of a function. + + Return the roots of the (non-linear) equations defined by + ``func(x) = 0`` given a starting estimate. 
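    As a supplementary sketch, an analytical Jacobian can be supplied through
    the `fprime` parameter documented below; the small system used here is
    illustrative only:

    >>> import numpy as np
    >>> from scipy.optimize import fsolve
    >>> def func(x):
    ...     return [x[0]**2 - 2.0, x[0] * x[1] - 1.0]
    >>> def jac(x):
    ...     return [[2.0 * x[0], 0.0], [x[1], x[0]]]
    >>> fsolve(func, [1.0, 1.0], fprime=jac)  # may vary in the last digits
    array([1.41421356, 0.70710678])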
+ + Parameters + ---------- + func : callable ``f(x, *args)`` + A function that takes at least one (possibly vector) argument, + and returns a value of the same length. + x0 : ndarray + The starting estimate for the roots of ``func(x) = 0``. + args : tuple, optional + Any extra arguments to `func`. + fprime : callable ``f(x, *args)``, optional + A function to compute the Jacobian of `func` with derivatives + across the rows. By default, the Jacobian will be estimated. + full_output : bool, optional + If True, return optional outputs. + col_deriv : bool, optional + Specify whether the Jacobian function computes derivatives down + the columns (faster, because there is no transpose operation). + xtol : float, optional + The calculation will terminate if the relative error between two + consecutive iterates is at most `xtol`. + maxfev : int, optional + The maximum number of calls to the function. If zero, then + ``100*(N+1)`` is the maximum where N is the number of elements + in `x0`. + band : tuple, optional + If set to a two-sequence containing the number of sub- and + super-diagonals within the band of the Jacobi matrix, the + Jacobi matrix is considered banded (only for ``fprime=None``). + epsfcn : float, optional + A suitable step length for the forward-difference + approximation of the Jacobian (for ``fprime=None``). If + `epsfcn` is less than the machine precision, it is assumed + that the relative errors in the functions are of the order of + the machine precision. + factor : float, optional + A parameter determining the initial step bound + (``factor * || diag * x||``). Should be in the interval + ``(0.1, 100)``. + diag : sequence, optional + N positive entries that serve as a scale factors for the + variables. + + Returns + ------- + x : ndarray + The solution (or the result of the last iteration for + an unsuccessful call). + infodict : dict + A dictionary of optional outputs with the keys: + + ``nfev`` + number of function calls + ``njev`` + number of Jacobian calls + ``fvec`` + function evaluated at the output + ``fjac`` + the orthogonal matrix, q, produced by the QR + factorization of the final approximate Jacobian + matrix, stored column wise + ``r`` + upper triangular matrix produced by QR factorization + of the same matrix + ``qtf`` + the vector ``(transpose(q) * fvec)`` + + ier : int + An integer flag. Set to 1 if a solution was found, otherwise refer + to `mesg` for more information. + mesg : str + If no solution is found, `mesg` details the cause of failure. + + See Also + -------- + root : Interface to root finding algorithms for multivariate + functions. See the ``method='hybr'`` in particular. + + Notes + ----- + ``fsolve`` is a wrapper around MINPACK's hybrd and hybrj algorithms. + + Examples + -------- + Find a solution to the system of equations: + ``x0*cos(x1) = 4, x1*x0 - x1 = 5``. + + >>> import numpy as np + >>> from scipy.optimize import fsolve + >>> def func(x): + ... return [x[0] * np.cos(x[1]) - 4, + ... x[1] * x[0] - x[1] - 5] + >>> root = fsolve(func, [1, 1]) + >>> root + array([6.50409711, 0.90841421]) + >>> np.isclose(func(root), [0.0, 0.0]) # func(root) should be almost 0.0. 
+ array([ True, True]) + + """ + options = {'col_deriv': col_deriv, + 'xtol': xtol, + 'maxfev': maxfev, + 'band': band, + 'eps': epsfcn, + 'factor': factor, + 'diag': diag} + + res = _root_hybr(func, x0, args, jac=fprime, **options) + if full_output: + x = res['x'] + info = {k: res.get(k) + for k in ('nfev', 'njev', 'fjac', 'r', 'qtf') if k in res} + info['fvec'] = res['fun'] + return x, info, res['status'], res['message'] + else: + status = res['status'] + msg = res['message'] + if status == 0: + raise TypeError(msg) + elif status == 1: + pass + elif status in [2, 3, 4, 5]: + warnings.warn(msg, RuntimeWarning, stacklevel=2) + else: + raise TypeError(msg) + return res['x'] + + +def _root_hybr(func, x0, args=(), jac=None, + col_deriv=0, xtol=1.49012e-08, maxfev=0, band=None, eps=None, + factor=100, diag=None, **unknown_options): + """ + Find the roots of a multivariate function using MINPACK's hybrd and + hybrj routines (modified Powell method). + + Options + ------- + col_deriv : bool + Specify whether the Jacobian function computes derivatives down + the columns (faster, because there is no transpose operation). + xtol : float + The calculation will terminate if the relative error between two + consecutive iterates is at most `xtol`. + maxfev : int + The maximum number of calls to the function. If zero, then + ``100*(N+1)`` is the maximum where N is the number of elements + in `x0`. + band : tuple + If set to a two-sequence containing the number of sub- and + super-diagonals within the band of the Jacobi matrix, the + Jacobi matrix is considered banded (only for ``fprime=None``). + eps : float + A suitable step length for the forward-difference + approximation of the Jacobian (for ``fprime=None``). If + `eps` is less than the machine precision, it is assumed + that the relative errors in the functions are of the order of + the machine precision. + factor : float + A parameter determining the initial step bound + (``factor * || diag * x||``). Should be in the interval + ``(0.1, 100)``. + diag : sequence + N positive entries that serve as a scale factors for the + variables. + + """ + _check_unknown_options(unknown_options) + epsfcn = eps + + x0 = asarray(x0).flatten() + n = len(x0) + if not isinstance(args, tuple): + args = (args,) + shape, dtype = _check_func('fsolve', 'func', func, x0, args, n, (n,)) + if epsfcn is None: + epsfcn = finfo(dtype).eps + Dfun = jac + if Dfun is None: + if band is None: + ml, mu = -10, -10 + else: + ml, mu = band[:2] + if maxfev == 0: + maxfev = 200 * (n + 1) + retval = _minpack._hybrd(func, x0, args, 1, xtol, maxfev, + ml, mu, epsfcn, factor, diag) + else: + _check_func('fsolve', 'fprime', Dfun, x0, args, n, (n, n)) + if (maxfev == 0): + maxfev = 100 * (n + 1) + retval = _minpack._hybrj(func, Dfun, x0, args, 1, + col_deriv, xtol, maxfev, factor, diag) + + x, status = retval[0], retval[-1] + + errors = {0: "Improper input parameters were entered.", + 1: "The solution converged.", + 2: "The number of calls to function has " + "reached maxfev = %d." % maxfev, + 3: "xtol=%f is too small, no further improvement " + "in the approximate\n solution " + "is possible." 
% xtol, + 4: "The iteration is not making good progress, as measured " + "by the \n improvement from the last five " + "Jacobian evaluations.", + 5: "The iteration is not making good progress, " + "as measured by the \n improvement from the last " + "ten iterations.", + 'unknown': "An error occurred."} + + info = retval[1] + info['fun'] = info.pop('fvec') + sol = OptimizeResult(x=x, success=(status == 1), status=status, + method="hybr") + sol.update(info) + try: + sol['message'] = errors[status] + except KeyError: + sol['message'] = errors['unknown'] + + return sol + + +LEASTSQ_SUCCESS = [1, 2, 3, 4] +LEASTSQ_FAILURE = [5, 6, 7, 8] + + +def leastsq(func, x0, args=(), Dfun=None, full_output=False, + col_deriv=False, ftol=1.49012e-8, xtol=1.49012e-8, + gtol=0.0, maxfev=0, epsfcn=None, factor=100, diag=None): + """ + Minimize the sum of squares of a set of equations. + + :: + + x = arg min(sum(func(y)**2,axis=0)) + y + + Parameters + ---------- + func : callable + Should take at least one (possibly length ``N`` vector) argument and + returns ``M`` floating point numbers. It must not return NaNs or + fitting might fail. ``M`` must be greater than or equal to ``N``. + x0 : ndarray + The starting estimate for the minimization. + args : tuple, optional + Any extra arguments to func are placed in this tuple. + Dfun : callable, optional + A function or method to compute the Jacobian of func with derivatives + across the rows. If this is None, the Jacobian will be estimated. + full_output : bool, optional + If ``True``, return all optional outputs (not just `x` and `ier`). + col_deriv : bool, optional + If ``True``, specify that the Jacobian function computes derivatives + down the columns (faster, because there is no transpose operation). + ftol : float, optional + Relative error desired in the sum of squares. + xtol : float, optional + Relative error desired in the approximate solution. + gtol : float, optional + Orthogonality desired between the function vector and the columns of + the Jacobian. + maxfev : int, optional + The maximum number of calls to the function. If `Dfun` is provided, + then the default `maxfev` is 100*(N+1) where N is the number of elements + in x0, otherwise the default `maxfev` is 200*(N+1). + epsfcn : float, optional + A variable used in determining a suitable step length for the forward- + difference approximation of the Jacobian (for Dfun=None). + Normally the actual step length will be sqrt(epsfcn)*x + If epsfcn is less than the machine precision, it is assumed that the + relative errors are of the order of the machine precision. + factor : float, optional + A parameter determining the initial step bound + (``factor * || diag * x||``). Should be in interval ``(0.1, 100)``. + diag : sequence, optional + N positive entries that serve as a scale factors for the variables. + + Returns + ------- + x : ndarray + The solution (or the result of the last iteration for an unsuccessful + call). + cov_x : ndarray + The inverse of the Hessian. `fjac` and `ipvt` are used to construct an + estimate of the Hessian. A value of None indicates a singular matrix, + which means the curvature in parameters `x` is numerically flat. To + obtain the covariance matrix of the parameters `x`, `cov_x` must be + multiplied by the variance of the residuals -- see curve_fit. Only + returned if `full_output` is ``True``. 
+ infodict : dict + a dictionary of optional outputs with the keys: + + ``nfev`` + The number of function calls + ``fvec`` + The function evaluated at the output + ``fjac`` + A permutation of the R matrix of a QR + factorization of the final approximate + Jacobian matrix, stored column wise. + Together with ipvt, the covariance of the + estimate can be approximated. + ``ipvt`` + An integer array of length N which defines + a permutation matrix, p, such that + fjac*p = q*r, where r is upper triangular + with diagonal elements of nonincreasing + magnitude. Column j of p is column ipvt(j) + of the identity matrix. + ``qtf`` + The vector (transpose(q) * fvec). + + Only returned if `full_output` is ``True``. + mesg : str + A string message giving information about the cause of failure. + Only returned if `full_output` is ``True``. + ier : int + An integer flag. If it is equal to 1, 2, 3 or 4, the solution was + found. Otherwise, the solution was not found. In either case, the + optional output variable 'mesg' gives more information. + + See Also + -------- + least_squares : Newer interface to solve nonlinear least-squares problems + with bounds on the variables. See ``method='lm'`` in particular. + + Notes + ----- + "leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms. + + cov_x is a Jacobian approximation to the Hessian of the least squares + objective function. + This approximation assumes that the objective function is based on the + difference between some observed target data (ydata) and a (non-linear) + function of the parameters `f(xdata, params)` :: + + func(params) = ydata - f(xdata, params) + + so that the objective function is :: + + min sum((ydata - f(xdata, params))**2, axis=0) + params + + The solution, `x`, is always a 1-D array, regardless of the shape of `x0`, + or whether `x0` is a scalar. + + Examples + -------- + >>> from scipy.optimize import leastsq + >>> def func(x): + ... return 2*(x-3)**2+1 + >>> leastsq(func, 0) + (array([2.99999999]), 1) + + """ + x0 = asarray(x0).flatten() + n = len(x0) + if not isinstance(args, tuple): + args = (args,) + shape, dtype = _check_func('leastsq', 'func', func, x0, args, n) + m = shape[0] + + if n > m: + raise TypeError(f"Improper input: func input vector length N={n} must" + f" not exceed func output vector length M={m}") + + if epsfcn is None: + epsfcn = finfo(dtype).eps + + if Dfun is None: + if maxfev == 0: + maxfev = 200*(n + 1) + retval = _minpack._lmdif(func, x0, args, full_output, ftol, xtol, + gtol, maxfev, epsfcn, factor, diag) + else: + if col_deriv: + _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m)) + else: + _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n)) + if maxfev == 0: + maxfev = 100 * (n + 1) + retval = _minpack._lmder(func, Dfun, x0, args, full_output, + col_deriv, ftol, xtol, gtol, maxfev, + factor, diag) + + errors = {0: ["Improper input parameters.", TypeError], + 1: ["Both actual and predicted relative reductions " + "in the sum of squares\n are at most %f" % ftol, None], + 2: ["The relative error between two consecutive " + "iterates is at most %f" % xtol, None], + 3: ["Both actual and predicted relative reductions in " + f"the sum of squares\n are at most {ftol:f} and the " + "relative error between two consecutive " + f"iterates is at \n most {xtol:f}", None], + 4: ["The cosine of the angle between func(x) and any " + "column of the\n Jacobian is at most %f in " + "absolute value" % gtol, None], + 5: ["Number of calls to function has reached " + "maxfev = %d." 
% maxfev, ValueError], + 6: ["ftol=%f is too small, no further reduction " + "in the sum of squares\n is possible." % ftol, + ValueError], + 7: ["xtol=%f is too small, no further improvement in " + "the approximate\n solution is possible." % xtol, + ValueError], + 8: ["gtol=%f is too small, func(x) is orthogonal to the " + "columns of\n the Jacobian to machine " + "precision." % gtol, ValueError]} + + # The FORTRAN return value (possible return values are >= 0 and <= 8) + info = retval[-1] + + if full_output: + cov_x = None + if info in LEASTSQ_SUCCESS: + # This was + # perm = take(eye(n), retval[1]['ipvt'] - 1, 0) + # r = triu(transpose(retval[1]['fjac'])[:n, :]) + # R = dot(r, perm) + # cov_x = inv(dot(transpose(R), R)) + # but the explicit dot product was not necessary and sometimes + # the result was not symmetric positive definite. See gh-4555. + perm = retval[1]['ipvt'] - 1 + n = len(perm) + r = triu(transpose(retval[1]['fjac'])[:n, :]) + inv_triu = linalg.get_lapack_funcs('trtri', (r,)) + try: + # inverse of permuted matrix is a permutation of matrix inverse + invR, trtri_info = inv_triu(r) # default: upper, non-unit diag + if trtri_info != 0: # explicit comparison for readability + raise LinAlgError(f'trtri returned info {trtri_info}') + invR[perm] = invR.copy() + cov_x = invR @ invR.T + except (LinAlgError, ValueError): + pass + return (retval[0], cov_x) + retval[1:-1] + (errors[info][0], info) + else: + if info in LEASTSQ_FAILURE: + warnings.warn(errors[info][0], RuntimeWarning, stacklevel=2) + elif info == 0: + raise errors[info][1](errors[info][0]) + return retval[0], info + + +def _lightweight_memoizer(f): + # very shallow memoization to address gh-13670: only remember the first set + # of parameters and corresponding function value, and only attempt to use + # them twice (the number of times the function is evaluated at x0). 
+ def _memoized_func(params): + if _memoized_func.skip_lookup: + return f(params) + + if np.all(_memoized_func.last_params == params): + return _memoized_func.last_val + elif _memoized_func.last_params is not None: + _memoized_func.skip_lookup = True + + val = f(params) + + if _memoized_func.last_params is None: + _memoized_func.last_params = np.copy(params) + _memoized_func.last_val = val + + return val + + _memoized_func.last_params = None + _memoized_func.last_val = None + _memoized_func.skip_lookup = False + return _memoized_func + + +def _wrap_func(func, xdata, ydata, transform): + if transform is None: + def func_wrapped(params): + return func(xdata, *params) - ydata + elif transform.size == 1 or transform.ndim == 1: + def func_wrapped(params): + return transform * (func(xdata, *params) - ydata) + else: + # Chisq = (y - yd)^T C^{-1} (y-yd) + # transform = L such that C = L L^T + # C^{-1} = L^{-T} L^{-1} + # Chisq = (y - yd)^T L^{-T} L^{-1} (y-yd) + # Define (y-yd)' = L^{-1} (y-yd) + # by solving + # L (y-yd)' = (y-yd) + # and minimize (y-yd)'^T (y-yd)' + def func_wrapped(params): + return solve_triangular(transform, func(xdata, *params) - ydata, lower=True) + return func_wrapped + + +def _wrap_jac(jac, xdata, transform): + if transform is None: + def jac_wrapped(params): + return jac(xdata, *params) + elif transform.ndim == 1: + def jac_wrapped(params): + return transform[:, np.newaxis] * np.asarray(jac(xdata, *params)) + else: + def jac_wrapped(params): + return solve_triangular(transform, + np.asarray(jac(xdata, *params)), + lower=True) + return jac_wrapped + + +def _initialize_feasible(lb, ub): + p0 = np.ones_like(lb) + lb_finite = np.isfinite(lb) + ub_finite = np.isfinite(ub) + + mask = lb_finite & ub_finite + p0[mask] = 0.5 * (lb[mask] + ub[mask]) + + mask = lb_finite & ~ub_finite + p0[mask] = lb[mask] + 1 + + mask = ~lb_finite & ub_finite + p0[mask] = ub[mask] - 1 + + return p0 + + +def curve_fit(f, xdata, ydata, p0=None, sigma=None, absolute_sigma=False, + check_finite=None, bounds=(-np.inf, np.inf), method=None, + jac=None, *, full_output=False, nan_policy=None, + **kwargs): + """ + Use non-linear least squares to fit a function, f, to data. + + Assumes ``ydata = f(xdata, *params) + eps``. + + Parameters + ---------- + f : callable + The model function, f(x, ...). It must take the independent + variable as the first argument and the parameters to fit as + separate remaining arguments. + xdata : array_like + The independent variable where the data is measured. + Should usually be an M-length sequence or an (k,M)-shaped array for + functions with k predictors, and each element should be float + convertible if it is an array like object. + ydata : array_like + The dependent data, a length M array - nominally ``f(xdata, ...)``. + p0 : array_like, optional + Initial guess for the parameters (length N). If None, then the + initial values will all be 1 (if the number of parameters for the + function can be determined using introspection, otherwise a + ValueError is raised). + sigma : None or scalar or M-length sequence or MxM array, optional + Determines the uncertainty in `ydata`. If we define residuals as + ``r = ydata - f(xdata, *popt)``, then the interpretation of `sigma` + depends on its number of dimensions: + + - A scalar or 1-D `sigma` should contain values of standard deviations of + errors in `ydata`. In this case, the optimized function is + ``chisq = sum((r / sigma) ** 2)``. + + - A 2-D `sigma` should contain the covariance matrix of + errors in `ydata`. 
In this case, the optimized function is + ``chisq = r.T @ inv(sigma) @ r``. + + .. versionadded:: 0.19 + + None (default) is equivalent to 1-D `sigma` filled with ones. + absolute_sigma : bool, optional + If True, `sigma` is used in an absolute sense and the estimated parameter + covariance `pcov` reflects these absolute values. + + If False (default), only the relative magnitudes of the `sigma` values matter. + The returned parameter covariance matrix `pcov` is based on scaling + `sigma` by a constant factor. This constant is set by demanding that the + reduced `chisq` for the optimal parameters `popt` when using the + *scaled* `sigma` equals unity. In other words, `sigma` is scaled to + match the sample variance of the residuals after the fit. Default is False. + Mathematically, + ``pcov(absolute_sigma=False) = pcov(absolute_sigma=True) * chisq(popt)/(M-N)`` + check_finite : bool, optional + If True, check that the input arrays do not contain nans or infs, + and raise a ValueError if they do. Setting this parameter to + False may silently produce nonsensical results if the input arrays + do contain nans. Default is True if `nan_policy` is not specified + explicitly and False otherwise. + bounds : 2-tuple of array_like or `Bounds`, optional + Lower and upper bounds on parameters. Defaults to no bounds. + There are two ways to specify the bounds: + + - Instance of `Bounds` class. + + - 2-tuple of array_like: Each element of the tuple must be either + an array with the length equal to the number of parameters, or a + scalar (in which case the bound is taken to be the same for all + parameters). Use ``np.inf`` with an appropriate sign to disable + bounds on all or some parameters. + + method : {'lm', 'trf', 'dogbox'}, optional + Method to use for optimization. See `least_squares` for more details. + Default is 'lm' for unconstrained problems and 'trf' if `bounds` are + provided. The method 'lm' won't work when the number of observations + is less than the number of variables; use 'trf' or 'dogbox' in this + case. + + .. versionadded:: 0.17 + jac : callable, string or None, optional + Function with signature ``jac(x, ...)`` which computes the Jacobian + matrix of the model function with respect to parameters as a dense + array_like structure. It will be scaled according to provided `sigma`. + If None (default), the Jacobian will be estimated numerically. + String keywords for 'trf' and 'dogbox' methods can be used to select + a finite difference scheme, see `least_squares`. + + .. versionadded:: 0.18 + full_output : boolean, optional + If True, this function returns additional information: `infodict`, + `mesg`, and `ier`. + + .. versionadded:: 1.9 + nan_policy : {'raise', 'omit', None}, optional + Defines how to handle input that contains nan. + The following options are available (default is None): + + * 'raise': raises an error + * 'omit': performs the calculations ignoring nan values + * None: no special handling of NaNs is performed + (except what is done by check_finite); the behavior when NaNs + are present is implementation-dependent and may change. + + Note that if this value is specified explicitly (not None), + `check_finite` will be set to False. + + .. versionadded:: 1.11 + **kwargs + Keyword arguments passed to `leastsq` for ``method='lm'`` or + `least_squares` otherwise. + + Returns + ------- + popt : array + Optimal values for the parameters so that the sum of the squared + residuals of ``f(xdata, *popt) - ydata`` is minimized.
+ pcov : 2-D array + The estimated approximate covariance of popt. The diagonals provide + the variance of the parameter estimate. To compute one standard + deviation errors on the parameters, use + ``perr = np.sqrt(np.diag(pcov))``. Note that the relationship between + `cov` and parameter error estimates is derived based on a linear + approximation to the model function around the optimum [1]. + When this approximation becomes inaccurate, `cov` may not provide an + accurate measure of uncertainty. + + How the `sigma` parameter affects the estimated covariance + depends on `absolute_sigma` argument, as described above. + + If the Jacobian matrix at the solution doesn't have a full rank, then + 'lm' method returns a matrix filled with ``np.inf``, on the other hand + 'trf' and 'dogbox' methods use Moore-Penrose pseudoinverse to compute + the covariance matrix. Covariance matrices with large condition numbers + (e.g. computed with `numpy.linalg.cond`) may indicate that results are + unreliable. + infodict : dict (returned only if `full_output` is True) + a dictionary of optional outputs with the keys: + + ``nfev`` + The number of function calls. Methods 'trf' and 'dogbox' do not + count function calls for numerical Jacobian approximation, + as opposed to 'lm' method. + ``fvec`` + The residual values evaluated at the solution, for a 1-D `sigma` + this is ``(f(x, *popt) - ydata)/sigma``. + ``fjac`` + A permutation of the R matrix of a QR + factorization of the final approximate + Jacobian matrix, stored column wise. + Together with ipvt, the covariance of the + estimate can be approximated. + Method 'lm' only provides this information. + ``ipvt`` + An integer array of length N which defines + a permutation matrix, p, such that + fjac*p = q*r, where r is upper triangular + with diagonal elements of nonincreasing + magnitude. Column j of p is column ipvt(j) + of the identity matrix. + Method 'lm' only provides this information. + ``qtf`` + The vector (transpose(q) * fvec). + Method 'lm' only provides this information. + + .. versionadded:: 1.9 + mesg : str (returned only if `full_output` is True) + A string message giving information about the solution. + + .. versionadded:: 1.9 + ier : int (returned only if `full_output` is True) + An integer flag. If it is equal to 1, 2, 3 or 4, the solution was + found. Otherwise, the solution was not found. In either case, the + optional output variable `mesg` gives more information. + + .. versionadded:: 1.9 + + Raises + ------ + ValueError + if either `ydata` or `xdata` contain NaNs, or if incompatible options + are used. + + RuntimeError + if the least-squares minimization fails. + + OptimizeWarning + if covariance of the parameters can not be estimated. + + See Also + -------- + least_squares : Minimize the sum of squares of nonlinear functions. + scipy.stats.linregress : Calculate a linear least squares regression for + two sets of measurements. + + Notes + ----- + Users should ensure that inputs `xdata`, `ydata`, and the output of `f` + are ``float64``, or else the optimization may return incorrect results. + + With ``method='lm'``, the algorithm uses the Levenberg-Marquardt algorithm + through `leastsq`. Note that this algorithm can only deal with + unconstrained problems. + + Box constraints can be handled by methods 'trf' and 'dogbox'. Refer to + the docstring of `least_squares` for more information. + + Parameters to be fitted must have similar scale. Differences of multiple + orders of magnitude can lead to incorrect results. 
For the 'trf' and + 'dogbox' methods, the `x_scale` keyword argument can be used to scale + the parameters. + + References + ---------- + [1] K. Vugrin et al. Confidence region estimation techniques for nonlinear + regression in groundwater flow: Three case studies. Water Resources + Research, Vol. 43, W03423, :doi:`10.1029/2005WR004804` + + Examples + -------- + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.optimize import curve_fit + + >>> def func(x, a, b, c): + ... return a * np.exp(-b * x) + c + + Define the data to be fit with some noise: + + >>> xdata = np.linspace(0, 4, 50) + >>> y = func(xdata, 2.5, 1.3, 0.5) + >>> rng = np.random.default_rng() + >>> y_noise = 0.2 * rng.normal(size=xdata.size) + >>> ydata = y + y_noise + >>> plt.plot(xdata, ydata, 'b-', label='data') + + Fit for the parameters a, b, c of the function `func`: + + >>> popt, pcov = curve_fit(func, xdata, ydata) + >>> popt + array([2.56274217, 1.37268521, 0.47427475]) + >>> plt.plot(xdata, func(xdata, *popt), 'r-', + ... label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt)) + + Constrain the optimization to the region of ``0 <= a <= 3``, + ``0 <= b <= 1`` and ``0 <= c <= 0.5``: + + >>> popt, pcov = curve_fit(func, xdata, ydata, bounds=(0, [3., 1., 0.5])) + >>> popt + array([2.43736712, 1. , 0.34463856]) + >>> plt.plot(xdata, func(xdata, *popt), 'g--', + ... label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt)) + + >>> plt.xlabel('x') + >>> plt.ylabel('y') + >>> plt.legend() + >>> plt.show() + + For reliable results, the model `func` should not be overparametrized; + redundant parameters can cause unreliable covariance matrices and, in some + cases, poorer quality fits. As a quick check of whether the model may be + overparameterized, calculate the condition number of the covariance matrix: + + >>> np.linalg.cond(pcov) + 34.571092161547405 # may vary + + The value is small, so it does not raise much concern. If, however, we were + to add a fourth parameter ``d`` to `func` with the same effect as ``a``: + + >>> def func2(x, a, b, c, d): + ... return a * d * np.exp(-b * x) + c # a and d are redundant + >>> popt, pcov = curve_fit(func2, xdata, ydata) + >>> np.linalg.cond(pcov) + 1.13250718925596e+32 # may vary + + Such a large value is cause for concern. The diagonal elements of the + covariance matrix, which is related to uncertainty of the fit, gives more + information: + + >>> np.diag(pcov) + array([1.48814742e+29, 3.78596560e-02, 5.39253738e-03, 2.76417220e+28]) # may vary + + Note that the first and last terms are much larger than the other elements, + suggesting that the optimal values of these parameters are ambiguous and + that only one of these parameters is needed in the model. + + If the optimal parameters of `f` differ by multiple orders of magnitude, the + resulting fit can be inaccurate. Sometimes, `curve_fit` can fail to find any + results: + + >>> ydata = func(xdata, 500000, 0.01, 15) + >>> try: + ... popt, pcov = curve_fit(func, xdata, ydata, method = 'trf') + ... except RuntimeError as e: + ... print(e) + Optimal parameters not found: The maximum number of function evaluations is exceeded. + + If parameter scale is roughly known beforehand, it can be defined in + `x_scale` argument: + + >>> popt, pcov = curve_fit(func, xdata, ydata, method = 'trf', + ... 
x_scale = [1000, 1, 1]) + >>> popt + array([5.00000000e+05, 1.00000000e-02, 1.49999999e+01]) + """ + if p0 is None: + # determine number of parameters by inspecting the function + sig = _getfullargspec(f) + args = sig.args + if len(args) < 2: + raise ValueError("Unable to determine number of fit parameters.") + n = len(args) - 1 + else: + p0 = np.atleast_1d(p0) + n = p0.size + + if isinstance(bounds, Bounds): + lb, ub = bounds.lb, bounds.ub + else: + lb, ub = prepare_bounds(bounds, n) + if p0 is None: + p0 = _initialize_feasible(lb, ub) + + bounded_problem = np.any((lb > -np.inf) | (ub < np.inf)) + if method is None: + if bounded_problem: + method = 'trf' + else: + method = 'lm' + + if method == 'lm' and bounded_problem: + raise ValueError("Method 'lm' only works for unconstrained problems. " + "Use 'trf' or 'dogbox' instead.") + + if check_finite is None: + check_finite = True if nan_policy is None else False + + # optimization may produce garbage for float32 inputs, cast them to float64 + if check_finite: + ydata = np.asarray_chkfinite(ydata, float) + else: + ydata = np.asarray(ydata, float) + + if isinstance(xdata, (list, tuple, np.ndarray)): + # `xdata` is passed straight to the user-defined `f`, so allow + # non-array_like `xdata`. + if check_finite: + xdata = np.asarray_chkfinite(xdata, float) + else: + xdata = np.asarray(xdata, float) + + if ydata.size == 0: + raise ValueError("`ydata` must not be empty!") + + # nan handling is needed only if check_finite is False because if True, + # the x-y data are already checked, and they don't contain nans. + if not check_finite and nan_policy is not None: + if nan_policy == "propagate": + raise ValueError("`nan_policy='propagate'` is not supported " + "by this function.") + + policies = [None, 'raise', 'omit'] + x_contains_nan, nan_policy = _contains_nan(xdata, nan_policy, + policies=policies) + y_contains_nan, nan_policy = _contains_nan(ydata, nan_policy, + policies=policies) + + if (x_contains_nan or y_contains_nan) and nan_policy == 'omit': + # ignore NaNs for N dimensional arrays + has_nan = np.isnan(xdata) + has_nan = has_nan.any(axis=tuple(range(has_nan.ndim-1))) + has_nan |= np.isnan(ydata) + + xdata = xdata[..., ~has_nan] + ydata = ydata[~has_nan] + + # Determine type of sigma + if sigma is not None: + sigma = np.asarray(sigma) + + # if 1-D or a scalar, sigma are errors, define transform = 1/sigma + if sigma.size == 1 or sigma.shape == (ydata.size, ): + transform = 1.0 / sigma + # if 2-D, sigma is the covariance matrix, + # define transform = L such that L L^T = C + elif sigma.shape == (ydata.size, ydata.size): + try: + # scipy.linalg.cholesky requires lower=True to return L L^T = A + transform = cholesky(sigma, lower=True) + except LinAlgError as e: + raise ValueError("`sigma` must be positive definite.") from e + else: + raise ValueError("`sigma` has incorrect shape.") + else: + transform = None + + func = _lightweight_memoizer(_wrap_func(f, xdata, ydata, transform)) + + if callable(jac): + jac = _lightweight_memoizer(_wrap_jac(jac, xdata, transform)) + elif jac is None and method != 'lm': + jac = '2-point' + + if 'args' in kwargs: + # The specification for the model function `f` does not support + # additional arguments. Refer to the `curve_fit` docstring for + # acceptable call signatures of `f`. + raise ValueError("'args' is not a supported keyword argument.") + + if method == 'lm': + # if ydata.size == 1, this might be used for broadcast. 
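+        # The check below reflects the docstring note that 'lm' requires at
+        # least as many observations as parameters (a MINPACK requirement for
+        # the underlying leastsq routine).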
+ if ydata.size != 1 and n > ydata.size: + raise TypeError(f"The number of func parameters={n} must not" + f" exceed the number of data points={ydata.size}") + res = leastsq(func, p0, Dfun=jac, full_output=1, **kwargs) + popt, pcov, infodict, errmsg, ier = res + ysize = len(infodict['fvec']) + cost = np.sum(infodict['fvec'] ** 2) + if ier not in [1, 2, 3, 4]: + raise RuntimeError("Optimal parameters not found: " + errmsg) + else: + # Rename maxfev (leastsq) to max_nfev (least_squares), if specified. + if 'max_nfev' not in kwargs: + kwargs['max_nfev'] = kwargs.pop('maxfev', None) + + res = least_squares(func, p0, jac=jac, bounds=bounds, method=method, + **kwargs) + + if not res.success: + raise RuntimeError("Optimal parameters not found: " + res.message) + + infodict = dict(nfev=res.nfev, fvec=res.fun) + ier = res.status + errmsg = res.message + + ysize = len(res.fun) + cost = 2 * res.cost # res.cost is half sum of squares! + popt = res.x + + # Do Moore-Penrose inverse discarding zero singular values. + _, s, VT = svd(res.jac, full_matrices=False) + threshold = np.finfo(float).eps * max(res.jac.shape) * s[0] + s = s[s > threshold] + VT = VT[:s.size] + pcov = np.dot(VT.T / s**2, VT) + + warn_cov = False + if pcov is None or np.isnan(pcov).any(): + # indeterminate covariance + pcov = zeros((len(popt), len(popt)), dtype=float) + pcov.fill(inf) + warn_cov = True + elif not absolute_sigma: + if ysize > p0.size: + s_sq = cost / (ysize - p0.size) + pcov = pcov * s_sq + else: + pcov.fill(inf) + warn_cov = True + + if warn_cov: + warnings.warn('Covariance of the parameters could not be estimated', + category=OptimizeWarning, stacklevel=2) + + if full_output: + return popt, pcov, infodict, errmsg, ier + else: + return popt, pcov + + +def check_gradient(fcn, Dfcn, x0, args=(), col_deriv=0): + """Perform a simple check on the gradient for correctness. + + """ + + x = atleast_1d(x0) + n = len(x) + x = x.reshape((n,)) + fvec = atleast_1d(fcn(x, *args)) + m = len(fvec) + fvec = fvec.reshape((m,)) + ldfjac = m + fjac = atleast_1d(Dfcn(x, *args)) + fjac = fjac.reshape((m, n)) + if col_deriv == 0: + fjac = transpose(fjac) + + xp = zeros((n,), float) + err = zeros((m,), float) + fvecp = None + _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 1, err) + + fvecp = atleast_1d(fcn(xp, *args)) + fvecp = fvecp.reshape((m,)) + _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 2, err) + + good = (prod(greater(err, 0.5), axis=0)) + + return (good, err) + + +def _del2(p0, p1, d): + return p0 - np.square(p1 - p0) / d + + +def _relerr(actual, desired): + return (actual - desired) / desired + + +def _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel): + p0 = x0 + for i in range(maxiter): + p1 = func(p0, *args) + if use_accel: + p2 = func(p1, *args) + d = p2 - 2.0 * p1 + p0 + p = _lazywhere(d != 0, (p0, p1, d), f=_del2, fillvalue=p2) + else: + p = p1 + relerr = _lazywhere(p0 != 0, (p, p0), f=_relerr, fillvalue=p) + if np.all(np.abs(relerr) < xtol): + return p + p0 = p + msg = "Failed to converge after %d iterations, value is %s" % (maxiter, p) + raise RuntimeError(msg) + + +def fixed_point(func, x0, args=(), xtol=1e-8, maxiter=500, method='del2'): + """ + Find a fixed point of the function. + + Given a function of one or more variables and a starting point, find a + fixed point of the function: i.e., where ``func(x0) == x0``. + + Parameters + ---------- + func : function + Function to evaluate. + x0 : array_like + Fixed point of function. + args : tuple, optional + Extra arguments to `func`. 
+ xtol : float, optional + Convergence tolerance, defaults to 1e-08. + maxiter : int, optional + Maximum number of iterations, defaults to 500. + method : {"del2", "iteration"}, optional + Method of finding the fixed-point, defaults to "del2", + which uses Steffensen's Method with Aitken's ``Del^2`` + convergence acceleration [1]_. The "iteration" method simply iterates + the function until convergence is detected, without attempting to + accelerate the convergence. + + References + ---------- + .. [1] Burden, Faires, "Numerical Analysis", 5th edition, pg. 80 + + Examples + -------- + >>> import numpy as np + >>> from scipy import optimize + >>> def func(x, c1, c2): + ... return np.sqrt(c1/(x+c2)) + >>> c1 = np.array([10,12.]) + >>> c2 = np.array([3, 5.]) + >>> optimize.fixed_point(func, [1.2, 1.3], args=(c1,c2)) + array([ 1.4920333 , 1.37228132]) + + """ + use_accel = {'del2': True, 'iteration': False}[method] + x0 = _asarray_validated(x0, as_inexact=True) + return _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_numdiff.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_numdiff.py new file mode 100644 index 0000000000000000000000000000000000000000..d6bd0d37a460b4279adbb55f4cbf067233aa16ed --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_numdiff.py @@ -0,0 +1,775 @@ +"""Routines for numerical differentiation.""" +import functools +import numpy as np +from numpy.linalg import norm + +from scipy.sparse.linalg import LinearOperator +from ..sparse import issparse, csc_matrix, csr_matrix, coo_matrix, find +from ._group_columns import group_dense, group_sparse +from scipy._lib._array_api import atleast_nd, array_namespace + + +def _adjust_scheme_to_bounds(x0, h, num_steps, scheme, lb, ub): + """Adjust final difference scheme to the presence of bounds. + + Parameters + ---------- + x0 : ndarray, shape (n,) + Point at which we wish to estimate derivative. + h : ndarray, shape (n,) + Desired absolute finite difference steps. + num_steps : int + Number of `h` steps in one direction required to implement finite + difference scheme. For example, 2 means that we need to evaluate + f(x0 + 2 * h) or f(x0 - 2 * h) + scheme : {'1-sided', '2-sided'} + Whether steps in one or both directions are required. In other + words '1-sided' applies to forward and backward schemes, '2-sided' + applies to center schemes. + lb : ndarray, shape (n,) + Lower bounds on independent variables. + ub : ndarray, shape (n,) + Upper bounds on independent variables. + + Returns + ------- + h_adjusted : ndarray, shape (n,) + Adjusted absolute step sizes. Step size decreases only if a sign flip + or switching to one-sided scheme doesn't allow to take a full step. + use_one_sided : ndarray of bool, shape (n,) + Whether to switch to one-sided scheme. Informative only for + ``scheme='2-sided'``. 
+ """ + if scheme == '1-sided': + use_one_sided = np.ones_like(h, dtype=bool) + elif scheme == '2-sided': + h = np.abs(h) + use_one_sided = np.zeros_like(h, dtype=bool) + else: + raise ValueError("`scheme` must be '1-sided' or '2-sided'.") + + if np.all((lb == -np.inf) & (ub == np.inf)): + return h, use_one_sided + + h_total = h * num_steps + h_adjusted = h.copy() + + lower_dist = x0 - lb + upper_dist = ub - x0 + + if scheme == '1-sided': + x = x0 + h_total + violated = (x < lb) | (x > ub) + fitting = np.abs(h_total) <= np.maximum(lower_dist, upper_dist) + h_adjusted[violated & fitting] *= -1 + + forward = (upper_dist >= lower_dist) & ~fitting + h_adjusted[forward] = upper_dist[forward] / num_steps + backward = (upper_dist < lower_dist) & ~fitting + h_adjusted[backward] = -lower_dist[backward] / num_steps + elif scheme == '2-sided': + central = (lower_dist >= h_total) & (upper_dist >= h_total) + + forward = (upper_dist >= lower_dist) & ~central + h_adjusted[forward] = np.minimum( + h[forward], 0.5 * upper_dist[forward] / num_steps) + use_one_sided[forward] = True + + backward = (upper_dist < lower_dist) & ~central + h_adjusted[backward] = -np.minimum( + h[backward], 0.5 * lower_dist[backward] / num_steps) + use_one_sided[backward] = True + + min_dist = np.minimum(upper_dist, lower_dist) / num_steps + adjusted_central = (~central & (np.abs(h_adjusted) <= min_dist)) + h_adjusted[adjusted_central] = min_dist[adjusted_central] + use_one_sided[adjusted_central] = False + + return h_adjusted, use_one_sided + + +@functools.lru_cache +def _eps_for_method(x0_dtype, f0_dtype, method): + """ + Calculates relative EPS step to use for a given data type + and numdiff step method. + + Progressively smaller steps are used for larger floating point types. + + Parameters + ---------- + f0_dtype: np.dtype + dtype of function evaluation + + x0_dtype: np.dtype + dtype of parameter vector + + method: {'2-point', '3-point', 'cs'} + + Returns + ------- + EPS: float + relative step size. May be np.float16, np.float32, np.float64 + + Notes + ----- + The default relative step will be np.float64. However, if x0 or f0 are + smaller floating point types (np.float16, np.float32), then the smallest + floating point type is chosen. + """ + # the default EPS value + EPS = np.finfo(np.float64).eps + + x0_is_fp = False + if np.issubdtype(x0_dtype, np.inexact): + # if you're a floating point type then over-ride the default EPS + EPS = np.finfo(x0_dtype).eps + x0_itemsize = np.dtype(x0_dtype).itemsize + x0_is_fp = True + + if np.issubdtype(f0_dtype, np.inexact): + f0_itemsize = np.dtype(f0_dtype).itemsize + # choose the smallest itemsize between x0 and f0 + if x0_is_fp and f0_itemsize < x0_itemsize: + EPS = np.finfo(f0_dtype).eps + + if method in ["2-point", "cs"]: + return EPS**0.5 + elif method in ["3-point"]: + return EPS**(1/3) + else: + raise RuntimeError("Unknown step method, should be one of " + "{'2-point', '3-point', 'cs'}") + + +def _compute_absolute_step(rel_step, x0, f0, method): + """ + Computes an absolute step from a relative step for finite difference + calculation. + + Parameters + ---------- + rel_step: None or array-like + Relative step for the finite difference calculation + x0 : np.ndarray + Parameter vector + f0 : np.ndarray or scalar + method : {'2-point', '3-point', 'cs'} + + Returns + ------- + h : float + The absolute step size + + Notes + ----- + `h` will always be np.float64. However, if `x0` or `f0` are + smaller floating point dtypes (e.g. 
np.float32), then the absolute + step size will be calculated from the smallest floating point size. + """ + # this is used instead of np.sign(x0) because we need + # sign_x0 to be 1 when x0 == 0. + sign_x0 = (x0 >= 0).astype(float) * 2 - 1 + + rstep = _eps_for_method(x0.dtype, f0.dtype, method) + + if rel_step is None: + abs_step = rstep * sign_x0 * np.maximum(1.0, np.abs(x0)) + else: + # User has requested specific relative steps. + # Don't multiply by max(1, abs(x0) because if x0 < 1 then their + # requested step is not used. + abs_step = rel_step * sign_x0 * np.abs(x0) + + # however we don't want an abs_step of 0, which can happen if + # rel_step is 0, or x0 is 0. Instead, substitute a realistic step + dx = ((x0 + abs_step) - x0) + abs_step = np.where(dx == 0, + rstep * sign_x0 * np.maximum(1.0, np.abs(x0)), + abs_step) + + return abs_step + + +def _prepare_bounds(bounds, x0): + """ + Prepares new-style bounds from a two-tuple specifying the lower and upper + limits for values in x0. If a value is not bound then the lower/upper bound + will be expected to be -np.inf/np.inf. + + Examples + -------- + >>> _prepare_bounds([(0, 1, 2), (1, 2, np.inf)], [0.5, 1.5, 2.5]) + (array([0., 1., 2.]), array([ 1., 2., inf])) + """ + lb, ub = (np.asarray(b, dtype=float) for b in bounds) + if lb.ndim == 0: + lb = np.resize(lb, x0.shape) + + if ub.ndim == 0: + ub = np.resize(ub, x0.shape) + + return lb, ub + + +def group_columns(A, order=0): + """Group columns of a 2-D matrix for sparse finite differencing [1]_. + + Two columns are in the same group if in each row at least one of them + has zero. A greedy sequential algorithm is used to construct groups. + + Parameters + ---------- + A : array_like or sparse matrix, shape (m, n) + Matrix of which to group columns. + order : int, iterable of int with shape (n,) or None + Permutation array which defines the order of columns enumeration. + If int or None, a random permutation is used with `order` used as + a random seed. Default is 0, that is use a random permutation but + guarantee repeatability. + + Returns + ------- + groups : ndarray of int, shape (n,) + Contains values from 0 to n_groups-1, where n_groups is the number + of found groups. Each value ``groups[i]`` is an index of a group to + which ith column assigned. The procedure was helpful only if + n_groups is significantly less than n. + + References + ---------- + .. [1] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of + sparse Jacobian matrices", Journal of the Institute of Mathematics + and its Applications, 13 (1974), pp. 117-120. + """ + if issparse(A): + A = csc_matrix(A) + else: + A = np.atleast_2d(A) + A = (A != 0).astype(np.int32) + + if A.ndim != 2: + raise ValueError("`A` must be 2-dimensional.") + + m, n = A.shape + + if order is None or np.isscalar(order): + rng = np.random.RandomState(order) + order = rng.permutation(n) + else: + order = np.asarray(order) + if order.shape != (n,): + raise ValueError("`order` has incorrect shape.") + + A = A[:, order] + + if issparse(A): + groups = group_sparse(m, n, A.indices, A.indptr) + else: + groups = group_dense(m, n, A) + + groups[order] = groups.copy() + + return groups + + +def approx_derivative(fun, x0, method='3-point', rel_step=None, abs_step=None, + f0=None, bounds=(-np.inf, np.inf), sparsity=None, + as_linear_operator=False, args=(), kwargs={}): + """Compute finite difference approximation of the derivatives of a + vector-valued function. 
+ + If a function maps from R^n to R^m, its derivatives form m-by-n matrix + called the Jacobian, where an element (i, j) is a partial derivative of + f[i] with respect to x[j]. + + Parameters + ---------- + fun : callable + Function of which to estimate the derivatives. The argument x + passed to this function is ndarray of shape (n,) (never a scalar + even if n=1). It must return 1-D array_like of shape (m,) or a scalar. + x0 : array_like of shape (n,) or float + Point at which to estimate the derivatives. Float will be converted + to a 1-D array. + method : {'3-point', '2-point', 'cs'}, optional + Finite difference method to use: + - '2-point' - use the first order accuracy forward or backward + difference. + - '3-point' - use central difference in interior points and the + second order accuracy forward or backward difference + near the boundary. + - 'cs' - use a complex-step finite difference scheme. This assumes + that the user function is real-valued and can be + analytically continued to the complex plane. Otherwise, + produces bogus results. + rel_step : None or array_like, optional + Relative step size to use. If None (default) the absolute step size is + computed as ``h = rel_step * sign(x0) * max(1, abs(x0))``, with + `rel_step` being selected automatically, see Notes. Otherwise + ``h = rel_step * sign(x0) * abs(x0)``. For ``method='3-point'`` the + sign of `h` is ignored. The calculated step size is possibly adjusted + to fit into the bounds. + abs_step : array_like, optional + Absolute step size to use, possibly adjusted to fit into the bounds. + For ``method='3-point'`` the sign of `abs_step` is ignored. By default + relative steps are used, only if ``abs_step is not None`` are absolute + steps used. + f0 : None or array_like, optional + If not None it is assumed to be equal to ``fun(x0)``, in this case + the ``fun(x0)`` is not called. Default is None. + bounds : tuple of array_like, optional + Lower and upper bounds on independent variables. Defaults to no bounds. + Each bound must match the size of `x0` or be a scalar, in the latter + case the bound will be the same for all variables. Use it to limit the + range of function evaluation. Bounds checking is not implemented + when `as_linear_operator` is True. + sparsity : {None, array_like, sparse matrix, 2-tuple}, optional + Defines a sparsity structure of the Jacobian matrix. If the Jacobian + matrix is known to have only few non-zero elements in each row, then + it's possible to estimate its several columns by a single function + evaluation [3]_. To perform such economic computations two ingredients + are required: + + * structure : array_like or sparse matrix of shape (m, n). A zero + element means that a corresponding element of the Jacobian + identically equals to zero. + * groups : array_like of shape (n,). A column grouping for a given + sparsity structure, use `group_columns` to obtain it. + + A single array or a sparse matrix is interpreted as a sparsity + structure, and groups are computed inside the function. A tuple is + interpreted as (structure, groups). If None (default), a standard + dense differencing will be used. + + Note, that sparse differencing makes sense only for large Jacobian + matrices where each row contains few non-zero elements. + as_linear_operator : bool, optional + When True the function returns an `scipy.sparse.linalg.LinearOperator`. + Otherwise it returns a dense array or a sparse matrix depending on + `sparsity`. 
The linear operator provides an efficient way of computing + ``J.dot(p)`` for any vector ``p`` of shape (n,), but does not allow + direct access to individual elements of the matrix. By default + `as_linear_operator` is False. + args, kwargs : tuple and dict, optional + Additional arguments passed to `fun`. Both empty by default. + The calling signature is ``fun(x, *args, **kwargs)``. + + Returns + ------- + J : {ndarray, sparse matrix, LinearOperator} + Finite difference approximation of the Jacobian matrix. + If `as_linear_operator` is True, returns a LinearOperator + with shape (m, n). Otherwise it returns a dense array or sparse + matrix depending on how `sparsity` is defined. If `sparsity` + is None then an ndarray with shape (m, n) is returned. If + `sparsity` is not None, returns a csr_matrix with shape (m, n). + For sparse matrices and linear operators it is always returned as + a 2-D structure; for ndarrays, if m=1 it is returned + as a 1-D gradient array with shape (n,). + + See Also + -------- + check_derivative : Check correctness of a function computing derivatives. + + Notes + ----- + If `rel_step` is not provided, it is assigned as ``EPS**(1/s)``, where EPS is + determined from the smallest floating point dtype of `x0` or `fun(x0)`, + ``np.finfo(x0.dtype).eps``, s=2 for '2-point' method and + s=3 for '3-point' method. Such a relative step approximately minimizes a sum + of truncation and round-off errors, see [1]_. Relative steps are used by + default. However, absolute steps are used when ``abs_step is not None``. + If any of the absolute or relative steps produces an indistinguishable + difference from the original `x0`, ``(x0 + dx) - x0 == 0``, then an + automatic step size is substituted for that particular entry. + + A finite difference scheme for the '3-point' method is selected automatically. + The well-known central difference scheme is used for points sufficiently + far from the boundary, and a 3-point forward or backward scheme is used for + points near the boundary. Both schemes have second-order accuracy in + terms of Taylor expansion. Refer to [2]_ for the formulas of the 3-point + forward and backward difference schemes. + + For dense differencing, when m=1 the Jacobian is returned with shape (n,); + on the other hand, when n=1 the Jacobian is returned with shape (m, 1). + Our motivation is the following: a) It handles the case of gradient + computation (m=1) in a conventional way. b) It clearly separates these two + different cases. c) In all cases np.atleast_2d can be called to get a 2-D + Jacobian with correct dimensions. + + References + ---------- + .. [1] W. H. Press et al. "Numerical Recipes. The Art of Scientific + Computing. 3rd edition", sec. 5.7. + + .. [2] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of + sparse Jacobian matrices", Journal of the Institute of Mathematics + and its Applications, 13 (1974), pp. 117-120. + + .. [3] B. Fornberg, "Generation of Finite Difference Formulas on + Arbitrarily Spaced Grids", Mathematics of Computation 51, 1988. + + Examples + -------- + >>> import numpy as np + >>> from scipy.optimize._numdiff import approx_derivative + >>> + >>> def f(x, c1, c2): + ... return np.array([x[0] * np.sin(c1 * x[1]), + ... x[0] * np.cos(c2 * x[1])]) + ... + >>> x0 = np.array([1.0, 0.5 * np.pi]) + >>> approx_derivative(f, x0, args=(1, 2)) + array([[ 1., 0.], + [-1., 0.]]) + + Bounds can be used to limit the region of function evaluation. + In the example below we compute the left and right derivatives at the point 1.0. + + >>> def g(x): + ...
return x**2 if x >= 1 else x + ... + >>> x0 = 1.0 + >>> approx_derivative(g, x0, bounds=(-np.inf, 1.0)) + array([ 1.]) + >>> approx_derivative(g, x0, bounds=(1.0, np.inf)) + array([ 2.]) + """ + if method not in ['2-point', '3-point', 'cs']: + raise ValueError("Unknown method '%s'. " % method) + + xp = array_namespace(x0) + _x = atleast_nd(x0, ndim=1, xp=xp) + _dtype = xp.float64 + if xp.isdtype(_x.dtype, "real floating"): + _dtype = _x.dtype + + # promotes to floating + x0 = xp.astype(_x, _dtype) + + if x0.ndim > 1: + raise ValueError("`x0` must have at most 1 dimension.") + + lb, ub = _prepare_bounds(bounds, x0) + + if lb.shape != x0.shape or ub.shape != x0.shape: + raise ValueError("Inconsistent shapes between bounds and `x0`.") + + if as_linear_operator and not (np.all(np.isinf(lb)) + and np.all(np.isinf(ub))): + raise ValueError("Bounds not supported when " + "`as_linear_operator` is True.") + + def fun_wrapped(x): + # send user function same fp type as x0. (but only if cs is not being + # used + if xp.isdtype(x.dtype, "real floating"): + x = xp.astype(x, x0.dtype) + + f = np.atleast_1d(fun(x, *args, **kwargs)) + if f.ndim > 1: + raise RuntimeError("`fun` return value has " + "more than 1 dimension.") + return f + + if f0 is None: + f0 = fun_wrapped(x0) + else: + f0 = np.atleast_1d(f0) + if f0.ndim > 1: + raise ValueError("`f0` passed has more than 1 dimension.") + + if np.any((x0 < lb) | (x0 > ub)): + raise ValueError("`x0` violates bound constraints.") + + if as_linear_operator: + if rel_step is None: + rel_step = _eps_for_method(x0.dtype, f0.dtype, method) + + return _linear_operator_difference(fun_wrapped, x0, + f0, rel_step, method) + else: + # by default we use rel_step + if abs_step is None: + h = _compute_absolute_step(rel_step, x0, f0, method) + else: + # user specifies an absolute step + sign_x0 = (x0 >= 0).astype(float) * 2 - 1 + h = abs_step + + # cannot have a zero step. This might happen if x0 is very large + # or small. In which case fall back to relative step. 
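+        # Illustration (float64): with x0 = 1e20 and h = 1e-8 the rounded sum
+        # x0 + h equals x0, so (x0 + h) - x0 == 0 and the requested step would
+        # vanish; the np.where fallback below substitutes a relative step.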
+ dx = ((x0 + h) - x0) + h = np.where(dx == 0, + _eps_for_method(x0.dtype, f0.dtype, method) * + sign_x0 * np.maximum(1.0, np.abs(x0)), + h) + + if method == '2-point': + h, use_one_sided = _adjust_scheme_to_bounds( + x0, h, 1, '1-sided', lb, ub) + elif method == '3-point': + h, use_one_sided = _adjust_scheme_to_bounds( + x0, h, 1, '2-sided', lb, ub) + elif method == 'cs': + use_one_sided = False + + if sparsity is None: + return _dense_difference(fun_wrapped, x0, f0, h, + use_one_sided, method) + else: + if not issparse(sparsity) and len(sparsity) == 2: + structure, groups = sparsity + else: + structure = sparsity + groups = group_columns(sparsity) + + if issparse(structure): + structure = csc_matrix(structure) + else: + structure = np.atleast_2d(structure) + + groups = np.atleast_1d(groups) + return _sparse_difference(fun_wrapped, x0, f0, h, + use_one_sided, structure, + groups, method) + + +def _linear_operator_difference(fun, x0, f0, h, method): + m = f0.size + n = x0.size + + if method == '2-point': + def matvec(p): + if np.array_equal(p, np.zeros_like(p)): + return np.zeros(m) + dx = h / norm(p) + x = x0 + dx*p + df = fun(x) - f0 + return df / dx + + elif method == '3-point': + def matvec(p): + if np.array_equal(p, np.zeros_like(p)): + return np.zeros(m) + dx = 2*h / norm(p) + x1 = x0 - (dx/2)*p + x2 = x0 + (dx/2)*p + f1 = fun(x1) + f2 = fun(x2) + df = f2 - f1 + return df / dx + + elif method == 'cs': + def matvec(p): + if np.array_equal(p, np.zeros_like(p)): + return np.zeros(m) + dx = h / norm(p) + x = x0 + dx*p*1.j + f1 = fun(x) + df = f1.imag + return df / dx + + else: + raise RuntimeError("Never be here.") + + return LinearOperator((m, n), matvec) + + +def _dense_difference(fun, x0, f0, h, use_one_sided, method): + m = f0.size + n = x0.size + J_transposed = np.empty((n, m)) + h_vecs = np.diag(h) + + for i in range(h.size): + if method == '2-point': + x = x0 + h_vecs[i] + dx = x[i] - x0[i] # Recompute dx as exactly representable number. + df = fun(x) - f0 + elif method == '3-point' and use_one_sided[i]: + x1 = x0 + h_vecs[i] + x2 = x0 + 2 * h_vecs[i] + dx = x2[i] - x0[i] + f1 = fun(x1) + f2 = fun(x2) + df = -3.0 * f0 + 4 * f1 - f2 + elif method == '3-point' and not use_one_sided[i]: + x1 = x0 - h_vecs[i] + x2 = x0 + h_vecs[i] + dx = x2[i] - x1[i] + f1 = fun(x1) + f2 = fun(x2) + df = f2 - f1 + elif method == 'cs': + f1 = fun(x0 + h_vecs[i]*1.j) + df = f1.imag + dx = h_vecs[i, i] + else: + raise RuntimeError("Never be here.") + + J_transposed[i] = df / dx + + if m == 1: + J_transposed = np.ravel(J_transposed) + + return J_transposed.T + + +def _sparse_difference(fun, x0, f0, h, use_one_sided, + structure, groups, method): + m = f0.size + n = x0.size + row_indices = [] + col_indices = [] + fractions = [] + + n_groups = np.max(groups) + 1 + for group in range(n_groups): + # Perturb variables which are in the same group simultaneously. + e = np.equal(group, groups) + h_vec = h * e + if method == '2-point': + x = x0 + h_vec + dx = x - x0 + df = fun(x) - f0 + # The result is written to columns which correspond to perturbed + # variables. + cols, = np.nonzero(e) + # Find all non-zero elements in selected columns of Jacobian. + i, j, _ = find(structure[:, cols]) + # Restore column indices in the full array. + j = cols[j] + elif method == '3-point': + # Here we do conceptually the same but separate one-sided + # and two-sided schemes. 
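+            # Sketch of the estimates used below (h is the per-variable step):
+            # the one-sided branch computes
+            # (-3*f(x0) + 4*f(x0 + h) - f(x0 + 2*h)) / (2*h), a second-order
+            # forward difference, while the two-sided branch computes the
+            # central difference (f(x0 + h) - f(x0 - h)) / (2*h); in both
+            # cases dx stores the corresponding denominator 2*h.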
+ x1 = x0.copy() + x2 = x0.copy() + + mask_1 = use_one_sided & e + x1[mask_1] += h_vec[mask_1] + x2[mask_1] += 2 * h_vec[mask_1] + + mask_2 = ~use_one_sided & e + x1[mask_2] -= h_vec[mask_2] + x2[mask_2] += h_vec[mask_2] + + dx = np.zeros(n) + dx[mask_1] = x2[mask_1] - x0[mask_1] + dx[mask_2] = x2[mask_2] - x1[mask_2] + + f1 = fun(x1) + f2 = fun(x2) + + cols, = np.nonzero(e) + i, j, _ = find(structure[:, cols]) + j = cols[j] + + mask = use_one_sided[j] + df = np.empty(m) + + rows = i[mask] + df[rows] = -3 * f0[rows] + 4 * f1[rows] - f2[rows] + + rows = i[~mask] + df[rows] = f2[rows] - f1[rows] + elif method == 'cs': + f1 = fun(x0 + h_vec*1.j) + df = f1.imag + dx = h_vec + cols, = np.nonzero(e) + i, j, _ = find(structure[:, cols]) + j = cols[j] + else: + raise ValueError("Never be here.") + + # All that's left is to compute the fraction. We store i, j and + # fractions as separate arrays and later construct coo_matrix. + row_indices.append(i) + col_indices.append(j) + fractions.append(df[i] / dx[j]) + + row_indices = np.hstack(row_indices) + col_indices = np.hstack(col_indices) + fractions = np.hstack(fractions) + J = coo_matrix((fractions, (row_indices, col_indices)), shape=(m, n)) + return csr_matrix(J) + + +def check_derivative(fun, jac, x0, bounds=(-np.inf, np.inf), args=(), + kwargs={}): + """Check correctness of a function computing derivatives (Jacobian or + gradient) by comparison with a finite difference approximation. + + Parameters + ---------- + fun : callable + Function of which to estimate the derivatives. The argument x + passed to this function is ndarray of shape (n,) (never a scalar + even if n=1). It must return 1-D array_like of shape (m,) or a scalar. + jac : callable + Function which computes Jacobian matrix of `fun`. It must work with + argument x the same way as `fun`. The return value must be array_like + or sparse matrix with an appropriate shape. + x0 : array_like of shape (n,) or float + Point at which to estimate the derivatives. Float will be converted + to 1-D array. + bounds : 2-tuple of array_like, optional + Lower and upper bounds on independent variables. Defaults to no bounds. + Each bound must match the size of `x0` or be a scalar, in the latter + case the bound will be the same for all variables. Use it to limit the + range of function evaluation. + args, kwargs : tuple and dict, optional + Additional arguments passed to `fun` and `jac`. Both empty by default. + The calling signature is ``fun(x, *args, **kwargs)`` and the same + for `jac`. + + Returns + ------- + accuracy : float + The maximum among all relative errors for elements with absolute values + higher than 1 and absolute errors for elements with absolute values + less or equal than 1. If `accuracy` is on the order of 1e-6 or lower, + then it is likely that your `jac` implementation is correct. + + See Also + -------- + approx_derivative : Compute finite difference approximation of derivative. + + Examples + -------- + >>> import numpy as np + >>> from scipy.optimize._numdiff import check_derivative + >>> + >>> + >>> def f(x, c1, c2): + ... return np.array([x[0] * np.sin(c1 * x[1]), + ... x[0] * np.cos(c2 * x[1])]) + ... + >>> def jac(x, c1, c2): + ... return np.array([ + ... [np.sin(c1 * x[1]), c1 * x[0] * np.cos(c1 * x[1])], + ... [np.cos(c2 * x[1]), -c2 * x[0] * np.sin(c2 * x[1])] + ... ]) + ... 
+ >>> + >>> x0 = np.array([1.0, 0.5 * np.pi]) + >>> check_derivative(f, jac, x0, args=(1, 2)) + 2.4492935982947064e-16 + """ + J_to_test = jac(x0, *args, **kwargs) + if issparse(J_to_test): + J_diff = approx_derivative(fun, x0, bounds=bounds, sparsity=J_to_test, + args=args, kwargs=kwargs) + J_to_test = csr_matrix(J_to_test) + abs_err = J_to_test - J_diff + i, j, abs_err_data = find(abs_err) + J_diff_data = np.asarray(J_diff[i, j]).ravel() + return np.max(np.abs(abs_err_data) / + np.maximum(1, np.abs(J_diff_data))) + else: + J_diff = approx_derivative(fun, x0, bounds=bounds, + args=args, kwargs=kwargs) + abs_err = np.abs(J_to_test - J_diff) + return np.max(abs_err / np.maximum(1, np.abs(J_diff))) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_optimize.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_optimize.py new file mode 100644 index 0000000000000000000000000000000000000000..0a1d730c469ec3a9cb3e74065df004abc771997c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_optimize.py @@ -0,0 +1,4092 @@ +#__docformat__ = "restructuredtext en" +# ******NOTICE*************** +# optimize.py module by Travis E. Oliphant +# +# You may copy and use this module as you see fit with no +# guarantee implied provided you keep this notice in all copies. +# *****END NOTICE************ + +# A collection of optimization algorithms. Version 0.5 +# CHANGES +# Added fminbound (July 2001) +# Added brute (Aug. 2002) +# Finished line search satisfying strong Wolfe conditions (Mar. 2004) +# Updated strong Wolfe conditions line search to use +# cubic-interpolation (Mar. 2004) + + +# Minimization routines + +__all__ = ['fmin', 'fmin_powell', 'fmin_bfgs', 'fmin_ncg', 'fmin_cg', + 'fminbound', 'brent', 'golden', 'bracket', 'rosen', 'rosen_der', + 'rosen_hess', 'rosen_hess_prod', 'brute', 'approx_fprime', + 'line_search', 'check_grad', 'OptimizeResult', 'show_options', + 'OptimizeWarning'] + +__docformat__ = "restructuredtext en" + +import math +import warnings +import sys +import inspect +from numpy import (atleast_1d, eye, argmin, zeros, shape, squeeze, + asarray, sqrt) +import numpy as np +from scipy.linalg import cholesky, issymmetric, LinAlgError +from scipy.sparse.linalg import LinearOperator +from ._linesearch import (line_search_wolfe1, line_search_wolfe2, + line_search_wolfe2 as line_search, + LineSearchWarning) +from ._numdiff import approx_derivative +from scipy._lib._util import getfullargspec_no_self as _getfullargspec +from scipy._lib._util import (MapWrapper, check_random_state, _RichResult, + _call_callback_maybe_halt) +from scipy.optimize._differentiable_functions import ScalarFunction, FD_METHODS + + +# standard status messages of optimizers +_status_message = {'success': 'Optimization terminated successfully.', + 'maxfev': 'Maximum number of function evaluations has ' + 'been exceeded.', + 'maxiter': 'Maximum number of iterations has been ' + 'exceeded.', + 'pr_loss': 'Desired error not necessarily achieved due ' + 'to precision loss.', + 'nan': 'NaN result encountered.', + 'out_of_bounds': 'The result is outside of the provided ' + 'bounds.'} + + +class MemoizeJac: + """ Decorator that caches the return values of a function returning `(fun, grad)` + each time it is called. 
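+
+    Examples
+    --------
+    A minimal sketch of the call pattern; ``fun_and_grad`` is a made-up
+    example function, not part of SciPy:
+
+    >>> from scipy.optimize._optimize import MemoizeJac
+    >>> def fun_and_grad(x):
+    ...     return x**2, 2*x
+    >>> memo = MemoizeJac(fun_and_grad)
+    >>> memo(3.0)             # evaluates fun_and_grad and caches both outputs
+    9.0
+    >>> memo.derivative(3.0)  # same x, so the cached gradient is reused
+    6.0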
""" + + def __init__(self, fun): + self.fun = fun + self.jac = None + self._value = None + self.x = None + + def _compute_if_needed(self, x, *args): + if not np.all(x == self.x) or self._value is None or self.jac is None: + self.x = np.asarray(x).copy() + fg = self.fun(x, *args) + self.jac = fg[1] + self._value = fg[0] + + def __call__(self, x, *args): + """ returns the function value """ + self._compute_if_needed(x, *args) + return self._value + + def derivative(self, x, *args): + self._compute_if_needed(x, *args) + return self.jac + + +def _wrap_callback(callback, method=None): + """Wrap a user-provided callback so that attributes can be attached.""" + if callback is None or method in {'tnc', 'slsqp', 'cobyla'}: + return callback # don't wrap + + sig = inspect.signature(callback) + + if set(sig.parameters) == {'intermediate_result'}: + def wrapped_callback(res): + return callback(intermediate_result=res) + elif method == 'trust-constr': + def wrapped_callback(res): + return callback(np.copy(res.x), res) + elif method == 'differential_evolution': + def wrapped_callback(res): + return callback(np.copy(res.x), res.convergence) + else: + def wrapped_callback(res): + return callback(np.copy(res.x)) + + wrapped_callback.stop_iteration = False + return wrapped_callback + + +class OptimizeResult(_RichResult): + """ + Represents the optimization result. + + Attributes + ---------- + x : ndarray + The solution of the optimization. + success : bool + Whether or not the optimizer exited successfully. + status : int + Termination status of the optimizer. Its value depends on the + underlying solver. Refer to `message` for details. + message : str + Description of the cause of the termination. + fun, jac, hess: ndarray + Values of objective function, its Jacobian and its Hessian (if + available). The Hessians may be approximations, see the documentation + of the function in question. + hess_inv : object + Inverse of the objective function's Hessian; may be an approximation. + Not available for all solvers. The type of this attribute may be + either np.ndarray or scipy.sparse.linalg.LinearOperator. + nfev, njev, nhev : int + Number of evaluations of the objective functions and of its + Jacobian and Hessian. + nit : int + Number of iterations performed by the optimizer. + maxcv : float + The maximum constraint violation. + + Notes + ----- + Depending on the specific solver being used, `OptimizeResult` may + not have all attributes listed here, and they may have additional + attributes not listed here. Since this class is essentially a + subclass of dict with attribute accessors, one can see which + attributes are available using the `OptimizeResult.keys` method. + + """ + pass + + +class OptimizeWarning(UserWarning): + pass + +def _check_positive_definite(Hk): + def is_pos_def(A): + if issymmetric(A): + try: + cholesky(A) + return True + except LinAlgError: + return False + else: + return False + if Hk is not None: + if not is_pos_def(Hk): + raise ValueError("'hess_inv0' matrix isn't positive definite.") + + +def _check_unknown_options(unknown_options): + if unknown_options: + msg = ", ".join(map(str, unknown_options.keys())) + # Stack level 4: this is called from _minimize_*, which is + # called from another function in SciPy. Level 4 is the first + # level in user code. + warnings.warn("Unknown solver options: %s" % msg, OptimizeWarning, stacklevel=4) + + +def is_finite_scalar(x): + """Test whether `x` is either a finite scalar or a finite array scalar. 
+ + """ + return np.size(x) == 1 and np.isfinite(x) + + +_epsilon = sqrt(np.finfo(float).eps) + + +def vecnorm(x, ord=2): + if ord == np.inf: + return np.amax(np.abs(x)) + elif ord == -np.inf: + return np.amin(np.abs(x)) + else: + return np.sum(np.abs(x)**ord, axis=0)**(1.0 / ord) + + +def _prepare_scalar_function(fun, x0, jac=None, args=(), bounds=None, + epsilon=None, finite_diff_rel_step=None, + hess=None): + """ + Creates a ScalarFunction object for use with scalar minimizers + (BFGS/LBFGSB/SLSQP/TNC/CG/etc). + + Parameters + ---------- + fun : callable + The objective function to be minimized. + + ``fun(x, *args) -> float`` + + where ``x`` is an 1-D array with shape (n,) and ``args`` + is a tuple of the fixed parameters needed to completely + specify the function. + x0 : ndarray, shape (n,) + Initial guess. Array of real elements of size (n,), + where 'n' is the number of independent variables. + jac : {callable, '2-point', '3-point', 'cs', None}, optional + Method for computing the gradient vector. If it is a callable, it + should be a function that returns the gradient vector: + + ``jac(x, *args) -> array_like, shape (n,)`` + + If one of `{'2-point', '3-point', 'cs'}` is selected then the gradient + is calculated with a relative step for finite differences. If `None`, + then two-point finite differences with an absolute step is used. + args : tuple, optional + Extra arguments passed to the objective function and its + derivatives (`fun`, `jac` functions). + bounds : sequence, optional + Bounds on variables. 'new-style' bounds are required. + eps : float or ndarray + If `jac is None` the absolute step size used for numerical + approximation of the jacobian via forward differences. + finite_diff_rel_step : None or array_like, optional + If `jac in ['2-point', '3-point', 'cs']` the relative step size to + use for numerical approximation of the jacobian. The absolute step + size is computed as ``h = rel_step * sign(x0) * max(1, abs(x0))``, + possibly adjusted to fit into the bounds. For ``jac='3-point'`` + the sign of `h` is ignored. If None (default) then step is selected + automatically. + hess : {callable, '2-point', '3-point', 'cs', None} + Computes the Hessian matrix. If it is callable, it should return the + Hessian matrix: + + ``hess(x, *args) -> {LinearOperator, spmatrix, array}, (n, n)`` + + Alternatively, the keywords {'2-point', '3-point', 'cs'} select a + finite difference scheme for numerical estimation. + Whenever the gradient is estimated via finite-differences, the Hessian + cannot be estimated with options {'2-point', '3-point', 'cs'} and needs + to be estimated using one of the quasi-Newton strategies. + + Returns + ------- + sf : ScalarFunction + """ + if callable(jac): + grad = jac + elif jac in FD_METHODS: + # epsilon is set to None so that ScalarFunction is made to use + # rel_step + epsilon = None + grad = jac + else: + # default (jac is None) is to do 2-point finite differences with + # absolute step size. ScalarFunction has to be provided an + # epsilon value that is not None to use absolute steps. This is + # normally the case from most _minimize* methods. + grad = '2-point' + epsilon = epsilon + + if hess is None: + # ScalarFunction requires something for hess, so we give a dummy + # implementation here if nothing is provided, return a value of None + # so that downstream minimisers halt. The results of `fun.hess` + # should not be used. + def hess(x, *args): + return None + + if bounds is None: + bounds = (-np.inf, np.inf) + + # ScalarFunction caches. 
Reuse of fun(x) during grad + # calculation reduces overall function evaluations. + sf = ScalarFunction(fun, x0, args, grad, hess, + finite_diff_rel_step, bounds, epsilon=epsilon) + + return sf + + +def _clip_x_for_func(func, bounds): + # ensures that x values sent to func are clipped to bounds + + # this is used as a mitigation for gh11403, slsqp/tnc sometimes + # suggest a move that is outside the limits by 1 or 2 ULP. This + # unclean fix makes sure x is strictly within bounds. + def eval(x): + x = _check_clip_x(x, bounds) + return func(x) + + return eval + + +def _check_clip_x(x, bounds): + if (x < bounds[0]).any() or (x > bounds[1]).any(): + warnings.warn("Values in x were outside bounds during a " + "minimize step, clipping to bounds", + RuntimeWarning, stacklevel=3) + x = np.clip(x, bounds[0], bounds[1]) + return x + + return x + + +def rosen(x): + """ + The Rosenbrock function. + + The function computed is:: + + sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0) + + Parameters + ---------- + x : array_like + 1-D array of points at which the Rosenbrock function is to be computed. + + Returns + ------- + f : float + The value of the Rosenbrock function. + + See Also + -------- + rosen_der, rosen_hess, rosen_hess_prod + + Examples + -------- + >>> import numpy as np + >>> from scipy.optimize import rosen + >>> X = 0.1 * np.arange(10) + >>> rosen(X) + 76.56 + + For higher-dimensional input ``rosen`` broadcasts. + In the following example, we use this to plot a 2D landscape. + Note that ``rosen_hess`` does not broadcast in this manner. + + >>> import matplotlib.pyplot as plt + >>> from mpl_toolkits.mplot3d import Axes3D + >>> x = np.linspace(-1, 1, 50) + >>> X, Y = np.meshgrid(x, x) + >>> ax = plt.subplot(111, projection='3d') + >>> ax.plot_surface(X, Y, rosen([X, Y])) + >>> plt.show() + """ + x = asarray(x) + r = np.sum(100.0 * (x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0, + axis=0) + return r + + +def rosen_der(x): + """ + The derivative (i.e. gradient) of the Rosenbrock function. + + Parameters + ---------- + x : array_like + 1-D array of points at which the derivative is to be computed. + + Returns + ------- + rosen_der : (N,) ndarray + The gradient of the Rosenbrock function at `x`. + + See Also + -------- + rosen, rosen_hess, rosen_hess_prod + + Examples + -------- + >>> import numpy as np + >>> from scipy.optimize import rosen_der + >>> X = 0.1 * np.arange(9) + >>> rosen_der(X) + array([ -2. , 10.6, 15.6, 13.4, 6.4, -3. , -12.4, -19.4, 62. ]) + + """ + x = asarray(x) + xm = x[1:-1] + xm_m1 = x[:-2] + xm_p1 = x[2:] + der = np.zeros_like(x) + der[1:-1] = (200 * (xm - xm_m1**2) - + 400 * (xm_p1 - xm**2) * xm - 2 * (1 - xm)) + der[0] = -400 * x[0] * (x[1] - x[0]**2) - 2 * (1 - x[0]) + der[-1] = 200 * (x[-1] - x[-2]**2) + return der + + +def rosen_hess(x): + """ + The Hessian matrix of the Rosenbrock function. + + Parameters + ---------- + x : array_like + 1-D array of points at which the Hessian matrix is to be computed. + + Returns + ------- + rosen_hess : ndarray + The Hessian matrix of the Rosenbrock function at `x`. 
+ + See Also + -------- + rosen, rosen_der, rosen_hess_prod + + Examples + -------- + >>> import numpy as np + >>> from scipy.optimize import rosen_hess + >>> X = 0.1 * np.arange(4) + >>> rosen_hess(X) + array([[-38., 0., 0., 0.], + [ 0., 134., -40., 0.], + [ 0., -40., 130., -80.], + [ 0., 0., -80., 200.]]) + + """ + x = atleast_1d(x) + H = np.diag(-400 * x[:-1], 1) - np.diag(400 * x[:-1], -1) + diagonal = np.zeros(len(x), dtype=x.dtype) + diagonal[0] = 1200 * x[0]**2 - 400 * x[1] + 2 + diagonal[-1] = 200 + diagonal[1:-1] = 202 + 1200 * x[1:-1]**2 - 400 * x[2:] + H = H + np.diag(diagonal) + return H + + +def rosen_hess_prod(x, p): + """ + Product of the Hessian matrix of the Rosenbrock function with a vector. + + Parameters + ---------- + x : array_like + 1-D array of points at which the Hessian matrix is to be computed. + p : array_like + 1-D array, the vector to be multiplied by the Hessian matrix. + + Returns + ------- + rosen_hess_prod : ndarray + The Hessian matrix of the Rosenbrock function at `x` multiplied + by the vector `p`. + + See Also + -------- + rosen, rosen_der, rosen_hess + + Examples + -------- + >>> import numpy as np + >>> from scipy.optimize import rosen_hess_prod + >>> X = 0.1 * np.arange(9) + >>> p = 0.5 * np.arange(9) + >>> rosen_hess_prod(X, p) + array([ -0., 27., -10., -95., -192., -265., -278., -195., -180.]) + + """ + x = atleast_1d(x) + Hp = np.zeros(len(x), dtype=x.dtype) + Hp[0] = (1200 * x[0]**2 - 400 * x[1] + 2) * p[0] - 400 * x[0] * p[1] + Hp[1:-1] = (-400 * x[:-2] * p[:-2] + + (202 + 1200 * x[1:-1]**2 - 400 * x[2:]) * p[1:-1] - + 400 * x[1:-1] * p[2:]) + Hp[-1] = -400 * x[-2] * p[-2] + 200*p[-1] + return Hp + + +def _wrap_scalar_function(function, args): + # wraps a minimizer function to count number of evaluations + # and to easily provide an args kwd. + ncalls = [0] + if function is None: + return ncalls, None + + def function_wrapper(x, *wrapper_args): + ncalls[0] += 1 + # A copy of x is sent to the user function (gh13740) + fx = function(np.copy(x), *(wrapper_args + args)) + # Ideally, we'd like to a have a true scalar returned from f(x). For + # backwards-compatibility, also allow np.array([1.3]), np.array([[1.3]]) etc. + if not np.isscalar(fx): + try: + fx = np.asarray(fx).item() + except (TypeError, ValueError) as e: + raise ValueError("The user-provided objective function " + "must return a scalar value.") from e + return fx + + return ncalls, function_wrapper + + +class _MaxFuncCallError(RuntimeError): + pass + + +def _wrap_scalar_function_maxfun_validation(function, args, maxfun): + # wraps a minimizer function to count number of evaluations + # and to easily provide an args kwd. + ncalls = [0] + if function is None: + return ncalls, None + + def function_wrapper(x, *wrapper_args): + if ncalls[0] >= maxfun: + raise _MaxFuncCallError("Too many function calls") + ncalls[0] += 1 + # A copy of x is sent to the user function (gh13740) + fx = function(np.copy(x), *(wrapper_args + args)) + # Ideally, we'd like to a have a true scalar returned from f(x). For + # backwards-compatibility, also allow np.array([1.3]), + # np.array([[1.3]]) etc. 
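+        # (np.ndarray.item() only succeeds for size-1 arrays; anything larger
+        # raises ValueError, which is re-raised below with a clearer message.)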
+ if not np.isscalar(fx): + try: + fx = np.asarray(fx).item() + except (TypeError, ValueError) as e: + raise ValueError("The user-provided objective function " + "must return a scalar value.") from e + return fx + + return ncalls, function_wrapper + + +def fmin(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None, + full_output=0, disp=1, retall=0, callback=None, initial_simplex=None): + """ + Minimize a function using the downhill simplex algorithm. + + This algorithm only uses function values, not derivatives or second + derivatives. + + Parameters + ---------- + func : callable func(x,*args) + The objective function to be minimized. + x0 : ndarray + Initial guess. + args : tuple, optional + Extra arguments passed to func, i.e., ``f(x,*args)``. + xtol : float, optional + Absolute error in xopt between iterations that is acceptable for + convergence. + ftol : number, optional + Absolute error in func(xopt) between iterations that is acceptable for + convergence. + maxiter : int, optional + Maximum number of iterations to perform. + maxfun : number, optional + Maximum number of function evaluations to make. + full_output : bool, optional + Set to True if fopt and warnflag outputs are desired. + disp : bool, optional + Set to True to print convergence messages. + retall : bool, optional + Set to True to return list of solutions at each iteration. + callback : callable, optional + Called after each iteration, as callback(xk), where xk is the + current parameter vector. + initial_simplex : array_like of shape (N + 1, N), optional + Initial simplex. If given, overrides `x0`. + ``initial_simplex[j,:]`` should contain the coordinates of + the jth vertex of the ``N+1`` vertices in the simplex, where + ``N`` is the dimension. + + Returns + ------- + xopt : ndarray + Parameter that minimizes function. + fopt : float + Value of function at minimum: ``fopt = func(xopt)``. + iter : int + Number of iterations performed. + funcalls : int + Number of function calls made. + warnflag : int + 1 : Maximum number of function evaluations made. + 2 : Maximum number of iterations reached. + allvecs : list + Solution at each iteration. + + See also + -------- + minimize: Interface to minimization algorithms for multivariate + functions. See the 'Nelder-Mead' `method` in particular. + + Notes + ----- + Uses a Nelder-Mead simplex algorithm to find the minimum of function of + one or more variables. + + This algorithm has a long history of successful use in applications. + But it will usually be slower than an algorithm that uses first or + second derivative information. In practice, it can have poor + performance in high-dimensional problems and is not robust to + minimizing complicated functions. Additionally, there currently is no + complete theory describing when the algorithm will successfully + converge to the minimum, or how fast it will if it does. Both the ftol and + xtol criteria must be met for convergence. + + Examples + -------- + >>> def f(x): + ... return x**2 + + >>> from scipy import optimize + + >>> minimum = optimize.fmin(f, 1) + Optimization terminated successfully. + Current function value: 0.000000 + Iterations: 17 + Function evaluations: 34 + >>> minimum[0] + -8.8817841970012523e-16 + + References + ---------- + .. [1] Nelder, J.A. and Mead, R. (1965), "A simplex method for function + minimization", The Computer Journal, 7, pp. 308-313 + + .. [2] Wright, M.H. 
(1996), "Direct Search Methods: Once Scorned, Now + Respectable", in Numerical Analysis 1995, Proceedings of the + 1995 Dundee Biennial Conference in Numerical Analysis, D.F. + Griffiths and G.A. Watson (Eds.), Addison Wesley Longman, + Harlow, UK, pp. 191-208. + + """ + opts = {'xatol': xtol, + 'fatol': ftol, + 'maxiter': maxiter, + 'maxfev': maxfun, + 'disp': disp, + 'return_all': retall, + 'initial_simplex': initial_simplex} + + callback = _wrap_callback(callback) + res = _minimize_neldermead(func, x0, args, callback=callback, **opts) + if full_output: + retlist = res['x'], res['fun'], res['nit'], res['nfev'], res['status'] + if retall: + retlist += (res['allvecs'], ) + return retlist + else: + if retall: + return res['x'], res['allvecs'] + else: + return res['x'] + + +def _minimize_neldermead(func, x0, args=(), callback=None, + maxiter=None, maxfev=None, disp=False, + return_all=False, initial_simplex=None, + xatol=1e-4, fatol=1e-4, adaptive=False, bounds=None, + **unknown_options): + """ + Minimization of scalar function of one or more variables using the + Nelder-Mead algorithm. + + Options + ------- + disp : bool + Set to True to print convergence messages. + maxiter, maxfev : int + Maximum allowed number of iterations and function evaluations. + Will default to ``N*200``, where ``N`` is the number of + variables, if neither `maxiter` or `maxfev` is set. If both + `maxiter` and `maxfev` are set, minimization will stop at the + first reached. + return_all : bool, optional + Set to True to return a list of the best solution at each of the + iterations. + initial_simplex : array_like of shape (N + 1, N) + Initial simplex. If given, overrides `x0`. + ``initial_simplex[j,:]`` should contain the coordinates of + the jth vertex of the ``N+1`` vertices in the simplex, where + ``N`` is the dimension. + xatol : float, optional + Absolute error in xopt between iterations that is acceptable for + convergence. + fatol : number, optional + Absolute error in func(xopt) between iterations that is acceptable for + convergence. + adaptive : bool, optional + Adapt algorithm parameters to dimensionality of problem. Useful for + high-dimensional minimization [1]_. + bounds : sequence or `Bounds`, optional + Bounds on variables. There are two ways to specify the bounds: + + 1. Instance of `Bounds` class. + 2. Sequence of ``(min, max)`` pairs for each element in `x`. None + is used to specify no bound. + + Note that this just clips all vertices in simplex based on + the bounds. + + References + ---------- + .. [1] Gao, F. and Han, L. + Implementing the Nelder-Mead simplex algorithm with adaptive + parameters. 2012. Computational Optimization and Applications. + 51:1, pp. 
259-277 + + """ + _check_unknown_options(unknown_options) + maxfun = maxfev + retall = return_all + + x0 = np.atleast_1d(x0).flatten() + dtype = x0.dtype if np.issubdtype(x0.dtype, np.inexact) else np.float64 + x0 = np.asarray(x0, dtype=dtype) + + if adaptive: + dim = float(len(x0)) + rho = 1 + chi = 1 + 2/dim + psi = 0.75 - 1/(2*dim) + sigma = 1 - 1/dim + else: + rho = 1 + chi = 2 + psi = 0.5 + sigma = 0.5 + + nonzdelt = 0.05 + zdelt = 0.00025 + + if bounds is not None: + lower_bound, upper_bound = bounds.lb, bounds.ub + # check bounds + if (lower_bound > upper_bound).any(): + raise ValueError("Nelder Mead - one of the lower bounds " + "is greater than an upper bound.", + stacklevel=3) + if np.any(lower_bound > x0) or np.any(x0 > upper_bound): + warnings.warn("Initial guess is not within the specified bounds", + OptimizeWarning, stacklevel=3) + + if bounds is not None: + x0 = np.clip(x0, lower_bound, upper_bound) + + if initial_simplex is None: + N = len(x0) + + sim = np.empty((N + 1, N), dtype=x0.dtype) + sim[0] = x0 + for k in range(N): + y = np.array(x0, copy=True) + if y[k] != 0: + y[k] = (1 + nonzdelt)*y[k] + else: + y[k] = zdelt + sim[k + 1] = y + else: + sim = np.atleast_2d(initial_simplex).copy() + dtype = sim.dtype if np.issubdtype(sim.dtype, np.inexact) else np.float64 + sim = np.asarray(sim, dtype=dtype) + if sim.ndim != 2 or sim.shape[0] != sim.shape[1] + 1: + raise ValueError("`initial_simplex` should be an array of shape (N+1,N)") + if len(x0) != sim.shape[1]: + raise ValueError("Size of `initial_simplex` is not consistent with `x0`") + N = sim.shape[1] + + if retall: + allvecs = [sim[0]] + + # If neither are set, then set both to default + if maxiter is None and maxfun is None: + maxiter = N * 200 + maxfun = N * 200 + elif maxiter is None: + # Convert remaining Nones, to np.inf, unless the other is np.inf, in + # which case use the default to avoid unbounded iteration + if maxfun == np.inf: + maxiter = N * 200 + else: + maxiter = np.inf + elif maxfun is None: + if maxiter == np.inf: + maxfun = N * 200 + else: + maxfun = np.inf + + if bounds is not None: + # The default simplex construction may make all entries (for a given + # parameter) greater than an upper bound if x0 is very close to the + # upper bound. If one simply clips the simplex to the bounds this could + # make the simplex entries degenerate. If that occurs reflect into the + # interior. 
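+        # Illustrative sketch (not an additional code path): with an upper
+        # bound of 1.0 and a constructed vertex entry of 1.02, the reflection
+        # 2*1.0 - 1.02 = 0.98 places the entry back in the interior; the clip
+        # below only guards against the reflected value undershooting the
+        # lower bound.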
+ msk = sim > upper_bound + # reflect into the interior + sim = np.where(msk, 2*upper_bound - sim, sim) + # but make sure the reflection is no less than the lower_bound + sim = np.clip(sim, lower_bound, upper_bound) + + one2np1 = list(range(1, N + 1)) + fsim = np.full((N + 1,), np.inf, dtype=float) + + fcalls, func = _wrap_scalar_function_maxfun_validation(func, args, maxfun) + + try: + for k in range(N + 1): + fsim[k] = func(sim[k]) + except _MaxFuncCallError: + pass + finally: + ind = np.argsort(fsim) + sim = np.take(sim, ind, 0) + fsim = np.take(fsim, ind, 0) + + ind = np.argsort(fsim) + fsim = np.take(fsim, ind, 0) + # sort so sim[0,:] has the lowest function value + sim = np.take(sim, ind, 0) + + iterations = 1 + + while (fcalls[0] < maxfun and iterations < maxiter): + try: + if (np.max(np.ravel(np.abs(sim[1:] - sim[0]))) <= xatol and + np.max(np.abs(fsim[0] - fsim[1:])) <= fatol): + break + + xbar = np.add.reduce(sim[:-1], 0) / N + xr = (1 + rho) * xbar - rho * sim[-1] + if bounds is not None: + xr = np.clip(xr, lower_bound, upper_bound) + fxr = func(xr) + doshrink = 0 + + if fxr < fsim[0]: + xe = (1 + rho * chi) * xbar - rho * chi * sim[-1] + if bounds is not None: + xe = np.clip(xe, lower_bound, upper_bound) + fxe = func(xe) + + if fxe < fxr: + sim[-1] = xe + fsim[-1] = fxe + else: + sim[-1] = xr + fsim[-1] = fxr + else: # fsim[0] <= fxr + if fxr < fsim[-2]: + sim[-1] = xr + fsim[-1] = fxr + else: # fxr >= fsim[-2] + # Perform contraction + if fxr < fsim[-1]: + xc = (1 + psi * rho) * xbar - psi * rho * sim[-1] + if bounds is not None: + xc = np.clip(xc, lower_bound, upper_bound) + fxc = func(xc) + + if fxc <= fxr: + sim[-1] = xc + fsim[-1] = fxc + else: + doshrink = 1 + else: + # Perform an inside contraction + xcc = (1 - psi) * xbar + psi * sim[-1] + if bounds is not None: + xcc = np.clip(xcc, lower_bound, upper_bound) + fxcc = func(xcc) + + if fxcc < fsim[-1]: + sim[-1] = xcc + fsim[-1] = fxcc + else: + doshrink = 1 + + if doshrink: + for j in one2np1: + sim[j] = sim[0] + sigma * (sim[j] - sim[0]) + if bounds is not None: + sim[j] = np.clip( + sim[j], lower_bound, upper_bound) + fsim[j] = func(sim[j]) + iterations += 1 + except _MaxFuncCallError: + pass + finally: + ind = np.argsort(fsim) + sim = np.take(sim, ind, 0) + fsim = np.take(fsim, ind, 0) + if retall: + allvecs.append(sim[0]) + intermediate_result = OptimizeResult(x=sim[0], fun=fsim[0]) + if _call_callback_maybe_halt(callback, intermediate_result): + break + + x = sim[0] + fval = np.min(fsim) + warnflag = 0 + + if fcalls[0] >= maxfun: + warnflag = 1 + msg = _status_message['maxfev'] + if disp: + warnings.warn(msg, RuntimeWarning, stacklevel=3) + elif iterations >= maxiter: + warnflag = 2 + msg = _status_message['maxiter'] + if disp: + warnings.warn(msg, RuntimeWarning, stacklevel=3) + else: + msg = _status_message['success'] + if disp: + print(msg) + print(" Current function value: %f" % fval) + print(" Iterations: %d" % iterations) + print(" Function evaluations: %d" % fcalls[0]) + + result = OptimizeResult(fun=fval, nit=iterations, nfev=fcalls[0], + status=warnflag, success=(warnflag == 0), + message=msg, x=x, final_simplex=(sim, fsim)) + if retall: + result['allvecs'] = allvecs + return result + + +def approx_fprime(xk, f, epsilon=_epsilon, *args): + """Finite difference approximation of the derivatives of a + scalar or vector-valued function. 
+ + If a function maps from :math:`R^n` to :math:`R^m`, its derivatives form + an m-by-n matrix + called the Jacobian, where an element :math:`(i, j)` is a partial + derivative of f[i] with respect to ``xk[j]``. + + Parameters + ---------- + xk : array_like + The coordinate vector at which to determine the gradient of `f`. + f : callable + Function of which to estimate the derivatives of. Has the signature + ``f(xk, *args)`` where `xk` is the argument in the form of a 1-D array + and `args` is a tuple of any additional fixed parameters needed to + completely specify the function. The argument `xk` passed to this + function is an ndarray of shape (n,) (never a scalar even if n=1). + It must return a 1-D array_like of shape (m,) or a scalar. + + .. versionchanged:: 1.9.0 + `f` is now able to return a 1-D array-like, with the :math:`(m, n)` + Jacobian being estimated. + + epsilon : {float, array_like}, optional + Increment to `xk` to use for determining the function gradient. + If a scalar, uses the same finite difference delta for all partial + derivatives. If an array, should contain one value per element of + `xk`. Defaults to ``sqrt(np.finfo(float).eps)``, which is approximately + 1.49e-08. + \\*args : args, optional + Any other arguments that are to be passed to `f`. + + Returns + ------- + jac : ndarray + The partial derivatives of `f` to `xk`. + + See Also + -------- + check_grad : Check correctness of gradient function against approx_fprime. + + Notes + ----- + The function gradient is determined by the forward finite difference + formula:: + + f(xk[i] + epsilon[i]) - f(xk[i]) + f'[i] = --------------------------------- + epsilon[i] + + Examples + -------- + >>> import numpy as np + >>> from scipy import optimize + >>> def func(x, c0, c1): + ... "Coordinate vector `x` should be an array of size two." + ... return c0 * x[0]**2 + c1*x[1]**2 + + >>> x = np.ones(2) + >>> c0, c1 = (1, 200) + >>> eps = np.sqrt(np.finfo(float).eps) + >>> optimize.approx_fprime(x, func, [eps, np.sqrt(200) * eps], c0, c1) + array([ 2. , 400.00004198]) + + """ + xk = np.asarray(xk, float) + f0 = f(xk, *args) + + return approx_derivative(f, xk, method='2-point', abs_step=epsilon, + args=args, f0=f0) + + +def check_grad(func, grad, x0, *args, epsilon=_epsilon, + direction='all', seed=None): + """Check the correctness of a gradient function by comparing it against a + (forward) finite-difference approximation of the gradient. + + Parameters + ---------- + func : callable ``func(x0, *args)`` + Function whose derivative is to be checked. + grad : callable ``grad(x0, *args)`` + Jacobian of `func`. + x0 : ndarray + Points to check `grad` against forward difference approximation of grad + using `func`. + args : \\*args, optional + Extra arguments passed to `func` and `grad`. + epsilon : float, optional + Step size used for the finite difference approximation. It defaults to + ``sqrt(np.finfo(float).eps)``, which is approximately 1.49e-08. + direction : str, optional + If set to ``'random'``, then gradients along a random vector + are used to check `grad` against forward difference approximation + using `func`. By default it is ``'all'``, in which case, all + the one hot direction vectors are considered to check `grad`. + If `func` is a vector valued function then only ``'all'`` can be used. + seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. 
+ If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance then + that instance is used. + Specify `seed` for reproducing the return value from this function. + The random numbers generated with this seed affect the random vector + along which gradients are computed to check ``grad``. Note that `seed` + is only used when `direction` argument is set to `'random'`. + + Returns + ------- + err : float + The square root of the sum of squares (i.e., the 2-norm) of the + difference between ``grad(x0, *args)`` and the finite difference + approximation of `grad` using func at the points `x0`. + + See Also + -------- + approx_fprime + + Examples + -------- + >>> import numpy as np + >>> def func(x): + ... return x[0]**2 - 0.5 * x[1]**3 + >>> def grad(x): + ... return [2 * x[0], -1.5 * x[1]**2] + >>> from scipy.optimize import check_grad + >>> check_grad(func, grad, [1.5, -1.5]) + 2.9802322387695312e-08 # may vary + >>> rng = np.random.default_rng() + >>> check_grad(func, grad, [1.5, -1.5], + ... direction='random', seed=rng) + 2.9802322387695312e-08 + + """ + step = epsilon + x0 = np.asarray(x0) + + def g(w, func, x0, v, *args): + return func(x0 + w*v, *args) + + if direction == 'random': + _grad = np.asanyarray(grad(x0, *args)) + if _grad.ndim > 1: + raise ValueError("'random' can only be used with scalar valued" + " func") + random_state = check_random_state(seed) + v = random_state.normal(0, 1, size=(x0.shape)) + _args = (func, x0, v) + args + _func = g + vars = np.zeros((1,)) + analytical_grad = np.dot(_grad, v) + elif direction == 'all': + _args = args + _func = func + vars = x0 + analytical_grad = grad(x0, *args) + else: + raise ValueError(f"{direction} is not a valid string for " + "``direction`` argument") + + return np.sqrt(np.sum(np.abs( + (analytical_grad - approx_fprime(vars, _func, step, *_args))**2 + ))) + + +def approx_fhess_p(x0, p, fprime, epsilon, *args): + # calculate fprime(x0) first, as this may be cached by ScalarFunction + f1 = fprime(*((x0,) + args)) + f2 = fprime(*((x0 + epsilon*p,) + args)) + return (f2 - f1) / epsilon + + +class _LineSearchError(RuntimeError): + pass + + +def _line_search_wolfe12(f, fprime, xk, pk, gfk, old_fval, old_old_fval, + **kwargs): + """ + Same as line_search_wolfe1, but fall back to line_search_wolfe2 if + suitable step length is not found, and raise an exception if a + suitable step length is not found. + + Raises + ------ + _LineSearchError + If no suitable step size is found + + """ + + extra_condition = kwargs.pop('extra_condition', None) + + ret = line_search_wolfe1(f, fprime, xk, pk, gfk, + old_fval, old_old_fval, + **kwargs) + + if ret[0] is not None and extra_condition is not None: + xp1 = xk + ret[0] * pk + if not extra_condition(ret[0], xp1, ret[3], ret[5]): + # Reject step if extra_condition fails + ret = (None,) + + if ret[0] is None: + # line search failed: try different one. 
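+        # Fall back from line_search_wolfe1 to line_search_wolfe2 while
+        # silencing the LineSearchWarning the latter may emit; only the
+        # keyword arguments it understands ('c1', 'c2', 'amax') are forwarded.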
+ with warnings.catch_warnings(): + warnings.simplefilter('ignore', LineSearchWarning) + kwargs2 = {} + for key in ('c1', 'c2', 'amax'): + if key in kwargs: + kwargs2[key] = kwargs[key] + ret = line_search_wolfe2(f, fprime, xk, pk, gfk, + old_fval, old_old_fval, + extra_condition=extra_condition, + **kwargs2) + + if ret[0] is None: + raise _LineSearchError() + + return ret + + +def fmin_bfgs(f, x0, fprime=None, args=(), gtol=1e-5, norm=np.inf, + epsilon=_epsilon, maxiter=None, full_output=0, disp=1, + retall=0, callback=None, xrtol=0, c1=1e-4, c2=0.9, + hess_inv0=None): + """ + Minimize a function using the BFGS algorithm. + + Parameters + ---------- + f : callable ``f(x,*args)`` + Objective function to be minimized. + x0 : ndarray + Initial guess, shape (n,) + fprime : callable ``f'(x,*args)``, optional + Gradient of f. + args : tuple, optional + Extra arguments passed to f and fprime. + gtol : float, optional + Terminate successfully if gradient norm is less than `gtol` + norm : float, optional + Order of norm (Inf is max, -Inf is min) + epsilon : int or ndarray, optional + If `fprime` is approximated, use this value for the step size. + callback : callable, optional + An optional user-supplied function to call after each + iteration. Called as ``callback(xk)``, where ``xk`` is the + current parameter vector. + maxiter : int, optional + Maximum number of iterations to perform. + full_output : bool, optional + If True, return ``fopt``, ``func_calls``, ``grad_calls``, and + ``warnflag`` in addition to ``xopt``. + disp : bool, optional + Print convergence message if True. + retall : bool, optional + Return a list of results at each iteration if True. + xrtol : float, default: 0 + Relative tolerance for `x`. Terminate successfully if step + size is less than ``xk * xrtol`` where ``xk`` is the current + parameter vector. + c1 : float, default: 1e-4 + Parameter for Armijo condition rule. + c2 : float, default: 0.9 + Parameter for curvature condition rule. + hess_inv0 : None or ndarray, optional`` + Initial inverse hessian estimate, shape (n, n). If None (default) then + the identity matrix is used. + + Returns + ------- + xopt : ndarray + Parameters which minimize f, i.e., ``f(xopt) == fopt``. + fopt : float + Minimum value. + gopt : ndarray + Value of gradient at minimum, f'(xopt), which should be near 0. + Bopt : ndarray + Value of 1/f''(xopt), i.e., the inverse Hessian matrix. + func_calls : int + Number of function_calls made. + grad_calls : int + Number of gradient calls made. + warnflag : integer + 1 : Maximum number of iterations exceeded. + 2 : Gradient and/or function calls not changing. + 3 : NaN result encountered. + allvecs : list + The value of `xopt` at each iteration. Only returned if `retall` is + True. + + Notes + ----- + Optimize the function, `f`, whose gradient is given by `fprime` + using the quasi-Newton method of Broyden, Fletcher, Goldfarb, + and Shanno (BFGS). + + Parameters `c1` and `c2` must satisfy ``0 < c1 < c2 < 1``. + + See Also + -------- + minimize: Interface to minimization algorithms for multivariate + functions. See ``method='BFGS'`` in particular. + + References + ---------- + Wright, and Nocedal 'Numerical Optimization', 1999, p. 198. + + Examples + -------- + >>> import numpy as np + >>> from scipy.optimize import fmin_bfgs + >>> def quadratic_cost(x, Q): + ... return x @ Q @ x + ... 
+ >>> x0 = np.array([-3, -4]) + >>> cost_weight = np.diag([1., 10.]) + >>> # Note that a trailing comma is necessary for a tuple with single element + >>> fmin_bfgs(quadratic_cost, x0, args=(cost_weight,)) + Optimization terminated successfully. + Current function value: 0.000000 + Iterations: 7 # may vary + Function evaluations: 24 # may vary + Gradient evaluations: 8 # may vary + array([ 2.85169950e-06, -4.61820139e-07]) + + >>> def quadratic_cost_grad(x, Q): + ... return 2 * Q @ x + ... + >>> fmin_bfgs(quadratic_cost, x0, quadratic_cost_grad, args=(cost_weight,)) + Optimization terminated successfully. + Current function value: 0.000000 + Iterations: 7 + Function evaluations: 8 + Gradient evaluations: 8 + array([ 2.85916637e-06, -4.54371951e-07]) + + """ + opts = {'gtol': gtol, + 'norm': norm, + 'eps': epsilon, + 'disp': disp, + 'maxiter': maxiter, + 'return_all': retall, + 'xrtol': xrtol, + 'c1': c1, + 'c2': c2, + 'hess_inv0': hess_inv0} + + callback = _wrap_callback(callback) + res = _minimize_bfgs(f, x0, args, fprime, callback=callback, **opts) + + if full_output: + retlist = (res['x'], res['fun'], res['jac'], res['hess_inv'], + res['nfev'], res['njev'], res['status']) + if retall: + retlist += (res['allvecs'], ) + return retlist + else: + if retall: + return res['x'], res['allvecs'] + else: + return res['x'] + + +def _minimize_bfgs(fun, x0, args=(), jac=None, callback=None, + gtol=1e-5, norm=np.inf, eps=_epsilon, maxiter=None, + disp=False, return_all=False, finite_diff_rel_step=None, + xrtol=0, c1=1e-4, c2=0.9, + hess_inv0=None, **unknown_options): + """ + Minimization of scalar function of one or more variables using the + BFGS algorithm. + + Options + ------- + disp : bool + Set to True to print convergence messages. + maxiter : int + Maximum number of iterations to perform. + gtol : float + Terminate successfully if gradient norm is less than `gtol`. + norm : float + Order of norm (Inf is max, -Inf is min). + eps : float or ndarray + If `jac is None` the absolute step size used for numerical + approximation of the jacobian via forward differences. + return_all : bool, optional + Set to True to return a list of the best solution at each of the + iterations. + finite_diff_rel_step : None or array_like, optional + If `jac in ['2-point', '3-point', 'cs']` the relative step size to + use for numerical approximation of the jacobian. The absolute step + size is computed as ``h = rel_step * sign(x) * max(1, abs(x))``, + possibly adjusted to fit into the bounds. For ``jac='3-point'`` + the sign of `h` is ignored. If None (default) then step is selected + automatically. + xrtol : float, default: 0 + Relative tolerance for `x`. Terminate successfully if step size is + less than ``xk * xrtol`` where ``xk`` is the current parameter vector. + c1 : float, default: 1e-4 + Parameter for Armijo condition rule. + c2 : float, default: 0.9 + Parameter for curvature condition rule. + hess_inv0 : None or ndarray, optional + Initial inverse hessian estimate, shape (n, n). If None (default) then + the identity matrix is used. + + Notes + ----- + Parameters `c1` and `c2` must satisfy ``0 < c1 < c2 < 1``. + + If minimization doesn't complete successfully, with an error message of + ``Desired error not necessarily achieved due to precision loss``, then + consider setting `gtol` to a higher value. This precision loss typically + occurs when the (finite difference) numerical differentiation cannot provide + sufficient precision to satisfy the `gtol` termination criterion. 
+ This can happen when working in single precision and a callable jac is not + provided. For single precision problems a `gtol` of 1e-3 seems to work. + """ + _check_unknown_options(unknown_options) + _check_positive_definite(hess_inv0) + retall = return_all + + x0 = asarray(x0).flatten() + if x0.ndim == 0: + x0.shape = (1,) + if maxiter is None: + maxiter = len(x0) * 200 + + sf = _prepare_scalar_function(fun, x0, jac, args=args, epsilon=eps, + finite_diff_rel_step=finite_diff_rel_step) + + f = sf.fun + myfprime = sf.grad + + old_fval = f(x0) + gfk = myfprime(x0) + + k = 0 + N = len(x0) + I = np.eye(N, dtype=int) + Hk = I if hess_inv0 is None else hess_inv0 + + # Sets the initial step guess to dx ~ 1 + old_old_fval = old_fval + np.linalg.norm(gfk) / 2 + + xk = x0 + if retall: + allvecs = [x0] + warnflag = 0 + gnorm = vecnorm(gfk, ord=norm) + while (gnorm > gtol) and (k < maxiter): + pk = -np.dot(Hk, gfk) + try: + alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ + _line_search_wolfe12(f, myfprime, xk, pk, gfk, + old_fval, old_old_fval, amin=1e-100, + amax=1e100, c1=c1, c2=c2) + except _LineSearchError: + # Line search failed to find a better solution. + warnflag = 2 + break + + sk = alpha_k * pk + xkp1 = xk + sk + + if retall: + allvecs.append(xkp1) + xk = xkp1 + if gfkp1 is None: + gfkp1 = myfprime(xkp1) + + yk = gfkp1 - gfk + gfk = gfkp1 + k += 1 + intermediate_result = OptimizeResult(x=xk, fun=old_fval) + if _call_callback_maybe_halt(callback, intermediate_result): + break + gnorm = vecnorm(gfk, ord=norm) + if (gnorm <= gtol): + break + + # See Chapter 5 in P.E. Frandsen, K. Jonasson, H.B. Nielsen, + # O. Tingleff: "Unconstrained Optimization", IMM, DTU. 1999. + # These notes are available here: + # http://www2.imm.dtu.dk/documents/ftp/publlec.html + if (alpha_k*vecnorm(pk) <= xrtol*(xrtol + vecnorm(xk))): + break + + if not np.isfinite(old_fval): + # We correctly found +-Inf as optimal value, or something went + # wrong. + warnflag = 2 + break + + rhok_inv = np.dot(yk, sk) + # this was handled in numeric, let it remains for more safety + # Cryptic comment above is preserved for posterity. Future reader: + # consider change to condition below proposed in gh-1261/gh-17345. + if rhok_inv == 0.: + rhok = 1000.0 + if disp: + msg = "Divide-by-zero encountered: rhok assumed large" + _print_success_message_or_warn(True, msg) + else: + rhok = 1. 
/ rhok_inv + + A1 = I - sk[:, np.newaxis] * yk[np.newaxis, :] * rhok + A2 = I - yk[:, np.newaxis] * sk[np.newaxis, :] * rhok + Hk = np.dot(A1, np.dot(Hk, A2)) + (rhok * sk[:, np.newaxis] * + sk[np.newaxis, :]) + + fval = old_fval + + if warnflag == 2: + msg = _status_message['pr_loss'] + elif k >= maxiter: + warnflag = 1 + msg = _status_message['maxiter'] + elif np.isnan(gnorm) or np.isnan(fval) or np.isnan(xk).any(): + warnflag = 3 + msg = _status_message['nan'] + else: + msg = _status_message['success'] + + if disp: + _print_success_message_or_warn(warnflag, msg) + print(" Current function value: %f" % fval) + print(" Iterations: %d" % k) + print(" Function evaluations: %d" % sf.nfev) + print(" Gradient evaluations: %d" % sf.ngev) + + result = OptimizeResult(fun=fval, jac=gfk, hess_inv=Hk, nfev=sf.nfev, + njev=sf.ngev, status=warnflag, + success=(warnflag == 0), message=msg, x=xk, + nit=k) + if retall: + result['allvecs'] = allvecs + return result + + +def _print_success_message_or_warn(warnflag, message, warntype=None): + if not warnflag: + print(message) + else: + warnings.warn(message, warntype or OptimizeWarning, stacklevel=3) + + +def fmin_cg(f, x0, fprime=None, args=(), gtol=1e-5, norm=np.inf, + epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0, + callback=None, c1=1e-4, c2=0.4): + """ + Minimize a function using a nonlinear conjugate gradient algorithm. + + Parameters + ---------- + f : callable, ``f(x, *args)`` + Objective function to be minimized. Here `x` must be a 1-D array of + the variables that are to be changed in the search for a minimum, and + `args` are the other (fixed) parameters of `f`. + x0 : ndarray + A user-supplied initial estimate of `xopt`, the optimal value of `x`. + It must be a 1-D array of values. + fprime : callable, ``fprime(x, *args)``, optional + A function that returns the gradient of `f` at `x`. Here `x` and `args` + are as described above for `f`. The returned value must be a 1-D array. + Defaults to None, in which case the gradient is approximated + numerically (see `epsilon`, below). + args : tuple, optional + Parameter values passed to `f` and `fprime`. Must be supplied whenever + additional fixed parameters are needed to completely specify the + functions `f` and `fprime`. + gtol : float, optional + Stop when the norm of the gradient is less than `gtol`. + norm : float, optional + Order to use for the norm of the gradient + (``-np.inf`` is min, ``np.inf`` is max). + epsilon : float or ndarray, optional + Step size(s) to use when `fprime` is approximated numerically. Can be a + scalar or a 1-D array. Defaults to ``sqrt(eps)``, with eps the + floating point machine precision. Usually ``sqrt(eps)`` is about + 1.5e-8. + maxiter : int, optional + Maximum number of iterations to perform. Default is ``200 * len(x0)``. + full_output : bool, optional + If True, return `fopt`, `func_calls`, `grad_calls`, and `warnflag` in + addition to `xopt`. See the Returns section below for additional + information on optional return values. + disp : bool, optional + If True, return a convergence message, followed by `xopt`. + retall : bool, optional + If True, add to the returned values the results of each iteration. + callback : callable, optional + An optional user-supplied function, called after each iteration. + Called as ``callback(xk)``, where ``xk`` is the current value of `x0`. + c1 : float, default: 1e-4 + Parameter for Armijo condition rule. + c2 : float, default: 0.4 + Parameter for curvature condition rule. 
+ + Returns + ------- + xopt : ndarray + Parameters which minimize f, i.e., ``f(xopt) == fopt``. + fopt : float, optional + Minimum value found, f(xopt). Only returned if `full_output` is True. + func_calls : int, optional + The number of function_calls made. Only returned if `full_output` + is True. + grad_calls : int, optional + The number of gradient calls made. Only returned if `full_output` is + True. + warnflag : int, optional + Integer value with warning status, only returned if `full_output` is + True. + + 0 : Success. + + 1 : The maximum number of iterations was exceeded. + + 2 : Gradient and/or function calls were not changing. May indicate + that precision was lost, i.e., the routine did not converge. + + 3 : NaN result encountered. + + allvecs : list of ndarray, optional + List of arrays, containing the results at each iteration. + Only returned if `retall` is True. + + See Also + -------- + minimize : common interface to all `scipy.optimize` algorithms for + unconstrained and constrained minimization of multivariate + functions. It provides an alternative way to call + ``fmin_cg``, by specifying ``method='CG'``. + + Notes + ----- + This conjugate gradient algorithm is based on that of Polak and Ribiere + [1]_. + + Conjugate gradient methods tend to work better when: + + 1. `f` has a unique global minimizing point, and no local minima or + other stationary points, + 2. `f` is, at least locally, reasonably well approximated by a + quadratic function of the variables, + 3. `f` is continuous and has a continuous gradient, + 4. `fprime` is not too large, e.g., has a norm less than 1000, + 5. The initial guess, `x0`, is reasonably close to `f` 's global + minimizing point, `xopt`. + + Parameters `c1` and `c2` must satisfy ``0 < c1 < c2 < 1``. + + References + ---------- + .. [1] Wright & Nocedal, "Numerical Optimization", 1999, pp. 120-122. + + Examples + -------- + Example 1: seek the minimum value of the expression + ``a*u**2 + b*u*v + c*v**2 + d*u + e*v + f`` for given values + of the parameters and an initial guess ``(u, v) = (0, 0)``. + + >>> import numpy as np + >>> args = (2, 3, 7, 8, 9, 10) # parameter values + >>> def f(x, *args): + ... u, v = x + ... a, b, c, d, e, f = args + ... return a*u**2 + b*u*v + c*v**2 + d*u + e*v + f + >>> def gradf(x, *args): + ... u, v = x + ... a, b, c, d, e, f = args + ... gu = 2*a*u + b*v + d # u-component of the gradient + ... gv = b*u + 2*c*v + e # v-component of the gradient + ... return np.asarray((gu, gv)) + >>> x0 = np.asarray((0, 0)) # Initial guess. + >>> from scipy import optimize + >>> res1 = optimize.fmin_cg(f, x0, fprime=gradf, args=args) + Optimization terminated successfully. + Current function value: 1.617021 + Iterations: 4 + Function evaluations: 8 + Gradient evaluations: 8 + >>> res1 + array([-1.80851064, -0.25531915]) + + Example 2: solve the same problem using the `minimize` function. + (This `myopts` dictionary shows all of the available options, + although in practice only non-default values would be needed. + The returned value will be a dictionary.) + + >>> opts = {'maxiter' : None, # default value. + ... 'disp' : True, # non-default value. + ... 'gtol' : 1e-5, # default value. + ... 'norm' : np.inf, # default value. + ... 'eps' : 1.4901161193847656e-08} # default value. + >>> res2 = optimize.minimize(f, x0, jac=gradf, args=args, + ... method='CG', options=opts) + Optimization terminated successfully. 
+ Current function value: 1.617021 + Iterations: 4 + Function evaluations: 8 + Gradient evaluations: 8 + >>> res2.x # minimum found + array([-1.80851064, -0.25531915]) + + """ + opts = {'gtol': gtol, + 'norm': norm, + 'eps': epsilon, + 'disp': disp, + 'maxiter': maxiter, + 'return_all': retall} + + callback = _wrap_callback(callback) + res = _minimize_cg(f, x0, args, fprime, callback=callback, c1=c1, c2=c2, + **opts) + + if full_output: + retlist = res['x'], res['fun'], res['nfev'], res['njev'], res['status'] + if retall: + retlist += (res['allvecs'], ) + return retlist + else: + if retall: + return res['x'], res['allvecs'] + else: + return res['x'] + + +def _minimize_cg(fun, x0, args=(), jac=None, callback=None, + gtol=1e-5, norm=np.inf, eps=_epsilon, maxiter=None, + disp=False, return_all=False, finite_diff_rel_step=None, + c1=1e-4, c2=0.4, **unknown_options): + """ + Minimization of scalar function of one or more variables using the + conjugate gradient algorithm. + + Options + ------- + disp : bool + Set to True to print convergence messages. + maxiter : int + Maximum number of iterations to perform. + gtol : float + Gradient norm must be less than `gtol` before successful + termination. + norm : float + Order of norm (Inf is max, -Inf is min). + eps : float or ndarray + If `jac is None` the absolute step size used for numerical + approximation of the jacobian via forward differences. + return_all : bool, optional + Set to True to return a list of the best solution at each of the + iterations. + finite_diff_rel_step : None or array_like, optional + If `jac in ['2-point', '3-point', 'cs']` the relative step size to + use for numerical approximation of the jacobian. The absolute step + size is computed as ``h = rel_step * sign(x) * max(1, abs(x))``, + possibly adjusted to fit into the bounds. For ``jac='3-point'`` + the sign of `h` is ignored. If None (default) then step is selected + automatically. + c1 : float, default: 1e-4 + Parameter for Armijo condition rule. + c2 : float, default: 0.4 + Parameter for curvature condition rule. + + Notes + ----- + Parameters `c1` and `c2` must satisfy ``0 < c1 < c2 < 1``. + """ + _check_unknown_options(unknown_options) + + retall = return_all + + x0 = asarray(x0).flatten() + if maxiter is None: + maxiter = len(x0) * 200 + + sf = _prepare_scalar_function(fun, x0, jac=jac, args=args, epsilon=eps, + finite_diff_rel_step=finite_diff_rel_step) + + f = sf.fun + myfprime = sf.grad + + old_fval = f(x0) + gfk = myfprime(x0) + + k = 0 + xk = x0 + # Sets the initial step guess to dx ~ 1 + old_old_fval = old_fval + np.linalg.norm(gfk) / 2 + + if retall: + allvecs = [xk] + warnflag = 0 + pk = -gfk + gnorm = vecnorm(gfk, ord=norm) + + sigma_3 = 0.01 + + while (gnorm > gtol) and (k < maxiter): + deltak = np.dot(gfk, gfk) + + cached_step = [None] + + def polak_ribiere_powell_step(alpha, gfkp1=None): + xkp1 = xk + alpha * pk + if gfkp1 is None: + gfkp1 = myfprime(xkp1) + yk = gfkp1 - gfk + beta_k = max(0, np.dot(yk, gfkp1) / deltak) + pkp1 = -gfkp1 + beta_k * pk + gnorm = vecnorm(gfkp1, ord=norm) + return (alpha, xkp1, pkp1, gfkp1, gnorm) + + def descent_condition(alpha, xkp1, fp1, gfkp1): + # Polak-Ribiere+ needs an explicit check of a sufficient + # descent condition, which is not guaranteed by strong Wolfe. + # + # See Gilbert & Nocedal, "Global convergence properties of + # conjugate gradient methods for optimization", + # SIAM J. Optimization 2, 21 (1992). 
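+            #
+            # The candidate point, direction, and gradient computed for this
+            # check are cached so they can be reused if the line search accepts
+            # this alpha (see the ``alpha_k == cached_step[0]`` comparison
+            # below) rather than being recomputed.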
+ cached_step[:] = polak_ribiere_powell_step(alpha, gfkp1) + alpha, xk, pk, gfk, gnorm = cached_step + + # Accept step if it leads to convergence. + if gnorm <= gtol: + return True + + # Accept step if sufficient descent condition applies. + return np.dot(pk, gfk) <= -sigma_3 * np.dot(gfk, gfk) + + try: + alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ + _line_search_wolfe12(f, myfprime, xk, pk, gfk, old_fval, + old_old_fval, c1=c1, c2=c2, amin=1e-100, + amax=1e100, extra_condition=descent_condition) + except _LineSearchError: + # Line search failed to find a better solution. + warnflag = 2 + break + + # Reuse already computed results if possible + if alpha_k == cached_step[0]: + alpha_k, xk, pk, gfk, gnorm = cached_step + else: + alpha_k, xk, pk, gfk, gnorm = polak_ribiere_powell_step(alpha_k, gfkp1) + + if retall: + allvecs.append(xk) + k += 1 + intermediate_result = OptimizeResult(x=xk, fun=old_fval) + if _call_callback_maybe_halt(callback, intermediate_result): + break + + fval = old_fval + if warnflag == 2: + msg = _status_message['pr_loss'] + elif k >= maxiter: + warnflag = 1 + msg = _status_message['maxiter'] + elif np.isnan(gnorm) or np.isnan(fval) or np.isnan(xk).any(): + warnflag = 3 + msg = _status_message['nan'] + else: + msg = _status_message['success'] + + if disp: + _print_success_message_or_warn(warnflag, msg) + print(" Current function value: %f" % fval) + print(" Iterations: %d" % k) + print(" Function evaluations: %d" % sf.nfev) + print(" Gradient evaluations: %d" % sf.ngev) + + result = OptimizeResult(fun=fval, jac=gfk, nfev=sf.nfev, + njev=sf.ngev, status=warnflag, + success=(warnflag == 0), message=msg, x=xk, + nit=k) + if retall: + result['allvecs'] = allvecs + return result + + +def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5, + epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0, + callback=None, c1=1e-4, c2=0.9): + """ + Unconstrained minimization of a function using the Newton-CG method. + + Parameters + ---------- + f : callable ``f(x, *args)`` + Objective function to be minimized. + x0 : ndarray + Initial guess. + fprime : callable ``f'(x, *args)`` + Gradient of f. + fhess_p : callable ``fhess_p(x, p, *args)``, optional + Function which computes the Hessian of f times an + arbitrary vector, p. + fhess : callable ``fhess(x, *args)``, optional + Function to compute the Hessian matrix of f. + args : tuple, optional + Extra arguments passed to f, fprime, fhess_p, and fhess + (the same set of extra arguments is supplied to all of + these functions). + epsilon : float or ndarray, optional + If fhess is approximated, use this value for the step size. + callback : callable, optional + An optional user-supplied function which is called after + each iteration. Called as callback(xk), where xk is the + current parameter vector. + avextol : float, optional + Convergence is assumed when the average relative error in + the minimizer falls below this amount. + maxiter : int, optional + Maximum number of iterations to perform. + full_output : bool, optional + If True, return the optional outputs. + disp : bool, optional + If True, print convergence message. + retall : bool, optional + If True, return a list of results at each iteration. + c1 : float, default: 1e-4 + Parameter for Armijo condition rule. + c2 : float, default: 0.9 + Parameter for curvature condition rule + + Returns + ------- + xopt : ndarray + Parameters which minimize f, i.e., ``f(xopt) == fopt``. 
+ fopt : float + Value of the function at xopt, i.e., ``fopt = f(xopt)``. + fcalls : int + Number of function calls made. + gcalls : int + Number of gradient calls made. + hcalls : int + Number of Hessian calls made. + warnflag : int + Warnings generated by the algorithm. + 1 : Maximum number of iterations exceeded. + 2 : Line search failure (precision loss). + 3 : NaN result encountered. + allvecs : list + The result at each iteration, if retall is True (see below). + + See also + -------- + minimize: Interface to minimization algorithms for multivariate + functions. See the 'Newton-CG' `method` in particular. + + Notes + ----- + Only one of `fhess_p` or `fhess` need to be given. If `fhess` + is provided, then `fhess_p` will be ignored. If neither `fhess` + nor `fhess_p` is provided, then the hessian product will be + approximated using finite differences on `fprime`. `fhess_p` + must compute the hessian times an arbitrary vector. If it is not + given, finite-differences on `fprime` are used to compute + it. + + Newton-CG methods are also called truncated Newton methods. This + function differs from scipy.optimize.fmin_tnc because + + 1. scipy.optimize.fmin_ncg is written purely in Python using NumPy + and scipy while scipy.optimize.fmin_tnc calls a C function. + 2. scipy.optimize.fmin_ncg is only for unconstrained minimization + while scipy.optimize.fmin_tnc is for unconstrained minimization + or box constrained minimization. (Box constraints give + lower and upper bounds for each variable separately.) + + Parameters `c1` and `c2` must satisfy ``0 < c1 < c2 < 1``. + + References + ---------- + Wright & Nocedal, 'Numerical Optimization', 1999, p. 140. + + """ + opts = {'xtol': avextol, + 'eps': epsilon, + 'maxiter': maxiter, + 'disp': disp, + 'return_all': retall} + + callback = _wrap_callback(callback) + res = _minimize_newtoncg(f, x0, args, fprime, fhess, fhess_p, + callback=callback, c1=c1, c2=c2, **opts) + + if full_output: + retlist = (res['x'], res['fun'], res['nfev'], res['njev'], + res['nhev'], res['status']) + if retall: + retlist += (res['allvecs'], ) + return retlist + else: + if retall: + return res['x'], res['allvecs'] + else: + return res['x'] + + +def _minimize_newtoncg(fun, x0, args=(), jac=None, hess=None, hessp=None, + callback=None, xtol=1e-5, eps=_epsilon, maxiter=None, + disp=False, return_all=False, c1=1e-4, c2=0.9, + **unknown_options): + """ + Minimization of scalar function of one or more variables using the + Newton-CG algorithm. + + Note that the `jac` parameter (Jacobian) is required. + + Options + ------- + disp : bool + Set to True to print convergence messages. + xtol : float + Average relative error in solution `xopt` acceptable for + convergence. + maxiter : int + Maximum number of iterations to perform. + eps : float or ndarray + If `hessp` is approximated, use this value for the step size. + return_all : bool, optional + Set to True to return a list of the best solution at each of the + iterations. + c1 : float, default: 1e-4 + Parameter for Armijo condition rule. + c2 : float, default: 0.9 + Parameter for curvature condition rule. + + Notes + ----- + Parameters `c1` and `c2` must satisfy ``0 < c1 < c2 < 1``. + """ + _check_unknown_options(unknown_options) + if jac is None: + raise ValueError('Jacobian is required for Newton-CG method') + fhess_p = hessp + fhess = hess + avextol = xtol + epsilon = eps + retall = return_all + + x0 = asarray(x0).flatten() + # TODO: add hessp (callable or FD) to ScalarFunction? 
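+    # Passing `hess` through lets ScalarFunction own the Hessian evaluation;
+    # the probe ``sf.hess(x0)`` below is only used to detect whether the
+    # result is a LinearOperator (i.e. usable only as a Hessian-vector
+    # product).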
+ sf = _prepare_scalar_function( + fun, x0, jac, args=args, epsilon=eps, hess=hess + ) + f = sf.fun + fprime = sf.grad + _h = sf.hess(x0) + + # Logic for hess/hessp + # - If a callable(hess) is provided, then use that + # - If hess is a FD_METHOD, or the output from hess(x) is a LinearOperator + # then create a hessp function using those. + # - If hess is None but you have callable(hessp) then use the hessp. + # - If hess and hessp are None then approximate hessp using the grad/jac. + + if (hess in FD_METHODS or isinstance(_h, LinearOperator)): + fhess = None + + def _hessp(x, p, *args): + return sf.hess(x).dot(p) + + fhess_p = _hessp + + def terminate(warnflag, msg): + if disp: + _print_success_message_or_warn(warnflag, msg) + print(" Current function value: %f" % old_fval) + print(" Iterations: %d" % k) + print(" Function evaluations: %d" % sf.nfev) + print(" Gradient evaluations: %d" % sf.ngev) + print(" Hessian evaluations: %d" % hcalls) + fval = old_fval + result = OptimizeResult(fun=fval, jac=gfk, nfev=sf.nfev, + njev=sf.ngev, nhev=hcalls, status=warnflag, + success=(warnflag == 0), message=msg, x=xk, + nit=k) + if retall: + result['allvecs'] = allvecs + return result + + hcalls = 0 + if maxiter is None: + maxiter = len(x0)*200 + cg_maxiter = 20*len(x0) + + xtol = len(x0) * avextol + # Make sure we enter the while loop. + update_l1norm = np.finfo(float).max + xk = np.copy(x0) + if retall: + allvecs = [xk] + k = 0 + gfk = None + old_fval = f(x0) + old_old_fval = None + float64eps = np.finfo(np.float64).eps + while update_l1norm > xtol: + if k >= maxiter: + msg = "Warning: " + _status_message['maxiter'] + return terminate(1, msg) + # Compute a search direction pk by applying the CG method to + # del2 f(xk) p = - grad f(xk) starting from 0. + b = -fprime(xk) + maggrad = np.linalg.norm(b, ord=1) + eta = min(0.5, math.sqrt(maggrad)) + termcond = eta * maggrad + xsupi = zeros(len(x0), dtype=x0.dtype) + ri = -b + psupi = -ri + i = 0 + dri0 = np.dot(ri, ri) + + if fhess is not None: # you want to compute hessian once. + A = sf.hess(xk) + hcalls += 1 + + for k2 in range(cg_maxiter): + if np.add.reduce(np.abs(ri)) <= termcond: + break + if fhess is None: + if fhess_p is None: + Ap = approx_fhess_p(xk, psupi, fprime, epsilon) + else: + Ap = fhess_p(xk, psupi, *args) + hcalls += 1 + else: + # hess was supplied as a callable or hessian update strategy, so + # A is a dense numpy array or sparse matrix + Ap = A.dot(psupi) + # check curvature + Ap = asarray(Ap).squeeze() # get rid of matrices... + curv = np.dot(psupi, Ap) + if 0 <= curv <= 3 * float64eps: + break + elif curv < 0: + if (i > 0): + break + else: + # fall back to steepest descent direction + xsupi = dri0 / (-curv) * b + break + alphai = dri0 / curv + xsupi += alphai * psupi + ri += alphai * Ap + dri1 = np.dot(ri, ri) + betai = dri1 / dri0 + psupi = -ri + betai * psupi + i += 1 + dri0 = dri1 # update np.dot(ri,ri) for next time. + else: + # curvature keeps increasing, bail out + msg = ("Warning: CG iterations didn't converge. The Hessian is not " + "positive definite.") + return terminate(3, msg) + + pk = xsupi # search direction is solution to system. + gfk = -b # gradient at xk + + try: + alphak, fc, gc, old_fval, old_old_fval, gfkp1 = \ + _line_search_wolfe12(f, fprime, xk, pk, gfk, + old_fval, old_old_fval, c1=c1, c2=c2) + except _LineSearchError: + # Line search failed to find a better solution. 
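+            # Report the failure as precision loss (warnflag 2) and return the
+            # current iterate via terminate().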
+ msg = "Warning: " + _status_message['pr_loss'] + return terminate(2, msg) + + update = alphak * pk + xk += update # upcast if necessary + if retall: + allvecs.append(xk) + k += 1 + intermediate_result = OptimizeResult(x=xk, fun=old_fval) + if _call_callback_maybe_halt(callback, intermediate_result): + return terminate(5, "") + update_l1norm = np.linalg.norm(update, ord=1) + + else: + if np.isnan(old_fval) or np.isnan(update_l1norm): + return terminate(3, _status_message['nan']) + + msg = _status_message['success'] + return terminate(0, msg) + + +def fminbound(func, x1, x2, args=(), xtol=1e-5, maxfun=500, + full_output=0, disp=1): + """Bounded minimization for scalar functions. + + Parameters + ---------- + func : callable f(x,*args) + Objective function to be minimized (must accept and return scalars). + x1, x2 : float or array scalar + Finite optimization bounds. + args : tuple, optional + Extra arguments passed to function. + xtol : float, optional + The convergence tolerance. + maxfun : int, optional + Maximum number of function evaluations allowed. + full_output : bool, optional + If True, return optional outputs. + disp : int, optional + If non-zero, print messages. + 0 : no message printing. + 1 : non-convergence notification messages only. + 2 : print a message on convergence too. + 3 : print iteration results. + + + Returns + ------- + xopt : ndarray + Parameters (over given interval) which minimize the + objective function. + fval : number + (Optional output) The function value evaluated at the minimizer. + ierr : int + (Optional output) An error flag (0 if converged, 1 if maximum number of + function calls reached). + numfunc : int + (Optional output) The number of function calls made. + + See also + -------- + minimize_scalar: Interface to minimization algorithms for scalar + univariate functions. See the 'Bounded' `method` in particular. + + Notes + ----- + Finds a local minimizer of the scalar function `func` in the + interval x1 < xopt < x2 using Brent's method. (See `brent` + for auto-bracketing.) + + References + ---------- + .. [1] Forsythe, G.E., M. A. Malcolm, and C. B. Moler. "Computer Methods + for Mathematical Computations." Prentice-Hall Series in Automatic + Computation 259 (1977). + .. [2] Brent, Richard P. Algorithms for Minimization Without Derivatives. + Courier Corporation, 2013. + + Examples + -------- + `fminbound` finds the minimizer of the function in the given range. + The following examples illustrate this. + + >>> from scipy import optimize + >>> def f(x): + ... return (x-1)**2 + >>> minimizer = optimize.fminbound(f, -4, 4) + >>> minimizer + 1.0 + >>> minimum = f(minimizer) + >>> minimum + 0.0 + >>> res = optimize.fminbound(f, 3, 4, full_output=True) + >>> minimizer, fval, ierr, numfunc = res + >>> minimizer + 3.000005960860986 + >>> minimum = f(minimizer) + >>> minimum, fval + (4.000023843479476, 4.000023843479476) + """ + options = {'xatol': xtol, + 'maxiter': maxfun, + 'disp': disp} + + res = _minimize_scalar_bounded(func, (x1, x2), args, **options) + if full_output: + return res['x'], res['fun'], res['status'], res['nfev'] + else: + return res['x'] + + +def _minimize_scalar_bounded(func, bounds, args=(), + xatol=1e-5, maxiter=500, disp=0, + **unknown_options): + """ + Options + ------- + maxiter : int + Maximum number of iterations to perform. + disp: int, optional + If non-zero, print messages. + 0 : no message printing. + 1 : non-convergence notification messages only. + 2 : print a message on convergence too. + 3 : print iteration results. 
+ xatol : float + Absolute error in solution `xopt` acceptable for convergence. + + """ + _check_unknown_options(unknown_options) + maxfun = maxiter + # Test bounds are of correct form + if len(bounds) != 2: + raise ValueError('bounds must have two elements.') + x1, x2 = bounds + + if not (is_finite_scalar(x1) and is_finite_scalar(x2)): + raise ValueError("Optimization bounds must be finite scalars.") + + if x1 > x2: + raise ValueError("The lower bound exceeds the upper bound.") + + flag = 0 + header = ' Func-count x f(x) Procedure' + step = ' initial' + + sqrt_eps = sqrt(2.2e-16) + golden_mean = 0.5 * (3.0 - sqrt(5.0)) + a, b = x1, x2 + fulc = a + golden_mean * (b - a) + nfc, xf = fulc, fulc + rat = e = 0.0 + x = xf + fx = func(x, *args) + num = 1 + fmin_data = (1, xf, fx) + fu = np.inf + + ffulc = fnfc = fx + xm = 0.5 * (a + b) + tol1 = sqrt_eps * np.abs(xf) + xatol / 3.0 + tol2 = 2.0 * tol1 + + if disp > 2: + print(" ") + print(header) + print("%5.0f %12.6g %12.6g %s" % (fmin_data + (step,))) + + while (np.abs(xf - xm) > (tol2 - 0.5 * (b - a))): + golden = 1 + # Check for parabolic fit + if np.abs(e) > tol1: + golden = 0 + r = (xf - nfc) * (fx - ffulc) + q = (xf - fulc) * (fx - fnfc) + p = (xf - fulc) * q - (xf - nfc) * r + q = 2.0 * (q - r) + if q > 0.0: + p = -p + q = np.abs(q) + r = e + e = rat + + # Check for acceptability of parabola + if ((np.abs(p) < np.abs(0.5*q*r)) and (p > q*(a - xf)) and + (p < q * (b - xf))): + rat = (p + 0.0) / q + x = xf + rat + step = ' parabolic' + + if ((x - a) < tol2) or ((b - x) < tol2): + si = np.sign(xm - xf) + ((xm - xf) == 0) + rat = tol1 * si + else: # do a golden-section step + golden = 1 + + if golden: # do a golden-section step + if xf >= xm: + e = a - xf + else: + e = b - xf + rat = golden_mean*e + step = ' golden' + + si = np.sign(rat) + (rat == 0) + x = xf + si * np.maximum(np.abs(rat), tol1) + fu = func(x, *args) + num += 1 + fmin_data = (num, x, fu) + if disp > 2: + print("%5.0f %12.6g %12.6g %s" % (fmin_data + (step,))) + + if fu <= fx: + if x >= xf: + a = xf + else: + b = xf + fulc, ffulc = nfc, fnfc + nfc, fnfc = xf, fx + xf, fx = x, fu + else: + if x < xf: + a = x + else: + b = x + if (fu <= fnfc) or (nfc == xf): + fulc, ffulc = nfc, fnfc + nfc, fnfc = x, fu + elif (fu <= ffulc) or (fulc == xf) or (fulc == nfc): + fulc, ffulc = x, fu + + xm = 0.5 * (a + b) + tol1 = sqrt_eps * np.abs(xf) + xatol / 3.0 + tol2 = 2.0 * tol1 + + if num >= maxfun: + flag = 1 + break + + if np.isnan(xf) or np.isnan(fx) or np.isnan(fu): + flag = 2 + + fval = fx + if disp > 0: + _endprint(x, flag, fval, maxfun, xatol, disp) + + result = OptimizeResult(fun=fval, status=flag, success=(flag == 0), + message={0: 'Solution found.', + 1: 'Maximum number of function calls ' + 'reached.', + 2: _status_message['nan']}.get(flag, ''), + x=xf, nfev=num, nit=num) + + return result + + +class Brent: + #need to rethink design of __init__ + def __init__(self, func, args=(), tol=1.48e-8, maxiter=500, + full_output=0, disp=0): + self.func = func + self.args = args + self.tol = tol + self.maxiter = maxiter + self._mintol = 1.0e-11 + self._cg = 0.3819660 + self.xmin = None + self.fval = None + self.iter = 0 + self.funcalls = 0 + self.disp = disp + + # need to rethink design of set_bracket (new options, etc.) 
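+    # Typical internal usage, mirroring ``_minimize_scalar_brent`` below
+    # (a sketch, not an extra code path):
+    #
+    #     b = Brent(func=f, args=args, tol=tol, maxiter=maxiter)
+    #     b.set_bracket(brack)
+    #     b.optimize()
+    #     xmin, fval, nit, nfev = b.get_result(full_output=True)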
+ def set_bracket(self, brack=None): + self.brack = brack + + def get_bracket_info(self): + #set up + func = self.func + args = self.args + brack = self.brack + ### BEGIN core bracket_info code ### + ### carefully DOCUMENT any CHANGES in core ## + if brack is None: + xa, xb, xc, fa, fb, fc, funcalls = bracket(func, args=args) + elif len(brack) == 2: + xa, xb, xc, fa, fb, fc, funcalls = bracket(func, xa=brack[0], + xb=brack[1], args=args) + elif len(brack) == 3: + xa, xb, xc = brack + if (xa > xc): # swap so xa < xc can be assumed + xc, xa = xa, xc + if not ((xa < xb) and (xb < xc)): + raise ValueError( + "Bracketing values (xa, xb, xc) do not" + " fulfill this requirement: (xa < xb) and (xb < xc)" + ) + fa = func(*((xa,) + args)) + fb = func(*((xb,) + args)) + fc = func(*((xc,) + args)) + if not ((fb < fa) and (fb < fc)): + raise ValueError( + "Bracketing values (xa, xb, xc) do not fulfill" + " this requirement: (f(xb) < f(xa)) and (f(xb) < f(xc))" + ) + + funcalls = 3 + else: + raise ValueError("Bracketing interval must be " + "length 2 or 3 sequence.") + ### END core bracket_info code ### + + return xa, xb, xc, fa, fb, fc, funcalls + + def optimize(self): + # set up for optimization + func = self.func + xa, xb, xc, fa, fb, fc, funcalls = self.get_bracket_info() + _mintol = self._mintol + _cg = self._cg + ################################# + #BEGIN CORE ALGORITHM + ################################# + x = w = v = xb + fw = fv = fx = fb + if (xa < xc): + a = xa + b = xc + else: + a = xc + b = xa + deltax = 0.0 + iter = 0 + + if self.disp > 2: + print(" ") + print(f"{'Func-count':^12} {'x':^12} {'f(x)': ^12}") + print(f"{funcalls:^12g} {x:^12.6g} {fx:^12.6g}") + + while (iter < self.maxiter): + tol1 = self.tol * np.abs(x) + _mintol + tol2 = 2.0 * tol1 + xmid = 0.5 * (a + b) + # check for convergence + if np.abs(x - xmid) < (tol2 - 0.5 * (b - a)): + break + # XXX In the first iteration, rat is only bound in the true case + # of this conditional. This used to cause an UnboundLocalError + # (gh-4140). It should be set before the if (but to what?). + if (np.abs(deltax) <= tol1): + if (x >= xmid): + deltax = a - x # do a golden section step + else: + deltax = b - x + rat = _cg * deltax + else: # do a parabolic step + tmp1 = (x - w) * (fx - fv) + tmp2 = (x - v) * (fx - fw) + p = (x - v) * tmp2 - (x - w) * tmp1 + tmp2 = 2.0 * (tmp2 - tmp1) + if (tmp2 > 0.0): + p = -p + tmp2 = np.abs(tmp2) + dx_temp = deltax + deltax = rat + # check parabolic fit + if ((p > tmp2 * (a - x)) and (p < tmp2 * (b - x)) and + (np.abs(p) < np.abs(0.5 * tmp2 * dx_temp))): + rat = p * 1.0 / tmp2 # if parabolic step is useful. 
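+                    # The bounds checked above guarantee rat lies in
+                    # (a - x, b - x), so the trial point u = x + rat stays
+                    # inside the bracket; the next check only guards against
+                    # landing within tol2 of an endpoint.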
+ u = x + rat + if ((u - a) < tol2 or (b - u) < tol2): + if xmid - x >= 0: + rat = tol1 + else: + rat = -tol1 + else: + if (x >= xmid): + deltax = a - x # if it's not do a golden section step + else: + deltax = b - x + rat = _cg * deltax + + if (np.abs(rat) < tol1): # update by at least tol1 + if rat >= 0: + u = x + tol1 + else: + u = x - tol1 + else: + u = x + rat + fu = func(*((u,) + self.args)) # calculate new output value + funcalls += 1 + + if (fu > fx): # if it's bigger than current + if (u < x): + a = u + else: + b = u + if (fu <= fw) or (w == x): + v = w + w = u + fv = fw + fw = fu + elif (fu <= fv) or (v == x) or (v == w): + v = u + fv = fu + else: + if (u >= x): + a = x + else: + b = x + v = w + w = x + x = u + fv = fw + fw = fx + fx = fu + + if self.disp > 2: + print(f"{funcalls:^12g} {x:^12.6g} {fx:^12.6g}") + + iter += 1 + ################################# + #END CORE ALGORITHM + ################################# + + self.xmin = x + self.fval = fx + self.iter = iter + self.funcalls = funcalls + + def get_result(self, full_output=False): + if full_output: + return self.xmin, self.fval, self.iter, self.funcalls + else: + return self.xmin + + +def brent(func, args=(), brack=None, tol=1.48e-8, full_output=0, maxiter=500): + """ + Given a function of one variable and a possible bracket, return + a local minimizer of the function isolated to a fractional precision + of tol. + + Parameters + ---------- + func : callable f(x,*args) + Objective function. + args : tuple, optional + Additional arguments (if present). + brack : tuple, optional + Either a triple ``(xa, xb, xc)`` satisfying ``xa < xb < xc`` and + ``func(xb) < func(xa) and func(xb) < func(xc)``, or a pair + ``(xa, xb)`` to be used as initial points for a downhill bracket search + (see `scipy.optimize.bracket`). + The minimizer ``x`` will not necessarily satisfy ``xa <= x <= xb``. + tol : float, optional + Relative error in solution `xopt` acceptable for convergence. + full_output : bool, optional + If True, return all output args (xmin, fval, iter, + funcalls). + maxiter : int, optional + Maximum number of iterations in solution. + + Returns + ------- + xmin : ndarray + Optimum point. + fval : float + (Optional output) Optimum function value. + iter : int + (Optional output) Number of iterations. + funcalls : int + (Optional output) Number of objective function evaluations made. + + See also + -------- + minimize_scalar: Interface to minimization algorithms for scalar + univariate functions. See the 'Brent' `method` in particular. + + Notes + ----- + Uses inverse parabolic interpolation when possible to speed up + convergence of golden section method. + + Does not ensure that the minimum lies in the range specified by + `brack`. See `scipy.optimize.fminbound`. + + Examples + -------- + We illustrate the behaviour of the function when `brack` is of + size 2 and 3 respectively. In the case where `brack` is of the + form ``(xa, xb)``, we can see for the given values, the output does + not necessarily lie in the range ``(xa, xb)``. + + >>> def f(x): + ... 
return (x-1)**2 + + >>> from scipy import optimize + + >>> minimizer = optimize.brent(f, brack=(1, 2)) + >>> minimizer + 1 + >>> res = optimize.brent(f, brack=(-1, 0.5, 2), full_output=True) + >>> xmin, fval, iter, funcalls = res + >>> f(xmin), fval + (0.0, 0.0) + + """ + options = {'xtol': tol, + 'maxiter': maxiter} + res = _minimize_scalar_brent(func, brack, args, **options) + if full_output: + return res['x'], res['fun'], res['nit'], res['nfev'] + else: + return res['x'] + + +def _minimize_scalar_brent(func, brack=None, args=(), xtol=1.48e-8, + maxiter=500, disp=0, + **unknown_options): + """ + Options + ------- + maxiter : int + Maximum number of iterations to perform. + xtol : float + Relative error in solution `xopt` acceptable for convergence. + disp: int, optional + If non-zero, print messages. + 0 : no message printing. + 1 : non-convergence notification messages only. + 2 : print a message on convergence too. + 3 : print iteration results. + Notes + ----- + Uses inverse parabolic interpolation when possible to speed up + convergence of golden section method. + + """ + _check_unknown_options(unknown_options) + tol = xtol + if tol < 0: + raise ValueError('tolerance should be >= 0, got %r' % tol) + + brent = Brent(func=func, args=args, tol=tol, + full_output=True, maxiter=maxiter, disp=disp) + brent.set_bracket(brack) + brent.optimize() + x, fval, nit, nfev = brent.get_result(full_output=True) + + success = nit < maxiter and not (np.isnan(x) or np.isnan(fval)) + + if success: + message = ("\nOptimization terminated successfully;\n" + "The returned value satisfies the termination criteria\n" + f"(using xtol = {xtol} )") + else: + if nit >= maxiter: + message = "\nMaximum number of iterations exceeded" + if np.isnan(x) or np.isnan(fval): + message = f"{_status_message['nan']}" + + if disp: + _print_success_message_or_warn(not success, message) + + return OptimizeResult(fun=fval, x=x, nit=nit, nfev=nfev, + success=success, message=message) + + +def golden(func, args=(), brack=None, tol=_epsilon, + full_output=0, maxiter=5000): + """ + Return the minimizer of a function of one variable using the golden section + method. + + Given a function of one variable and a possible bracketing interval, + return a minimizer of the function isolated to a fractional precision of + tol. + + Parameters + ---------- + func : callable func(x,*args) + Objective function to minimize. + args : tuple, optional + Additional arguments (if present), passed to func. + brack : tuple, optional + Either a triple ``(xa, xb, xc)`` where ``xa < xb < xc`` and + ``func(xb) < func(xa) and func(xb) < func(xc)``, or a pair (xa, xb) + to be used as initial points for a downhill bracket search (see + `scipy.optimize.bracket`). + The minimizer ``x`` will not necessarily satisfy ``xa <= x <= xb``. + tol : float, optional + x tolerance stop criterion + full_output : bool, optional + If True, return optional outputs. + maxiter : int + Maximum number of iterations to perform. + + Returns + ------- + xmin : ndarray + Optimum point. + fval : float + (Optional output) Optimum function value. + funcalls : int + (Optional output) Number of objective function evaluations made. + + See also + -------- + minimize_scalar: Interface to minimization algorithms for scalar + univariate functions. See the 'Golden' `method` in particular. + + Notes + ----- + Uses analog of bisection method to decrease the bracketed + interval. + + Examples + -------- + We illustrate the behaviour of the function when `brack` is of + size 2 and 3, respectively. 
In the case where `brack` is of the + form (xa,xb), we can see for the given values, the output need + not necessarily lie in the range ``(xa, xb)``. + + >>> def f(x): + ... return (x-1)**2 + + >>> from scipy import optimize + + >>> minimizer = optimize.golden(f, brack=(1, 2)) + >>> minimizer + 1 + >>> res = optimize.golden(f, brack=(-1, 0.5, 2), full_output=True) + >>> xmin, fval, funcalls = res + >>> f(xmin), fval + (9.925165290385052e-18, 9.925165290385052e-18) + + """ + options = {'xtol': tol, 'maxiter': maxiter} + res = _minimize_scalar_golden(func, brack, args, **options) + if full_output: + return res['x'], res['fun'], res['nfev'] + else: + return res['x'] + + +def _minimize_scalar_golden(func, brack=None, args=(), + xtol=_epsilon, maxiter=5000, disp=0, + **unknown_options): + """ + Options + ------- + xtol : float + Relative error in solution `xopt` acceptable for convergence. + maxiter : int + Maximum number of iterations to perform. + disp: int, optional + If non-zero, print messages. + 0 : no message printing. + 1 : non-convergence notification messages only. + 2 : print a message on convergence too. + 3 : print iteration results. + """ + _check_unknown_options(unknown_options) + tol = xtol + if brack is None: + xa, xb, xc, fa, fb, fc, funcalls = bracket(func, args=args) + elif len(brack) == 2: + xa, xb, xc, fa, fb, fc, funcalls = bracket(func, xa=brack[0], + xb=brack[1], args=args) + elif len(brack) == 3: + xa, xb, xc = brack + if (xa > xc): # swap so xa < xc can be assumed + xc, xa = xa, xc + if not ((xa < xb) and (xb < xc)): + raise ValueError( + "Bracketing values (xa, xb, xc) do not" + " fulfill this requirement: (xa < xb) and (xb < xc)" + ) + fa = func(*((xa,) + args)) + fb = func(*((xb,) + args)) + fc = func(*((xc,) + args)) + if not ((fb < fa) and (fb < fc)): + raise ValueError( + "Bracketing values (xa, xb, xc) do not fulfill" + " this requirement: (f(xb) < f(xa)) and (f(xb) < f(xc))" + ) + funcalls = 3 + else: + raise ValueError("Bracketing interval must be length 2 or 3 sequence.") + + _gR = 0.61803399 # golden ratio conjugate: 2.0/(1.0+sqrt(5.0)) + _gC = 1.0 - _gR + x3 = xc + x0 = xa + if (np.abs(xc - xb) > np.abs(xb - xa)): + x1 = xb + x2 = xb + _gC * (xc - xb) + else: + x2 = xb + x1 = xb - _gC * (xb - xa) + f1 = func(*((x1,) + args)) + f2 = func(*((x2,) + args)) + funcalls += 2 + nit = 0 + + if disp > 2: + print(" ") + print(f"{'Func-count':^12} {'x':^12} {'f(x)': ^12}") + + for i in range(maxiter): + if np.abs(x3 - x0) <= tol * (np.abs(x1) + np.abs(x2)): + break + if (f2 < f1): + x0 = x1 + x1 = x2 + x2 = _gR * x1 + _gC * x3 + f1 = f2 + f2 = func(*((x2,) + args)) + else: + x3 = x2 + x2 = x1 + x1 = _gR * x2 + _gC * x0 + f2 = f1 + f1 = func(*((x1,) + args)) + funcalls += 1 + if disp > 2: + if (f1 < f2): + xmin, fval = x1, f1 + else: + xmin, fval = x2, f2 + print(f"{funcalls:^12g} {xmin:^12.6g} {fval:^12.6g}") + + nit += 1 + # end of iteration loop + + if (f1 < f2): + xmin = x1 + fval = f1 + else: + xmin = x2 + fval = f2 + + success = nit < maxiter and not (np.isnan(fval) or np.isnan(xmin)) + + if success: + message = ("\nOptimization terminated successfully;\n" + "The returned value satisfies the termination criteria\n" + f"(using xtol = {xtol} )") + else: + if nit >= maxiter: + message = "\nMaximum number of iterations exceeded" + if np.isnan(xmin) or np.isnan(fval): + message = f"{_status_message['nan']}" + + if disp: + _print_success_message_or_warn(not success, message) + + return OptimizeResult(fun=fval, nfev=funcalls, x=xmin, nit=nit, + success=success, 
+                          message=message)
+
+
+def bracket(func, xa=0.0, xb=1.0, args=(), grow_limit=110.0, maxiter=1000):
+    """
+    Bracket the minimum of a function.
+
+    Given a function and distinct initial points, search in the
+    downhill direction (as defined by the initial points) and return
+    three points that bracket the minimum of the function.
+
+    Parameters
+    ----------
+    func : callable f(x,*args)
+        Objective function to minimize.
+    xa, xb : float, optional
+        Initial points. Defaults `xa` to 0.0, and `xb` to 1.0.
+        A local minimum need not be contained within this interval.
+    args : tuple, optional
+        Additional arguments (if present), passed to `func`.
+    grow_limit : float, optional
+        Maximum grow limit. Defaults to 110.0
+    maxiter : int, optional
+        Maximum number of iterations to perform. Defaults to 1000.
+
+    Returns
+    -------
+    xa, xb, xc : float
+        Final points of the bracket.
+    fa, fb, fc : float
+        Objective function values at the bracket points.
+    funcalls : int
+        Number of function evaluations made.
+
+    Raises
+    ------
+    BracketError
+        If no valid bracket is found before the algorithm terminates.
+        See notes for conditions of a valid bracket.
+
+    Notes
+    -----
+    The algorithm attempts to find three strictly ordered points (i.e.
+    :math:`x_a < x_b < x_c` or :math:`x_c < x_b < x_a`) satisfying
+    :math:`f(x_b) ≤ f(x_a)` and :math:`f(x_b) ≤ f(x_c)`, where one of the
+    inequalities must be satisfied strictly and all :math:`x_i` must be
+    finite.
+
+    Examples
+    --------
+    This function can find a downward convex region of a function:
+
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.optimize import bracket
+    >>> def f(x):
+    ...     return 10*x**2 + 3*x + 5
+    >>> x = np.linspace(-2, 2)
+    >>> y = f(x)
+    >>> init_xa, init_xb = 0.1, 1
+    >>> xa, xb, xc, fa, fb, fc, funcalls = bracket(f, xa=init_xa, xb=init_xb)
+    >>> plt.axvline(x=init_xa, color="k", linestyle="--")
+    >>> plt.axvline(x=init_xb, color="k", linestyle="--")
+    >>> plt.plot(x, y, "-k")
+    >>> plt.plot(xa, fa, "bx")
+    >>> plt.plot(xb, fb, "rx")
+    >>> plt.plot(xc, fc, "bx")
+    >>> plt.show()
+
+    Note that both initial points were to the right of the minimum, and the
+    third point was found in the "downhill" direction: the direction
+    in which the function appeared to be decreasing (to the left).
+    The final points are strictly ordered, and the function value
+    at the middle point is less than the function values at the endpoints;
+    it follows that a minimum must lie within the bracket.
+
+    """
+    _gold = 1.618034  # golden ratio: (1.0+sqrt(5.0))/2.0
+    _verysmall_num = 1e-21
+    # convert to numpy floats if not already
+    xa, xb = np.asarray([xa, xb])
+    fa = func(*(xa,) + args)
+    fb = func(*(xb,) + args)
+    if (fa < fb):  # Switch so fa > fb
+        xa, xb = xb, xa
+        fa, fb = fb, fa
+    xc = xb + _gold * (xb - xa)
+    fc = func(*((xc,) + args))
+    funcalls = 3
+    iter = 0
+    while (fc < fb):
+        tmp1 = (xb - xa) * (fb - fc)
+        tmp2 = (xb - xc) * (fb - fa)
+        val = tmp2 - tmp1
+        if np.abs(val) < _verysmall_num:
+            denom = 2.0 * _verysmall_num
+        else:
+            denom = 2.0 * val
+        w = xb - ((xb - xc) * tmp2 - (xb - xa) * tmp1) / denom
+        wlim = xb + grow_limit * (xc - xb)
+        msg = ("No valid bracket was found before the iteration limit was "
+               "reached. 
Consider trying different initial points or " + "increasing `maxiter`.") + if iter > maxiter: + raise RuntimeError(msg) + iter += 1 + if (w - xc) * (xb - w) > 0.0: + fw = func(*((w,) + args)) + funcalls += 1 + if (fw < fc): + xa = xb + xb = w + fa = fb + fb = fw + break + elif (fw > fb): + xc = w + fc = fw + break + w = xc + _gold * (xc - xb) + fw = func(*((w,) + args)) + funcalls += 1 + elif (w - wlim)*(wlim - xc) >= 0.0: + w = wlim + fw = func(*((w,) + args)) + funcalls += 1 + elif (w - wlim)*(xc - w) > 0.0: + fw = func(*((w,) + args)) + funcalls += 1 + if (fw < fc): + xb = xc + xc = w + w = xc + _gold * (xc - xb) + fb = fc + fc = fw + fw = func(*((w,) + args)) + funcalls += 1 + else: + w = xc + _gold * (xc - xb) + fw = func(*((w,) + args)) + funcalls += 1 + xa = xb + xb = xc + xc = w + fa = fb + fb = fc + fc = fw + + # three conditions for a valid bracket + cond1 = (fb < fc and fb <= fa) or (fb < fa and fb <= fc) + cond2 = (xa < xb < xc or xc < xb < xa) + cond3 = np.isfinite(xa) and np.isfinite(xb) and np.isfinite(xc) + msg = ("The algorithm terminated without finding a valid bracket. " + "Consider trying different initial points.") + if not (cond1 and cond2 and cond3): + e = BracketError(msg) + e.data = (xa, xb, xc, fa, fb, fc, funcalls) + raise e + + return xa, xb, xc, fa, fb, fc, funcalls + + +class BracketError(RuntimeError): + pass + + +def _recover_from_bracket_error(solver, fun, bracket, args, **options): + # `bracket` was originally written without checking whether the resulting + # bracket is valid. `brent` and `golden` built on top of it without + # checking the returned bracket for validity, and their output can be + # incorrect without warning/error if the original bracket is invalid. + # gh-14858 noticed the problem, and the following is the desired + # behavior: + # - `scipy.optimize.bracket`, `scipy.optimize.brent`, and + # `scipy.optimize.golden` should raise an error if the bracket is + # invalid, as opposed to silently returning garbage + # - `scipy.optimize.minimize_scalar` should return with `success=False` + # and other information + # The changes that would be required to achieve this the traditional + # way (`return`ing all the required information from bracket all the way + # up to `minimizer_scalar`) are extensive and invasive. (See a6aa40d.) + # We can achieve the same thing by raising the error in `bracket`, but + # storing the information needed by `minimize_scalar` in the error object, + # and intercepting it here. + try: + res = solver(fun, bracket, args, **options) + except BracketError as e: + msg = str(e) + xa, xb, xc, fa, fb, fc, funcalls = e.data + xs, fs = [xa, xb, xc], [fa, fb, fc] + if np.any(np.isnan([xs, fs])): + x, fun = np.nan, np.nan + else: + imin = np.argmin(fs) + x, fun = xs[imin], fs[imin] + return OptimizeResult(fun=fun, nfev=funcalls, x=x, + nit=0, success=False, message=msg) + return res + + +def _line_for_search(x0, alpha, lower_bound, upper_bound): + """ + Given a parameter vector ``x0`` with length ``n`` and a direction + vector ``alpha`` with length ``n``, and lower and upper bounds on + each of the ``n`` parameters, what are the bounds on a scalar + ``l`` such that ``lower_bound <= x0 + alpha * l <= upper_bound``. + + + Parameters + ---------- + x0 : np.array. + The vector representing the current location. + Note ``np.shape(x0) == (n,)``. + alpha : np.array. + The vector representing the direction. + Note ``np.shape(alpha) == (n,)``. + lower_bound : np.array. + The lower bounds for each parameter in ``x0``. 
If the ``i``th + parameter in ``x0`` is unbounded below, then ``lower_bound[i]`` + should be ``-np.inf``. + Note ``np.shape(lower_bound) == (n,)``. + upper_bound : np.array. + The upper bounds for each parameter in ``x0``. If the ``i``th + parameter in ``x0`` is unbounded above, then ``upper_bound[i]`` + should be ``np.inf``. + Note ``np.shape(upper_bound) == (n,)``. + + Returns + ------- + res : tuple ``(lmin, lmax)`` + The bounds for ``l`` such that + ``lower_bound[i] <= x0[i] + alpha[i] * l <= upper_bound[i]`` + for all ``i``. + + """ + # get nonzero indices of alpha so we don't get any zero division errors. + # alpha will not be all zero, since it is called from _linesearch_powell + # where we have a check for this. + nonzero, = alpha.nonzero() + lower_bound, upper_bound = lower_bound[nonzero], upper_bound[nonzero] + x0, alpha = x0[nonzero], alpha[nonzero] + low = (lower_bound - x0) / alpha + high = (upper_bound - x0) / alpha + + # positive and negative indices + pos = alpha > 0 + + lmin_pos = np.where(pos, low, 0) + lmin_neg = np.where(pos, 0, high) + lmax_pos = np.where(pos, high, 0) + lmax_neg = np.where(pos, 0, low) + + lmin = np.max(lmin_pos + lmin_neg) + lmax = np.min(lmax_pos + lmax_neg) + + # if x0 is outside the bounds, then it is possible that there is + # no way to get back in the bounds for the parameters being updated + # with the current direction alpha. + # when this happens, lmax < lmin. + # If this is the case, then we can just return (0, 0) + return (lmin, lmax) if lmax >= lmin else (0, 0) + + +def _linesearch_powell(func, p, xi, tol=1e-3, + lower_bound=None, upper_bound=None, fval=None): + """Line-search algorithm using fminbound. + + Find the minimum of the function ``func(x0 + alpha*direc)``. + + lower_bound : np.array. + The lower bounds for each parameter in ``x0``. If the ``i``th + parameter in ``x0`` is unbounded below, then ``lower_bound[i]`` + should be ``-np.inf``. + Note ``np.shape(lower_bound) == (n,)``. + upper_bound : np.array. + The upper bounds for each parameter in ``x0``. If the ``i``th + parameter in ``x0`` is unbounded above, then ``upper_bound[i]`` + should be ``np.inf``. + Note ``np.shape(upper_bound) == (n,)``. + fval : number. + ``fval`` is equal to ``func(p)``, the idea is just to avoid + recomputing it so we can limit the ``fevals``. + + """ + def myfunc(alpha): + return func(p + alpha*xi) + + # if xi is zero, then don't optimize + if not np.any(xi): + return ((fval, p, xi) if fval is not None else (func(p), p, xi)) + elif lower_bound is None and upper_bound is None: + # non-bounded minimization + res = _recover_from_bracket_error(_minimize_scalar_brent, + myfunc, None, tuple(), xtol=tol) + alpha_min, fret = res.x, res.fun + xi = alpha_min * xi + return squeeze(fret), p + xi, xi + else: + bound = _line_for_search(p, xi, lower_bound, upper_bound) + if np.isneginf(bound[0]) and np.isposinf(bound[1]): + # equivalent to unbounded + return _linesearch_powell(func, p, xi, fval=fval, tol=tol) + elif not np.isneginf(bound[0]) and not np.isposinf(bound[1]): + # we can use a bounded scalar minimization + res = _minimize_scalar_bounded(myfunc, bound, xatol=tol / 100) + xi = res.x * xi + return squeeze(res.fun), p + xi, xi + else: + # only bounded on one side. use the tangent function to convert + # the infinity bound to a finite bound. The new bounded region + # is a subregion of the region bounded by -np.pi/2 and np.pi/2. 
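+        # (arctan maps the finite endpoint to a finite angle and the infinite
+        # endpoint to +/-pi/2, so minimizing ``myfunc(np.tan(x))`` over the
+        # transformed interval is equivalent to the original
+        # one-sided-bounded problem.)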
+        bound = np.arctan(bound[0]), np.arctan(bound[1])
+        res = _minimize_scalar_bounded(
+            lambda x: myfunc(np.tan(x)),
+            bound,
+            xatol=tol / 100)
+        xi = np.tan(res.x) * xi
+        return squeeze(res.fun), p + xi, xi
+
+
+def fmin_powell(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None,
+                maxfun=None, full_output=0, disp=1, retall=0, callback=None,
+                direc=None):
+    """
+    Minimize a function using modified Powell's method.
+
+    This method only uses function values, not derivatives.
+
+    Parameters
+    ----------
+    func : callable f(x,*args)
+        Objective function to be minimized.
+    x0 : ndarray
+        Initial guess.
+    args : tuple, optional
+        Extra arguments passed to func.
+    xtol : float, optional
+        Line-search error tolerance.
+    ftol : float, optional
+        Relative error in ``func(xopt)`` acceptable for convergence.
+    maxiter : int, optional
+        Maximum number of iterations to perform.
+    maxfun : int, optional
+        Maximum number of function evaluations to make.
+    full_output : bool, optional
+        If True, ``fopt``, ``xi``, ``direc``, ``iter``, ``funcalls``, and
+        ``warnflag`` are returned.
+    disp : bool, optional
+        If True, print convergence messages.
+    retall : bool, optional
+        If True, return a list of the solution at each iteration.
+    callback : callable, optional
+        An optional user-supplied function, called after each
+        iteration. Called as ``callback(xk)``, where ``xk`` is the
+        current parameter vector.
+    direc : ndarray, optional
+        Initial fitting step and parameter order set as an (N, N) array, where N
+        is the number of fitting parameters in `x0`. Defaults to step size 1.0
+        fitting all parameters simultaneously (``np.eye(N)``). To
+        prevent initial consideration of values in a step or to change initial
+        step size, set to 0 or desired step size in the Jth position in the Mth
+        block, where J is the position in `x0` and M is the desired evaluation
+        step, with steps being evaluated in index order. Step size and ordering
+        will change freely as minimization proceeds.
+
+    Returns
+    -------
+    xopt : ndarray
+        Parameter which minimizes `func`.
+    fopt : number
+        Value of function at minimum: ``fopt = func(xopt)``.
+    direc : ndarray
+        Current direction set.
+    iter : int
+        Number of iterations.
+    funcalls : int
+        Number of function calls made.
+    warnflag : int
+        Integer warning flag:
+            1 : Maximum number of function evaluations.
+            2 : Maximum number of iterations.
+            3 : NaN result encountered.
+            4 : The result is out of the provided bounds.
+    allvecs : list
+        List of solutions at each iteration.
+
+    See also
+    --------
+    minimize: Interface to unconstrained minimization algorithms for
+        multivariate functions. See the 'Powell' method in particular.
+
+    Notes
+    -----
+    Uses a modification of Powell's method to find the minimum of
+    a function of N variables. Powell's method is a conjugate
+    direction method.
+
+    The algorithm has two loops. The outer loop merely iterates over the inner
+    loop. The inner loop minimizes over each current direction in the direction
+    set. At the end of the inner loop, if certain conditions are met, the
+    direction that gave the largest decrease is dropped and replaced with the
+    difference between the current estimated x and the estimated x from the
+    beginning of the inner-loop.
+
+    The technical conditions for replacing the direction of greatest
+    increase amount to checking that
+
+    1. No further gain can be made along the direction of greatest increase
+       from that iteration.
+    2. 
The direction of greatest increase accounted for a large sufficient + fraction of the decrease in the function value from that iteration of + the inner loop. + + References + ---------- + Powell M.J.D. (1964) An efficient method for finding the minimum of a + function of several variables without calculating derivatives, + Computer Journal, 7 (2):155-162. + + Press W., Teukolsky S.A., Vetterling W.T., and Flannery B.P.: + Numerical Recipes (any edition), Cambridge University Press + + Examples + -------- + >>> def f(x): + ... return x**2 + + >>> from scipy import optimize + + >>> minimum = optimize.fmin_powell(f, -1) + Optimization terminated successfully. + Current function value: 0.000000 + Iterations: 2 + Function evaluations: 16 + >>> minimum + array(0.0) + + """ + opts = {'xtol': xtol, + 'ftol': ftol, + 'maxiter': maxiter, + 'maxfev': maxfun, + 'disp': disp, + 'direc': direc, + 'return_all': retall} + + callback = _wrap_callback(callback) + res = _minimize_powell(func, x0, args, callback=callback, **opts) + + if full_output: + retlist = (res['x'], res['fun'], res['direc'], res['nit'], + res['nfev'], res['status']) + if retall: + retlist += (res['allvecs'], ) + return retlist + else: + if retall: + return res['x'], res['allvecs'] + else: + return res['x'] + + +def _minimize_powell(func, x0, args=(), callback=None, bounds=None, + xtol=1e-4, ftol=1e-4, maxiter=None, maxfev=None, + disp=False, direc=None, return_all=False, + **unknown_options): + """ + Minimization of scalar function of one or more variables using the + modified Powell algorithm. + + Parameters + ---------- + fun : callable + The objective function to be minimized. + + ``fun(x, *args) -> float`` + + where ``x`` is a 1-D array with shape (n,) and ``args`` + is a tuple of the fixed parameters needed to completely + specify the function. + x0 : ndarray, shape (n,) + Initial guess. Array of real elements of size (n,), + where ``n`` is the number of independent variables. + args : tuple, optional + Extra arguments passed to the objective function and its + derivatives (`fun`, `jac` and `hess` functions). + method : str or callable, optional + The present documentation is specific to ``method='powell'``, but other + options are available. See documentation for `scipy.optimize.minimize`. + bounds : sequence or `Bounds`, optional + Bounds on decision variables. There are two ways to specify the bounds: + + 1. Instance of `Bounds` class. + 2. Sequence of ``(min, max)`` pairs for each element in `x`. None + is used to specify no bound. + + If bounds are not provided, then an unbounded line search will be used. + If bounds are provided and the initial guess is within the bounds, then + every function evaluation throughout the minimization procedure will be + within the bounds. If bounds are provided, the initial guess is outside + the bounds, and `direc` is full rank (or left to default), then some + function evaluations during the first iteration may be outside the + bounds, but every function evaluation after the first iteration will be + within the bounds. If `direc` is not full rank, then some parameters + may not be optimized and the solution is not guaranteed to be within + the bounds. + + options : dict, optional + A dictionary of solver options. All methods accept the following + generic options: + + maxiter : int + Maximum number of iterations to perform. Depending on the + method each iteration may use several function evaluations. + disp : bool + Set to True to print convergence messages. 
+ + See method-specific options for ``method='powell'`` below. + callback : callable, optional + Called after each iteration. The signature is: + + ``callback(xk)`` + + where ``xk`` is the current parameter vector. + + Returns + ------- + res : OptimizeResult + The optimization result represented as a ``OptimizeResult`` object. + Important attributes are: ``x`` the solution array, ``success`` a + Boolean flag indicating if the optimizer exited successfully and + ``message`` which describes the cause of the termination. See + `OptimizeResult` for a description of other attributes. + + Options + ------- + disp : bool + Set to True to print convergence messages. + xtol : float + Relative error in solution `xopt` acceptable for convergence. + ftol : float + Relative error in ``fun(xopt)`` acceptable for convergence. + maxiter, maxfev : int + Maximum allowed number of iterations and function evaluations. + Will default to ``N*1000``, where ``N`` is the number of + variables, if neither `maxiter` or `maxfev` is set. If both + `maxiter` and `maxfev` are set, minimization will stop at the + first reached. + direc : ndarray + Initial set of direction vectors for the Powell method. + return_all : bool, optional + Set to True to return a list of the best solution at each of the + iterations. + """ + _check_unknown_options(unknown_options) + maxfun = maxfev + retall = return_all + + x = asarray(x0).flatten() + if retall: + allvecs = [x] + N = len(x) + # If neither are set, then set both to default + if maxiter is None and maxfun is None: + maxiter = N * 1000 + maxfun = N * 1000 + elif maxiter is None: + # Convert remaining Nones, to np.inf, unless the other is np.inf, in + # which case use the default to avoid unbounded iteration + if maxfun == np.inf: + maxiter = N * 1000 + else: + maxiter = np.inf + elif maxfun is None: + if maxiter == np.inf: + maxfun = N * 1000 + else: + maxfun = np.inf + + # we need to use a mutable object here that we can update in the + # wrapper function + fcalls, func = _wrap_scalar_function_maxfun_validation(func, args, maxfun) + + if direc is None: + direc = eye(N, dtype=float) + else: + direc = asarray(direc, dtype=float) + if np.linalg.matrix_rank(direc) != direc.shape[0]: + warnings.warn("direc input is not full rank, some parameters may " + "not be optimized", + OptimizeWarning, stacklevel=3) + + if bounds is None: + # don't make these arrays of all +/- inf. because + # _linesearch_powell will do an unnecessary check of all the elements. + # just keep them None, _linesearch_powell will not have to check + # all the elements. + lower_bound, upper_bound = None, None + else: + # bounds is standardized in _minimize.py. 
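+        # `bounds.lb` and `bounds.ub` are therefore expected to be length-N
+        # arrays here, with -np.inf/np.inf marking unbounded components,
+        # which is what `_line_for_search` and `_linesearch_powell` assume.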
+ lower_bound, upper_bound = bounds.lb, bounds.ub + if np.any(lower_bound > x0) or np.any(x0 > upper_bound): + warnings.warn("Initial guess is not within the specified bounds", + OptimizeWarning, stacklevel=3) + + fval = squeeze(func(x)) + x1 = x.copy() + iter = 0 + while True: + try: + fx = fval + bigind = 0 + delta = 0.0 + for i in range(N): + direc1 = direc[i] + fx2 = fval + fval, x, direc1 = _linesearch_powell(func, x, direc1, + tol=xtol * 100, + lower_bound=lower_bound, + upper_bound=upper_bound, + fval=fval) + if (fx2 - fval) > delta: + delta = fx2 - fval + bigind = i + iter += 1 + if retall: + allvecs.append(x) + intermediate_result = OptimizeResult(x=x, fun=fval) + if _call_callback_maybe_halt(callback, intermediate_result): + break + bnd = ftol * (np.abs(fx) + np.abs(fval)) + 1e-20 + if 2.0 * (fx - fval) <= bnd: + break + if fcalls[0] >= maxfun: + break + if iter >= maxiter: + break + if np.isnan(fx) and np.isnan(fval): + # Ended up in a nan-region: bail out + break + + # Construct the extrapolated point + direc1 = x - x1 + x1 = x.copy() + # make sure that we don't go outside the bounds when extrapolating + if lower_bound is None and upper_bound is None: + lmax = 1 + else: + _, lmax = _line_for_search(x, direc1, lower_bound, upper_bound) + x2 = x + min(lmax, 1) * direc1 + fx2 = squeeze(func(x2)) + + if (fx > fx2): + t = 2.0*(fx + fx2 - 2.0*fval) + temp = (fx - fval - delta) + t *= temp*temp + temp = fx - fx2 + t -= delta*temp*temp + if t < 0.0: + fval, x, direc1 = _linesearch_powell( + func, x, direc1, + tol=xtol * 100, + lower_bound=lower_bound, + upper_bound=upper_bound, + fval=fval + ) + if np.any(direc1): + direc[bigind] = direc[-1] + direc[-1] = direc1 + except _MaxFuncCallError: + break + + warnflag = 0 + msg = _status_message['success'] + # out of bounds is more urgent than exceeding function evals or iters, + # but I don't want to cause inconsistencies by changing the + # established warning flags for maxfev and maxiter, so the out of bounds + # warning flag becomes 3, but is checked for first. + if bounds and (np.any(lower_bound > x) or np.any(x > upper_bound)): + warnflag = 4 + msg = _status_message['out_of_bounds'] + elif fcalls[0] >= maxfun: + warnflag = 1 + msg = _status_message['maxfev'] + elif iter >= maxiter: + warnflag = 2 + msg = _status_message['maxiter'] + elif np.isnan(fval) or np.isnan(x).any(): + warnflag = 3 + msg = _status_message['nan'] + + if disp: + _print_success_message_or_warn(warnflag, msg, RuntimeWarning) + print(" Current function value: %f" % fval) + print(" Iterations: %d" % iter) + print(" Function evaluations: %d" % fcalls[0]) + + result = OptimizeResult(fun=fval, direc=direc, nit=iter, nfev=fcalls[0], + status=warnflag, success=(warnflag == 0), + message=msg, x=x) + if retall: + result['allvecs'] = allvecs + return result + + +def _endprint(x, flag, fval, maxfun, xtol, disp): + if flag == 0: + if disp > 1: + print("\nOptimization terminated successfully;\n" + "The returned value satisfies the termination criteria\n" + "(using xtol = ", xtol, ")") + return + + if flag == 1: + msg = ("\nMaximum number of function evaluations exceeded --- " + "increase maxfun argument.\n") + elif flag == 2: + msg = "\n{}".format(_status_message['nan']) + + _print_success_message_or_warn(flag, msg) + return + + +def brute(func, ranges, args=(), Ns=20, full_output=0, finish=fmin, + disp=False, workers=1): + """Minimize a function over a given range by brute force. 
+ + Uses the "brute force" method, i.e., computes the function's value + at each point of a multidimensional grid of points, to find the global + minimum of the function. + + The function is evaluated everywhere in the range with the datatype of the + first call to the function, as enforced by the ``vectorize`` NumPy + function. The value and type of the function evaluation returned when + ``full_output=True`` are affected in addition by the ``finish`` argument + (see Notes). + + The brute force approach is inefficient because the number of grid points + increases exponentially - the number of grid points to evaluate is + ``Ns ** len(x)``. Consequently, even with coarse grid spacing, even + moderately sized problems can take a long time to run, and/or run into + memory limitations. + + Parameters + ---------- + func : callable + The objective function to be minimized. Must be in the + form ``f(x, *args)``, where ``x`` is the argument in + the form of a 1-D array and ``args`` is a tuple of any + additional fixed parameters needed to completely specify + the function. + ranges : tuple + Each component of the `ranges` tuple must be either a + "slice object" or a range tuple of the form ``(low, high)``. + The program uses these to create the grid of points on which + the objective function will be computed. See `Note 2` for + more detail. + args : tuple, optional + Any additional fixed parameters needed to completely specify + the function. + Ns : int, optional + Number of grid points along the axes, if not otherwise + specified. See `Note2`. + full_output : bool, optional + If True, return the evaluation grid and the objective function's + values on it. + finish : callable, optional + An optimization function that is called with the result of brute force + minimization as initial guess. `finish` should take `func` and + the initial guess as positional arguments, and take `args` as + keyword arguments. It may additionally take `full_output` + and/or `disp` as keyword arguments. Use None if no "polishing" + function is to be used. See Notes for more details. + disp : bool, optional + Set to True to print convergence messages from the `finish` callable. + workers : int or map-like callable, optional + If `workers` is an int the grid is subdivided into `workers` + sections and evaluated in parallel (uses + `multiprocessing.Pool `). + Supply `-1` to use all cores available to the Process. + Alternatively supply a map-like callable, such as + `multiprocessing.Pool.map` for evaluating the grid in parallel. + This evaluation is carried out as ``workers(func, iterable)``. + Requires that `func` be pickleable. + + .. versionadded:: 1.3.0 + + Returns + ------- + x0 : ndarray + A 1-D array containing the coordinates of a point at which the + objective function had its minimum value. (See `Note 1` for + which point is returned.) + fval : float + Function value at the point `x0`. (Returned when `full_output` is + True.) + grid : tuple + Representation of the evaluation grid. It has the same + length as `x0`. (Returned when `full_output` is True.) + Jout : ndarray + Function values at each point of the evaluation + grid, i.e., ``Jout = func(*grid)``. (Returned + when `full_output` is True.) + + See Also + -------- + basinhopping, differential_evolution + + Notes + ----- + *Note 1*: The program finds the gridpoint at which the lowest value + of the objective function occurs. If `finish` is None, that is the + point returned. 
When the global minimum occurs within (or not very far + outside) the grid's boundaries, and the grid is fine enough, that + point will be in the neighborhood of the global minimum. + + However, users often employ some other optimization program to + "polish" the gridpoint values, i.e., to seek a more precise + (local) minimum near `brute's` best gridpoint. + The `brute` function's `finish` option provides a convenient way to do + that. Any polishing program used must take `brute's` output as its + initial guess as a positional argument, and take `brute's` input values + for `args` as keyword arguments, otherwise an error will be raised. + It may additionally take `full_output` and/or `disp` as keyword arguments. + + `brute` assumes that the `finish` function returns either an + `OptimizeResult` object or a tuple in the form: + ``(xmin, Jmin, ... , statuscode)``, where ``xmin`` is the minimizing + value of the argument, ``Jmin`` is the minimum value of the objective + function, "..." may be some other returned values (which are not used + by `brute`), and ``statuscode`` is the status code of the `finish` program. + + Note that when `finish` is not None, the values returned are those + of the `finish` program, *not* the gridpoint ones. Consequently, + while `brute` confines its search to the input grid points, + the `finish` program's results usually will not coincide with any + gridpoint, and may fall outside the grid's boundary. Thus, if a + minimum only needs to be found over the provided grid points, make + sure to pass in `finish=None`. + + *Note 2*: The grid of points is a `numpy.mgrid` object. + For `brute` the `ranges` and `Ns` inputs have the following effect. + Each component of the `ranges` tuple can be either a slice object or a + two-tuple giving a range of values, such as (0, 5). If the component is a + slice object, `brute` uses it directly. If the component is a two-tuple + range, `brute` internally converts it to a slice object that interpolates + `Ns` points from its low-value to its high-value, inclusive. + + Examples + -------- + We illustrate the use of `brute` to seek the global minimum of a function + of two variables that is given as the sum of a positive-definite + quadratic and two deep "Gaussian-shaped" craters. Specifically, define + the objective function `f` as the sum of three other functions, + ``f = f1 + f2 + f3``. We suppose each of these has a signature + ``(z, *params)``, where ``z = (x, y)``, and ``params`` and the functions + are as defined below. + + >>> import numpy as np + >>> params = (2, 3, 7, 8, 9, 10, 44, -1, 2, 26, 1, -2, 0.5) + >>> def f1(z, *params): + ... x, y = z + ... a, b, c, d, e, f, g, h, i, j, k, l, scale = params + ... return (a * x**2 + b * x * y + c * y**2 + d*x + e*y + f) + + >>> def f2(z, *params): + ... x, y = z + ... a, b, c, d, e, f, g, h, i, j, k, l, scale = params + ... return (-g*np.exp(-((x-h)**2 + (y-i)**2) / scale)) + + >>> def f3(z, *params): + ... x, y = z + ... a, b, c, d, e, f, g, h, i, j, k, l, scale = params + ... return (-j*np.exp(-((x-k)**2 + (y-l)**2) / scale)) + + >>> def f(z, *params): + ... return f1(z, *params) + f2(z, *params) + f3(z, *params) + + Thus, the objective function may have local minima near the minimum + of each of the three functions of which it is composed. 
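+
+    (As a quick check, the best gridpoint alone can be obtained by passing
+    ``finish=None``; the coarse grid spacing below is arbitrary, so the
+    gridpoint it returns is not shown here.)
+
+    >>> from scipy import optimize
+    >>> coarse_ranges = (slice(-4, 4, 1.0), slice(-4, 4, 1.0))
+    >>> coarse_xmin = optimize.brute(f, coarse_ranges, args=params, finish=None)
+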
To + use `fmin` to polish its gridpoint result, we may then continue as + follows: + + >>> rranges = (slice(-4, 4, 0.25), slice(-4, 4, 0.25)) + >>> from scipy import optimize + >>> resbrute = optimize.brute(f, rranges, args=params, full_output=True, + ... finish=optimize.fmin) + >>> resbrute[0] # global minimum + array([-1.05665192, 1.80834843]) + >>> resbrute[1] # function value at global minimum + -3.4085818767 + + Note that if `finish` had been set to None, we would have gotten the + gridpoint [-1.0 1.75] where the rounded function value is -2.892. + + """ + N = len(ranges) + if N > 40: + raise ValueError("Brute Force not possible with more " + "than 40 variables.") + lrange = list(ranges) + for k in range(N): + if not isinstance(lrange[k], slice): + if len(lrange[k]) < 3: + lrange[k] = tuple(lrange[k]) + (complex(Ns),) + lrange[k] = slice(*lrange[k]) + if (N == 1): + lrange = lrange[0] + + grid = np.mgrid[lrange] + + # obtain an array of parameters that is iterable by a map-like callable + inpt_shape = grid.shape + if (N > 1): + grid = np.reshape(grid, (inpt_shape[0], np.prod(inpt_shape[1:]))).T + + if not np.iterable(args): + args = (args,) + + wrapped_func = _Brute_Wrapper(func, args) + + # iterate over input arrays, possibly in parallel + with MapWrapper(pool=workers) as mapper: + Jout = np.array(list(mapper(wrapped_func, grid))) + if (N == 1): + grid = (grid,) + Jout = np.squeeze(Jout) + elif (N > 1): + Jout = np.reshape(Jout, inpt_shape[1:]) + grid = np.reshape(grid.T, inpt_shape) + + Nshape = shape(Jout) + + indx = argmin(Jout.ravel(), axis=-1) + Nindx = np.empty(N, int) + xmin = np.empty(N, float) + for k in range(N - 1, -1, -1): + thisN = Nshape[k] + Nindx[k] = indx % Nshape[k] + indx = indx // thisN + for k in range(N): + xmin[k] = grid[k][tuple(Nindx)] + + Jmin = Jout[tuple(Nindx)] + if (N == 1): + grid = grid[0] + xmin = xmin[0] + + if callable(finish): + # set up kwargs for `finish` function + finish_args = _getfullargspec(finish).args + finish_kwargs = dict() + if 'full_output' in finish_args: + finish_kwargs['full_output'] = 1 + if 'disp' in finish_args: + finish_kwargs['disp'] = disp + elif 'options' in finish_args: + # pass 'disp' as `options` + # (e.g., if `finish` is `minimize`) + finish_kwargs['options'] = {'disp': disp} + + # run minimizer + res = finish(func, xmin, args=args, **finish_kwargs) + + if isinstance(res, OptimizeResult): + xmin = res.x + Jmin = res.fun + success = res.success + else: + xmin = res[0] + Jmin = res[1] + success = res[-1] == 0 + if not success: + if disp: + warnings.warn("Either final optimization did not succeed or `finish` " + "does not return `statuscode` as its last argument.", + RuntimeWarning, stacklevel=2) + + if full_output: + return xmin, Jmin, grid, Jout + else: + return xmin + + +class _Brute_Wrapper: + """ + Object to wrap user cost function for optimize.brute, allowing picklability + """ + + def __init__(self, f, args): + self.f = f + self.args = [] if args is None else args + + def __call__(self, x): + # flatten needed for one dimensional case. + return self.f(np.asarray(x).flatten(), *self.args) + + +def show_options(solver=None, method=None, disp=True): + """ + Show documentation for additional options of optimization solvers. + + These are method-specific options that can be supplied through the + ``options`` dict. + + Parameters + ---------- + solver : str + Type of optimization solver. One of 'minimize', 'minimize_scalar', + 'root', 'root_scalar', 'linprog', or 'quadratic_assignment'. 
+ method : str, optional + If not given, shows all methods of the specified solver. Otherwise, + show only the options for the specified method. Valid values + corresponds to methods' names of respective solver (e.g., 'BFGS' for + 'minimize'). + disp : bool, optional + Whether to print the result rather than returning it. + + Returns + ------- + text + Either None (for disp=True) or the text string (disp=False) + + Notes + ----- + The solver-specific methods are: + + `scipy.optimize.minimize` + + - :ref:`Nelder-Mead ` + - :ref:`Powell ` + - :ref:`CG ` + - :ref:`BFGS ` + - :ref:`Newton-CG ` + - :ref:`L-BFGS-B ` + - :ref:`TNC ` + - :ref:`COBYLA ` + - :ref:`SLSQP ` + - :ref:`dogleg ` + - :ref:`trust-ncg ` + + `scipy.optimize.root` + + - :ref:`hybr ` + - :ref:`lm ` + - :ref:`broyden1 ` + - :ref:`broyden2 ` + - :ref:`anderson ` + - :ref:`linearmixing ` + - :ref:`diagbroyden ` + - :ref:`excitingmixing ` + - :ref:`krylov ` + - :ref:`df-sane ` + + `scipy.optimize.minimize_scalar` + + - :ref:`brent ` + - :ref:`golden ` + - :ref:`bounded ` + + `scipy.optimize.root_scalar` + + - :ref:`bisect ` + - :ref:`brentq ` + - :ref:`brenth ` + - :ref:`ridder ` + - :ref:`toms748 ` + - :ref:`newton ` + - :ref:`secant ` + - :ref:`halley ` + + `scipy.optimize.linprog` + + - :ref:`simplex ` + - :ref:`interior-point ` + - :ref:`revised simplex ` + - :ref:`highs ` + - :ref:`highs-ds ` + - :ref:`highs-ipm ` + + `scipy.optimize.quadratic_assignment` + + - :ref:`faq ` + - :ref:`2opt ` + + Examples + -------- + We can print documentations of a solver in stdout: + + >>> from scipy.optimize import show_options + >>> show_options(solver="minimize") + ... + + Specifying a method is possible: + + >>> show_options(solver="minimize", method="Nelder-Mead") + ... + + We can also get the documentations as a string: + + >>> show_options(solver="minimize", method="Nelder-Mead", disp=False) + Minimization of scalar function of one or more variables using the ... + + """ + import textwrap + + doc_routines = { + 'minimize': ( + ('bfgs', 'scipy.optimize._optimize._minimize_bfgs'), + ('cg', 'scipy.optimize._optimize._minimize_cg'), + ('cobyla', 'scipy.optimize._cobyla_py._minimize_cobyla'), + ('dogleg', 'scipy.optimize._trustregion_dogleg._minimize_dogleg'), + ('l-bfgs-b', 'scipy.optimize._lbfgsb_py._minimize_lbfgsb'), + ('nelder-mead', 'scipy.optimize._optimize._minimize_neldermead'), + ('newton-cg', 'scipy.optimize._optimize._minimize_newtoncg'), + ('powell', 'scipy.optimize._optimize._minimize_powell'), + ('slsqp', 'scipy.optimize._slsqp_py._minimize_slsqp'), + ('tnc', 'scipy.optimize._tnc._minimize_tnc'), + ('trust-ncg', + 'scipy.optimize._trustregion_ncg._minimize_trust_ncg'), + ('trust-constr', + 'scipy.optimize._trustregion_constr.' 
+ '_minimize_trustregion_constr'), + ('trust-exact', + 'scipy.optimize._trustregion_exact._minimize_trustregion_exact'), + ('trust-krylov', + 'scipy.optimize._trustregion_krylov._minimize_trust_krylov'), + ), + 'root': ( + ('hybr', 'scipy.optimize._minpack_py._root_hybr'), + ('lm', 'scipy.optimize._root._root_leastsq'), + ('broyden1', 'scipy.optimize._root._root_broyden1_doc'), + ('broyden2', 'scipy.optimize._root._root_broyden2_doc'), + ('anderson', 'scipy.optimize._root._root_anderson_doc'), + ('diagbroyden', 'scipy.optimize._root._root_diagbroyden_doc'), + ('excitingmixing', 'scipy.optimize._root._root_excitingmixing_doc'), + ('linearmixing', 'scipy.optimize._root._root_linearmixing_doc'), + ('krylov', 'scipy.optimize._root._root_krylov_doc'), + ('df-sane', 'scipy.optimize._spectral._root_df_sane'), + ), + 'root_scalar': ( + ('bisect', 'scipy.optimize._root_scalar._root_scalar_bisect_doc'), + ('brentq', 'scipy.optimize._root_scalar._root_scalar_brentq_doc'), + ('brenth', 'scipy.optimize._root_scalar._root_scalar_brenth_doc'), + ('ridder', 'scipy.optimize._root_scalar._root_scalar_ridder_doc'), + ('toms748', 'scipy.optimize._root_scalar._root_scalar_toms748_doc'), + ('secant', 'scipy.optimize._root_scalar._root_scalar_secant_doc'), + ('newton', 'scipy.optimize._root_scalar._root_scalar_newton_doc'), + ('halley', 'scipy.optimize._root_scalar._root_scalar_halley_doc'), + ), + 'linprog': ( + ('simplex', 'scipy.optimize._linprog._linprog_simplex_doc'), + ('interior-point', 'scipy.optimize._linprog._linprog_ip_doc'), + ('revised simplex', 'scipy.optimize._linprog._linprog_rs_doc'), + ('highs-ipm', 'scipy.optimize._linprog._linprog_highs_ipm_doc'), + ('highs-ds', 'scipy.optimize._linprog._linprog_highs_ds_doc'), + ('highs', 'scipy.optimize._linprog._linprog_highs_doc'), + ), + 'quadratic_assignment': ( + ('faq', 'scipy.optimize._qap._quadratic_assignment_faq'), + ('2opt', 'scipy.optimize._qap._quadratic_assignment_2opt'), + ), + 'minimize_scalar': ( + ('brent', 'scipy.optimize._optimize._minimize_scalar_brent'), + ('bounded', 'scipy.optimize._optimize._minimize_scalar_bounded'), + ('golden', 'scipy.optimize._optimize._minimize_scalar_golden'), + ), + } + + if solver is None: + text = ["\n\n\n========\n", "minimize\n", "========\n"] + text.append(show_options('minimize', disp=False)) + text.extend(["\n\n===============\n", "minimize_scalar\n", + "===============\n"]) + text.append(show_options('minimize_scalar', disp=False)) + text.extend(["\n\n\n====\n", "root\n", + "====\n"]) + text.append(show_options('root', disp=False)) + text.extend(['\n\n\n=======\n', 'linprog\n', + '=======\n']) + text.append(show_options('linprog', disp=False)) + text = "".join(text) + else: + solver = solver.lower() + if solver not in doc_routines: + raise ValueError(f'Unknown solver {solver!r}') + + if method is None: + text = [] + for name, _ in doc_routines[solver]: + text.extend(["\n\n" + name, "\n" + "="*len(name) + "\n\n"]) + text.append(show_options(solver, name, disp=False)) + text = "".join(text) + else: + method = method.lower() + methods = dict(doc_routines[solver]) + if method not in methods: + raise ValueError(f"Unknown method {method!r}") + name = methods[method] + + # Import function object + parts = name.split('.') + mod_name = ".".join(parts[:-1]) + __import__(mod_name) + obj = getattr(sys.modules[mod_name], parts[-1]) + + # Get doc + doc = obj.__doc__ + if doc is not None: + text = textwrap.dedent(doc).strip() + else: + text = "" + + if disp: + print(text) + return + else: + return text diff --git 
a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_pava_pybind.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_pava_pybind.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..f28cf6012007f56f5e8d1c9d057f042b4465aec6 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_pava_pybind.cpython-310-x86_64-linux-gnu.so differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_root.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_root.py new file mode 100644 index 0000000000000000000000000000000000000000..613ccd82a32ef08f90a65b92ea61b597e8d8113f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_root.py @@ -0,0 +1,711 @@ +""" +Unified interfaces to root finding algorithms. + +Functions +--------- +- root : find a root of a vector function. +""" +__all__ = ['root'] + +import numpy as np + +from warnings import warn + +from ._optimize import MemoizeJac, OptimizeResult, _check_unknown_options +from ._minpack_py import _root_hybr, leastsq +from ._spectral import _root_df_sane +from . import _nonlin as nonlin + + +ROOT_METHODS = ['hybr', 'lm', 'broyden1', 'broyden2', 'anderson', + 'linearmixing', 'diagbroyden', 'excitingmixing', 'krylov', + 'df-sane'] + + +def root(fun, x0, args=(), method='hybr', jac=None, tol=None, callback=None, + options=None): + r""" + Find a root of a vector function. + + Parameters + ---------- + fun : callable + A vector function to find a root of. + x0 : ndarray + Initial guess. + args : tuple, optional + Extra arguments passed to the objective function and its Jacobian. + method : str, optional + Type of solver. Should be one of + + - 'hybr' :ref:`(see here) ` + - 'lm' :ref:`(see here) ` + - 'broyden1' :ref:`(see here) ` + - 'broyden2' :ref:`(see here) ` + - 'anderson' :ref:`(see here) ` + - 'linearmixing' :ref:`(see here) ` + - 'diagbroyden' :ref:`(see here) ` + - 'excitingmixing' :ref:`(see here) ` + - 'krylov' :ref:`(see here) ` + - 'df-sane' :ref:`(see here) ` + + jac : bool or callable, optional + If `jac` is a Boolean and is True, `fun` is assumed to return the + value of Jacobian along with the objective function. If False, the + Jacobian will be estimated numerically. + `jac` can also be a callable returning the Jacobian of `fun`. In + this case, it must accept the same arguments as `fun`. + tol : float, optional + Tolerance for termination. For detailed control, use solver-specific + options. + callback : function, optional + Optional callback function. It is called on every iteration as + ``callback(x, f)`` where `x` is the current solution and `f` + the corresponding residual. For all methods but 'hybr' and 'lm'. + options : dict, optional + A dictionary of solver options. E.g., `xtol` or `maxiter`, see + :obj:`show_options()` for details. + + Returns + ------- + sol : OptimizeResult + The solution represented as a ``OptimizeResult`` object. + Important attributes are: ``x`` the solution array, ``success`` a + Boolean flag indicating if the algorithm exited successfully and + ``message`` which describes the cause of the termination. See + `OptimizeResult` for a description of other attributes. + + See also + -------- + show_options : Additional options accepted by the solvers + + Notes + ----- + This section describes the available solvers that can be selected by the + 'method' parameter. The default method is *hybr*. 
+
+    Method *hybr* uses a modification of the Powell hybrid method as
+    implemented in MINPACK [1]_.
+
+    Method *lm* solves the system of nonlinear equations in a least squares
+    sense using a modification of the Levenberg-Marquardt algorithm as
+    implemented in MINPACK [1]_.
+
+    Method *df-sane* is a derivative-free spectral method. [3]_
+
+    Methods *broyden1*, *broyden2*, *anderson*, *linearmixing*,
+    *diagbroyden*, *excitingmixing*, *krylov* are inexact Newton methods,
+    with backtracking or full line searches [2]_. Each method corresponds
+    to a particular Jacobian approximation.
+
+    - Method *broyden1* uses Broyden's first Jacobian approximation, it is
+      known as Broyden's good method.
+    - Method *broyden2* uses Broyden's second Jacobian approximation, it
+      is known as Broyden's bad method.
+    - Method *anderson* uses (extended) Anderson mixing.
+    - Method *krylov* uses Krylov approximation for inverse Jacobian. It
+      is suitable for large-scale problems.
+    - Method *diagbroyden* uses diagonal Broyden Jacobian approximation.
+    - Method *linearmixing* uses a scalar Jacobian approximation.
+    - Method *excitingmixing* uses a tuned diagonal Jacobian
+      approximation.
+
+    .. warning::
+
+        The algorithms implemented for methods *diagbroyden*,
+        *linearmixing* and *excitingmixing* may be useful for specific
+        problems, but whether they will work may depend strongly on the
+        problem.
+
+    .. versionadded:: 0.11.0
+
+    References
+    ----------
+    .. [1] More, Jorge J., Burton S. Garbow, and Kenneth E. Hillstrom.
+       1980. User Guide for MINPACK-1.
+    .. [2] C. T. Kelley. 1995. Iterative Methods for Linear and Nonlinear
+       Equations. Society for Industrial and Applied Mathematics.
+
+    .. [3] W. La Cruz, J.M. Martinez, M. Raydan. Math. Comp. 75, 1429 (2006).
+
+    Examples
+    --------
+    The following functions define a system of nonlinear equations and its
+    jacobian.
+
+    >>> import numpy as np
+    >>> def fun(x):
+    ...     return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
+    ...             0.5 * (x[1] - x[0])**3 + x[1]]
+
+    >>> def jac(x):
+    ...     return np.array([[1 + 1.5 * (x[0] - x[1])**2,
+    ...                       -1.5 * (x[0] - x[1])**2],
+    ...                      [-1.5 * (x[1] - x[0])**2,
+    ...                       1 + 1.5 * (x[1] - x[0])**2]])
+
+    A solution can be obtained as follows.
+
+    >>> from scipy import optimize
+    >>> sol = optimize.root(fun, [0, 0], jac=jac, method='hybr')
+    >>> sol.x
+    array([ 0.8411639, 0.1588361])
+
+    **Large problem**
+
+    Suppose that we needed to solve the following integrodifferential
+    equation on the square :math:`[0,1]\times[0,1]`:
+
+    .. math::
+
+       \nabla^2 P = 10 \left(\int_0^1\int_0^1\cosh(P)\,dx\,dy\right)^2
+
+    with :math:`P(x,1) = 1` and :math:`P=0` elsewhere on the boundary of
+    the square.
+
+    The solution can be found using the ``method='krylov'`` solver:
+
+    >>> from scipy import optimize
+    >>> # parameters
+    >>> nx, ny = 75, 75
+    >>> hx, hy = 1./(nx-1), 1./(ny-1)
+
+    >>> P_left, P_right = 0, 0
+    >>> P_top, P_bottom = 1, 0
+
+    >>> def residual(P):
+    ...    d2x = np.zeros_like(P)
+    ...    d2y = np.zeros_like(P)
+    ...
+    ...    d2x[1:-1] = (P[2:] - 2*P[1:-1] + P[:-2]) / hx/hx
+    ...    d2x[0] = (P[1] - 2*P[0] + P_left)/hx/hx
+    ...    d2x[-1] = (P_right - 2*P[-1] + P[-2])/hx/hx
+    ...
+    ...    d2y[:,1:-1] = (P[:,2:] - 2*P[:,1:-1] + P[:,:-2])/hy/hy
+    ...    d2y[:,0] = (P[:,1] - 2*P[:,0] + P_bottom)/hy/hy
+    ...    d2y[:,-1] = (P_top - 2*P[:,-1] + P[:,-2])/hy/hy
+    ...
+    ...    
return d2x + d2y - 10*np.cosh(P).mean()**2 + + >>> guess = np.zeros((nx, ny), float) + >>> sol = optimize.root(residual, guess, method='krylov') + >>> print('Residual: %g' % abs(residual(sol.x)).max()) + Residual: 5.7972e-06 # may vary + + >>> import matplotlib.pyplot as plt + >>> x, y = np.mgrid[0:1:(nx*1j), 0:1:(ny*1j)] + >>> plt.pcolormesh(x, y, sol.x, shading='gouraud') + >>> plt.colorbar() + >>> plt.show() + + """ + if not isinstance(args, tuple): + args = (args,) + + meth = method.lower() + if options is None: + options = {} + + if callback is not None and meth in ('hybr', 'lm'): + warn('Method %s does not accept callback.' % method, + RuntimeWarning, stacklevel=2) + + # fun also returns the Jacobian + if not callable(jac) and meth in ('hybr', 'lm'): + if bool(jac): + fun = MemoizeJac(fun) + jac = fun.derivative + else: + jac = None + + # set default tolerances + if tol is not None: + options = dict(options) + if meth in ('hybr', 'lm'): + options.setdefault('xtol', tol) + elif meth in ('df-sane',): + options.setdefault('ftol', tol) + elif meth in ('broyden1', 'broyden2', 'anderson', 'linearmixing', + 'diagbroyden', 'excitingmixing', 'krylov'): + options.setdefault('xtol', tol) + options.setdefault('xatol', np.inf) + options.setdefault('ftol', np.inf) + options.setdefault('fatol', np.inf) + + if meth == 'hybr': + sol = _root_hybr(fun, x0, args=args, jac=jac, **options) + elif meth == 'lm': + sol = _root_leastsq(fun, x0, args=args, jac=jac, **options) + elif meth == 'df-sane': + _warn_jac_unused(jac, method) + sol = _root_df_sane(fun, x0, args=args, callback=callback, + **options) + elif meth in ('broyden1', 'broyden2', 'anderson', 'linearmixing', + 'diagbroyden', 'excitingmixing', 'krylov'): + _warn_jac_unused(jac, method) + sol = _root_nonlin_solve(fun, x0, args=args, jac=jac, + _method=meth, _callback=callback, + **options) + else: + raise ValueError('Unknown solver %s' % method) + + return sol + + +def _warn_jac_unused(jac, method): + if jac is not None: + warn(f'Method {method} does not use the jacobian (jac).', + RuntimeWarning, stacklevel=2) + + +def _root_leastsq(fun, x0, args=(), jac=None, + col_deriv=0, xtol=1.49012e-08, ftol=1.49012e-08, + gtol=0.0, maxiter=0, eps=0.0, factor=100, diag=None, + **unknown_options): + """ + Solve for least squares with Levenberg-Marquardt + + Options + ------- + col_deriv : bool + non-zero to specify that the Jacobian function computes derivatives + down the columns (faster, because there is no transpose operation). + ftol : float + Relative error desired in the sum of squares. + xtol : float + Relative error desired in the approximate solution. + gtol : float + Orthogonality desired between the function vector and the columns + of the Jacobian. + maxiter : int + The maximum number of calls to the function. If zero, then + 100*(N+1) is the maximum where N is the number of elements in x0. + epsfcn : float + A suitable step length for the forward-difference approximation of + the Jacobian (for Dfun=None). If epsfcn is less than the machine + precision, it is assumed that the relative errors in the functions + are of the order of the machine precision. + factor : float + A parameter determining the initial step bound + (``factor * || diag * x||``). Should be in interval ``(0.1, 100)``. + diag : sequence + N positive entries that serve as a scale factors for the variables. 
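+
+    Examples
+    --------
+    These options are passed through `root` with ``method='lm'``. A minimal
+    sketch follows; the tolerance and iteration limit are illustrative values,
+    not recommendations:
+
+    >>> import numpy as np
+    >>> from scipy import optimize
+    >>> def fun(x):
+    ...     return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
+    ...             0.5 * (x[1] - x[0])**3 + x[1]]
+    >>> sol = optimize.root(fun, [0, 0], method='lm',
+    ...                     options={'xtol': 1e-10, 'maxiter': 2000})
+    >>> sol.success
+    True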
+ """ + + _check_unknown_options(unknown_options) + x, cov_x, info, msg, ier = leastsq(fun, x0, args=args, Dfun=jac, + full_output=True, + col_deriv=col_deriv, xtol=xtol, + ftol=ftol, gtol=gtol, + maxfev=maxiter, epsfcn=eps, + factor=factor, diag=diag) + sol = OptimizeResult(x=x, message=msg, status=ier, + success=ier in (1, 2, 3, 4), cov_x=cov_x, + fun=info.pop('fvec'), method="lm") + sol.update(info) + return sol + + +def _root_nonlin_solve(fun, x0, args=(), jac=None, + _callback=None, _method=None, + nit=None, disp=False, maxiter=None, + ftol=None, fatol=None, xtol=None, xatol=None, + tol_norm=None, line_search='armijo', jac_options=None, + **unknown_options): + _check_unknown_options(unknown_options) + + f_tol = fatol + f_rtol = ftol + x_tol = xatol + x_rtol = xtol + verbose = disp + if jac_options is None: + jac_options = dict() + + jacobian = {'broyden1': nonlin.BroydenFirst, + 'broyden2': nonlin.BroydenSecond, + 'anderson': nonlin.Anderson, + 'linearmixing': nonlin.LinearMixing, + 'diagbroyden': nonlin.DiagBroyden, + 'excitingmixing': nonlin.ExcitingMixing, + 'krylov': nonlin.KrylovJacobian + }[_method] + + if args: + if jac is True: + def f(x): + return fun(x, *args)[0] + else: + def f(x): + return fun(x, *args) + else: + f = fun + + x, info = nonlin.nonlin_solve(f, x0, jacobian=jacobian(**jac_options), + iter=nit, verbose=verbose, + maxiter=maxiter, f_tol=f_tol, + f_rtol=f_rtol, x_tol=x_tol, + x_rtol=x_rtol, tol_norm=tol_norm, + line_search=line_search, + callback=_callback, full_output=True, + raise_exception=False) + sol = OptimizeResult(x=x, method=_method) + sol.update(info) + return sol + +def _root_broyden1_doc(): + """ + Options + ------- + nit : int, optional + Number of iterations to make. If omitted (default), make as many + as required to meet tolerances. + disp : bool, optional + Print status to stdout on every iteration. + maxiter : int, optional + Maximum number of iterations to make. + ftol : float, optional + Relative tolerance for the residual. If omitted, not used. + fatol : float, optional + Absolute tolerance (in max-norm) for the residual. + If omitted, default is 6e-6. + xtol : float, optional + Relative minimum step size. If omitted, not used. + xatol : float, optional + Absolute minimum step size, as determined from the Jacobian + approximation. If the step size is smaller than this, optimization + is terminated as successful. If omitted, not used. + tol_norm : function(vector) -> scalar, optional + Norm to use in convergence check. Default is the maximum norm. + line_search : {None, 'armijo' (default), 'wolfe'}, optional + Which type of a line search to use to determine the step size in + the direction given by the Jacobian approximation. Defaults to + 'armijo'. + jac_options : dict, optional + Options for the respective Jacobian approximation. + alpha : float, optional + Initial guess for the Jacobian is (-1/alpha). + reduction_method : str or tuple, optional + Method used in ensuring that the rank of the Broyden + matrix stays low. Can either be a string giving the + name of the method, or a tuple of the form ``(method, + param1, param2, ...)`` that gives the name of the + method and values for additional parameters. + + Methods available: + + - ``restart`` + Drop all matrix columns. Has no + extra parameters. + - ``simple`` + Drop oldest matrix column. Has no + extra parameters. + - ``svd`` + Keep only the most significant SVD + components. + + Extra parameters: + + - ``to_retain`` + Number of SVD components to + retain when rank reduction is done. 
+ Default is ``max_rank - 2``. + max_rank : int, optional + Maximum rank for the Broyden matrix. + Default is infinity (i.e., no rank reduction). + + Examples + -------- + >>> def func(x): + ... return np.cos(x) + x[::-1] - [1, 2, 3, 4] + ... + >>> from scipy import optimize + >>> res = optimize.root(func, [1, 1, 1, 1], method='broyden1', tol=1e-14) + >>> x = res.x + >>> x + array([4.04674914, 3.91158389, 2.71791677, 1.61756251]) + >>> np.cos(x) + x[::-1] + array([1., 2., 3., 4.]) + + """ + pass + +def _root_broyden2_doc(): + """ + Options + ------- + nit : int, optional + Number of iterations to make. If omitted (default), make as many + as required to meet tolerances. + disp : bool, optional + Print status to stdout on every iteration. + maxiter : int, optional + Maximum number of iterations to make. + ftol : float, optional + Relative tolerance for the residual. If omitted, not used. + fatol : float, optional + Absolute tolerance (in max-norm) for the residual. + If omitted, default is 6e-6. + xtol : float, optional + Relative minimum step size. If omitted, not used. + xatol : float, optional + Absolute minimum step size, as determined from the Jacobian + approximation. If the step size is smaller than this, optimization + is terminated as successful. If omitted, not used. + tol_norm : function(vector) -> scalar, optional + Norm to use in convergence check. Default is the maximum norm. + line_search : {None, 'armijo' (default), 'wolfe'}, optional + Which type of a line search to use to determine the step size in + the direction given by the Jacobian approximation. Defaults to + 'armijo'. + jac_options : dict, optional + Options for the respective Jacobian approximation. + + alpha : float, optional + Initial guess for the Jacobian is (-1/alpha). + reduction_method : str or tuple, optional + Method used in ensuring that the rank of the Broyden + matrix stays low. Can either be a string giving the + name of the method, or a tuple of the form ``(method, + param1, param2, ...)`` that gives the name of the + method and values for additional parameters. + + Methods available: + + - ``restart`` + Drop all matrix columns. Has no + extra parameters. + - ``simple`` + Drop oldest matrix column. Has no + extra parameters. + - ``svd`` + Keep only the most significant SVD + components. + + Extra parameters: + + - ``to_retain`` + Number of SVD components to + retain when rank reduction is done. + Default is ``max_rank - 2``. + max_rank : int, optional + Maximum rank for the Broyden matrix. + Default is infinity (i.e., no rank reduction). + """ + pass + +def _root_anderson_doc(): + """ + Options + ------- + nit : int, optional + Number of iterations to make. If omitted (default), make as many + as required to meet tolerances. + disp : bool, optional + Print status to stdout on every iteration. + maxiter : int, optional + Maximum number of iterations to make. + ftol : float, optional + Relative tolerance for the residual. If omitted, not used. + fatol : float, optional + Absolute tolerance (in max-norm) for the residual. + If omitted, default is 6e-6. + xtol : float, optional + Relative minimum step size. If omitted, not used. + xatol : float, optional + Absolute minimum step size, as determined from the Jacobian + approximation. If the step size is smaller than this, optimization + is terminated as successful. If omitted, not used. + tol_norm : function(vector) -> scalar, optional + Norm to use in convergence check. Default is the maximum norm. 
+ line_search : {None, 'armijo' (default), 'wolfe'}, optional + Which type of a line search to use to determine the step size in + the direction given by the Jacobian approximation. Defaults to + 'armijo'. + jac_options : dict, optional + Options for the respective Jacobian approximation. + + alpha : float, optional + Initial guess for the Jacobian is (-1/alpha). + M : float, optional + Number of previous vectors to retain. Defaults to 5. + w0 : float, optional + Regularization parameter for numerical stability. + Compared to unity, good values of the order of 0.01. + """ + pass + +def _root_linearmixing_doc(): + """ + Options + ------- + nit : int, optional + Number of iterations to make. If omitted (default), make as many + as required to meet tolerances. + disp : bool, optional + Print status to stdout on every iteration. + maxiter : int, optional + Maximum number of iterations to make. + ftol : float, optional + Relative tolerance for the residual. If omitted, not used. + fatol : float, optional + Absolute tolerance (in max-norm) for the residual. + If omitted, default is 6e-6. + xtol : float, optional + Relative minimum step size. If omitted, not used. + xatol : float, optional + Absolute minimum step size, as determined from the Jacobian + approximation. If the step size is smaller than this, optimization + is terminated as successful. If omitted, not used. + tol_norm : function(vector) -> scalar, optional + Norm to use in convergence check. Default is the maximum norm. + line_search : {None, 'armijo' (default), 'wolfe'}, optional + Which type of a line search to use to determine the step size in + the direction given by the Jacobian approximation. Defaults to + 'armijo'. + jac_options : dict, optional + Options for the respective Jacobian approximation. + + alpha : float, optional + initial guess for the jacobian is (-1/alpha). + """ + pass + +def _root_diagbroyden_doc(): + """ + Options + ------- + nit : int, optional + Number of iterations to make. If omitted (default), make as many + as required to meet tolerances. + disp : bool, optional + Print status to stdout on every iteration. + maxiter : int, optional + Maximum number of iterations to make. + ftol : float, optional + Relative tolerance for the residual. If omitted, not used. + fatol : float, optional + Absolute tolerance (in max-norm) for the residual. + If omitted, default is 6e-6. + xtol : float, optional + Relative minimum step size. If omitted, not used. + xatol : float, optional + Absolute minimum step size, as determined from the Jacobian + approximation. If the step size is smaller than this, optimization + is terminated as successful. If omitted, not used. + tol_norm : function(vector) -> scalar, optional + Norm to use in convergence check. Default is the maximum norm. + line_search : {None, 'armijo' (default), 'wolfe'}, optional + Which type of a line search to use to determine the step size in + the direction given by the Jacobian approximation. Defaults to + 'armijo'. + jac_options : dict, optional + Options for the respective Jacobian approximation. + + alpha : float, optional + initial guess for the jacobian is (-1/alpha). + """ + pass + +def _root_excitingmixing_doc(): + """ + Options + ------- + nit : int, optional + Number of iterations to make. If omitted (default), make as many + as required to meet tolerances. + disp : bool, optional + Print status to stdout on every iteration. + maxiter : int, optional + Maximum number of iterations to make. 
+ ftol : float, optional + Relative tolerance for the residual. If omitted, not used. + fatol : float, optional + Absolute tolerance (in max-norm) for the residual. + If omitted, default is 6e-6. + xtol : float, optional + Relative minimum step size. If omitted, not used. + xatol : float, optional + Absolute minimum step size, as determined from the Jacobian + approximation. If the step size is smaller than this, optimization + is terminated as successful. If omitted, not used. + tol_norm : function(vector) -> scalar, optional + Norm to use in convergence check. Default is the maximum norm. + line_search : {None, 'armijo' (default), 'wolfe'}, optional + Which type of a line search to use to determine the step size in + the direction given by the Jacobian approximation. Defaults to + 'armijo'. + jac_options : dict, optional + Options for the respective Jacobian approximation. + + alpha : float, optional + Initial Jacobian approximation is (-1/alpha). + alphamax : float, optional + The entries of the diagonal Jacobian are kept in the range + ``[alpha, alphamax]``. + """ + pass + +def _root_krylov_doc(): + """ + Options + ------- + nit : int, optional + Number of iterations to make. If omitted (default), make as many + as required to meet tolerances. + disp : bool, optional + Print status to stdout on every iteration. + maxiter : int, optional + Maximum number of iterations to make. + ftol : float, optional + Relative tolerance for the residual. If omitted, not used. + fatol : float, optional + Absolute tolerance (in max-norm) for the residual. + If omitted, default is 6e-6. + xtol : float, optional + Relative minimum step size. If omitted, not used. + xatol : float, optional + Absolute minimum step size, as determined from the Jacobian + approximation. If the step size is smaller than this, optimization + is terminated as successful. If omitted, not used. + tol_norm : function(vector) -> scalar, optional + Norm to use in convergence check. Default is the maximum norm. + line_search : {None, 'armijo' (default), 'wolfe'}, optional + Which type of a line search to use to determine the step size in + the direction given by the Jacobian approximation. Defaults to + 'armijo'. + jac_options : dict, optional + Options for the respective Jacobian approximation. + + rdiff : float, optional + Relative step size to use in numerical differentiation. + method : str or callable, optional + Krylov method to use to approximate the Jacobian. Can be a string, + or a function implementing the same interface as the iterative + solvers in `scipy.sparse.linalg`. If a string, needs to be one of: + ``'lgmres'``, ``'gmres'``, ``'bicgstab'``, ``'cgs'``, ``'minres'``, + ``'tfqmr'``. + + The default is `scipy.sparse.linalg.lgmres`. + inner_M : LinearOperator or InverseJacobian + Preconditioner for the inner Krylov iteration. + Note that you can use also inverse Jacobians as (adaptive) + preconditioners. For example, + + >>> jac = BroydenFirst() + >>> kjac = KrylovJacobian(inner_M=jac.inverse). + + If the preconditioner has a method named 'update', it will + be called as ``update(x, f)`` after each nonlinear step, + with ``x`` giving the current point, and ``f`` the current + function value. + inner_tol, inner_maxiter, ... + Parameters to pass on to the "inner" Krylov solver. + See `scipy.sparse.linalg.gmres` for details. + outer_k : int, optional + Size of the subspace kept across LGMRES nonlinear + iterations. + + See `scipy.sparse.linalg.lgmres` for details. 
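+
+    Examples
+    --------
+    A small sketch of passing Krylov-specific options through `root`; the
+    option values below are illustrative rather than recommendations:
+
+    >>> import numpy as np
+    >>> from scipy import optimize
+    >>> def fun(x):
+    ...     return x + 0.1 * np.sin(x) - 1.0
+    >>> sol = optimize.root(fun, np.zeros(50), method='krylov',
+    ...                     options={'jac_options': {'rdiff': 1e-8,
+    ...                                              'method': 'lgmres'}})
+    >>> sol.success
+    True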
+ """ + pass diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_root_scalar.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_root_scalar.py new file mode 100644 index 0000000000000000000000000000000000000000..550098bbe677825b34e19aec29e340143b3522cc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_root_scalar.py @@ -0,0 +1,525 @@ +""" +Unified interfaces to root finding algorithms for real or complex +scalar functions. + +Functions +--------- +- root : find a root of a scalar function. +""" +import numpy as np + +from . import _zeros_py as optzeros +from ._numdiff import approx_derivative + +__all__ = ['root_scalar'] + +ROOT_SCALAR_METHODS = ['bisect', 'brentq', 'brenth', 'ridder', 'toms748', + 'newton', 'secant', 'halley'] + + +class MemoizeDer: + """Decorator that caches the value and derivative(s) of function each + time it is called. + + This is a simplistic memoizer that calls and caches a single value + of `f(x, *args)`. + It assumes that `args` does not change between invocations. + It supports the use case of a root-finder where `args` is fixed, + `x` changes, and only rarely, if at all, does x assume the same value + more than once.""" + def __init__(self, fun): + self.fun = fun + self.vals = None + self.x = None + self.n_calls = 0 + + def __call__(self, x, *args): + r"""Calculate f or use cached value if available""" + # Derivative may be requested before the function itself, always check + if self.vals is None or x != self.x: + fg = self.fun(x, *args) + self.x = x + self.n_calls += 1 + self.vals = fg[:] + return self.vals[0] + + def fprime(self, x, *args): + r"""Calculate f' or use a cached value if available""" + if self.vals is None or x != self.x: + self(x, *args) + return self.vals[1] + + def fprime2(self, x, *args): + r"""Calculate f'' or use a cached value if available""" + if self.vals is None or x != self.x: + self(x, *args) + return self.vals[2] + + def ncalls(self): + return self.n_calls + + +def root_scalar(f, args=(), method=None, bracket=None, + fprime=None, fprime2=None, + x0=None, x1=None, + xtol=None, rtol=None, maxiter=None, + options=None): + """ + Find a root of a scalar function. + + Parameters + ---------- + f : callable + A function to find a root of. + args : tuple, optional + Extra arguments passed to the objective function and its derivative(s). + method : str, optional + Type of solver. Should be one of + + - 'bisect' :ref:`(see here) ` + - 'brentq' :ref:`(see here) ` + - 'brenth' :ref:`(see here) ` + - 'ridder' :ref:`(see here) ` + - 'toms748' :ref:`(see here) ` + - 'newton' :ref:`(see here) ` + - 'secant' :ref:`(see here) ` + - 'halley' :ref:`(see here) ` + + bracket: A sequence of 2 floats, optional + An interval bracketing a root. `f(x, *args)` must have different + signs at the two endpoints. + x0 : float, optional + Initial guess. + x1 : float, optional + A second guess. + fprime : bool or callable, optional + If `fprime` is a boolean and is True, `f` is assumed to return the + value of the objective function and of the derivative. + `fprime` can also be a callable returning the derivative of `f`. In + this case, it must accept the same arguments as `f`. + fprime2 : bool or callable, optional + If `fprime2` is a boolean and is True, `f` is assumed to return the + value of the objective function and of the + first and second derivatives. + `fprime2` can also be a callable returning the second derivative of `f`. + In this case, it must accept the same arguments as `f`. 
+ xtol : float, optional + Tolerance (absolute) for termination. + rtol : float, optional + Tolerance (relative) for termination. + maxiter : int, optional + Maximum number of iterations. + options : dict, optional + A dictionary of solver options. E.g., ``k``, see + :obj:`show_options()` for details. + + Returns + ------- + sol : RootResults + The solution represented as a ``RootResults`` object. + Important attributes are: ``root`` the solution , ``converged`` a + boolean flag indicating if the algorithm exited successfully and + ``flag`` which describes the cause of the termination. See + `RootResults` for a description of other attributes. + + See also + -------- + show_options : Additional options accepted by the solvers + root : Find a root of a vector function. + + Notes + ----- + This section describes the available solvers that can be selected by the + 'method' parameter. + + The default is to use the best method available for the situation + presented. + If a bracket is provided, it may use one of the bracketing methods. + If a derivative and an initial value are specified, it may + select one of the derivative-based methods. + If no method is judged applicable, it will raise an Exception. + + Arguments for each method are as follows (x=required, o=optional). + + +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+ + | method | f | args | bracket | x0 | x1 | fprime | fprime2 | xtol | rtol | maxiter | options | + +===============================================+===+======+=========+====+====+========+=========+======+======+=========+=========+ + | :ref:`bisect ` | x | o | x | | | | | o | o | o | o | + +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+ + | :ref:`brentq ` | x | o | x | | | | | o | o | o | o | + +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+ + | :ref:`brenth ` | x | o | x | | | | | o | o | o | o | + +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+ + | :ref:`ridder ` | x | o | x | | | | | o | o | o | o | + +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+ + | :ref:`toms748 ` | x | o | x | | | | | o | o | o | o | + +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+ + | :ref:`secant ` | x | o | | x | o | | | o | o | o | o | + +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+ + | :ref:`newton ` | x | o | | x | | o | | o | o | o | o | + +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+ + | :ref:`halley ` | x | o | | x | | x | x | o | o | o | o | + +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+ + + Examples + -------- + + Find the root of a simple cubic + + >>> from scipy import optimize + >>> def f(x): + ... return (x**3 - 1) # only one real root at x = 1 + + >>> def fprime(x): + ... 
return 3*x**2 + + The `brentq` method takes as input a bracket + + >>> sol = optimize.root_scalar(f, bracket=[0, 3], method='brentq') + >>> sol.root, sol.iterations, sol.function_calls + (1.0, 10, 11) + + The `newton` method takes as input a single point and uses the + derivative(s). + + >>> sol = optimize.root_scalar(f, x0=0.2, fprime=fprime, method='newton') + >>> sol.root, sol.iterations, sol.function_calls + (1.0, 11, 22) + + The function can provide the value and derivative(s) in a single call. + + >>> def f_p_pp(x): + ... return (x**3 - 1), 3*x**2, 6*x + + >>> sol = optimize.root_scalar( + ... f_p_pp, x0=0.2, fprime=True, method='newton' + ... ) + >>> sol.root, sol.iterations, sol.function_calls + (1.0, 11, 11) + + >>> sol = optimize.root_scalar( + ... f_p_pp, x0=0.2, fprime=True, fprime2=True, method='halley' + ... ) + >>> sol.root, sol.iterations, sol.function_calls + (1.0, 7, 8) + + + """ # noqa: E501 + if not isinstance(args, tuple): + args = (args,) + + if options is None: + options = {} + + # fun also returns the derivative(s) + is_memoized = False + if fprime2 is not None and not callable(fprime2): + if bool(fprime2): + f = MemoizeDer(f) + is_memoized = True + fprime2 = f.fprime2 + fprime = f.fprime + else: + fprime2 = None + if fprime is not None and not callable(fprime): + if bool(fprime): + f = MemoizeDer(f) + is_memoized = True + fprime = f.fprime + else: + fprime = None + + # respect solver-specific default tolerances - only pass in if actually set + kwargs = {} + for k in ['xtol', 'rtol', 'maxiter']: + v = locals().get(k) + if v is not None: + kwargs[k] = v + + # Set any solver-specific options + if options: + kwargs.update(options) + # Always request full_output from the underlying method as _root_scalar + # always returns a RootResults object + kwargs.update(full_output=True, disp=False) + + # Pick a method if not specified. + # Use the "best" method available for the situation. + if not method: + if bracket: + method = 'brentq' + elif x0 is not None: + if fprime: + if fprime2: + method = 'halley' + else: + method = 'newton' + elif x1 is not None: + method = 'secant' + else: + method = 'newton' + if not method: + raise ValueError('Unable to select a solver as neither bracket ' + 'nor starting point provided.') + + meth = method.lower() + map2underlying = {'halley': 'newton', 'secant': 'newton'} + + try: + methodc = getattr(optzeros, map2underlying.get(meth, meth)) + except AttributeError as e: + raise ValueError('Unknown solver %s' % meth) from e + + if meth in ['bisect', 'ridder', 'brentq', 'brenth', 'toms748']: + if not isinstance(bracket, (list, tuple, np.ndarray)): + raise ValueError('Bracket needed for %s' % method) + + a, b = bracket[:2] + try: + r, sol = methodc(f, a, b, args=args, **kwargs) + except ValueError as e: + # gh-17622 fixed some bugs in low-level solvers by raising an error + # (rather than returning incorrect results) when the callable + # returns a NaN. It did so by wrapping the callable rather than + # modifying compiled code, so the iteration count is not available. 
+ if hasattr(e, "_x"): + sol = optzeros.RootResults(root=e._x, + iterations=np.nan, + function_calls=e._function_calls, + flag=str(e), method=method) + else: + raise + + elif meth in ['secant']: + if x0 is None: + raise ValueError('x0 must not be None for %s' % method) + if 'xtol' in kwargs: + kwargs['tol'] = kwargs.pop('xtol') + r, sol = methodc(f, x0, args=args, fprime=None, fprime2=None, + x1=x1, **kwargs) + elif meth in ['newton']: + if x0 is None: + raise ValueError('x0 must not be None for %s' % method) + if not fprime: + # approximate fprime with finite differences + + def fprime(x, *args): + # `root_scalar` doesn't actually seem to support vectorized + # use of `newton`. In that case, `approx_derivative` will + # always get scalar input. Nonetheless, it always returns an + # array, so we extract the element to produce scalar output. + return approx_derivative(f, x, method='2-point', args=args)[0] + + if 'xtol' in kwargs: + kwargs['tol'] = kwargs.pop('xtol') + r, sol = methodc(f, x0, args=args, fprime=fprime, fprime2=None, + **kwargs) + elif meth in ['halley']: + if x0 is None: + raise ValueError('x0 must not be None for %s' % method) + if not fprime: + raise ValueError('fprime must be specified for %s' % method) + if not fprime2: + raise ValueError('fprime2 must be specified for %s' % method) + if 'xtol' in kwargs: + kwargs['tol'] = kwargs.pop('xtol') + r, sol = methodc(f, x0, args=args, fprime=fprime, fprime2=fprime2, **kwargs) + else: + raise ValueError('Unknown solver %s' % method) + + if is_memoized: + # Replace the function_calls count with the memoized count. + # Avoids double and triple-counting. + n_calls = f.n_calls + sol.function_calls = n_calls + + return sol + + +def _root_scalar_brentq_doc(): + r""" + Options + ------- + args : tuple, optional + Extra arguments passed to the objective function. + bracket: A sequence of 2 floats, optional + An interval bracketing a root. `f(x, *args)` must have different + signs at the two endpoints. + xtol : float, optional + Tolerance (absolute) for termination. + rtol : float, optional + Tolerance (relative) for termination. + maxiter : int, optional + Maximum number of iterations. + options: dict, optional + Specifies any method-specific options not covered above + + """ + pass + + +def _root_scalar_brenth_doc(): + r""" + Options + ------- + args : tuple, optional + Extra arguments passed to the objective function. + bracket: A sequence of 2 floats, optional + An interval bracketing a root. `f(x, *args)` must have different + signs at the two endpoints. + xtol : float, optional + Tolerance (absolute) for termination. + rtol : float, optional + Tolerance (relative) for termination. + maxiter : int, optional + Maximum number of iterations. + options: dict, optional + Specifies any method-specific options not covered above. + + """ + pass + +def _root_scalar_toms748_doc(): + r""" + Options + ------- + args : tuple, optional + Extra arguments passed to the objective function. + bracket: A sequence of 2 floats, optional + An interval bracketing a root. `f(x, *args)` must have different + signs at the two endpoints. + xtol : float, optional + Tolerance (absolute) for termination. + rtol : float, optional + Tolerance (relative) for termination. + maxiter : int, optional + Maximum number of iterations. + options: dict, optional + Specifies any method-specific options not covered above. + + """ + pass + + +def _root_scalar_secant_doc(): + r""" + Options + ------- + args : tuple, optional + Extra arguments passed to the objective function. 
+ xtol : float, optional + Tolerance (absolute) for termination. + rtol : float, optional + Tolerance (relative) for termination. + maxiter : int, optional + Maximum number of iterations. + x0 : float, required + Initial guess. + x1 : float, required + A second guess. + options: dict, optional + Specifies any method-specific options not covered above. + + """ + pass + + +def _root_scalar_newton_doc(): + r""" + Options + ------- + args : tuple, optional + Extra arguments passed to the objective function and its derivative. + xtol : float, optional + Tolerance (absolute) for termination. + rtol : float, optional + Tolerance (relative) for termination. + maxiter : int, optional + Maximum number of iterations. + x0 : float, required + Initial guess. + fprime : bool or callable, optional + If `fprime` is a boolean and is True, `f` is assumed to return the + value of derivative along with the objective function. + `fprime` can also be a callable returning the derivative of `f`. In + this case, it must accept the same arguments as `f`. + options: dict, optional + Specifies any method-specific options not covered above. + + """ + pass + + +def _root_scalar_halley_doc(): + r""" + Options + ------- + args : tuple, optional + Extra arguments passed to the objective function and its derivatives. + xtol : float, optional + Tolerance (absolute) for termination. + rtol : float, optional + Tolerance (relative) for termination. + maxiter : int, optional + Maximum number of iterations. + x0 : float, required + Initial guess. + fprime : bool or callable, required + If `fprime` is a boolean and is True, `f` is assumed to return the + value of derivative along with the objective function. + `fprime` can also be a callable returning the derivative of `f`. In + this case, it must accept the same arguments as `f`. + fprime2 : bool or callable, required + If `fprime2` is a boolean and is True, `f` is assumed to return the + value of 1st and 2nd derivatives along with the objective function. + `fprime2` can also be a callable returning the 2nd derivative of `f`. + In this case, it must accept the same arguments as `f`. + options: dict, optional + Specifies any method-specific options not covered above. + + """ + pass + + +def _root_scalar_ridder_doc(): + r""" + Options + ------- + args : tuple, optional + Extra arguments passed to the objective function. + bracket: A sequence of 2 floats, optional + An interval bracketing a root. `f(x, *args)` must have different + signs at the two endpoints. + xtol : float, optional + Tolerance (absolute) for termination. + rtol : float, optional + Tolerance (relative) for termination. + maxiter : int, optional + Maximum number of iterations. + options: dict, optional + Specifies any method-specific options not covered above. + + """ + pass + + +def _root_scalar_bisect_doc(): + r""" + Options + ------- + args : tuple, optional + Extra arguments passed to the objective function. + bracket: A sequence of 2 floats, optional + An interval bracketing a root. `f(x, *args)` must have different + signs at the two endpoints. + xtol : float, optional + Tolerance (absolute) for termination. + rtol : float, optional + Tolerance (relative) for termination. + maxiter : int, optional + Maximum number of iterations. + options: dict, optional + Specifies any method-specific options not covered above. 
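+
+    Examples
+    --------
+    A minimal sketch of selecting bisection through `root_scalar`; the
+    bracket and tolerance below are purely illustrative:
+
+    >>> from scipy import optimize
+    >>> sol = optimize.root_scalar(lambda x: x**2 - 2, bracket=[0, 2],
+    ...                            method='bisect', xtol=1e-12)
+    >>> round(sol.root, 6)
+    1.414214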
+ + """ + pass diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_shgo.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_shgo.py new file mode 100644 index 0000000000000000000000000000000000000000..61d686d02afdffc01e0d66d538c16de9cb02a0a5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_shgo.py @@ -0,0 +1,1595 @@ +"""shgo: The simplicial homology global optimisation algorithm.""" +from collections import namedtuple +import time +import logging +import warnings +import sys + +import numpy as np + +from scipy import spatial +from scipy.optimize import OptimizeResult, minimize, Bounds +from scipy.optimize._optimize import MemoizeJac +from scipy.optimize._constraints import new_bounds_to_old +from scipy.optimize._minimize import standardize_constraints +from scipy._lib._util import _FunctionWrapper + +from scipy.optimize._shgo_lib._complex import Complex + +__all__ = ['shgo'] + + +def shgo( + func, bounds, args=(), constraints=None, n=100, iters=1, callback=None, + minimizer_kwargs=None, options=None, sampling_method='simplicial', *, + workers=1 +): + """ + Finds the global minimum of a function using SHG optimization. + + SHGO stands for "simplicial homology global optimization". + + Parameters + ---------- + func : callable + The objective function to be minimized. Must be in the form + ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array + and ``args`` is a tuple of any additional fixed parameters needed to + completely specify the function. + bounds : sequence or `Bounds` + Bounds for variables. There are two ways to specify the bounds: + + 1. Instance of `Bounds` class. + 2. Sequence of ``(min, max)`` pairs for each element in `x`. + + args : tuple, optional + Any additional fixed parameters needed to completely specify the + objective function. + constraints : {Constraint, dict} or List of {Constraint, dict}, optional + Constraints definition. Only for COBYLA, SLSQP and trust-constr. + See the tutorial [5]_ for further details on specifying constraints. + + .. note:: + + Only COBYLA, SLSQP, and trust-constr local minimize methods + currently support constraint arguments. If the ``constraints`` + sequence used in the local optimization problem is not defined in + ``minimizer_kwargs`` and a constrained method is used then the + global ``constraints`` will be used. + (Defining a ``constraints`` sequence in ``minimizer_kwargs`` + means that ``constraints`` will not be added so if equality + constraints and so forth need to be added then the inequality + functions in ``constraints`` need to be added to + ``minimizer_kwargs`` too). + COBYLA only supports inequality constraints. + + .. versionchanged:: 1.11.0 + + ``constraints`` accepts `NonlinearConstraint`, `LinearConstraint`. + + n : int, optional + Number of sampling points used in the construction of the simplicial + complex. For the default ``simplicial`` sampling method 2**dim + 1 + sampling points are generated instead of the default `n=100`. For all + other specified values `n` sampling points are generated. For + ``sobol``, ``halton`` and other arbitrary `sampling_methods` `n=100` or + another specified number of sampling points are generated. + iters : int, optional + Number of iterations used in the construction of the simplicial + complex. Default is 1. + callback : callable, optional + Called after each iteration, as ``callback(xk)``, where ``xk`` is the + current parameter vector. 
+ minimizer_kwargs : dict, optional + Extra keyword arguments to be passed to the minimizer + ``scipy.optimize.minimize`` Some important options could be: + + * method : str + The minimization method. If not given, chosen to be one of + BFGS, L-BFGS-B, SLSQP, depending on whether or not the + problem has constraints or bounds. + * args : tuple + Extra arguments passed to the objective function (``func``) and + its derivatives (Jacobian, Hessian). + * options : dict, optional + Note that by default the tolerance is specified as + ``{ftol: 1e-12}`` + + options : dict, optional + A dictionary of solver options. Many of the options specified for the + global routine are also passed to the scipy.optimize.minimize routine. + The options that are also passed to the local routine are marked with + "(L)". + + Stopping criteria, the algorithm will terminate if any of the specified + criteria are met. However, the default algorithm does not require any + to be specified: + + * maxfev : int (L) + Maximum number of function evaluations in the feasible domain. + (Note only methods that support this option will terminate + the routine at precisely exact specified value. Otherwise the + criterion will only terminate during a global iteration) + * f_min + Specify the minimum objective function value, if it is known. + * f_tol : float + Precision goal for the value of f in the stopping + criterion. Note that the global routine will also + terminate if a sampling point in the global routine is + within this tolerance. + * maxiter : int + Maximum number of iterations to perform. + * maxev : int + Maximum number of sampling evaluations to perform (includes + searching in infeasible points). + * maxtime : float + Maximum processing runtime allowed + * minhgrd : int + Minimum homology group rank differential. The homology group of the + objective function is calculated (approximately) during every + iteration. The rank of this group has a one-to-one correspondence + with the number of locally convex subdomains in the objective + function (after adequate sampling points each of these subdomains + contain a unique global minimum). If the difference in the hgr is 0 + between iterations for ``maxhgrd`` specified iterations the + algorithm will terminate. + + Objective function knowledge: + + * symmetry : list or bool + Specify if the objective function contains symmetric variables. + The search space (and therefore performance) is decreased by up to + O(n!) times in the fully symmetric case. If `True` is specified + then all variables will be set symmetric to the first variable. + Default + is set to False. + + E.g. f(x) = (x_1 + x_2 + x_3) + (x_4)**2 + (x_5)**2 + (x_6)**2 + + In this equation x_2 and x_3 are symmetric to x_1, while x_5 and + x_6 are symmetric to x_4, this can be specified to the solver as: + + symmetry = [0, # Variable 1 + 0, # symmetric to variable 1 + 0, # symmetric to variable 1 + 3, # Variable 4 + 3, # symmetric to variable 4 + 3, # symmetric to variable 4 + ] + + * jac : bool or callable, optional + Jacobian (gradient) of objective function. Only for CG, BFGS, + Newton-CG, L-BFGS-B, TNC, SLSQP, dogleg, trust-ncg. If ``jac`` is a + boolean and is True, ``fun`` is assumed to return the gradient + along with the objective function. If False, the gradient will be + estimated numerically. ``jac`` can also be a callable returning the + gradient of the objective. In this case, it must accept the same + arguments as ``fun``. 
(Passed to `scipy.optimize.minimize` + automatically) + + * hess, hessp : callable, optional + Hessian (matrix of second-order derivatives) of objective function + or Hessian of objective function times an arbitrary vector p. + Only for Newton-CG, dogleg, trust-ncg. Only one of ``hessp`` or + ``hess`` needs to be given. If ``hess`` is provided, then + ``hessp`` will be ignored. If neither ``hess`` nor ``hessp`` is + provided, then the Hessian product will be approximated using + finite differences on ``jac``. ``hessp`` must compute the Hessian + times an arbitrary vector. (Passed to `scipy.optimize.minimize` + automatically) + + Algorithm settings: + + * minimize_every_iter : bool + If True then promising global sampling points will be passed to a + local minimization routine every iteration. If True then only the + final minimizer pool will be run. Defaults to True. + * local_iter : int + Only evaluate a few of the best minimizer pool candidates every + iteration. If False all potential points are passed to the local + minimization routine. + * infty_constraints : bool + If True then any sampling points generated which are outside will + the feasible domain will be saved and given an objective function + value of ``inf``. If False then these points will be discarded. + Using this functionality could lead to higher performance with + respect to function evaluations before the global minimum is found, + specifying False will use less memory at the cost of a slight + decrease in performance. Defaults to True. + + Feedback: + + * disp : bool (L) + Set to True to print convergence messages. + + sampling_method : str or function, optional + Current built in sampling method options are ``halton``, ``sobol`` and + ``simplicial``. The default ``simplicial`` provides + the theoretical guarantee of convergence to the global minimum in + finite time. ``halton`` and ``sobol`` method are faster in terms of + sampling point generation at the cost of the loss of + guaranteed convergence. It is more appropriate for most "easier" + problems where the convergence is relatively fast. + User defined sampling functions must accept two arguments of ``n`` + sampling points of dimension ``dim`` per call and output an array of + sampling points with shape `n x dim`. + + workers : int or map-like callable, optional + Sample and run the local serial minimizations in parallel. + Supply -1 to use all available CPU cores, or an int to use + that many Processes (uses `multiprocessing.Pool `). + + Alternatively supply a map-like callable, such as + `multiprocessing.Pool.map` for parallel evaluation. + This evaluation is carried out as ``workers(func, iterable)``. + Requires that `func` be pickleable. + + .. versionadded:: 1.11.0 + + Returns + ------- + res : OptimizeResult + The optimization result represented as a `OptimizeResult` object. + Important attributes are: + ``x`` the solution array corresponding to the global minimum, + ``fun`` the function output at the global solution, + ``xl`` an ordered list of local minima solutions, + ``funl`` the function output at the corresponding local solutions, + ``success`` a Boolean flag indicating if the optimizer exited + successfully, + ``message`` which describes the cause of the termination, + ``nfev`` the total number of objective function evaluations including + the sampling calls, + ``nlfev`` the total number of objective function evaluations + culminating from all local search optimizations, + ``nit`` number of iterations performed by the global routine. 
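+
+    A brief sketch of supplying the ``minimizer_kwargs`` and ``options``
+    dictionaries described above; the particular values are illustrative,
+    not recommendations:
+
+    >>> from scipy.optimize import rosen, shgo
+    >>> bounds = [(0, 2)] * 3
+    >>> res = shgo(rosen, bounds,
+    ...            minimizer_kwargs={'method': 'SLSQP',
+    ...                              'options': {'ftol': 1e-12}},
+    ...            options={'minimize_every_iter': True})
+    >>> res.success
+    True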
+ + Notes + ----- + Global optimization using simplicial homology global optimization [1]_. + Appropriate for solving general purpose NLP and blackbox optimization + problems to global optimality (low-dimensional problems). + + In general, the optimization problems are of the form:: + + minimize f(x) subject to + + g_i(x) >= 0, i = 1,...,m + h_j(x) = 0, j = 1,...,p + + where x is a vector of one or more variables. ``f(x)`` is the objective + function ``R^n -> R``, ``g_i(x)`` are the inequality constraints, and + ``h_j(x)`` are the equality constraints. + + Optionally, the lower and upper bounds for each element in x can also be + specified using the `bounds` argument. + + While most of the theoretical advantages of SHGO are only proven for when + ``f(x)`` is a Lipschitz smooth function, the algorithm is also proven to + converge to the global optimum for the more general case where ``f(x)`` is + non-continuous, non-convex and non-smooth, if the default sampling method + is used [1]_. + + The local search method may be specified using the ``minimizer_kwargs`` + parameter which is passed on to ``scipy.optimize.minimize``. By default, + the ``SLSQP`` method is used. In general, it is recommended to use the + ``SLSQP`` or ``COBYLA`` local minimization if inequality constraints + are defined for the problem since the other methods do not use constraints. + + The ``halton`` and ``sobol`` method points are generated using + `scipy.stats.qmc`. Any other QMC method could be used. + + References + ---------- + .. [1] Endres, SC, Sandrock, C, Focke, WW (2018) "A simplicial homology + algorithm for lipschitz optimisation", Journal of Global + Optimization. + .. [2] Joe, SW and Kuo, FY (2008) "Constructing Sobol' sequences with + better two-dimensional projections", SIAM J. Sci. Comput. 30, + 2635-2654. + .. [3] Hock, W and Schittkowski, K (1981) "Test examples for nonlinear + programming codes", Lecture Notes in Economics and Mathematical + Systems, 187. Springer-Verlag, New York. + http://www.ai7.uni-bayreuth.de/test_problem_coll.pdf + .. [4] Wales, DJ (2015) "Perspective: Insight into reaction coordinates and + dynamics from the potential energy landscape", + Journal of Chemical Physics, 142(13), 2015. + .. [5] https://docs.scipy.org/doc/scipy/tutorial/optimize.html#constrained-minimization-of-multivariate-scalar-functions-minimize + + Examples + -------- + First consider the problem of minimizing the Rosenbrock function, `rosen`: + + >>> from scipy.optimize import rosen, shgo + >>> bounds = [(0,2), (0, 2), (0, 2), (0, 2), (0, 2)] + >>> result = shgo(rosen, bounds) + >>> result.x, result.fun + (array([1., 1., 1., 1., 1.]), 2.920392374190081e-18) + + Note that bounds determine the dimensionality of the objective + function and is therefore a required input, however you can specify + empty bounds using ``None`` or objects like ``np.inf`` which will be + converted to large float numbers. + + >>> bounds = [(None, None), ]*4 + >>> result = shgo(rosen, bounds) + >>> result.x + array([0.99999851, 0.99999704, 0.99999411, 0.9999882 ]) + + Next, we consider the Eggholder function, a problem with several local + minima and one global minimum. We will demonstrate the use of arguments and + the capabilities of `shgo`. + (https://en.wikipedia.org/wiki/Test_functions_for_optimization) + + >>> import numpy as np + >>> def eggholder(x): + ... return (-(x[1] + 47.0) + ... * np.sin(np.sqrt(abs(x[0]/2.0 + (x[1] + 47.0)))) + ... - x[0] * np.sin(np.sqrt(abs(x[0] - (x[1] + 47.0)))) + ... ) + ... 
+ >>> bounds = [(-512, 512), (-512, 512)] + + `shgo` has built-in low discrepancy sampling sequences. First, we will + input 64 initial sampling points of the *Sobol'* sequence: + + >>> result = shgo(eggholder, bounds, n=64, sampling_method='sobol') + >>> result.x, result.fun + (array([512. , 404.23180824]), -959.6406627208397) + + `shgo` also has a return for any other local minima that was found, these + can be called using: + + >>> result.xl + array([[ 512. , 404.23180824], + [ 283.0759062 , -487.12565635], + [-294.66820039, -462.01964031], + [-105.87688911, 423.15323845], + [-242.97926 , 274.38030925], + [-506.25823477, 6.3131022 ], + [-408.71980731, -156.10116949], + [ 150.23207937, 301.31376595], + [ 91.00920901, -391.283763 ], + [ 202.89662724, -269.38043241], + [ 361.66623976, -106.96493868], + [-219.40612786, -244.06020508]]) + + >>> result.funl + array([-959.64066272, -718.16745962, -704.80659592, -565.99778097, + -559.78685655, -557.36868733, -507.87385942, -493.9605115 , + -426.48799655, -421.15571437, -419.31194957, -410.98477763]) + + These results are useful in applications where there are many global minima + and the values of other global minima are desired or where the local minima + can provide insight into the system (for example morphologies + in physical chemistry [4]_). + + If we want to find a larger number of local minima, we can increase the + number of sampling points or the number of iterations. We'll increase the + number of sampling points to 64 and the number of iterations from the + default of 1 to 3. Using ``simplicial`` this would have given us + 64 x 3 = 192 initial sampling points. + + >>> result_2 = shgo(eggholder, + ... bounds, n=64, iters=3, sampling_method='sobol') + >>> len(result.xl), len(result_2.xl) + (12, 23) + + Note the difference between, e.g., ``n=192, iters=1`` and ``n=64, + iters=3``. + In the first case the promising points contained in the minimiser pool + are processed only once. In the latter case it is processed every 64 + sampling points for a total of 3 times. + + To demonstrate solving problems with non-linear constraints consider the + following example from Hock and Schittkowski problem 73 (cattle-feed) + [3]_:: + + minimize: f = 24.55 * x_1 + 26.75 * x_2 + 39 * x_3 + 40.50 * x_4 + + subject to: 2.3 * x_1 + 5.6 * x_2 + 11.1 * x_3 + 1.3 * x_4 - 5 >= 0, + + 12 * x_1 + 11.9 * x_2 + 41.8 * x_3 + 52.1 * x_4 - 21 + -1.645 * sqrt(0.28 * x_1**2 + 0.19 * x_2**2 + + 20.5 * x_3**2 + 0.62 * x_4**2) >= 0, + + x_1 + x_2 + x_3 + x_4 - 1 == 0, + + 1 >= x_i >= 0 for all i + + The approximate answer given in [3]_ is:: + + f([0.6355216, -0.12e-11, 0.3127019, 0.05177655]) = 29.894378 + + >>> def f(x): # (cattle-feed) + ... return 24.55*x[0] + 26.75*x[1] + 39*x[2] + 40.50*x[3] + ... + >>> def g1(x): + ... return 2.3*x[0] + 5.6*x[1] + 11.1*x[2] + 1.3*x[3] - 5 # >=0 + ... + >>> def g2(x): + ... return (12*x[0] + 11.9*x[1] +41.8*x[2] + 52.1*x[3] - 21 + ... - 1.645 * np.sqrt(0.28*x[0]**2 + 0.19*x[1]**2 + ... + 20.5*x[2]**2 + 0.62*x[3]**2) + ... ) # >=0 + ... + >>> def h1(x): + ... return x[0] + x[1] + x[2] + x[3] - 1 # == 0 + ... + >>> cons = ({'type': 'ineq', 'fun': g1}, + ... {'type': 'ineq', 'fun': g2}, + ... {'type': 'eq', 'fun': h1}) + >>> bounds = [(0, 1.0),]*4 + >>> res = shgo(f, bounds, n=150, constraints=cons) + >>> res + message: Optimization terminated successfully. 
+ success: True + fun: 29.894378159142136 + funl: [ 2.989e+01] + x: [ 6.355e-01 1.137e-13 3.127e-01 5.178e-02] # may vary + xl: [[ 6.355e-01 1.137e-13 3.127e-01 5.178e-02]] # may vary + nit: 1 + nfev: 142 # may vary + nlfev: 35 # may vary + nljev: 5 + nlhev: 0 + + >>> g1(res.x), g2(res.x), h1(res.x) + (-5.062616992290714e-14, -2.9594104944408173e-12, 0.0) + + """ + # if necessary, convert bounds class to old bounds + if isinstance(bounds, Bounds): + bounds = new_bounds_to_old(bounds.lb, bounds.ub, len(bounds.lb)) + + # Initiate SHGO class + # use in context manager to make sure that any parallelization + # resources are freed. + with SHGO(func, bounds, args=args, constraints=constraints, n=n, + iters=iters, callback=callback, + minimizer_kwargs=minimizer_kwargs, + options=options, sampling_method=sampling_method, + workers=workers) as shc: + # Run the algorithm, process results and test success + shc.iterate_all() + + if not shc.break_routine: + if shc.disp: + logging.info("Successfully completed construction of complex.") + + # Test post iterations success + if len(shc.LMC.xl_maps) == 0: + # If sampling failed to find pool, return lowest sampled point + # with a warning + shc.find_lowest_vertex() + shc.break_routine = True + shc.fail_routine(mes="Failed to find a feasible minimizer point. " + f"Lowest sampling point = {shc.f_lowest}") + shc.res.fun = shc.f_lowest + shc.res.x = shc.x_lowest + shc.res.nfev = shc.fn + shc.res.tnev = shc.n_sampled + else: + # Test that the optimal solutions do not violate any constraints + pass # TODO + + # Confirm the routine ran successfully + if not shc.break_routine: + shc.res.message = 'Optimization terminated successfully.' + shc.res.success = True + + # Return the final results + return shc.res + + +class SHGO: + def __init__(self, func, bounds, args=(), constraints=None, n=None, + iters=None, callback=None, minimizer_kwargs=None, + options=None, sampling_method='simplicial', workers=1): + from scipy.stats import qmc + # Input checks + methods = ['halton', 'sobol', 'simplicial'] + if isinstance(sampling_method, str) and sampling_method not in methods: + raise ValueError(("Unknown sampling_method specified." + " Valid methods: {}").format(', '.join(methods))) + + # Split obj func if given with Jac + try: + if ((minimizer_kwargs['jac'] is True) and + (not callable(minimizer_kwargs['jac']))): + self.func = MemoizeJac(func) + jac = self.func.derivative + minimizer_kwargs['jac'] = jac + func = self.func # .fun + else: + self.func = func # Normal definition of objective function + except (TypeError, KeyError): + self.func = func # Normal definition of objective function + + # Initiate class + self.func = _FunctionWrapper(func, args) + self.bounds = bounds + self.args = args + self.callback = callback + + # Bounds + abound = np.array(bounds, float) + self.dim = np.shape(abound)[0] # Dimensionality of problem + + # Set none finite values to large floats + infind = ~np.isfinite(abound) + abound[infind[:, 0], 0] = -1e50 + abound[infind[:, 1], 1] = 1e50 + + # Check if bounds are correctly specified + bnderr = abound[:, 0] > abound[:, 1] + if bnderr.any(): + raise ValueError('Error: lb > ub in bounds {}.' 
+ .format(', '.join(str(b) for b in bnderr))) + + self.bounds = abound + + # Constraints + # Process constraint dict sequence: + self.constraints = constraints + if constraints is not None: + self.min_cons = constraints + self.g_cons = [] + self.g_args = [] + + # shgo internals deals with old-style constraints + # self.constraints is used to create Complex, so need + # to be stored internally in old-style. + # `minimize` takes care of normalising these constraints + # for slsqp/cobyla/trust-constr. + self.constraints = standardize_constraints( + constraints, + np.empty(self.dim, float), + 'old' + ) + for cons in self.constraints: + if cons['type'] in ('ineq'): + self.g_cons.append(cons['fun']) + try: + self.g_args.append(cons['args']) + except KeyError: + self.g_args.append(()) + self.g_cons = tuple(self.g_cons) + self.g_args = tuple(self.g_args) + else: + self.g_cons = None + self.g_args = None + + # Define local minimization keyword arguments + # Start with defaults + self.minimizer_kwargs = {'method': 'SLSQP', + 'bounds': self.bounds, + 'options': {}, + 'callback': self.callback + } + if minimizer_kwargs is not None: + # Overwrite with supplied values + self.minimizer_kwargs.update(minimizer_kwargs) + + else: + self.minimizer_kwargs['options'] = {'ftol': 1e-12} + + if ( + self.minimizer_kwargs['method'].lower() in ('slsqp', 'cobyla', + 'trust-constr') + and ( + minimizer_kwargs is not None and + 'constraints' not in minimizer_kwargs and + constraints is not None + ) or + (self.g_cons is not None) + ): + self.minimizer_kwargs['constraints'] = self.min_cons + + # Process options dict + if options is not None: + self.init_options(options) + else: # Default settings: + self.f_min_true = None + self.minimize_every_iter = True + + # Algorithm limits + self.maxiter = None + self.maxfev = None + self.maxev = None + self.maxtime = None + self.f_min_true = None + self.minhgrd = None + + # Objective function knowledge + self.symmetry = None + + # Algorithm functionality + self.infty_cons_sampl = True + self.local_iter = False + + # Feedback + self.disp = False + + # Remove unknown arguments in self.minimizer_kwargs + # Start with arguments all the solvers have in common + self.min_solver_args = ['fun', 'x0', 'args', + 'callback', 'options', 'method'] + # then add the ones unique to specific solvers + solver_args = { + '_custom': ['jac', 'hess', 'hessp', 'bounds', 'constraints'], + 'nelder-mead': [], + 'powell': [], + 'cg': ['jac'], + 'bfgs': ['jac'], + 'newton-cg': ['jac', 'hess', 'hessp'], + 'l-bfgs-b': ['jac', 'bounds'], + 'tnc': ['jac', 'bounds'], + 'cobyla': ['constraints', 'catol'], + 'slsqp': ['jac', 'bounds', 'constraints'], + 'dogleg': ['jac', 'hess'], + 'trust-ncg': ['jac', 'hess', 'hessp'], + 'trust-krylov': ['jac', 'hess', 'hessp'], + 'trust-exact': ['jac', 'hess'], + 'trust-constr': ['jac', 'hess', 'hessp', 'constraints'], + } + method = self.minimizer_kwargs['method'] + self.min_solver_args += solver_args[method.lower()] + + # Only retain the known arguments + def _restrict_to_keys(dictionary, goodkeys): + """Remove keys from dictionary if not in goodkeys - inplace""" + existingkeys = set(dictionary) + for key in existingkeys - set(goodkeys): + dictionary.pop(key, None) + + _restrict_to_keys(self.minimizer_kwargs, self.min_solver_args) + _restrict_to_keys(self.minimizer_kwargs['options'], + self.min_solver_args + ['ftol']) + + # Algorithm controls + # Global controls + self.stop_global = False # Used in the stopping_criteria method + self.break_routine = False # Break the algorithm 
globally + self.iters = iters # Iterations to be ran + self.iters_done = 0 # Iterations completed + self.n = n # Sampling points per iteration + self.nc = 0 # n # Sampling points to sample in current iteration + self.n_prc = 0 # Processed points (used to track Delaunay iters) + self.n_sampled = 0 # To track no. of sampling points already generated + self.fn = 0 # Number of feasible sampling points evaluations performed + self.hgr = 0 # Homology group rank + # Initially attempt to build the triangulation incrementally: + self.qhull_incremental = True + + # Default settings if no sampling criteria. + if (self.n is None) and (self.iters is None) \ + and (sampling_method == 'simplicial'): + self.n = 2 ** self.dim + 1 + self.nc = 0 # self.n + if self.iters is None: + self.iters = 1 + if (self.n is None) and not (sampling_method == 'simplicial'): + self.n = self.n = 100 + self.nc = 0 # self.n + if (self.n == 100) and (sampling_method == 'simplicial'): + self.n = 2 ** self.dim + 1 + + if not ((self.maxiter is None) and (self.maxfev is None) and ( + self.maxev is None) + and (self.minhgrd is None) and (self.f_min_true is None)): + self.iters = None + + # Set complex construction mode based on a provided stopping criteria: + # Initialise sampling Complex and function cache + # Note that sfield_args=() since args are already wrapped in self.func + # using the_FunctionWrapper class. + self.HC = Complex(dim=self.dim, domain=self.bounds, + sfield=self.func, sfield_args=(), + symmetry=self.symmetry, + constraints=self.constraints, + workers=workers) + + # Choose complex constructor + if sampling_method == 'simplicial': + self.iterate_complex = self.iterate_hypercube + self.sampling_method = sampling_method + + elif sampling_method in ['halton', 'sobol'] or \ + not isinstance(sampling_method, str): + self.iterate_complex = self.iterate_delaunay + # Sampling method used + if sampling_method in ['halton', 'sobol']: + if sampling_method == 'sobol': + self.n = int(2 ** np.ceil(np.log2(self.n))) + # self.n #TODO: Should always be self.n, this is + # unacceptable for shgo, check that nfev behaves as + # expected. + self.nc = 0 + self.sampling_method = 'sobol' + self.qmc_engine = qmc.Sobol(d=self.dim, scramble=False, + seed=0) + else: + self.sampling_method = 'halton' + self.qmc_engine = qmc.Halton(d=self.dim, scramble=True, + seed=0) + + def sampling_method(n, d): + return self.qmc_engine.random(n) + + else: + # A user defined sampling method: + self.sampling_method = 'custom' + + self.sampling = self.sampling_custom + self.sampling_function = sampling_method # F(n, d) + + # Local controls + self.stop_l_iter = False # Local minimisation iterations + self.stop_complex_iter = False # Sampling iterations + + # Initiate storage objects used in algorithm classes + self.minimizer_pool = [] + + # Cache of local minimizers mapped + self.LMC = LMapCache() + + # Initialize return object + self.res = OptimizeResult() # scipy.optimize.OptimizeResult object + self.res.nfev = 0 # Includes each sampling point as func evaluation + self.res.nlfev = 0 # Local function evals for all minimisers + self.res.nljev = 0 # Local Jacobian evals for all minimisers + self.res.nlhev = 0 # Local Hessian evals for all minimisers + + # Initiation aids + def init_options(self, options): + """ + Initiates the options. + + Can also be useful to change parameters after class initiation. 
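        In normal use these settings arrive through the ``options`` dict of
        the top-level `shgo` call rather than by invoking this method
        directly. A minimal sketch (the objective and bounds below are
        illustrative only):

        >>> def f(x):
        ...     return x[0]**2 + x[1]**2
        >>> res = shgo(f, bounds=[(-1., 1.), (-1., 1.)],
        ...            options={'f_min': 0.0, 'f_tol': 1e-6,
        ...                     'maxfev': 1000, 'disp': False})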
+ + Parameters + ---------- + options : dict + + Returns + ------- + None + + """ + # Update 'options' dict passed to optimize.minimize + # Do this first so we don't mutate `options` below. + self.minimizer_kwargs['options'].update(options) + + # Ensure that 'jac', 'hess', and 'hessp' are passed directly to + # `minimize` as keywords, not as part of its 'options' dictionary. + for opt in ['jac', 'hess', 'hessp']: + if opt in self.minimizer_kwargs['options']: + self.minimizer_kwargs[opt] = ( + self.minimizer_kwargs['options'].pop(opt)) + + # Default settings: + self.minimize_every_iter = options.get('minimize_every_iter', True) + + # Algorithm limits + # Maximum number of iterations to perform. + self.maxiter = options.get('maxiter', None) + # Maximum number of function evaluations in the feasible domain + self.maxfev = options.get('maxfev', None) + # Maximum number of sampling evaluations (includes searching in + # infeasible points + self.maxev = options.get('maxev', None) + # Maximum processing runtime allowed + self.init = time.time() + self.maxtime = options.get('maxtime', None) + if 'f_min' in options: + # Specify the minimum objective function value, if it is known. + self.f_min_true = options['f_min'] + self.f_tol = options.get('f_tol', 1e-4) + else: + self.f_min_true = None + + self.minhgrd = options.get('minhgrd', None) + + # Objective function knowledge + self.symmetry = options.get('symmetry', False) + if self.symmetry: + self.symmetry = [0, ]*len(self.bounds) + else: + self.symmetry = None + # Algorithm functionality + # Only evaluate a few of the best candidates + self.local_iter = options.get('local_iter', False) + self.infty_cons_sampl = options.get('infty_constraints', True) + + # Feedback + self.disp = options.get('disp', False) + + def __enter__(self): + return self + + def __exit__(self, *args): + return self.HC.V._mapwrapper.__exit__(*args) + + # Iteration properties + # Main construction loop: + def iterate_all(self): + """ + Construct for `iters` iterations. + + If uniform sampling is used, every iteration adds 'n' sampling points. + + Iterations if a stopping criteria (e.g., sampling points or + processing time) has been met. + + """ + if self.disp: + logging.info('Splitting first generation') + + while not self.stop_global: + if self.break_routine: + break + # Iterate complex, process minimisers + self.iterate() + self.stopping_criteria() + + # Build minimiser pool + # Final iteration only needed if pools weren't minimised every + # iteration + if not self.minimize_every_iter: + if not self.break_routine: + self.find_minima() + + self.res.nit = self.iters_done # + 1 + self.fn = self.HC.V.nfev + + def find_minima(self): + """ + Construct the minimizer pool, map the minimizers to local minima + and sort the results into a global return object. 
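        After `iterate_all` has driven this routine, the sorted results are
        exposed on ``self.res`` (``res.x``/``res.fun`` for the best minimum,
        ``res.xl``/``res.funl`` for the ordered pool), mirroring the wrapper
        pattern used by `shgo` itself. Roughly (``f`` and ``bounds`` stand for
        any objective and bounds accepted by `shgo`):

        >>> with SHGO(f, bounds, sampling_method='simplicial') as shc:
        ...     shc.iterate_all()
        >>> shc.res.x, shc.res.fun  # global minimizer and its function value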
+ """ + if self.disp: + logging.info('Searching for minimizer pool...') + + self.minimizers() + + if len(self.X_min) != 0: + # Minimize the pool of minimizers with local minimization methods + # Note that if Options['local_iter'] is an `int` instead of default + # value False then only that number of candidates will be minimized + self.minimise_pool(self.local_iter) + # Sort results and build the global return object + self.sort_result() + + # Lowest values used to report in case of failures + self.f_lowest = self.res.fun + self.x_lowest = self.res.x + else: + self.find_lowest_vertex() + + if self.disp: + logging.info(f"Minimiser pool = SHGO.X_min = {self.X_min}") + + def find_lowest_vertex(self): + # Find the lowest objective function value on one of + # the vertices of the simplicial complex + self.f_lowest = np.inf + for x in self.HC.V.cache: + if self.HC.V[x].f < self.f_lowest: + if self.disp: + logging.info(f'self.HC.V[x].f = {self.HC.V[x].f}') + self.f_lowest = self.HC.V[x].f + self.x_lowest = self.HC.V[x].x_a + for lmc in self.LMC.cache: + if self.LMC[lmc].f_min < self.f_lowest: + self.f_lowest = self.LMC[lmc].f_min + self.x_lowest = self.LMC[lmc].x_l + + if self.f_lowest == np.inf: # no feasible point + self.f_lowest = None + self.x_lowest = None + + # Stopping criteria functions: + def finite_iterations(self): + mi = min(x for x in [self.iters, self.maxiter] if x is not None) + if self.disp: + logging.info(f'Iterations done = {self.iters_done} / {mi}') + if self.iters is not None: + if self.iters_done >= (self.iters): + self.stop_global = True + + if self.maxiter is not None: # Stop for infeasible sampling + if self.iters_done >= (self.maxiter): + self.stop_global = True + return self.stop_global + + def finite_fev(self): + # Finite function evals in the feasible domain + if self.disp: + logging.info(f'Function evaluations done = {self.fn} / {self.maxfev}') + if self.fn >= self.maxfev: + self.stop_global = True + return self.stop_global + + def finite_ev(self): + # Finite evaluations including infeasible sampling points + if self.disp: + logging.info(f'Sampling evaluations done = {self.n_sampled} ' + f'/ {self.maxev}') + if self.n_sampled >= self.maxev: + self.stop_global = True + + def finite_time(self): + if self.disp: + logging.info(f'Time elapsed = {time.time() - self.init} ' + f'/ {self.maxtime}') + if (time.time() - self.init) >= self.maxtime: + self.stop_global = True + + def finite_precision(self): + """ + Stop the algorithm if the final function value is known + + Specify in options (with ``self.f_min_true = options['f_min']``) + and the tolerance with ``f_tol = options['f_tol']`` + """ + # If no minimizer has been found use the lowest sampling value + self.find_lowest_vertex() + if self.disp: + logging.info(f'Lowest function evaluation = {self.f_lowest}') + logging.info(f'Specified minimum = {self.f_min_true}') + # If no feasible point was return from test + if self.f_lowest is None: + return self.stop_global + + # Function to stop algorithm at specified percentage error: + if self.f_min_true == 0.0: + if self.f_lowest <= self.f_tol: + self.stop_global = True + else: + pe = (self.f_lowest - self.f_min_true) / abs(self.f_min_true) + if self.f_lowest <= self.f_min_true: + self.stop_global = True + # 2if (pe - self.f_tol) <= abs(1.0 / abs(self.f_min_true)): + if abs(pe) >= 2 * self.f_tol: + warnings.warn( + f"A much lower value than expected f* = {self.f_min_true} " + f"was found f_lowest = {self.f_lowest}", + stacklevel=3 + ) + if pe <= self.f_tol: + self.stop_global = 
True + + return self.stop_global + + def finite_homology_growth(self): + """ + Stop the algorithm if homology group rank did not grow in iteration. + """ + if self.LMC.size == 0: + return # pass on no reason to stop yet. + self.hgrd = self.LMC.size - self.hgr + + self.hgr = self.LMC.size + if self.hgrd <= self.minhgrd: + self.stop_global = True + if self.disp: + logging.info(f'Current homology growth = {self.hgrd} ' + f' (minimum growth = {self.minhgrd})') + return self.stop_global + + def stopping_criteria(self): + """ + Various stopping criteria ran every iteration + + Returns + ------- + stop : bool + """ + if self.maxiter is not None: + self.finite_iterations() + if self.iters is not None: + self.finite_iterations() + if self.maxfev is not None: + self.finite_fev() + if self.maxev is not None: + self.finite_ev() + if self.maxtime is not None: + self.finite_time() + if self.f_min_true is not None: + self.finite_precision() + if self.minhgrd is not None: + self.finite_homology_growth() + return self.stop_global + + def iterate(self): + self.iterate_complex() + + # Build minimizer pool + if self.minimize_every_iter: + if not self.break_routine: + self.find_minima() # Process minimizer pool + + # Algorithm updates + self.iters_done += 1 + + def iterate_hypercube(self): + """ + Iterate a subdivision of the complex + + Note: called with ``self.iterate_complex()`` after class initiation + """ + # Iterate the complex + if self.disp: + logging.info('Constructing and refining simplicial complex graph ' + 'structure') + if self.n is None: + self.HC.refine_all() + self.n_sampled = self.HC.V.size() # nevs counted + else: + self.HC.refine(self.n) + self.n_sampled += self.n + + if self.disp: + logging.info('Triangulation completed, evaluating all constraints ' + 'and objective function values.') + + # Re-add minimisers to complex + if len(self.LMC.xl_maps) > 0: + for xl in self.LMC.cache: + v = self.HC.V[xl] + v_near = v.star() + for v in v.nn: + v_near = v_near.union(v.nn) + # Reconnect vertices to complex + # if self.HC.connect_vertex_non_symm(tuple(self.LMC[xl].x_l), + # near=v_near): + # continue + # else: + # If failure to find in v_near, then search all vertices + # (very expensive operation: + # self.HC.connect_vertex_non_symm(tuple(self.LMC[xl].x_l) + # ) + + # Evaluate all constraints and functions + self.HC.V.process_pools() + if self.disp: + logging.info('Evaluations completed.') + + # feasible sampling points counted by the triangulation.py routines + self.fn = self.HC.V.nfev + return + + def iterate_delaunay(self): + """ + Build a complex of Delaunay triangulated points + + Note: called with ``self.iterate_complex()`` after class initiation + """ + self.nc += self.n + self.sampled_surface(infty_cons_sampl=self.infty_cons_sampl) + + # Add sampled points to a triangulation, construct self.Tri + if self.disp: + logging.info(f'self.n = {self.n}') + logging.info(f'self.nc = {self.nc}') + logging.info('Constructing and refining simplicial complex graph ' + 'structure from sampling points.') + + if self.dim < 2: + self.Ind_sorted = np.argsort(self.C, axis=0) + self.Ind_sorted = self.Ind_sorted.flatten() + tris = [] + for ind, ind_s in enumerate(self.Ind_sorted): + if ind > 0: + tris.append(self.Ind_sorted[ind - 1:ind + 1]) + + tris = np.array(tris) + # Store 1D triangulation: + self.Tri = namedtuple('Tri', ['points', 'simplices'])(self.C, tris) + self.points = {} + else: + if self.C.shape[0] > self.dim + 1: # Ensure a simplex can be built + self.delaunay_triangulation(n_prc=self.n_prc) + 
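                # Rough sketch of the incremental pattern that
                # `delaunay_triangulation` (defined below) relies on;
                # `points` and `new_points` are placeholders:
                #   tri = spatial.Delaunay(points, incremental=True)
                #   tri.add_points(new_points)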
self.n_prc = self.C.shape[0] + + if self.disp: + logging.info('Triangulation completed, evaluating all ' + 'constraints and objective function values.') + + if hasattr(self, 'Tri'): + self.HC.vf_to_vv(self.Tri.points, self.Tri.simplices) + + # Process all pools + # Evaluate all constraints and functions + if self.disp: + logging.info('Triangulation completed, evaluating all constraints ' + 'and objective function values.') + + # Evaluate all constraints and functions + self.HC.V.process_pools() + if self.disp: + logging.info('Evaluations completed.') + + # feasible sampling points counted by the triangulation.py routines + self.fn = self.HC.V.nfev + self.n_sampled = self.nc # nevs counted in triangulation + return + + # Hypercube minimizers + def minimizers(self): + """ + Returns the indexes of all minimizers + """ + self.minimizer_pool = [] + # Note: Can implement parallelization here + for x in self.HC.V.cache: + in_LMC = False + if len(self.LMC.xl_maps) > 0: + for xlmi in self.LMC.xl_maps: + if np.all(np.array(x) == np.array(xlmi)): + in_LMC = True + if in_LMC: + continue + + if self.HC.V[x].minimiser(): + if self.disp: + logging.info('=' * 60) + logging.info(f'v.x = {self.HC.V[x].x_a} is minimizer') + logging.info(f'v.f = {self.HC.V[x].f} is minimizer') + logging.info('=' * 30) + + if self.HC.V[x] not in self.minimizer_pool: + self.minimizer_pool.append(self.HC.V[x]) + + if self.disp: + logging.info('Neighbors:') + logging.info('=' * 30) + for vn in self.HC.V[x].nn: + logging.info(f'x = {vn.x} || f = {vn.f}') + + logging.info('=' * 60) + self.minimizer_pool_F = [] + self.X_min = [] + # normalized tuple in the Vertex cache + self.X_min_cache = {} # Cache used in hypercube sampling + + for v in self.minimizer_pool: + self.X_min.append(v.x_a) + self.minimizer_pool_F.append(v.f) + self.X_min_cache[tuple(v.x_a)] = v.x + + self.minimizer_pool_F = np.array(self.minimizer_pool_F) + self.X_min = np.array(self.X_min) + + # TODO: Only do this if global mode + self.sort_min_pool() + + return self.X_min + + # Local minimisation + # Minimiser pool processing + def minimise_pool(self, force_iter=False): + """ + This processing method can optionally minimise only the best candidate + solutions in the minimiser pool + + Parameters + ---------- + force_iter : int + Number of starting minimizers to process (can be specified + globally or locally) + + """ + # Find first local minimum + # NOTE: Since we always minimize this value regardless it is a waste to + # build the topograph first before minimizing + lres_f_min = self.minimize(self.X_min[0], ind=self.minimizer_pool[0]) + + # Trim minimized point from current minimizer set + self.trim_min_pool(0) + + while not self.stop_l_iter: + # Global stopping criteria: + self.stopping_criteria() + + # Note first iteration is outside loop: + if force_iter: + force_iter -= 1 + if force_iter == 0: + self.stop_l_iter = True + break + + if np.shape(self.X_min)[0] == 0: + self.stop_l_iter = True + break + + # Construct topograph from current minimizer set + # (NOTE: This is a very small topograph using only the minizer pool + # , it might be worth using some graph theory tools instead. 
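            # Rough sketch of the ordering that `g_topograph` builds below
            # (values illustrative): candidates are sorted by Euclidean
            # distance from the current solution,
            #   Y = spatial.distance.cdist([lres_f_min.x], self.X_min)
            #   Z = np.argsort(Y, axis=-1)  # nearest ... farthest
            # and the farthest remaining candidate is minimised next.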
+ self.g_topograph(lres_f_min.x, self.X_min) + + # Find local minimum at the miniser with the greatest Euclidean + # distance from the current solution + ind_xmin_l = self.Z[:, -1] + lres_f_min = self.minimize(self.Ss[-1, :], self.minimizer_pool[-1]) + + # Trim minimised point from current minimizer set + self.trim_min_pool(ind_xmin_l) + + # Reset controls + self.stop_l_iter = False + return + + def sort_min_pool(self): + # Sort to find minimum func value in min_pool + self.ind_f_min = np.argsort(self.minimizer_pool_F) + self.minimizer_pool = np.array(self.minimizer_pool)[self.ind_f_min] + self.minimizer_pool_F = np.array(self.minimizer_pool_F)[ + self.ind_f_min] + return + + def trim_min_pool(self, trim_ind): + self.X_min = np.delete(self.X_min, trim_ind, axis=0) + self.minimizer_pool_F = np.delete(self.minimizer_pool_F, trim_ind) + self.minimizer_pool = np.delete(self.minimizer_pool, trim_ind) + return + + def g_topograph(self, x_min, X_min): + """ + Returns the topographical vector stemming from the specified value + ``x_min`` for the current feasible set ``X_min`` with True boolean + values indicating positive entries and False values indicating + negative entries. + + """ + x_min = np.array([x_min]) + self.Y = spatial.distance.cdist(x_min, X_min, 'euclidean') + # Find sorted indexes of spatial distances: + self.Z = np.argsort(self.Y, axis=-1) + + self.Ss = X_min[self.Z][0] + self.minimizer_pool = self.minimizer_pool[self.Z] + self.minimizer_pool = self.minimizer_pool[0] + return self.Ss + + # Local bound functions + def construct_lcb_simplicial(self, v_min): + """ + Construct locally (approximately) convex bounds + + Parameters + ---------- + v_min : Vertex object + The minimizer vertex + + Returns + ------- + cbounds : list of lists + List of size dimension with length-2 list of bounds for each + dimension. + + """ + cbounds = [[x_b_i[0], x_b_i[1]] for x_b_i in self.bounds] + # Loop over all bounds + for vn in v_min.nn: + for i, x_i in enumerate(vn.x_a): + # Lower bound + if (x_i < v_min.x_a[i]) and (x_i > cbounds[i][0]): + cbounds[i][0] = x_i + + # Upper bound + if (x_i > v_min.x_a[i]) and (x_i < cbounds[i][1]): + cbounds[i][1] = x_i + + if self.disp: + logging.info(f'cbounds found for v_min.x_a = {v_min.x_a}') + logging.info(f'cbounds = {cbounds}') + + return cbounds + + def construct_lcb_delaunay(self, v_min, ind=None): + """ + Construct locally (approximately) convex bounds + + Parameters + ---------- + v_min : Vertex object + The minimizer vertex + + Returns + ------- + cbounds : list of lists + List of size dimension with length-2 list of bounds for each + dimension. + """ + cbounds = [[x_b_i[0], x_b_i[1]] for x_b_i in self.bounds] + + return cbounds + + # Minimize a starting point locally + def minimize(self, x_min, ind=None): + """ + This function is used to calculate the local minima using the specified + sampling point as a starting value. + + Parameters + ---------- + x_min : vector of floats + Current starting point to minimize. + + Returns + ------- + lres : OptimizeResult + The local optimization result represented as a `OptimizeResult` + object. 
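        Internally this reduces to a call of the form (a sketch; ``g_bounds``
        are the locally convex bounds constructed above and the remaining
        keywords come from ``self.minimizer_kwargs``):

        >>> lres = minimize(self.func, x_min, method='SLSQP',
        ...                 bounds=g_bounds, options={'ftol': 1e-12})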
+ """ + # Use minima maps if vertex was already run + if self.disp: + logging.info(f'Vertex minimiser maps = {self.LMC.v_maps}') + + if self.LMC[x_min].lres is not None: + logging.info(f'Found self.LMC[x_min].lres = ' + f'{self.LMC[x_min].lres}') + return self.LMC[x_min].lres + + if self.callback is not None: + logging.info(f'Callback for minimizer starting at {x_min}:') + + if self.disp: + logging.info(f'Starting minimization at {x_min}...') + + if self.sampling_method == 'simplicial': + x_min_t = tuple(x_min) + # Find the normalized tuple in the Vertex cache: + x_min_t_norm = self.X_min_cache[tuple(x_min_t)] + x_min_t_norm = tuple(x_min_t_norm) + g_bounds = self.construct_lcb_simplicial(self.HC.V[x_min_t_norm]) + if 'bounds' in self.min_solver_args: + self.minimizer_kwargs['bounds'] = g_bounds + logging.info(self.minimizer_kwargs['bounds']) + + else: + g_bounds = self.construct_lcb_delaunay(x_min, ind=ind) + if 'bounds' in self.min_solver_args: + self.minimizer_kwargs['bounds'] = g_bounds + logging.info(self.minimizer_kwargs['bounds']) + + if self.disp and 'bounds' in self.minimizer_kwargs: + logging.info('bounds in kwarg:') + logging.info(self.minimizer_kwargs['bounds']) + + # Local minimization using scipy.optimize.minimize: + lres = minimize(self.func, x_min, **self.minimizer_kwargs) + + if self.disp: + logging.info(f'lres = {lres}') + + # Local function evals for all minimizers + self.res.nlfev += lres.nfev + if 'njev' in lres: + self.res.nljev += lres.njev + if 'nhev' in lres: + self.res.nlhev += lres.nhev + + try: # Needed because of the brain dead 1x1 NumPy arrays + lres.fun = lres.fun[0] + except (IndexError, TypeError): + lres.fun + + # Append minima maps + self.LMC[x_min] + self.LMC.add_res(x_min, lres, bounds=g_bounds) + + return lres + + # Post local minimization processing + def sort_result(self): + """ + Sort results and build the global return object + """ + # Sort results in local minima cache + results = self.LMC.sort_cache_result() + self.res.xl = results['xl'] + self.res.funl = results['funl'] + self.res.x = results['x'] + self.res.fun = results['fun'] + + # Add local func evals to sampling func evals + # Count the number of feasible vertices and add to local func evals: + self.res.nfev = self.fn + self.res.nlfev + return self.res + + # Algorithm controls + def fail_routine(self, mes=("Failed to converge")): + self.break_routine = True + self.res.success = False + self.X_min = [None] + self.res.message = mes + + def sampled_surface(self, infty_cons_sampl=False): + """ + Sample the function surface. + + There are 2 modes, if ``infty_cons_sampl`` is True then the sampled + points that are generated outside the feasible domain will be + assigned an ``inf`` value in accordance with SHGO rules. + This guarantees convergence and usually requires less objective + function evaluations at the computational costs of more Delaunay + triangulation points. + + If ``infty_cons_sampl`` is False, then the infeasible points are + discarded and only a subspace of the sampled points are used. This + comes at the cost of the loss of guaranteed convergence and usually + requires more objective function evaluations. 
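        From the public interface this behaviour is toggled with the
        ``infty_constraints`` option, e.g. (a sketch; ``f``, ``bounds`` and
        ``cons`` are placeholders):

        >>> res = shgo(f, bounds, constraints=cons, sampling_method='sobol',
        ...            options={'infty_constraints': False})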
+ """ + # Generate sampling points + if self.disp: + logging.info('Generating sampling points') + self.sampling(self.nc, self.dim) + if len(self.LMC.xl_maps) > 0: + self.C = np.vstack((self.C, np.array(self.LMC.xl_maps))) + if not infty_cons_sampl: + # Find subspace of feasible points + if self.g_cons is not None: + self.sampling_subspace() + + # Sort remaining samples + self.sorted_samples() + + # Find objective function references + self.n_sampled = self.nc + + def sampling_custom(self, n, dim): + """ + Generates uniform sampling points in a hypercube and scales the points + to the bound limits. + """ + # Generate sampling points. + # Generate uniform sample points in [0, 1]^m \subset R^m + if self.n_sampled == 0: + self.C = self.sampling_function(n, dim) + else: + self.C = self.sampling_function(n, dim) + # Distribute over bounds + for i in range(len(self.bounds)): + self.C[:, i] = (self.C[:, i] * + (self.bounds[i][1] - self.bounds[i][0]) + + self.bounds[i][0]) + return self.C + + def sampling_subspace(self): + """Find subspace of feasible points from g_func definition""" + # Subspace of feasible points. + for ind, g in enumerate(self.g_cons): + # C.shape = (Z, dim) where Z is the number of sampling points to + # evaluate and dim is the dimensionality of the problem. + # the constraint function may not be vectorised so have to step + # through each sampling point sequentially. + feasible = np.array( + [np.all(g(x_C, *self.g_args[ind]) >= 0.0) for x_C in self.C], + dtype=bool + ) + self.C = self.C[feasible] + + if self.C.size == 0: + self.res.message = ('No sampling point found within the ' + + 'feasible set. Increasing sampling ' + + 'size.') + # sampling correctly for both 1-D and >1-D cases + if self.disp: + logging.info(self.res.message) + + def sorted_samples(self): # Validated + """Find indexes of the sorted sampling points""" + self.Ind_sorted = np.argsort(self.C, axis=0) + self.Xs = self.C[self.Ind_sorted] + return self.Ind_sorted, self.Xs + + def delaunay_triangulation(self, n_prc=0): + if hasattr(self, 'Tri') and self.qhull_incremental: + # TODO: Uncertain if n_prc needs to add len(self.LMC.xl_maps) + # in self.sampled_surface + self.Tri.add_points(self.C[n_prc:, :]) + else: + try: + self.Tri = spatial.Delaunay(self.C, + incremental=self.qhull_incremental, + ) + except spatial.QhullError: + if str(sys.exc_info()[1])[:6] == 'QH6239': + logging.warning('QH6239 Qhull precision error detected, ' + 'this usually occurs when no bounds are ' + 'specified, Qhull can only run with ' + 'handling cocircular/cospherical points' + ' and in this case incremental mode is ' + 'switched off. 
The performance of shgo ' + 'will be reduced in this mode.') + self.qhull_incremental = False + self.Tri = spatial.Delaunay(self.C, + incremental= + self.qhull_incremental) + else: + raise + + return self.Tri + + +class LMap: + def __init__(self, v): + self.v = v + self.x_l = None + self.lres = None + self.f_min = None + self.lbounds = [] + + +class LMapCache: + def __init__(self): + self.cache = {} + + # Lists for search queries + self.v_maps = [] + self.xl_maps = [] + self.xl_maps_set = set() + self.f_maps = [] + self.lbound_maps = [] + self.size = 0 + + def __getitem__(self, v): + try: + v = np.ndarray.tolist(v) + except TypeError: + pass + v = tuple(v) + try: + return self.cache[v] + except KeyError: + xval = LMap(v) + self.cache[v] = xval + + return self.cache[v] + + def add_res(self, v, lres, bounds=None): + v = np.ndarray.tolist(v) + v = tuple(v) + self.cache[v].x_l = lres.x + self.cache[v].lres = lres + self.cache[v].f_min = lres.fun + self.cache[v].lbounds = bounds + + # Update cache size + self.size += 1 + + # Cache lists for search queries + self.v_maps.append(v) + self.xl_maps.append(lres.x) + self.xl_maps_set.add(tuple(lres.x)) + self.f_maps.append(lres.fun) + self.lbound_maps.append(bounds) + + def sort_cache_result(self): + """ + Sort results and build the global return object + """ + results = {} + # Sort results and save + self.xl_maps = np.array(self.xl_maps) + self.f_maps = np.array(self.f_maps) + + # Sorted indexes in Func_min + ind_sorted = np.argsort(self.f_maps) + + # Save ordered list of minima + results['xl'] = self.xl_maps[ind_sorted] # Ordered x vals + self.f_maps = np.array(self.f_maps) + results['funl'] = self.f_maps[ind_sorted] + results['funl'] = results['funl'].T + + # Find global of all minimizers + results['x'] = self.xl_maps[ind_sorted[0]] # Save global minima + results['fun'] = self.f_maps[ind_sorted[0]] # Save global fun value + + self.xl_maps = np.ndarray.tolist(self.xl_maps) + self.f_maps = np.ndarray.tolist(self.f_maps) + return results diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__init__.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..86e3dce0d8c336a08e7f83200d2565a65b7b59fe Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/_complex.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/_complex.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..59e374fa8033f2e8e98ee85917fcb731f7061aac Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/_complex.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/_vertex.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/_vertex.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..1dd544353030cb7de874b30a144e49d17e1f45e4 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/_vertex.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/_complex.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/_complex.py new file mode 100644 index 0000000000000000000000000000000000000000..e725c00cc6d238008afb333b1cee9e3fc5400caa --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/_complex.py @@ -0,0 +1,1225 @@ +"""Base classes for low memory simplicial complex structures.""" +import copy +import logging +import itertools +import decimal +from functools import cache + +import numpy + +from ._vertex import (VertexCacheField, VertexCacheIndex) + + +class Complex: + """ + Base class for a simplicial complex described as a cache of vertices + together with their connections. + + Important methods: + Domain triangulation: + Complex.triangulate, Complex.split_generation + Triangulating arbitrary points (must be traingulable, + may exist outside domain): + Complex.triangulate(sample_set) + Converting another simplicial complex structure data type to the + structure used in Complex (ex. OBJ wavefront) + Complex.convert(datatype, data) + + Important objects: + HC.V: The cache of vertices and their connection + HC.H: Storage structure of all vertex groups + + Parameters + ---------- + dim : int + Spatial dimensionality of the complex R^dim + domain : list of tuples, optional + The bounds [x_l, x_u]^dim of the hyperrectangle space + ex. The default domain is the hyperrectangle [0, 1]^dim + Note: The domain must be convex, non-convex spaces can be cut + away from this domain using the non-linear + g_cons functions to define any arbitrary domain + (these domains may also be disconnected from each other) + sfield : + A scalar function defined in the associated domain f: R^dim --> R + sfield_args : tuple + Additional arguments to be passed to `sfield` + vfield : + A scalar function defined in the associated domain + f: R^dim --> R^m + (for example a gradient function of the scalar field) + vfield_args : tuple + Additional arguments to be passed to vfield + symmetry : None or list + Specify if the objective function contains symmetric variables. + The search space (and therefore performance) is decreased by up to + O(n!) times in the fully symmetric case. + + E.g. f(x) = (x_1 + x_2 + x_3) + (x_4)**2 + (x_5)**2 + (x_6)**2 + + In this equation x_2 and x_3 are symmetric to x_1, while x_5 and + x_6 are symmetric to x_4, this can be specified to the solver as: + + symmetry = [0, # Variable 1 + 0, # symmetric to variable 1 + 0, # symmetric to variable 1 + 3, # Variable 4 + 3, # symmetric to variable 4 + 3, # symmetric to variable 4 + ] + + constraints : dict or sequence of dict, optional + Constraints definition. + Function(s) ``R**n`` in the form:: + + g(x) <= 0 applied as g : R^n -> R^m + h(x) == 0 applied as h : R^n -> R^p + + Each constraint is defined in a dictionary with fields: + + type : str + Constraint type: 'eq' for equality, 'ineq' for inequality. + fun : callable + The function defining the constraint. + jac : callable, optional + The Jacobian of `fun` (only for SLSQP). + args : sequence, optional + Extra arguments to be passed to the function and Jacobian. 
+ + Equality constraint means that the constraint function result is to + be zero whereas inequality means that it is to be + non-negative.constraints : dict or sequence of dict, optional + Constraints definition. + Function(s) ``R**n`` in the form:: + + g(x) <= 0 applied as g : R^n -> R^m + h(x) == 0 applied as h : R^n -> R^p + + Each constraint is defined in a dictionary with fields: + + type : str + Constraint type: 'eq' for equality, 'ineq' for inequality. + fun : callable + The function defining the constraint. + jac : callable, optional + The Jacobian of `fun` (unused). + args : sequence, optional + Extra arguments to be passed to the function and Jacobian. + + Equality constraint means that the constraint function result is to + be zero whereas inequality means that it is to be non-negative. + + workers : int optional + Uses `multiprocessing.Pool `) to compute the field + functions in parallel. + """ + def __init__(self, dim, domain=None, sfield=None, sfield_args=(), + symmetry=None, constraints=None, workers=1): + self.dim = dim + + # Domains + self.domain = domain + if domain is None: + self.bounds = [(0.0, 1.0), ] * dim + else: + self.bounds = domain + self.symmetry = symmetry + # here in init to avoid if checks + + # Field functions + self.sfield = sfield + self.sfield_args = sfield_args + + # Process constraints + # Constraints + # Process constraint dict sequence: + if constraints is not None: + self.min_cons = constraints + self.g_cons = [] + self.g_args = [] + if not isinstance(constraints, (tuple, list)): + constraints = (constraints,) + + for cons in constraints: + if cons['type'] in ('ineq'): + self.g_cons.append(cons['fun']) + try: + self.g_args.append(cons['args']) + except KeyError: + self.g_args.append(()) + self.g_cons = tuple(self.g_cons) + self.g_args = tuple(self.g_args) + else: + self.g_cons = None + self.g_args = None + + # Homology properties + self.gen = 0 + self.perm_cycle = 0 + + # Every cell is stored in a list of its generation, + # ex. the initial cell is stored in self.H[0] + # 1st get new cells are stored in self.H[1] etc. + # When a cell is sub-generated it is removed from this list + + self.H = [] # Storage structure of vertex groups + + # Cache of all vertices + if (sfield is not None) or (self.g_cons is not None): + # Initiate a vertex cache and an associated field cache, note that + # the field case is always initiated inside the vertex cache if an + # associated field scalar field is defined: + if sfield is not None: + self.V = VertexCacheField(field=sfield, field_args=sfield_args, + g_cons=self.g_cons, + g_cons_args=self.g_args, + workers=workers) + elif self.g_cons is not None: + self.V = VertexCacheField(field=sfield, field_args=sfield_args, + g_cons=self.g_cons, + g_cons_args=self.g_args, + workers=workers) + else: + self.V = VertexCacheIndex() + + self.V_non_symm = [] # List of non-symmetric vertices + + def __call__(self): + return self.H + + # %% Triangulation methods + def cyclic_product(self, bounds, origin, supremum, centroid=True): + """Generate initial triangulation using cyclic product""" + # Define current hyperrectangle + vot = tuple(origin) + vut = tuple(supremum) # Hyperrectangle supremum + self.V[vot] + vo = self.V[vot] + yield vo.x + self.V[vut].connect(self.V[vot]) + yield vut + # Cyclic group approach with second x_l --- x_u operation. + + # These containers store the "lower" and "upper" vertices + # corresponding to the origin or supremum of every C2 group. 
+ # It has the structure of `dim` times embedded lists each containing + # these vertices as the entire complex grows. Bounds[0] has to be done + # outside the loops before we have symmetric containers. + # NOTE: This means that bounds[0][1] must always exist + C0x = [[self.V[vot]]] + a_vo = copy.copy(list(origin)) + a_vo[0] = vut[0] # Update aN Origin + a_vo = self.V[tuple(a_vo)] + # self.V[vot].connect(self.V[tuple(a_vo)]) + self.V[vot].connect(a_vo) + yield a_vo.x + C1x = [[a_vo]] + # C1x = [[self.V[tuple(a_vo)]]] + ab_C = [] # Container for a + b operations + + # Loop over remaining bounds + for i, x in enumerate(bounds[1:]): + # Update lower and upper containers + C0x.append([]) + C1x.append([]) + # try to access a second bound (if not, C1 is symmetric) + try: + # Early try so that we don't have to copy the cache before + # moving on to next C1/C2: Try to add the operation of a new + # C2 product by accessing the upper bound + x[1] + # Copy lists for iteration + cC0x = [x[:] for x in C0x[:i + 1]] + cC1x = [x[:] for x in C1x[:i + 1]] + for j, (VL, VU) in enumerate(zip(cC0x, cC1x)): + for k, (vl, vu) in enumerate(zip(VL, VU)): + # Build aN vertices for each lower-upper pair in N: + a_vl = list(vl.x) + a_vu = list(vu.x) + a_vl[i + 1] = vut[i + 1] + a_vu[i + 1] = vut[i + 1] + a_vl = self.V[tuple(a_vl)] + + # Connect vertices in N to corresponding vertices + # in aN: + vl.connect(a_vl) + + yield a_vl.x + + a_vu = self.V[tuple(a_vu)] + # Connect vertices in N to corresponding vertices + # in aN: + vu.connect(a_vu) + + # Connect new vertex pair in aN: + a_vl.connect(a_vu) + + # Connect lower pair to upper (triangulation + # operation of a + b (two arbitrary operations): + vl.connect(a_vu) + ab_C.append((vl, a_vu)) + + # Update the containers + C0x[i + 1].append(vl) + C0x[i + 1].append(vu) + C1x[i + 1].append(a_vl) + C1x[i + 1].append(a_vu) + + # Update old containers + C0x[j].append(a_vl) + C1x[j].append(a_vu) + + # Yield new points + yield a_vu.x + + # Try to connect aN lower source of previous a + b + # operation with a aN vertex + ab_Cc = copy.copy(ab_C) + + for vp in ab_Cc: + b_v = list(vp[0].x) + ab_v = list(vp[1].x) + b_v[i + 1] = vut[i + 1] + ab_v[i + 1] = vut[i + 1] + b_v = self.V[tuple(b_v)] # b + vl + ab_v = self.V[tuple(ab_v)] # b + a_vl + # Note o---o is already connected + vp[0].connect(ab_v) # o-s + b_v.connect(ab_v) # s-s + + # Add new list of cross pairs + ab_C.append((vp[0], ab_v)) + ab_C.append((b_v, ab_v)) + + except IndexError: + cC0x = C0x[i] + cC1x = C1x[i] + VL, VU = cC0x, cC1x + for k, (vl, vu) in enumerate(zip(VL, VU)): + # Build aN vertices for each lower-upper pair in N: + a_vu = list(vu.x) + a_vu[i + 1] = vut[i + 1] + # Connect vertices in N to corresponding vertices + # in aN: + a_vu = self.V[tuple(a_vu)] + # Connect vertices in N to corresponding vertices + # in aN: + vu.connect(a_vu) + # Connect new vertex pair in aN: + # a_vl.connect(a_vu) + # Connect lower pair to upper (triangulation + # operation of a + b (two arbitrary operations): + vl.connect(a_vu) + ab_C.append((vl, a_vu)) + C0x[i + 1].append(vu) + C1x[i + 1].append(a_vu) + # Yield new points + a_vu.connect(self.V[vut]) + yield a_vu.x + ab_Cc = copy.copy(ab_C) + for vp in ab_Cc: + if vp[1].x[i] == vut[i]: + ab_v = list(vp[1].x) + ab_v[i + 1] = vut[i + 1] + ab_v = self.V[tuple(ab_v)] # b + a_vl + # Note o---o is already connected + vp[0].connect(ab_v) # o-s + + # Add new list of cross pairs + ab_C.append((vp[0], ab_v)) + + # Clean class trash + try: + del C0x + del cC0x + del C1x + del cC1x + del ab_C + 
del ab_Cc + except UnboundLocalError: + pass + + # Extra yield to ensure that the triangulation is completed + if centroid: + vo = self.V[vot] + vs = self.V[vut] + # Disconnect the origin and supremum + vo.disconnect(vs) + # Build centroid + vc = self.split_edge(vot, vut) + for v in vo.nn: + v.connect(vc) + yield vc.x + return vc.x + else: + yield vut + return vut + + def triangulate(self, n=None, symmetry=None, centroid=True, + printout=False): + """ + Triangulate the initial domain, if n is not None then a limited number + of points will be generated + + Parameters + ---------- + n : int, Number of points to be sampled. + symmetry : + + Ex. Dictionary/hashtable + f(x) = (x_1 + x_2 + x_3) + (x_4)**2 + (x_5)**2 + (x_6)**2 + + symmetry = symmetry[0]: 0, # Variable 1 + symmetry[1]: 0, # symmetric to variable 1 + symmetry[2]: 0, # symmetric to variable 1 + symmetry[3]: 3, # Variable 4 + symmetry[4]: 3, # symmetric to variable 4 + symmetry[5]: 3, # symmetric to variable 4 + } + centroid : bool, if True add a central point to the hypercube + printout : bool, if True print out results + + NOTES: + ------ + Rather than using the combinatorial algorithm to connect vertices we + make the following observation: + + The bound pairs are similar a C2 cyclic group and the structure is + formed using the cartesian product: + + H = C2 x C2 x C2 ... x C2 (dim times) + + So construct any normal subgroup N and consider H/N first, we connect + all vertices within N (ex. N is C2 (the first dimension), then we move + to a left coset aN (an operation moving around the defined H/N group by + for example moving from the lower bound in C2 (dimension 2) to the + higher bound in C2. During this operation connection all the vertices. + Now repeat the N connections. Note that these elements can be connected + in parallel. 
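        Concretely, the vertex set of the initial cell is the Cartesian
        product of the per-dimension bound pairs; in two dimensions this is
        simply (a conceptual sketch, not the internal generator):

        >>> import itertools
        >>> list(itertools.product((0.0, 1.0), repeat=2))
        [(0.0, 0.0), (0.0, 1.0), (1.0, 0.0), (1.0, 1.0)]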
+ """ + # Inherit class arguments + if symmetry is None: + symmetry = self.symmetry + # Build origin and supremum vectors + origin = [i[0] for i in self.bounds] + self.origin = origin + supremum = [i[1] for i in self.bounds] + + self.supremum = supremum + + if symmetry is None: + cbounds = self.bounds + else: + cbounds = copy.copy(self.bounds) + for i, j in enumerate(symmetry): + if i is not j: + # pop second entry on second symmetry vars + cbounds[i] = [self.bounds[symmetry[i]][0]] + # Sole (first) entry is the sup value and there is no + # origin: + cbounds[i] = [self.bounds[symmetry[i]][1]] + if (self.bounds[symmetry[i]] is not + self.bounds[symmetry[j]]): + logging.warning(f"Variable {i} was specified as " + f"symmetetric to variable {j}, however" + f", the bounds {i} =" + f" {self.bounds[symmetry[i]]} and {j}" + f" =" + f" {self.bounds[symmetry[j]]} do not " + f"match, the mismatch was ignored in " + f"the initial triangulation.") + cbounds[i] = self.bounds[symmetry[j]] + + if n is None: + # Build generator + self.cp = self.cyclic_product(cbounds, origin, supremum, centroid) + for i in self.cp: + i + + try: + self.triangulated_vectors.append((tuple(self.origin), + tuple(self.supremum))) + except (AttributeError, KeyError): + self.triangulated_vectors = [(tuple(self.origin), + tuple(self.supremum))] + + else: + # Check if generator already exists + try: + self.cp + except (AttributeError, KeyError): + self.cp = self.cyclic_product(cbounds, origin, supremum, + centroid) + + try: + while len(self.V.cache) < n: + next(self.cp) + except StopIteration: + try: + self.triangulated_vectors.append((tuple(self.origin), + tuple(self.supremum))) + except (AttributeError, KeyError): + self.triangulated_vectors = [(tuple(self.origin), + tuple(self.supremum))] + + if printout: + # for v in self.C0(): + # v.print_out() + for v in self.V.cache: + self.V[v].print_out() + + return + + def refine(self, n=1): + if n is None: + try: + self.triangulated_vectors + self.refine_all() + return + except AttributeError as ae: + if str(ae) == "'Complex' object has no attribute " \ + "'triangulated_vectors'": + self.triangulate(symmetry=self.symmetry) + return + else: + raise + + nt = len(self.V.cache) + n # Target number of total vertices + # In the outer while loop we iterate until we have added an extra `n` + # vertices to the complex: + while len(self.V.cache) < nt: # while loop 1 + try: # try 1 + # Try to access triangulated_vectors, this should only be + # defined if an initial triangulation has already been + # performed: + self.triangulated_vectors + # Try a usual iteration of the current generator, if it + # does not exist or is exhausted then produce a new generator + try: # try 2 + next(self.rls) + except (AttributeError, StopIteration, KeyError): + vp = self.triangulated_vectors[0] + self.rls = self.refine_local_space(*vp, bounds=self.bounds) + next(self.rls) + + except (AttributeError, KeyError): + # If an initial triangulation has not been completed, then + # we start/continue the initial triangulation targeting `nt` + # vertices, if nt is greater than the initial number of + # vertices then the `refine` routine will move back to try 1. 
+ self.triangulate(nt, self.symmetry) + return + + def refine_all(self, centroids=True): + """Refine the entire domain of the current complex.""" + try: + self.triangulated_vectors + tvs = copy.copy(self.triangulated_vectors) + for i, vp in enumerate(tvs): + self.rls = self.refine_local_space(*vp, bounds=self.bounds) + for i in self.rls: + i + except AttributeError as ae: + if str(ae) == "'Complex' object has no attribute " \ + "'triangulated_vectors'": + self.triangulate(symmetry=self.symmetry, centroid=centroids) + else: + raise + + # This adds a centroid to every new sub-domain generated and defined + # by self.triangulated_vectors, in addition the vertices ! to complete + # the triangulation + return + + def refine_local_space(self, origin, supremum, bounds, centroid=1): + # Copy for later removal + origin_c = copy.copy(origin) + supremum_c = copy.copy(supremum) + + # Initiate local variables redefined in later inner `for` loop: + vl, vu, a_vu = None, None, None + + # Change the vector orientation so that it is only increasing + s_ov = list(origin) + s_origin = list(origin) + s_sv = list(supremum) + s_supremum = list(supremum) + for i, vi in enumerate(s_origin): + if s_ov[i] > s_sv[i]: + s_origin[i] = s_sv[i] + s_supremum[i] = s_ov[i] + + vot = tuple(s_origin) + vut = tuple(s_supremum) # Hyperrectangle supremum + + vo = self.V[vot] # initiate if doesn't exist yet + vs = self.V[vut] + # Start by finding the old centroid of the new space: + vco = self.split_edge(vo.x, vs.x) # Split in case not centroid arg + + # Find set of extreme vertices in current local space + sup_set = copy.copy(vco.nn) + # Cyclic group approach with second x_l --- x_u operation. + + # These containers store the "lower" and "upper" vertices + # corresponding to the origin or supremum of every C2 group. + # It has the structure of `dim` times embedded lists each containing + # these vertices as the entire complex grows. Bounds[0] has to be done + # outside the loops before we have symmetric containers. + # NOTE: This means that bounds[0][1] must always exist + + a_vl = copy.copy(list(vot)) + a_vl[0] = vut[0] # Update aN Origin + if tuple(a_vl) not in self.V.cache: + vo = self.V[vot] # initiate if doesn't exist yet + vs = self.V[vut] + # Start by finding the old centroid of the new space: + vco = self.split_edge(vo.x, vs.x) # Split in case not centroid arg + + # Find set of extreme vertices in current local space + sup_set = copy.copy(vco.nn) + a_vl = copy.copy(list(vot)) + a_vl[0] = vut[0] # Update aN Origin + a_vl = self.V[tuple(a_vl)] + else: + a_vl = self.V[tuple(a_vl)] + + c_v = self.split_edge(vo.x, a_vl.x) + c_v.connect(vco) + yield c_v.x + Cox = [[vo]] + Ccx = [[c_v]] + Cux = [[a_vl]] + ab_C = [] # Container for a + b operations + s_ab_C = [] # Container for symmetric a + b operations + + # Loop over remaining bounds + for i, x in enumerate(bounds[1:]): + # Update lower and upper containers + Cox.append([]) + Ccx.append([]) + Cux.append([]) + # try to access a second bound (if not, C1 is symmetric) + try: + t_a_vl = list(vot) + t_a_vl[i + 1] = vut[i + 1] + + # New: lists are used anyway, so copy all + # %% + # Copy lists for iteration + cCox = [x[:] for x in Cox[:i + 1]] + cCcx = [x[:] for x in Ccx[:i + 1]] + cCux = [x[:] for x in Cux[:i + 1]] + # Try to connect aN lower source of previous a + b + # operation with a aN vertex + ab_Cc = copy.copy(ab_C) # NOTE: We append ab_C in the + # (VL, VC, VU) for-loop, but we use the copy of the list in the + # ab_Cc for-loop. 
+ s_ab_Cc = copy.copy(s_ab_C) + + # Early try so that we don't have to copy the cache before + # moving on to next C1/C2: Try to add the operation of a new + # C2 product by accessing the upper bound + if tuple(t_a_vl) not in self.V.cache: + # Raise error to continue symmetric refine + raise IndexError + t_a_vu = list(vut) + t_a_vu[i + 1] = vut[i + 1] + if tuple(t_a_vu) not in self.V.cache: + # Raise error to continue symmetric refine: + raise IndexError + + for vectors in s_ab_Cc: + # s_ab_C.append([c_vc, vl, vu, a_vu]) + bc_vc = list(vectors[0].x) + b_vl = list(vectors[1].x) + b_vu = list(vectors[2].x) + ba_vu = list(vectors[3].x) + + bc_vc[i + 1] = vut[i + 1] + b_vl[i + 1] = vut[i + 1] + b_vu[i + 1] = vut[i + 1] + ba_vu[i + 1] = vut[i + 1] + + bc_vc = self.V[tuple(bc_vc)] + bc_vc.connect(vco) # NOTE: Unneeded? + yield bc_vc + + # Split to centre, call this centre group "d = 0.5*a" + d_bc_vc = self.split_edge(vectors[0].x, bc_vc.x) + d_bc_vc.connect(bc_vc) + d_bc_vc.connect(vectors[1]) # Connect all to centroid + d_bc_vc.connect(vectors[2]) # Connect all to centroid + d_bc_vc.connect(vectors[3]) # Connect all to centroid + yield d_bc_vc.x + b_vl = self.V[tuple(b_vl)] + bc_vc.connect(b_vl) # Connect aN cross pairs + d_bc_vc.connect(b_vl) # Connect all to centroid + + yield b_vl + b_vu = self.V[tuple(b_vu)] + bc_vc.connect(b_vu) # Connect aN cross pairs + d_bc_vc.connect(b_vu) # Connect all to centroid + + b_vl_c = self.split_edge(b_vu.x, b_vl.x) + bc_vc.connect(b_vl_c) + + yield b_vu + ba_vu = self.V[tuple(ba_vu)] + bc_vc.connect(ba_vu) # Connect aN cross pairs + d_bc_vc.connect(ba_vu) # Connect all to centroid + + # Split the a + b edge of the initial triangulation: + os_v = self.split_edge(vectors[1].x, ba_vu.x) # o-s + ss_v = self.split_edge(b_vl.x, ba_vu.x) # s-s + b_vu_c = self.split_edge(b_vu.x, ba_vu.x) + bc_vc.connect(b_vu_c) + yield os_v.x # often equal to vco, but not always + yield ss_v.x # often equal to bc_vu, but not always + yield ba_vu + # Split remaining to centre, call this centre group + # "d = 0.5*a" + d_bc_vc = self.split_edge(vectors[0].x, bc_vc.x) + d_bc_vc.connect(vco) # NOTE: Unneeded? + yield d_bc_vc.x + d_b_vl = self.split_edge(vectors[1].x, b_vl.x) + d_bc_vc.connect(vco) # NOTE: Unneeded? + d_bc_vc.connect(d_b_vl) # Connect dN cross pairs + yield d_b_vl.x + d_b_vu = self.split_edge(vectors[2].x, b_vu.x) + d_bc_vc.connect(vco) # NOTE: Unneeded? + d_bc_vc.connect(d_b_vu) # Connect dN cross pairs + yield d_b_vu.x + d_ba_vu = self.split_edge(vectors[3].x, ba_vu.x) + d_bc_vc.connect(vco) # NOTE: Unneeded? + d_bc_vc.connect(d_ba_vu) # Connect dN cross pairs + yield d_ba_vu + + # comb = [c_vc, vl, vu, a_vl, a_vu, + # bc_vc, b_vl, b_vu, ba_vl, ba_vu] + comb = [vl, vu, a_vu, + b_vl, b_vu, ba_vu] + comb_iter = itertools.combinations(comb, 2) + for vecs in comb_iter: + self.split_edge(vecs[0].x, vecs[1].x) + # Add new list of cross pairs + ab_C.append((d_bc_vc, vectors[1], b_vl, a_vu, ba_vu)) + ab_C.append((d_bc_vc, vl, b_vl, a_vu, ba_vu)) # = prev + + for vectors in ab_Cc: + bc_vc = list(vectors[0].x) + b_vl = list(vectors[1].x) + b_vu = list(vectors[2].x) + ba_vl = list(vectors[3].x) + ba_vu = list(vectors[4].x) + bc_vc[i + 1] = vut[i + 1] + b_vl[i + 1] = vut[i + 1] + b_vu[i + 1] = vut[i + 1] + ba_vl[i + 1] = vut[i + 1] + ba_vu[i + 1] = vut[i + 1] + bc_vc = self.V[tuple(bc_vc)] + bc_vc.connect(vco) # NOTE: Unneeded? 
+ yield bc_vc + + # Split to centre, call this centre group "d = 0.5*a" + d_bc_vc = self.split_edge(vectors[0].x, bc_vc.x) + d_bc_vc.connect(bc_vc) + d_bc_vc.connect(vectors[1]) # Connect all to centroid + d_bc_vc.connect(vectors[2]) # Connect all to centroid + d_bc_vc.connect(vectors[3]) # Connect all to centroid + d_bc_vc.connect(vectors[4]) # Connect all to centroid + yield d_bc_vc.x + b_vl = self.V[tuple(b_vl)] + bc_vc.connect(b_vl) # Connect aN cross pairs + d_bc_vc.connect(b_vl) # Connect all to centroid + yield b_vl + b_vu = self.V[tuple(b_vu)] + bc_vc.connect(b_vu) # Connect aN cross pairs + d_bc_vc.connect(b_vu) # Connect all to centroid + yield b_vu + ba_vl = self.V[tuple(ba_vl)] + bc_vc.connect(ba_vl) # Connect aN cross pairs + d_bc_vc.connect(ba_vl) # Connect all to centroid + self.split_edge(b_vu.x, ba_vl.x) + yield ba_vl + ba_vu = self.V[tuple(ba_vu)] + bc_vc.connect(ba_vu) # Connect aN cross pairs + d_bc_vc.connect(ba_vu) # Connect all to centroid + # Split the a + b edge of the initial triangulation: + os_v = self.split_edge(vectors[1].x, ba_vu.x) # o-s + ss_v = self.split_edge(b_vl.x, ba_vu.x) # s-s + yield os_v.x # often equal to vco, but not always + yield ss_v.x # often equal to bc_vu, but not always + yield ba_vu + # Split remaining to centre, call this centre group + # "d = 0.5*a" + d_bc_vc = self.split_edge(vectors[0].x, bc_vc.x) + d_bc_vc.connect(vco) # NOTE: Unneeded? + yield d_bc_vc.x + d_b_vl = self.split_edge(vectors[1].x, b_vl.x) + d_bc_vc.connect(vco) # NOTE: Unneeded? + d_bc_vc.connect(d_b_vl) # Connect dN cross pairs + yield d_b_vl.x + d_b_vu = self.split_edge(vectors[2].x, b_vu.x) + d_bc_vc.connect(vco) # NOTE: Unneeded? + d_bc_vc.connect(d_b_vu) # Connect dN cross pairs + yield d_b_vu.x + d_ba_vl = self.split_edge(vectors[3].x, ba_vl.x) + d_bc_vc.connect(vco) # NOTE: Unneeded? + d_bc_vc.connect(d_ba_vl) # Connect dN cross pairs + yield d_ba_vl + d_ba_vu = self.split_edge(vectors[4].x, ba_vu.x) + d_bc_vc.connect(vco) # NOTE: Unneeded? 
+ d_bc_vc.connect(d_ba_vu) # Connect dN cross pairs + yield d_ba_vu + c_vc, vl, vu, a_vl, a_vu = vectors + + comb = [vl, vu, a_vl, a_vu, + b_vl, b_vu, ba_vl, ba_vu] + comb_iter = itertools.combinations(comb, 2) + for vecs in comb_iter: + self.split_edge(vecs[0].x, vecs[1].x) + + # Add new list of cross pairs + ab_C.append((bc_vc, b_vl, b_vu, ba_vl, ba_vu)) + ab_C.append((d_bc_vc, d_b_vl, d_b_vu, d_ba_vl, d_ba_vu)) + ab_C.append((d_bc_vc, vectors[1], b_vl, a_vu, ba_vu)) + ab_C.append((d_bc_vc, vu, b_vu, a_vl, ba_vl)) + + for j, (VL, VC, VU) in enumerate(zip(cCox, cCcx, cCux)): + for k, (vl, vc, vu) in enumerate(zip(VL, VC, VU)): + # Build aN vertices for each lower-upper C3 group in N: + a_vl = list(vl.x) + a_vu = list(vu.x) + a_vl[i + 1] = vut[i + 1] + a_vu[i + 1] = vut[i + 1] + a_vl = self.V[tuple(a_vl)] + a_vu = self.V[tuple(a_vu)] + # Note, build (a + vc) later for consistent yields + # Split the a + b edge of the initial triangulation: + c_vc = self.split_edge(vl.x, a_vu.x) + self.split_edge(vl.x, vu.x) # Equal to vc + # Build cN vertices for each lower-upper C3 group in N: + c_vc.connect(vco) + c_vc.connect(vc) + c_vc.connect(vl) # Connect c + ac operations + c_vc.connect(vu) # Connect c + ac operations + c_vc.connect(a_vl) # Connect c + ac operations + c_vc.connect(a_vu) # Connect c + ac operations + yield c_vc.x + c_vl = self.split_edge(vl.x, a_vl.x) + c_vl.connect(vco) + c_vc.connect(c_vl) # Connect cN group vertices + yield c_vl.x + # yield at end of loop: + c_vu = self.split_edge(vu.x, a_vu.x) + c_vu.connect(vco) + # Connect remaining cN group vertices + c_vc.connect(c_vu) # Connect cN group vertices + yield c_vu.x + + a_vc = self.split_edge(a_vl.x, a_vu.x) # is (a + vc) ? + a_vc.connect(vco) + a_vc.connect(c_vc) + + # Storage for connecting c + ac operations: + ab_C.append((c_vc, vl, vu, a_vl, a_vu)) + + # Update the containers + Cox[i + 1].append(vl) + Cox[i + 1].append(vc) + Cox[i + 1].append(vu) + Ccx[i + 1].append(c_vl) + Ccx[i + 1].append(c_vc) + Ccx[i + 1].append(c_vu) + Cux[i + 1].append(a_vl) + Cux[i + 1].append(a_vc) + Cux[i + 1].append(a_vu) + + # Update old containers + Cox[j].append(c_vl) # ! + Cox[j].append(a_vl) + Ccx[j].append(c_vc) # ! + Ccx[j].append(a_vc) # ! + Cux[j].append(c_vu) # ! 
+ Cux[j].append(a_vu) + + # Yield new points + yield a_vc.x + + except IndexError: + for vectors in ab_Cc: + ba_vl = list(vectors[3].x) + ba_vu = list(vectors[4].x) + ba_vl[i + 1] = vut[i + 1] + ba_vu[i + 1] = vut[i + 1] + ba_vu = self.V[tuple(ba_vu)] + yield ba_vu + d_bc_vc = self.split_edge(vectors[1].x, ba_vu.x) # o-s + yield ba_vu + d_bc_vc.connect(vectors[1]) # Connect all to centroid + d_bc_vc.connect(vectors[2]) # Connect all to centroid + d_bc_vc.connect(vectors[3]) # Connect all to centroid + d_bc_vc.connect(vectors[4]) # Connect all to centroid + yield d_bc_vc.x + ba_vl = self.V[tuple(ba_vl)] + yield ba_vl + d_ba_vl = self.split_edge(vectors[3].x, ba_vl.x) + d_ba_vu = self.split_edge(vectors[4].x, ba_vu.x) + d_ba_vc = self.split_edge(d_ba_vl.x, d_ba_vu.x) + yield d_ba_vl + yield d_ba_vu + yield d_ba_vc + c_vc, vl, vu, a_vl, a_vu = vectors + comb = [vl, vu, a_vl, a_vu, + ba_vl, + ba_vu] + comb_iter = itertools.combinations(comb, 2) + for vecs in comb_iter: + self.split_edge(vecs[0].x, vecs[1].x) + + # Copy lists for iteration + cCox = Cox[i] + cCcx = Ccx[i] + cCux = Cux[i] + VL, VC, VU = cCox, cCcx, cCux + for k, (vl, vc, vu) in enumerate(zip(VL, VC, VU)): + # Build aN vertices for each lower-upper pair in N: + a_vu = list(vu.x) + a_vu[i + 1] = vut[i + 1] + + # Connect vertices in N to corresponding vertices + # in aN: + a_vu = self.V[tuple(a_vu)] + yield a_vl.x + # Split the a + b edge of the initial triangulation: + c_vc = self.split_edge(vl.x, a_vu.x) + self.split_edge(vl.x, vu.x) # Equal to vc + c_vc.connect(vco) + c_vc.connect(vc) + c_vc.connect(vl) # Connect c + ac operations + c_vc.connect(vu) # Connect c + ac operations + c_vc.connect(a_vu) # Connect c + ac operations + yield (c_vc.x) + c_vu = self.split_edge(vu.x, + a_vu.x) # yield at end of loop + c_vu.connect(vco) + # Connect remaining cN group vertices + c_vc.connect(c_vu) # Connect cN group vertices + yield (c_vu.x) + + # Update the containers + Cox[i + 1].append(vu) + Ccx[i + 1].append(c_vu) + Cux[i + 1].append(a_vu) + + # Update old containers + s_ab_C.append([c_vc, vl, vu, a_vu]) + + yield a_vu.x + + # Clean class trash + try: + del Cox + del Ccx + del Cux + del ab_C + del ab_Cc + except UnboundLocalError: + pass + + try: + self.triangulated_vectors.remove((tuple(origin_c), + tuple(supremum_c))) + except ValueError: + # Turn this into a logging warning? + pass + # Add newly triangulated vectors: + for vs in sup_set: + self.triangulated_vectors.append((tuple(vco.x), tuple(vs.x))) + + # Extra yield to ensure that the triangulation is completed + if centroid: + vcn_set = set() + c_nn_lists = [] + for vs in sup_set: + # Build centroid + c_nn = self.vpool(vco.x, vs.x) + try: + c_nn.remove(vcn_set) + except KeyError: + pass + c_nn_lists.append(c_nn) + + for c_nn in c_nn_lists: + try: + c_nn.remove(vcn_set) + except KeyError: + pass + + for vs, c_nn in zip(sup_set, c_nn_lists): + # Build centroid + vcn = self.split_edge(vco.x, vs.x) + vcn_set.add(vcn) + try: # Shouldn't be needed? 
+ c_nn.remove(vcn_set) + except KeyError: + pass + for vnn in c_nn: + vcn.connect(vnn) + yield vcn.x + else: + pass + + yield vut + return + + def refine_star(self, v): + """Refine the star domain of a vertex `v`.""" + # Copy lists before iteration + vnn = copy.copy(v.nn) + v1nn = [] + d_v0v1_set = set() + for v1 in vnn: + v1nn.append(copy.copy(v1.nn)) + + for v1, v1nn in zip(vnn, v1nn): + vnnu = v1nn.intersection(vnn) + + d_v0v1 = self.split_edge(v.x, v1.x) + for o_d_v0v1 in d_v0v1_set: + d_v0v1.connect(o_d_v0v1) + d_v0v1_set.add(d_v0v1) + for v2 in vnnu: + d_v1v2 = self.split_edge(v1.x, v2.x) + d_v0v1.connect(d_v1v2) + return + + @cache + def split_edge(self, v1, v2): + v1 = self.V[v1] + v2 = self.V[v2] + # Destroy original edge, if it exists: + v1.disconnect(v2) + # Compute vertex on centre of edge: + try: + vct = (v2.x_a - v1.x_a) / 2.0 + v1.x_a + except TypeError: # Allow for decimal operations + vct = (v2.x_a - v1.x_a) / decimal.Decimal(2.0) + v1.x_a + + vc = self.V[tuple(vct)] + # Connect to original 2 vertices to the new centre vertex + vc.connect(v1) + vc.connect(v2) + return vc + + def vpool(self, origin, supremum): + vot = tuple(origin) + vst = tuple(supremum) + # Initiate vertices in case they don't exist + vo = self.V[vot] + vs = self.V[vst] + + # Remove origin - supremum disconnect + + # Find the lower/upper bounds of the refinement hyperrectangle + bl = list(vot) + bu = list(vst) + for i, (voi, vsi) in enumerate(zip(vot, vst)): + if bl[i] > vsi: + bl[i] = vsi + if bu[i] < voi: + bu[i] = voi + + # NOTE: This is mostly done with sets/lists because we aren't sure + # how well the numpy arrays will scale to thousands of + # dimensions. + vn_pool = set() + vn_pool.update(vo.nn) + vn_pool.update(vs.nn) + cvn_pool = copy.copy(vn_pool) + for vn in cvn_pool: + for i, xi in enumerate(vn.x): + if bl[i] <= xi <= bu[i]: + pass + else: + try: + vn_pool.remove(vn) + except KeyError: + pass # NOTE: Not all neigbouds are in initial pool + return vn_pool + + def vf_to_vv(self, vertices, simplices): + """ + Convert a vertex-face mesh to a vertex-vertex mesh used by this class + + Parameters + ---------- + vertices : list + Vertices + simplices : list + Simplices + """ + if self.dim > 1: + for s in simplices: + edges = itertools.combinations(s, self.dim) + for e in edges: + self.V[tuple(vertices[e[0]])].connect( + self.V[tuple(vertices[e[1]])]) + else: + for e in simplices: + self.V[tuple(vertices[e[0]])].connect( + self.V[tuple(vertices[e[1]])]) + return + + def connect_vertex_non_symm(self, v_x, near=None): + """ + Adds a vertex at coords v_x to the complex that is not symmetric to the + initial triangulation and sub-triangulation. + + If near is specified (for example; a star domain or collections of + cells known to contain v) then only those simplices containd in near + will be searched, this greatly speeds up the process. + + If near is not specified this method will search the entire simplicial + complex structure. 
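The cached `split_edge` method above disconnects the original edge, places a new vertex at the arithmetic midpoint of the two endpoints (with a `decimal.Decimal` fallback for non-float coordinates), and reconnects both endpoints to it. A minimal standalone sketch of just the midpoint computation, assuming plain float coordinates (hypothetical helper name, not the class method):

    import numpy as np

    def edge_midpoint(x1, x2):
        # New vertex placed halfway along the edge (x1, x2).
        x1_a = np.asarray(x1, dtype=float)
        x2_a = np.asarray(x2, dtype=float)
        return tuple((x2_a - x1_a) / 2.0 + x1_a)

    print(edge_midpoint((0.0, 0.0, 0.0), (1.0, 1.0, 0.0)))  # (0.5, 0.5, 0.0)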
+ + Parameters + ---------- + v_x : tuple + Coordinates of non-symmetric vertex + near : set or list + List of vertices, these are points near v to check for + """ + if near is None: + star = self.V + else: + star = near + # Create the vertex origin + if tuple(v_x) in self.V.cache: + if self.V[v_x] in self.V_non_symm: + pass + else: + return + + self.V[v_x] + found_nn = False + S_rows = [] + for v in star: + S_rows.append(v.x) + + S_rows = numpy.array(S_rows) + A = numpy.array(S_rows) - numpy.array(v_x) + # Iterate through all the possible simplices of S_rows + for s_i in itertools.combinations(range(S_rows.shape[0]), + r=self.dim + 1): + # Check if connected, else s_i is not a simplex + valid_simplex = True + for i in itertools.combinations(s_i, r=2): + # Every combination of vertices must be connected, we check of + # the current iteration of all combinations of s_i are + # connected we break the loop if it is not. + if ((self.V[tuple(S_rows[i[1]])] not in + self.V[tuple(S_rows[i[0]])].nn) + and (self.V[tuple(S_rows[i[0]])] not in + self.V[tuple(S_rows[i[1]])].nn)): + valid_simplex = False + break + + S = S_rows[tuple([s_i])] + if valid_simplex: + if self.deg_simplex(S, proj=None): + valid_simplex = False + + # If s_i is a valid simplex we can test if v_x is inside si + if valid_simplex: + # Find the A_j0 value from the precalculated values + A_j0 = A[tuple([s_i])] + if self.in_simplex(S, v_x, A_j0): + found_nn = True + # breaks the main for loop, s_i is the target simplex: + break + + # Connect the simplex to point + if found_nn: + for i in s_i: + self.V[v_x].connect(self.V[tuple(S_rows[i])]) + # Attached the simplex to storage for all non-symmetric vertices + self.V_non_symm.append(self.V[v_x]) + # this bool value indicates a successful connection if True: + return found_nn + + def in_simplex(self, S, v_x, A_j0=None): + """Check if a vector v_x is in simplex `S`. + + Parameters + ---------- + S : array_like + Array containing simplex entries of vertices as rows + v_x : + A candidate vertex + A_j0 : array, optional, + Allows for A_j0 to be pre-calculated + + Returns + ------- + res : boolean + True if `v_x` is in `S` + """ + A_11 = numpy.delete(S, 0, 0) - S[0] + + sign_det_A_11 = numpy.sign(numpy.linalg.det(A_11)) + if sign_det_A_11 == 0: + # NOTE: We keep the variable A_11, but we loop through A_jj + # ind= + # while sign_det_A_11 == 0: + # A_11 = numpy.delete(S, ind, 0) - S[ind] + # sign_det_A_11 = numpy.sign(numpy.linalg.det(A_11)) + + sign_det_A_11 = -1 # TODO: Choose another det of j instead? + # TODO: Unlikely to work in many cases + + if A_j0 is None: + A_j0 = S - v_x + + for d in range(self.dim + 1): + det_A_jj = (-1)**d * sign_det_A_11 + # TODO: Note that scipy might be faster to add as an optional + # dependency + sign_det_A_j0 = numpy.sign(numpy.linalg.det(numpy.delete(A_j0, d, + 0))) + # TODO: Note if sign_det_A_j0 == then the point is coplanar to the + # current simplex facet, so perhaps return True and attach? + if det_A_jj == sign_det_A_j0: + continue + else: + return False + + return True + + def deg_simplex(self, S, proj=None): + """Test a simplex S for degeneracy (linear dependence in R^dim). + + Parameters + ---------- + S : np.array + Simplex with rows as vertex vectors + proj : array, optional, + If the projection S[1:] - S[0] is already + computed it can be added as an optional argument. 
+ """ + # Strategy: we test all combination of faces, if any of the + # determinants are zero then the vectors lie on the same face and is + # therefore linearly dependent in the space of R^dim + if proj is None: + proj = S[1:] - S[0] + + # TODO: Is checking the projection of one vertex against faces of other + # vertices sufficient? Or do we need to check more vertices in + # dimensions higher than 2? + # TODO: Literature seems to suggest using proj.T, but why is this + # needed? + if numpy.linalg.det(proj) == 0.0: # TODO: Repalace with tolerance? + return True # Simplex is degenerate + else: + return False # Simplex is not degenerate diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/_vertex.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/_vertex.py new file mode 100644 index 0000000000000000000000000000000000000000..e47558ee7b9a181638841c34bb63603b5d37e221 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/_vertex.py @@ -0,0 +1,460 @@ +import collections +from abc import ABC, abstractmethod + +import numpy as np + +from scipy._lib._util import MapWrapper + + +class VertexBase(ABC): + """ + Base class for a vertex. + """ + def __init__(self, x, nn=None, index=None): + """ + Initiation of a vertex object. + + Parameters + ---------- + x : tuple or vector + The geometric location (domain). + nn : list, optional + Nearest neighbour list. + index : int, optional + Index of vertex. + """ + self.x = x + self.hash = hash(self.x) # Save precomputed hash + + if nn is not None: + self.nn = set(nn) # can use .indexupdate to add a new list + else: + self.nn = set() + + self.index = index + + def __hash__(self): + return self.hash + + def __getattr__(self, item): + if item not in ['x_a']: + raise AttributeError(f"{type(self)} object has no attribute " + f"'{item}'") + if item == 'x_a': + self.x_a = np.array(self.x) + return self.x_a + + @abstractmethod + def connect(self, v): + raise NotImplementedError("This method is only implemented with an " + "associated child of the base class.") + + @abstractmethod + def disconnect(self, v): + raise NotImplementedError("This method is only implemented with an " + "associated child of the base class.") + + def star(self): + """Returns the star domain ``st(v)`` of the vertex. 
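The `deg_simplex` test above flags a simplex as degenerate when the edge vectors from its first vertex are linearly dependent, i.e. when ``det(S[1:] - S[0])`` is zero. A tiny standalone sketch of the same check (hypothetical function name; it keeps the exact-zero comparison used in the code above, which the TODO note suggests replacing with a tolerance):

    import numpy as np

    def is_degenerate(S):
        # Rows of S are simplex vertices; edge vectors taken from vertex 0.
        proj = S[1:] - S[0]
        return np.linalg.det(proj) == 0.0

    print(is_degenerate(np.array([[0., 0.], [1., 0.], [0., 1.]])))  # False
    print(is_degenerate(np.array([[0., 0.], [1., 1.], [2., 2.]])))  # True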
+ + Parameters + ---------- + v : + The vertex ``v`` in ``st(v)`` + + Returns + ------- + st : set + A set containing all the vertices in ``st(v)`` + """ + self.st = self.nn + self.st.add(self) + return self.st + + +class VertexScalarField(VertexBase): + """ + Add homology properties of a scalar field f: R^n --> R associated with + the geometry built from the VertexBase class + """ + + def __init__(self, x, field=None, nn=None, index=None, field_args=(), + g_cons=None, g_cons_args=()): + """ + Parameters + ---------- + x : tuple, + vector of vertex coordinates + field : callable, optional + a scalar field f: R^n --> R associated with the geometry + nn : list, optional + list of nearest neighbours + index : int, optional + index of the vertex + field_args : tuple, optional + additional arguments to be passed to field + g_cons : callable, optional + constraints on the vertex + g_cons_args : tuple, optional + additional arguments to be passed to g_cons + + """ + super().__init__(x, nn=nn, index=index) + + # Note Vertex is only initiated once for all x so only + # evaluated once + # self.feasible = None + + # self.f is externally defined by the cache to allow parallel + # processing + # None type that will break arithmetic operations unless defined + # self.f = None + + self.check_min = True + self.check_max = True + + def connect(self, v): + """Connects self to another vertex object v. + + Parameters + ---------- + v : VertexBase or VertexScalarField object + """ + if v is not self and v not in self.nn: + self.nn.add(v) + v.nn.add(self) + + # Flags for checking homology properties: + self.check_min = True + self.check_max = True + v.check_min = True + v.check_max = True + + def disconnect(self, v): + if v in self.nn: + self.nn.remove(v) + v.nn.remove(self) + + # Flags for checking homology properties: + self.check_min = True + self.check_max = True + v.check_min = True + v.check_max = True + + def minimiser(self): + """Check whether this vertex is strictly less than all its + neighbours""" + if self.check_min: + self._min = all(self.f < v.f for v in self.nn) + self.check_min = False + + return self._min + + def maximiser(self): + """ + Check whether this vertex is strictly greater than all its + neighbours. + """ + if self.check_max: + self._max = all(self.f > v.f for v in self.nn) + self.check_max = False + + return self._max + + +class VertexVectorField(VertexBase): + """ + Add homology properties of a scalar field f: R^n --> R^m associated with + the geometry built from the VertexBase class. 
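The `minimiser`/`maximiser` checks above declare a vertex a strict local minimiser (maximiser) of the sampled field when its value is strictly below (above) the value at every neighbour; the `check_min`/`check_max` flags only cache that result until the neighbourhood changes. A minimal standalone sketch of the underlying test on plain numbers (hypothetical function name, not the class method):

    def is_strict_minimiser(f_v, neighbour_values):
        # True when f at the vertex is strictly below f at every neighbour.
        return all(f_v < f_n for f_n in neighbour_values)

    print(is_strict_minimiser(0.1, [0.5, 0.3, 0.9]))  # True
    print(is_strict_minimiser(0.4, [0.5, 0.3, 0.9]))  # False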
+ """ + + def __init__(self, x, sfield=None, vfield=None, field_args=(), + vfield_args=(), g_cons=None, + g_cons_args=(), nn=None, index=None): + super().__init__(x, nn=nn, index=index) + + raise NotImplementedError("This class is still a work in progress") + + +class VertexCacheBase: + """Base class for a vertex cache for a simplicial complex.""" + def __init__(self): + + self.cache = collections.OrderedDict() + self.nfev = 0 # Feasible points + self.index = -1 + + def __iter__(self): + for v in self.cache: + yield self.cache[v] + return + + def size(self): + """Returns the size of the vertex cache.""" + return self.index + 1 + + def print_out(self): + headlen = len(f"Vertex cache of size: {len(self.cache)}:") + print('=' * headlen) + print(f"Vertex cache of size: {len(self.cache)}:") + print('=' * headlen) + for v in self.cache: + self.cache[v].print_out() + + +class VertexCube(VertexBase): + """Vertex class to be used for a pure simplicial complex with no associated + differential geometry (single level domain that exists in R^n)""" + def __init__(self, x, nn=None, index=None): + super().__init__(x, nn=nn, index=index) + + def connect(self, v): + if v is not self and v not in self.nn: + self.nn.add(v) + v.nn.add(self) + + def disconnect(self, v): + if v in self.nn: + self.nn.remove(v) + v.nn.remove(self) + + +class VertexCacheIndex(VertexCacheBase): + def __init__(self): + """ + Class for a vertex cache for a simplicial complex without an associated + field. Useful only for building and visualising a domain complex. + + Parameters + ---------- + """ + super().__init__() + self.Vertex = VertexCube + + def __getitem__(self, x, nn=None): + try: + return self.cache[x] + except KeyError: + self.index += 1 + xval = self.Vertex(x, index=self.index) + # logging.info("New generated vertex at x = {}".format(x)) + # NOTE: Surprisingly high performance increase if logging + # is commented out + self.cache[x] = xval + return self.cache[x] + + +class VertexCacheField(VertexCacheBase): + def __init__(self, field=None, field_args=(), g_cons=None, g_cons_args=(), + workers=1): + """ + Class for a vertex cache for a simplicial complex with an associated + field. + + Parameters + ---------- + field : callable + Scalar or vector field callable. + field_args : tuple, optional + Any additional fixed parameters needed to completely specify the + field function + g_cons : dict or sequence of dict, optional + Constraints definition. + Function(s) ``R**n`` in the form:: + g_cons_args : tuple, optional + Any additional fixed parameters needed to completely specify the + constraint functions + workers : int optional + Uses `multiprocessing.Pool `) to compute the field + functions in parallel. 
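`VertexCacheIndex.__getitem__` above lazily creates and caches a `VertexCube` the first time a coordinate tuple is requested, so later lookups return the same object. A small usage sketch, assuming the private module is importable from the path added in this diff:

    from scipy.optimize._shgo_lib._vertex import VertexCacheIndex

    cache = VertexCacheIndex()
    v1 = cache[(0.0, 0.0)]
    v2 = cache[(1.0, 0.0)]
    v1.connect(v2)
    print(v1 is cache[(0.0, 0.0)])   # True: the same cached vertex object
    print(len(v1.nn), cache.size())  # 1 2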
+ + """ + super().__init__() + self.index = -1 + self.Vertex = VertexScalarField + self.field = field + self.field_args = field_args + self.wfield = FieldWrapper(field, field_args) # if workers is not 1 + + self.g_cons = g_cons + self.g_cons_args = g_cons_args + self.wgcons = ConstraintWrapper(g_cons, g_cons_args) + self.gpool = set() # A set of tuples to process for feasibility + + # Field processing objects + self.fpool = set() # A set of tuples to process for scalar function + self.sfc_lock = False # True if self.fpool is non-Empty + + self.workers = workers + self._mapwrapper = MapWrapper(workers) + + if workers == 1: + self.process_gpool = self.proc_gpool + if g_cons is None: + self.process_fpool = self.proc_fpool_nog + else: + self.process_fpool = self.proc_fpool_g + else: + self.process_gpool = self.pproc_gpool + if g_cons is None: + self.process_fpool = self.pproc_fpool_nog + else: + self.process_fpool = self.pproc_fpool_g + + def __getitem__(self, x, nn=None): + try: + return self.cache[x] + except KeyError: + self.index += 1 + xval = self.Vertex(x, field=self.field, nn=nn, index=self.index, + field_args=self.field_args, + g_cons=self.g_cons, + g_cons_args=self.g_cons_args) + + self.cache[x] = xval # Define in cache + self.gpool.add(xval) # Add to pool for processing feasibility + self.fpool.add(xval) # Add to pool for processing field values + return self.cache[x] + + def __getstate__(self): + self_dict = self.__dict__.copy() + del self_dict['pool'] + return self_dict + + def process_pools(self): + if self.g_cons is not None: + self.process_gpool() + self.process_fpool() + self.proc_minimisers() + + def feasibility_check(self, v): + v.feasible = True + for g, args in zip(self.g_cons, self.g_cons_args): + # constraint may return more than 1 value. + if np.any(g(v.x_a, *args) < 0.0): + v.f = np.inf + v.feasible = False + break + + def compute_sfield(self, v): + """Compute the scalar field values of a vertex object `v`. + + Parameters + ---------- + v : VertexBase or VertexScalarField object + """ + try: + v.f = self.field(v.x_a, *self.field_args) + self.nfev += 1 + except AttributeError: + v.f = np.inf + # logging.warning(f"Field function not found at x = {self.x_a}") + if np.isnan(v.f): + v.f = np.inf + + def proc_gpool(self): + """Process all constraints.""" + if self.g_cons is not None: + for v in self.gpool: + self.feasibility_check(v) + # Clean the pool + self.gpool = set() + + def pproc_gpool(self): + """Process all constraints in parallel.""" + gpool_l = [] + for v in self.gpool: + gpool_l.append(v.x_a) + + G = self._mapwrapper(self.wgcons.gcons, gpool_l) + for v, g in zip(self.gpool, G): + v.feasible = g # set vertex object attribute v.feasible = g (bool) + + def proc_fpool_g(self): + """Process all field functions with constraints supplied.""" + for v in self.fpool: + if v.feasible: + self.compute_sfield(v) + # Clean the pool + self.fpool = set() + + def proc_fpool_nog(self): + """Process all field functions with no constraints supplied.""" + for v in self.fpool: + self.compute_sfield(v) + # Clean the pool + self.fpool = set() + + def pproc_fpool_g(self): + """ + Process all field functions with constraints supplied in parallel. 
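A small usage sketch of the field cache above with a scalar field, no constraints, and the default serial path (``workers=1``); the class is assumed importable from the private module added in this diff:

    from scipy.optimize._shgo_lib._vertex import VertexCacheField

    vc = VertexCacheField(field=lambda x: float((x ** 2).sum()))
    a, b = vc[(0.0, 0.0)], vc[(1.0, 1.0)]
    a.connect(b)
    vc.process_pools()        # evaluates the field on every pooled vertex
    print(a.f, b.f, vc.nfev)  # 0.0 2.0 2
    print(a.minimiser())      # True: a lies below its only neighbour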
+ """ + self.wfield.func + fpool_l = [] + for v in self.fpool: + if v.feasible: + fpool_l.append(v.x_a) + else: + v.f = np.inf + F = self._mapwrapper(self.wfield.func, fpool_l) + for va, f in zip(fpool_l, F): + vt = tuple(va) + self[vt].f = f # set vertex object attribute v.f = f + self.nfev += 1 + # Clean the pool + self.fpool = set() + + def pproc_fpool_nog(self): + """ + Process all field functions with no constraints supplied in parallel. + """ + self.wfield.func + fpool_l = [] + for v in self.fpool: + fpool_l.append(v.x_a) + F = self._mapwrapper(self.wfield.func, fpool_l) + for va, f in zip(fpool_l, F): + vt = tuple(va) + self[vt].f = f # set vertex object attribute v.f = f + self.nfev += 1 + # Clean the pool + self.fpool = set() + + def proc_minimisers(self): + """Check for minimisers.""" + for v in self: + v.minimiser() + v.maximiser() + + +class ConstraintWrapper: + """Object to wrap constraints to pass to `multiprocessing.Pool`.""" + def __init__(self, g_cons, g_cons_args): + self.g_cons = g_cons + self.g_cons_args = g_cons_args + + def gcons(self, v_x_a): + vfeasible = True + for g, args in zip(self.g_cons, self.g_cons_args): + # constraint may return more than 1 value. + if np.any(g(v_x_a, *args) < 0.0): + vfeasible = False + break + return vfeasible + + +class FieldWrapper: + """Object to wrap field to pass to `multiprocessing.Pool`.""" + def __init__(self, field, field_args): + self.field = field + self.field_args = field_args + + def func(self, v_x_a): + try: + v_f = self.field(v_x_a, *self.field_args) + except Exception: + v_f = np.inf + if np.isnan(v_f): + v_f = np.inf + + return v_f diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_slsqp.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_slsqp.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..70ba01676864deb958edff9a54a65a25dec69747 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_slsqp.cpython-310-x86_64-linux-gnu.so differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_slsqp_py.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_slsqp_py.py new file mode 100644 index 0000000000000000000000000000000000000000..8a6543bf1ccbeffea3876699dfe32cdb6c531a04 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_slsqp_py.py @@ -0,0 +1,513 @@ +""" +This module implements the Sequential Least Squares Programming optimization +algorithm (SLSQP), originally developed by Dieter Kraft. +See http://www.netlib.org/toms/733 + +Functions +--------- +.. autosummary:: + :toctree: generated/ + + approx_jacobian + fmin_slsqp + +""" + +__all__ = ['approx_jacobian', 'fmin_slsqp'] + +import numpy as np +from scipy.optimize._slsqp import slsqp +from numpy import (zeros, array, linalg, append, concatenate, finfo, + sqrt, vstack, isfinite, atleast_1d) +from ._optimize import (OptimizeResult, _check_unknown_options, + _prepare_scalar_function, _clip_x_for_func, + _check_clip_x) +from ._numdiff import approx_derivative +from ._constraints import old_bound_to_new, _arr_to_scalar +from scipy._lib._array_api import atleast_nd, array_namespace + +# deprecated imports to be removed in SciPy 1.13.0 +from numpy import exp, inf # noqa: F401 + + +__docformat__ = "restructuredtext en" + +_epsilon = sqrt(finfo(float).eps) + + +def approx_jacobian(x, func, epsilon, *args): + """ + Approximate the Jacobian matrix of a callable function. 
+ + Parameters + ---------- + x : array_like + The state vector at which to compute the Jacobian matrix. + func : callable f(x,*args) + The vector-valued function. + epsilon : float + The perturbation used to determine the partial derivatives. + args : sequence + Additional arguments passed to func. + + Returns + ------- + An array of dimensions ``(lenf, lenx)`` where ``lenf`` is the length + of the outputs of `func`, and ``lenx`` is the number of elements in + `x`. + + Notes + ----- + The approximation is done using forward differences. + + """ + # approx_derivative returns (m, n) == (lenf, lenx) + jac = approx_derivative(func, x, method='2-point', abs_step=epsilon, + args=args) + # if func returns a scalar jac.shape will be (lenx,). Make sure + # it's at least a 2D array. + return np.atleast_2d(jac) + + +def fmin_slsqp(func, x0, eqcons=(), f_eqcons=None, ieqcons=(), f_ieqcons=None, + bounds=(), fprime=None, fprime_eqcons=None, + fprime_ieqcons=None, args=(), iter=100, acc=1.0E-6, + iprint=1, disp=None, full_output=0, epsilon=_epsilon, + callback=None): + """ + Minimize a function using Sequential Least Squares Programming + + Python interface function for the SLSQP Optimization subroutine + originally implemented by Dieter Kraft. + + Parameters + ---------- + func : callable f(x,*args) + Objective function. Must return a scalar. + x0 : 1-D ndarray of float + Initial guess for the independent variable(s). + eqcons : list, optional + A list of functions of length n such that + eqcons[j](x,*args) == 0.0 in a successfully optimized + problem. + f_eqcons : callable f(x,*args), optional + Returns a 1-D array in which each element must equal 0.0 in a + successfully optimized problem. If f_eqcons is specified, + eqcons is ignored. + ieqcons : list, optional + A list of functions of length n such that + ieqcons[j](x,*args) >= 0.0 in a successfully optimized + problem. + f_ieqcons : callable f(x,*args), optional + Returns a 1-D ndarray in which each element must be greater or + equal to 0.0 in a successfully optimized problem. If + f_ieqcons is specified, ieqcons is ignored. + bounds : list, optional + A list of tuples specifying the lower and upper bound + for each independent variable [(xl0, xu0),(xl1, xu1),...] + Infinite values will be interpreted as large floating values. + fprime : callable `f(x,*args)`, optional + A function that evaluates the partial derivatives of func. + fprime_eqcons : callable `f(x,*args)`, optional + A function of the form `f(x, *args)` that returns the m by n + array of equality constraint normals. If not provided, + the normals will be approximated. The array returned by + fprime_eqcons should be sized as ( len(eqcons), len(x0) ). + fprime_ieqcons : callable `f(x,*args)`, optional + A function of the form `f(x, *args)` that returns the m by n + array of inequality constraint normals. If not provided, + the normals will be approximated. The array returned by + fprime_ieqcons should be sized as ( len(ieqcons), len(x0) ). + args : sequence, optional + Additional arguments passed to func and fprime. + iter : int, optional + The maximum number of iterations. + acc : float, optional + Requested accuracy. + iprint : int, optional + The verbosity of fmin_slsqp : + + * iprint <= 0 : Silent operation + * iprint == 1 : Print summary upon completion (default) + * iprint >= 2 : Print status of each iterate and summary + disp : int, optional + Overrides the iprint interface (preferred). + full_output : bool, optional + If False, return only the minimizer of func (default). 
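A short usage sketch of `approx_jacobian` on a simple vector-valued function; the values are forward-difference estimates, so only approximately equal to the analytic Jacobian ``[[2, 1], [0, 3]]`` (the import uses the private module path added in this diff):

    import numpy as np
    from scipy.optimize._slsqp_py import approx_jacobian

    def f(x):
        return np.array([x[0]**2 + x[1], 3.0 * x[1]])

    J = approx_jacobian(np.array([1.0, 2.0]), f, 1e-8)
    print(J.shape)         # (2, 2)
    print(np.round(J, 6))  # approximately [[2. 1.] [0. 3.]]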
+ Otherwise, output final objective function and summary + information. + epsilon : float, optional + The step size for finite-difference derivative estimates. + callback : callable, optional + Called after each iteration, as ``callback(x)``, where ``x`` is the + current parameter vector. + + Returns + ------- + out : ndarray of float + The final minimizer of func. + fx : ndarray of float, if full_output is true + The final value of the objective function. + its : int, if full_output is true + The number of iterations. + imode : int, if full_output is true + The exit mode from the optimizer (see below). + smode : string, if full_output is true + Message describing the exit mode from the optimizer. + + See also + -------- + minimize: Interface to minimization algorithms for multivariate + functions. See the 'SLSQP' `method` in particular. + + Notes + ----- + Exit modes are defined as follows :: + + -1 : Gradient evaluation required (g & a) + 0 : Optimization terminated successfully + 1 : Function evaluation required (f & c) + 2 : More equality constraints than independent variables + 3 : More than 3*n iterations in LSQ subproblem + 4 : Inequality constraints incompatible + 5 : Singular matrix E in LSQ subproblem + 6 : Singular matrix C in LSQ subproblem + 7 : Rank-deficient equality constraint subproblem HFTI + 8 : Positive directional derivative for linesearch + 9 : Iteration limit reached + + Examples + -------- + Examples are given :ref:`in the tutorial `. + + """ + if disp is not None: + iprint = disp + + opts = {'maxiter': iter, + 'ftol': acc, + 'iprint': iprint, + 'disp': iprint != 0, + 'eps': epsilon, + 'callback': callback} + + # Build the constraints as a tuple of dictionaries + cons = () + # 1. constraints of the 1st kind (eqcons, ieqcons); no Jacobian; take + # the same extra arguments as the objective function. + cons += tuple({'type': 'eq', 'fun': c, 'args': args} for c in eqcons) + cons += tuple({'type': 'ineq', 'fun': c, 'args': args} for c in ieqcons) + # 2. constraints of the 2nd kind (f_eqcons, f_ieqcons) and their Jacobian + # (fprime_eqcons, fprime_ieqcons); also take the same extra arguments + # as the objective function. + if f_eqcons: + cons += ({'type': 'eq', 'fun': f_eqcons, 'jac': fprime_eqcons, + 'args': args}, ) + if f_ieqcons: + cons += ({'type': 'ineq', 'fun': f_ieqcons, 'jac': fprime_ieqcons, + 'args': args}, ) + + res = _minimize_slsqp(func, x0, args, jac=fprime, bounds=bounds, + constraints=cons, **opts) + if full_output: + return res['x'], res['fun'], res['nit'], res['status'], res['message'] + else: + return res['x'] + + +def _minimize_slsqp(func, x0, args=(), jac=None, bounds=None, + constraints=(), + maxiter=100, ftol=1.0E-6, iprint=1, disp=False, + eps=_epsilon, callback=None, finite_diff_rel_step=None, + **unknown_options): + """ + Minimize a scalar function of one or more variables using Sequential + Least Squares Programming (SLSQP). + + Options + ------- + ftol : float + Precision goal for the value of f in the stopping criterion. + eps : float + Step size used for numerical approximation of the Jacobian. + disp : bool + Set to True to print convergence messages. If False, + `verbosity` is ignored and set to 0. + maxiter : int + Maximum number of iterations. + finite_diff_rel_step : None or array_like, optional + If `jac in ['2-point', '3-point', 'cs']` the relative step size to + use for numerical approximation of `jac`. The absolute step + size is computed as ``h = rel_step * sign(x) * max(1, abs(x))``, + possibly adjusted to fit into the bounds. 
For ``method='3-point'`` + the sign of `h` is ignored. If None (default) then step is selected + automatically. + """ + _check_unknown_options(unknown_options) + iter = maxiter - 1 + acc = ftol + epsilon = eps + + if not disp: + iprint = 0 + + # Transform x0 into an array. + xp = array_namespace(x0) + x0 = atleast_nd(x0, ndim=1, xp=xp) + dtype = xp.float64 + if xp.isdtype(x0.dtype, "real floating"): + dtype = x0.dtype + x = xp.reshape(xp.astype(x0, dtype), -1) + + # SLSQP is sent 'old-style' bounds, 'new-style' bounds are required by + # ScalarFunction + if bounds is None or len(bounds) == 0: + new_bounds = (-np.inf, np.inf) + else: + new_bounds = old_bound_to_new(bounds) + + # clip the initial guess to bounds, otherwise ScalarFunction doesn't work + x = np.clip(x, new_bounds[0], new_bounds[1]) + + # Constraints are triaged per type into a dictionary of tuples + if isinstance(constraints, dict): + constraints = (constraints, ) + + cons = {'eq': (), 'ineq': ()} + for ic, con in enumerate(constraints): + # check type + try: + ctype = con['type'].lower() + except KeyError as e: + raise KeyError('Constraint %d has no type defined.' % ic) from e + except TypeError as e: + raise TypeError('Constraints must be defined using a ' + 'dictionary.') from e + except AttributeError as e: + raise TypeError("Constraint's type must be a string.") from e + else: + if ctype not in ['eq', 'ineq']: + raise ValueError("Unknown constraint type '%s'." % con['type']) + + # check function + if 'fun' not in con: + raise ValueError('Constraint %d has no function defined.' % ic) + + # check Jacobian + cjac = con.get('jac') + if cjac is None: + # approximate Jacobian function. The factory function is needed + # to keep a reference to `fun`, see gh-4240. + def cjac_factory(fun): + def cjac(x, *args): + x = _check_clip_x(x, new_bounds) + + if jac in ['2-point', '3-point', 'cs']: + return approx_derivative(fun, x, method=jac, args=args, + rel_step=finite_diff_rel_step, + bounds=new_bounds) + else: + return approx_derivative(fun, x, method='2-point', + abs_step=epsilon, args=args, + bounds=new_bounds) + + return cjac + cjac = cjac_factory(con['fun']) + + # update constraints' dictionary + cons[ctype] += ({'fun': con['fun'], + 'jac': cjac, + 'args': con.get('args', ())}, ) + + exit_modes = {-1: "Gradient evaluation required (g & a)", + 0: "Optimization terminated successfully", + 1: "Function evaluation required (f & c)", + 2: "More equality constraints than independent variables", + 3: "More than 3*n iterations in LSQ subproblem", + 4: "Inequality constraints incompatible", + 5: "Singular matrix E in LSQ subproblem", + 6: "Singular matrix C in LSQ subproblem", + 7: "Rank-deficient equality constraint subproblem HFTI", + 8: "Positive directional derivative for linesearch", + 9: "Iteration limit reached"} + + # Set the parameters that SLSQP will need + # meq, mieq: number of equality and inequality constraints + meq = sum(map(len, [atleast_1d(c['fun'](x, *c['args'])) + for c in cons['eq']])) + mieq = sum(map(len, [atleast_1d(c['fun'](x, *c['args'])) + for c in cons['ineq']])) + # m = The total number of constraints + m = meq + mieq + # la = The number of constraints, or 1 if there are no constraints + la = array([1, m]).max() + # n = The number of independent variables + n = len(x) + + # Define the workspaces for SLSQP + n1 = n + 1 + mineq = m - meq + n1 + n1 + len_w = (3*n1+m)*(n1+1)+(n1-meq+1)*(mineq+2) + 2*mineq+(n1+mineq)*(n1-meq) \ + + 2*meq + n1 + ((n+1)*n)//2 + 2*m + 3*n + 3*n1 + 1 + len_jw = mineq + w = zeros(len_w) 
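The constraint triage above consumes `minimize`-style constraint dictionaries, which is how this routine is normally reached. A short sketch of the equivalent public-API call; the exact minimizer of this problem is x = [0.5, 0.5]:

    import numpy as np
    from scipy.optimize import minimize

    res = minimize(lambda x: x[0]**2 + x[1]**2, x0=[2.0, 0.0],
                   method='SLSQP',
                   constraints=[{'type': 'eq',
                                 'fun': lambda x: x[0] + x[1] - 1.0}],
                   bounds=[(0.0, None), (0.0, None)])
    print(res.success, np.round(res.x, 6))  # True, approximately [0.5 0.5]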
+ jw = zeros(len_jw) + + # Decompose bounds into xl and xu + if bounds is None or len(bounds) == 0: + xl = np.empty(n, dtype=float) + xu = np.empty(n, dtype=float) + xl.fill(np.nan) + xu.fill(np.nan) + else: + bnds = array([(_arr_to_scalar(l), _arr_to_scalar(u)) + for (l, u) in bounds], float) + if bnds.shape[0] != n: + raise IndexError('SLSQP Error: the length of bounds is not ' + 'compatible with that of x0.') + + with np.errstate(invalid='ignore'): + bnderr = bnds[:, 0] > bnds[:, 1] + + if bnderr.any(): + raise ValueError('SLSQP Error: lb > ub in bounds %s.' % + ', '.join(str(b) for b in bnderr)) + xl, xu = bnds[:, 0], bnds[:, 1] + + # Mark infinite bounds with nans; the Fortran code understands this + infbnd = ~isfinite(bnds) + xl[infbnd[:, 0]] = np.nan + xu[infbnd[:, 1]] = np.nan + + # ScalarFunction provides function and gradient evaluation + sf = _prepare_scalar_function(func, x, jac=jac, args=args, epsilon=eps, + finite_diff_rel_step=finite_diff_rel_step, + bounds=new_bounds) + # gh11403 SLSQP sometimes exceeds bounds by 1 or 2 ULP, make sure this + # doesn't get sent to the func/grad evaluator. + wrapped_fun = _clip_x_for_func(sf.fun, new_bounds) + wrapped_grad = _clip_x_for_func(sf.grad, new_bounds) + + # Initialize the iteration counter and the mode value + mode = array(0, int) + acc = array(acc, float) + majiter = array(iter, int) + majiter_prev = 0 + + # Initialize internal SLSQP state variables + alpha = array(0, float) + f0 = array(0, float) + gs = array(0, float) + h1 = array(0, float) + h2 = array(0, float) + h3 = array(0, float) + h4 = array(0, float) + t = array(0, float) + t0 = array(0, float) + tol = array(0, float) + iexact = array(0, int) + incons = array(0, int) + ireset = array(0, int) + itermx = array(0, int) + line = array(0, int) + n1 = array(0, int) + n2 = array(0, int) + n3 = array(0, int) + + # Print the header if iprint >= 2 + if iprint >= 2: + print("%5s %5s %16s %16s" % ("NIT", "FC", "OBJFUN", "GNORM")) + + # mode is zero on entry, so call objective, constraints and gradients + # there should be no func evaluations here because it's cached from + # ScalarFunction + fx = wrapped_fun(x) + g = append(wrapped_grad(x), 0.0) + c = _eval_constraint(x, cons) + a = _eval_con_normals(x, cons, la, n, m, meq, mieq) + + while 1: + # Call SLSQP + slsqp(m, meq, x, xl, xu, fx, c, g, a, acc, majiter, mode, w, jw, + alpha, f0, gs, h1, h2, h3, h4, t, t0, tol, + iexact, incons, ireset, itermx, line, + n1, n2, n3) + + if mode == 1: # objective and constraint evaluation required + fx = wrapped_fun(x) + c = _eval_constraint(x, cons) + + if mode == -1: # gradient evaluation required + g = append(wrapped_grad(x), 0.0) + a = _eval_con_normals(x, cons, la, n, m, meq, mieq) + + if majiter > majiter_prev: + # call callback if major iteration has incremented + if callback is not None: + callback(np.copy(x)) + + # Print the status of the current iterate if iprint > 2 + if iprint >= 2: + print("%5i %5i % 16.6E % 16.6E" % (majiter, sf.nfev, + fx, linalg.norm(g))) + + # If exit mode is not -1 or 1, slsqp has completed + if abs(mode) != 1: + break + + majiter_prev = int(majiter) + + # Optimization loop complete. 
Print status if requested + if iprint >= 1: + print(exit_modes[int(mode)] + " (Exit mode " + str(mode) + ')') + print(" Current function value:", fx) + print(" Iterations:", majiter) + print(" Function evaluations:", sf.nfev) + print(" Gradient evaluations:", sf.ngev) + + return OptimizeResult(x=x, fun=fx, jac=g[:-1], nit=int(majiter), + nfev=sf.nfev, njev=sf.ngev, status=int(mode), + message=exit_modes[int(mode)], success=(mode == 0)) + + +def _eval_constraint(x, cons): + # Compute constraints + if cons['eq']: + c_eq = concatenate([atleast_1d(con['fun'](x, *con['args'])) + for con in cons['eq']]) + else: + c_eq = zeros(0) + + if cons['ineq']: + c_ieq = concatenate([atleast_1d(con['fun'](x, *con['args'])) + for con in cons['ineq']]) + else: + c_ieq = zeros(0) + + # Now combine c_eq and c_ieq into a single matrix + c = concatenate((c_eq, c_ieq)) + return c + + +def _eval_con_normals(x, cons, la, n, m, meq, mieq): + # Compute the normals of the constraints + if cons['eq']: + a_eq = vstack([con['jac'](x, *con['args']) + for con in cons['eq']]) + else: # no equality constraint + a_eq = zeros((meq, n)) + + if cons['ineq']: + a_ieq = vstack([con['jac'](x, *con['args']) + for con in cons['ineq']]) + else: # no inequality constraint + a_ieq = zeros((mieq, n)) + + # Now combine a_eq and a_ieq into a single a matrix + if m == 0: # no constraints + a = zeros((la, n)) + else: + a = vstack((a_eq, a_ieq)) + a = concatenate((a, zeros([la, 1])), 1) + + return a diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_spectral.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_spectral.py new file mode 100644 index 0000000000000000000000000000000000000000..5ff5bef0283b2d6b6c018c1c8b98cd46a335d7cb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_spectral.py @@ -0,0 +1,260 @@ +""" +Spectral Algorithm for Nonlinear Equations +""" +import collections + +import numpy as np +from scipy.optimize import OptimizeResult +from scipy.optimize._optimize import _check_unknown_options +from ._linesearch import _nonmonotone_line_search_cruz, _nonmonotone_line_search_cheng + +class _NoConvergence(Exception): + pass + + +def _root_df_sane(func, x0, args=(), ftol=1e-8, fatol=1e-300, maxfev=1000, + fnorm=None, callback=None, disp=False, M=10, eta_strategy=None, + sigma_eps=1e-10, sigma_0=1.0, line_search='cruz', **unknown_options): + r""" + Solve nonlinear equation with the DF-SANE method + + Options + ------- + ftol : float, optional + Relative norm tolerance. + fatol : float, optional + Absolute norm tolerance. + Algorithm terminates when ``||func(x)|| < fatol + ftol ||func(x_0)||``. + fnorm : callable, optional + Norm to use in the convergence check. If None, 2-norm is used. + maxfev : int, optional + Maximum number of function evaluations. + disp : bool, optional + Whether to print convergence process to stdout. + eta_strategy : callable, optional + Choice of the ``eta_k`` parameter, which gives slack for growth + of ``||F||**2``. Called as ``eta_k = eta_strategy(k, x, F)`` with + `k` the iteration number, `x` the current iterate and `F` the current + residual. Should satisfy ``eta_k > 0`` and ``sum(eta, k=0..inf) < inf``. + Default: ``||F||**2 / (1 + k)**2``. + sigma_eps : float, optional + The spectral coefficient is constrained to ``sigma_eps < sigma < 1/sigma_eps``. + Default: 1e-10 + sigma_0 : float, optional + Initial spectral coefficient. + Default: 1.0 + M : int, optional + Number of iterates to include in the nonmonotonic line search. 
+ Default: 10 + line_search : {'cruz', 'cheng'} + Type of line search to employ. 'cruz' is the original one defined in + [Martinez & Raydan. Math. Comp. 75, 1429 (2006)], 'cheng' is + a modified search defined in [Cheng & Li. IMA J. Numer. Anal. 29, 814 (2009)]. + Default: 'cruz' + + References + ---------- + .. [1] "Spectral residual method without gradient information for solving + large-scale nonlinear systems of equations." W. La Cruz, + J.M. Martinez, M. Raydan. Math. Comp. **75**, 1429 (2006). + .. [2] W. La Cruz, Opt. Meth. Software, 29, 24 (2014). + .. [3] W. Cheng, D.-H. Li. IMA J. Numer. Anal. **29**, 814 (2009). + + """ + _check_unknown_options(unknown_options) + + if line_search not in ('cheng', 'cruz'): + raise ValueError(f"Invalid value {line_search!r} for 'line_search'") + + nexp = 2 + + if eta_strategy is None: + # Different choice from [1], as their eta is not invariant + # vs. scaling of F. + def eta_strategy(k, x, F): + # Obtain squared 2-norm of the initial residual from the outer scope + return f_0 / (1 + k)**2 + + if fnorm is None: + def fnorm(F): + # Obtain squared 2-norm of the current residual from the outer scope + return f_k**(1.0/nexp) + + def fmerit(F): + return np.linalg.norm(F)**nexp + + nfev = [0] + f, x_k, x_shape, f_k, F_k, is_complex = _wrap_func(func, x0, fmerit, + nfev, maxfev, args) + + k = 0 + f_0 = f_k + sigma_k = sigma_0 + + F_0_norm = fnorm(F_k) + + # For the 'cruz' line search + prev_fs = collections.deque([f_k], M) + + # For the 'cheng' line search + Q = 1.0 + C = f_0 + + converged = False + message = "too many function evaluations required" + + while True: + F_k_norm = fnorm(F_k) + + if disp: + print("iter %d: ||F|| = %g, sigma = %g" % (k, F_k_norm, sigma_k)) + + if callback is not None: + callback(x_k, F_k) + + if F_k_norm < ftol * F_0_norm + fatol: + # Converged! + message = "successful convergence" + converged = True + break + + # Control spectral parameter, from [2] + if abs(sigma_k) > 1/sigma_eps: + sigma_k = 1/sigma_eps * np.sign(sigma_k) + elif abs(sigma_k) < sigma_eps: + sigma_k = sigma_eps + + # Line search direction + d = -sigma_k * F_k + + # Nonmonotone line search + eta = eta_strategy(k, x_k, F_k) + try: + if line_search == 'cruz': + alpha, xp, fp, Fp = _nonmonotone_line_search_cruz(f, x_k, d, prev_fs, + eta=eta) + elif line_search == 'cheng': + alpha, xp, fp, Fp, C, Q = _nonmonotone_line_search_cheng(f, x_k, d, f_k, + C, Q, eta=eta) + except _NoConvergence: + break + + # Update spectral parameter + s_k = xp - x_k + y_k = Fp - F_k + sigma_k = np.vdot(s_k, s_k) / np.vdot(s_k, y_k) + + # Take step + x_k = xp + F_k = Fp + f_k = fp + + # Store function value + if line_search == 'cruz': + prev_fs.append(fp) + + k += 1 + + x = _wrap_result(x_k, is_complex, shape=x_shape) + F = _wrap_result(F_k, is_complex) + + result = OptimizeResult(x=x, success=converged, + message=message, + fun=F, nfev=nfev[0], nit=k, method="df-sane") + + return result + + +def _wrap_func(func, x0, fmerit, nfev_list, maxfev, args=()): + """ + Wrap a function and an initial value so that (i) complex values + are wrapped to reals, and (ii) value for a merit function + fmerit(x, f) is computed at the same time, (iii) iteration count + is maintained and an exception is raised if it is exceeded. + + Parameters + ---------- + func : callable + Function to wrap + x0 : ndarray + Initial value + fmerit : callable + Merit function fmerit(f) for computing merit value from residual. + nfev_list : list + List to store number of evaluations in. Should be [0] in the beginning. 
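The DF-SANE solver above is reached through the public `scipy.optimize.root` interface with ``method='df-sane'``. A short usage sketch on a small linear system whose root is exactly [1, 1]; the printed values are expected, not guaranteed, since they depend on the iteration:

    import numpy as np
    from scipy.optimize import root

    def F(x):
        return np.array([x[0] + x[1] - 2.0, x[0] - x[1]])

    sol = root(F, x0=[3.0, -1.0], method='df-sane')
    print(sol.success, np.round(sol.x, 6))  # expected: True [1. 1.]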
+ maxfev : int + Maximum number of evaluations before _NoConvergence is raised. + args : tuple + Extra arguments to func + + Returns + ------- + wrap_func : callable + Wrapped function, to be called as + ``F, fp = wrap_func(x0)`` + x0_wrap : ndarray of float + Wrapped initial value; raveled to 1-D and complex + values mapped to reals. + x0_shape : tuple + Shape of the initial value array + f : float + Merit function at F + F : ndarray of float + Residual at x0_wrap + is_complex : bool + Whether complex values were mapped to reals + + """ + x0 = np.asarray(x0) + x0_shape = x0.shape + F = np.asarray(func(x0, *args)).ravel() + is_complex = np.iscomplexobj(x0) or np.iscomplexobj(F) + x0 = x0.ravel() + + nfev_list[0] = 1 + + if is_complex: + def wrap_func(x): + if nfev_list[0] >= maxfev: + raise _NoConvergence() + nfev_list[0] += 1 + z = _real2complex(x).reshape(x0_shape) + v = np.asarray(func(z, *args)).ravel() + F = _complex2real(v) + f = fmerit(F) + return f, F + + x0 = _complex2real(x0) + F = _complex2real(F) + else: + def wrap_func(x): + if nfev_list[0] >= maxfev: + raise _NoConvergence() + nfev_list[0] += 1 + x = x.reshape(x0_shape) + F = np.asarray(func(x, *args)).ravel() + f = fmerit(F) + return f, F + + return wrap_func, x0, x0_shape, fmerit(F), F, is_complex + + +def _wrap_result(result, is_complex, shape=None): + """ + Convert from real to complex and reshape result arrays. + """ + if is_complex: + z = _real2complex(result) + else: + z = result + if shape is not None: + z = z.reshape(shape) + return z + + +def _real2complex(x): + return np.ascontiguousarray(x, dtype=float).view(np.complex128) + + +def _complex2real(z): + return np.ascontiguousarray(z, dtype=complex).view(np.float64) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_tnc.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_tnc.py new file mode 100644 index 0000000000000000000000000000000000000000..0f0b3be740368eb759d608b541930dbb88ec042b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_tnc.py @@ -0,0 +1,430 @@ +# TNC Python interface +# @(#) $Jeannot: tnc.py,v 1.11 2005/01/28 18:27:31 js Exp $ + +# Copyright (c) 2004-2005, Jean-Sebastien Roy (js@jeannot.org) + +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: + +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +""" +TNC: A Python interface to the TNC non-linear optimizer + +TNC is a non-linear optimizer. To use it, you must provide a function to +minimize. 
The function must take one argument: the list of coordinates where to +evaluate the function; and it must return either a tuple, whose first element is the +value of the function, and whose second argument is the gradient of the function +(as a list of values); or None, to abort the minimization. +""" + +from scipy.optimize import _moduleTNC as moduleTNC +from ._optimize import (MemoizeJac, OptimizeResult, _check_unknown_options, + _prepare_scalar_function) +from ._constraints import old_bound_to_new +from scipy._lib._array_api import atleast_nd, array_namespace + +from numpy import inf, array, zeros + +__all__ = ['fmin_tnc'] + + +MSG_NONE = 0 # No messages +MSG_ITER = 1 # One line per iteration +MSG_INFO = 2 # Informational messages +MSG_VERS = 4 # Version info +MSG_EXIT = 8 # Exit reasons +MSG_ALL = MSG_ITER + MSG_INFO + MSG_VERS + MSG_EXIT + +MSGS = { + MSG_NONE: "No messages", + MSG_ITER: "One line per iteration", + MSG_INFO: "Informational messages", + MSG_VERS: "Version info", + MSG_EXIT: "Exit reasons", + MSG_ALL: "All messages" +} + +INFEASIBLE = -1 # Infeasible (lower bound > upper bound) +LOCALMINIMUM = 0 # Local minimum reached (|pg| ~= 0) +FCONVERGED = 1 # Converged (|f_n-f_(n-1)| ~= 0) +XCONVERGED = 2 # Converged (|x_n-x_(n-1)| ~= 0) +MAXFUN = 3 # Max. number of function evaluations reached +LSFAIL = 4 # Linear search failed +CONSTANT = 5 # All lower bounds are equal to the upper bounds +NOPROGRESS = 6 # Unable to progress +USERABORT = 7 # User requested end of minimization + +RCSTRINGS = { + INFEASIBLE: "Infeasible (lower bound > upper bound)", + LOCALMINIMUM: "Local minimum reached (|pg| ~= 0)", + FCONVERGED: "Converged (|f_n-f_(n-1)| ~= 0)", + XCONVERGED: "Converged (|x_n-x_(n-1)| ~= 0)", + MAXFUN: "Max. number of function evaluations reached", + LSFAIL: "Linear search failed", + CONSTANT: "All lower bounds are equal to the upper bounds", + NOPROGRESS: "Unable to progress", + USERABORT: "User requested end of minimization" +} + +# Changes to interface made by Travis Oliphant, Apr. 2004 for inclusion in +# SciPy + + +def fmin_tnc(func, x0, fprime=None, args=(), approx_grad=0, + bounds=None, epsilon=1e-8, scale=None, offset=None, + messages=MSG_ALL, maxCGit=-1, maxfun=None, eta=-1, + stepmx=0, accuracy=0, fmin=0, ftol=-1, xtol=-1, pgtol=-1, + rescale=-1, disp=None, callback=None): + """ + Minimize a function with variables subject to bounds, using + gradient information in a truncated Newton algorithm. This + method wraps a C implementation of the algorithm. + + Parameters + ---------- + func : callable ``func(x, *args)`` + Function to minimize. Must do one of: + + 1. Return f and g, where f is the value of the function and g its + gradient (a list of floats). + + 2. Return the function value but supply gradient function + separately as `fprime`. + + 3. Return the function value and set ``approx_grad=True``. + + If the function returns None, the minimization + is aborted. + x0 : array_like + Initial estimate of minimum. + fprime : callable ``fprime(x, *args)``, optional + Gradient of `func`. If None, then either `func` must return the + function value and the gradient (``f,g = func(x, *args)``) + or `approx_grad` must be True. + args : tuple, optional + Arguments to pass to function. + approx_grad : bool, optional + If true, approximate the gradient numerically. + bounds : list, optional + (min, max) pairs for each element in x0, defining the + bounds on that parameter. Use None or +/-inf for one of + min or max when there is no bound in that direction. 
+ epsilon : float, optional + Used if approx_grad is True. The stepsize in a finite + difference approximation for fprime. + scale : array_like, optional + Scaling factors to apply to each variable. If None, the + factors are up-low for interval bounded variables and + 1+|x| for the others. Defaults to None. + offset : array_like, optional + Value to subtract from each variable. If None, the + offsets are (up+low)/2 for interval bounded variables + and x for the others. + messages : int, optional + Bit mask used to select messages display during + minimization values defined in the MSGS dict. Defaults to + MGS_ALL. + disp : int, optional + Integer interface to messages. 0 = no message, 5 = all messages + maxCGit : int, optional + Maximum number of hessian*vector evaluations per main + iteration. If maxCGit == 0, the direction chosen is + -gradient if maxCGit < 0, maxCGit is set to + max(1,min(50,n/2)). Defaults to -1. + maxfun : int, optional + Maximum number of function evaluation. If None, maxfun is + set to max(100, 10*len(x0)). Defaults to None. Note that this function + may violate the limit because of evaluating gradients by numerical + differentiation. + eta : float, optional + Severity of the line search. If < 0 or > 1, set to 0.25. + Defaults to -1. + stepmx : float, optional + Maximum step for the line search. May be increased during + call. If too small, it will be set to 10.0. Defaults to 0. + accuracy : float, optional + Relative precision for finite difference calculations. If + <= machine_precision, set to sqrt(machine_precision). + Defaults to 0. + fmin : float, optional + Minimum function value estimate. Defaults to 0. + ftol : float, optional + Precision goal for the value of f in the stopping criterion. + If ftol < 0.0, ftol is set to 0.0 defaults to -1. + xtol : float, optional + Precision goal for the value of x in the stopping + criterion (after applying x scaling factors). If xtol < + 0.0, xtol is set to sqrt(machine_precision). Defaults to + -1. + pgtol : float, optional + Precision goal for the value of the projected gradient in + the stopping criterion (after applying x scaling factors). + If pgtol < 0.0, pgtol is set to 1e-2 * sqrt(accuracy). + Setting it to 0.0 is not recommended. Defaults to -1. + rescale : float, optional + Scaling factor (in log10) used to trigger f value + rescaling. If 0, rescale at each iteration. If a large + value, never rescale. If < 0, rescale is set to 1.3. + callback : callable, optional + Called after each iteration, as callback(xk), where xk is the + current parameter vector. + + Returns + ------- + x : ndarray + The solution. + nfeval : int + The number of function evaluations. + rc : int + Return code, see below + + See also + -------- + minimize: Interface to minimization algorithms for multivariate + functions. See the 'TNC' `method` in particular. + + Notes + ----- + The underlying algorithm is truncated Newton, also called + Newton Conjugate-Gradient. This method differs from + scipy.optimize.fmin_ncg in that + + 1. it wraps a C implementation of the algorithm + 2. it allows each variable to be given an upper and lower bound. + + The algorithm incorporates the bound constraints by determining + the descent direction as in an unconstrained truncated Newton, + but never taking a step-size large enough to leave the space + of feasible x's. The algorithm keeps track of a set of + currently active constraints, and ignores them when computing + the minimum allowable step size. 
(The x's associated with the + active constraint are kept fixed.) If the maximum allowable + step size is zero then a new constraint is added. At the end + of each iteration one of the constraints may be deemed no + longer active and removed. A constraint is considered + no longer active is if it is currently active + but the gradient for that variable points inward from the + constraint. The specific constraint removed is the one + associated with the variable of largest index whose + constraint is no longer active. + + Return codes are defined as follows:: + + -1 : Infeasible (lower bound > upper bound) + 0 : Local minimum reached (|pg| ~= 0) + 1 : Converged (|f_n-f_(n-1)| ~= 0) + 2 : Converged (|x_n-x_(n-1)| ~= 0) + 3 : Max. number of function evaluations reached + 4 : Linear search failed + 5 : All lower bounds are equal to the upper bounds + 6 : Unable to progress + 7 : User requested end of minimization + + References + ---------- + Wright S., Nocedal J. (2006), 'Numerical Optimization' + + Nash S.G. (1984), "Newton-Type Minimization Via the Lanczos Method", + SIAM Journal of Numerical Analysis 21, pp. 770-778 + + """ + # handle fprime/approx_grad + if approx_grad: + fun = func + jac = None + elif fprime is None: + fun = MemoizeJac(func) + jac = fun.derivative + else: + fun = func + jac = fprime + + if disp is not None: # disp takes precedence over messages + mesg_num = disp + else: + mesg_num = {0:MSG_NONE, 1:MSG_ITER, 2:MSG_INFO, 3:MSG_VERS, + 4:MSG_EXIT, 5:MSG_ALL}.get(messages, MSG_ALL) + # build options + opts = {'eps': epsilon, + 'scale': scale, + 'offset': offset, + 'mesg_num': mesg_num, + 'maxCGit': maxCGit, + 'maxfun': maxfun, + 'eta': eta, + 'stepmx': stepmx, + 'accuracy': accuracy, + 'minfev': fmin, + 'ftol': ftol, + 'xtol': xtol, + 'gtol': pgtol, + 'rescale': rescale, + 'disp': False} + + res = _minimize_tnc(fun, x0, args, jac, bounds, callback=callback, **opts) + + return res['x'], res['nfev'], res['status'] + + +def _minimize_tnc(fun, x0, args=(), jac=None, bounds=None, + eps=1e-8, scale=None, offset=None, mesg_num=None, + maxCGit=-1, eta=-1, stepmx=0, accuracy=0, + minfev=0, ftol=-1, xtol=-1, gtol=-1, rescale=-1, disp=False, + callback=None, finite_diff_rel_step=None, maxfun=None, + **unknown_options): + """ + Minimize a scalar function of one or more variables using a truncated + Newton (TNC) algorithm. + + Options + ------- + eps : float or ndarray + If `jac is None` the absolute step size used for numerical + approximation of the jacobian via forward differences. + scale : list of floats + Scaling factors to apply to each variable. If None, the + factors are up-low for interval bounded variables and + 1+|x] for the others. Defaults to None. + offset : float + Value to subtract from each variable. If None, the + offsets are (up+low)/2 for interval bounded variables + and x for the others. + disp : bool + Set to True to print convergence messages. + maxCGit : int + Maximum number of hessian*vector evaluations per main + iteration. If maxCGit == 0, the direction chosen is + -gradient if maxCGit < 0, maxCGit is set to + max(1,min(50,n/2)). Defaults to -1. + eta : float + Severity of the line search. If < 0 or > 1, set to 0.25. + Defaults to -1. + stepmx : float + Maximum step for the line search. May be increased during + call. If too small, it will be set to 10.0. Defaults to 0. + accuracy : float + Relative precision for finite difference calculations. If + <= machine_precision, set to sqrt(machine_precision). + Defaults to 0. 
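A short usage sketch of `fmin_tnc` with a numerically approximated gradient and a single box constraint; the unconstrained minimum at x = 2 is cut off by the upper bound, so the solver is expected to stop at (approximately) x = 1:

    import numpy as np
    from scipy.optimize import fmin_tnc

    x_opt, nfev, rc = fmin_tnc(lambda x: (x[0] - 2.0)**2, x0=[0.0],
                               approx_grad=True, bounds=[(None, 1.0)],
                               disp=0)
    print(np.round(x_opt, 6), rc in (0, 1, 2))  # approximately [1.], True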
+ minfev : float + Minimum function value estimate. Defaults to 0. + ftol : float + Precision goal for the value of f in the stopping criterion. + If ftol < 0.0, ftol is set to 0.0 defaults to -1. + xtol : float + Precision goal for the value of x in the stopping + criterion (after applying x scaling factors). If xtol < + 0.0, xtol is set to sqrt(machine_precision). Defaults to + -1. + gtol : float + Precision goal for the value of the projected gradient in + the stopping criterion (after applying x scaling factors). + If gtol < 0.0, gtol is set to 1e-2 * sqrt(accuracy). + Setting it to 0.0 is not recommended. Defaults to -1. + rescale : float + Scaling factor (in log10) used to trigger f value + rescaling. If 0, rescale at each iteration. If a large + value, never rescale. If < 0, rescale is set to 1.3. + finite_diff_rel_step : None or array_like, optional + If `jac in ['2-point', '3-point', 'cs']` the relative step size to + use for numerical approximation of the jacobian. The absolute step + size is computed as ``h = rel_step * sign(x) * max(1, abs(x))``, + possibly adjusted to fit into the bounds. For ``method='3-point'`` + the sign of `h` is ignored. If None (default) then step is selected + automatically. + maxfun : int + Maximum number of function evaluations. If None, `maxfun` is + set to max(100, 10*len(x0)). Defaults to None. + """ + _check_unknown_options(unknown_options) + fmin = minfev + pgtol = gtol + + xp = array_namespace(x0) + x0 = atleast_nd(x0, ndim=1, xp=xp) + dtype = xp.float64 + if xp.isdtype(x0.dtype, "real floating"): + dtype = x0.dtype + x0 = xp.reshape(xp.astype(x0, dtype), -1) + + n = len(x0) + + if bounds is None: + bounds = [(None,None)] * n + if len(bounds) != n: + raise ValueError('length of x0 != length of bounds') + new_bounds = old_bound_to_new(bounds) + + if mesg_num is not None: + messages = {0:MSG_NONE, 1:MSG_ITER, 2:MSG_INFO, 3:MSG_VERS, + 4:MSG_EXIT, 5:MSG_ALL}.get(mesg_num, MSG_ALL) + elif disp: + messages = MSG_ALL + else: + messages = MSG_NONE + + sf = _prepare_scalar_function(fun, x0, jac=jac, args=args, epsilon=eps, + finite_diff_rel_step=finite_diff_rel_step, + bounds=new_bounds) + func_and_grad = sf.fun_and_grad + + """ + low, up : the bounds (lists of floats) + if low is None, the lower bounds are removed. + if up is None, the upper bounds are removed. + low and up defaults to None + """ + low = zeros(n) + up = zeros(n) + for i in range(n): + if bounds[i] is None: + l, u = -inf, inf + else: + l,u = bounds[i] + if l is None: + low[i] = -inf + else: + low[i] = l + if u is None: + up[i] = inf + else: + up[i] = u + + if scale is None: + scale = array([]) + + if offset is None: + offset = array([]) + + if maxfun is None: + maxfun = max(100, 10*len(x0)) + + rc, nf, nit, x, funv, jacv = moduleTNC.tnc_minimize( + func_and_grad, x0, low, up, scale, + offset, messages, maxCGit, maxfun, + eta, stepmx, accuracy, fmin, ftol, + xtol, pgtol, rescale, callback + ) + # the TNC documentation states: "On output, x, f and g may be very + # slightly out of sync because of scaling". Therefore re-evaluate + # func_and_grad so they are synced. 
+ funv, jacv = func_and_grad(x) + + return OptimizeResult(x=x, fun=funv, jac=jacv, nfev=sf.nfev, + nit=nit, status=rc, message=RCSTRINGS[rc], + success=(-1 < rc < 3)) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trlib/__init__.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trlib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..537b73b3aeb36df09863a0cd24957e5612deb030 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trlib/__init__.py @@ -0,0 +1,12 @@ +from ._trlib import TRLIBQuadraticSubproblem + +__all__ = ['TRLIBQuadraticSubproblem', 'get_trlib_quadratic_subproblem'] + + +def get_trlib_quadratic_subproblem(tol_rel_i=-2.0, tol_rel_b=-3.0, disp=False): + def subproblem_factory(x, fun, jac, hess, hessp): + return TRLIBQuadraticSubproblem(x, fun, jac, hess, hessp, + tol_rel_i=tol_rel_i, + tol_rel_b=tol_rel_b, + disp=disp) + return subproblem_factory diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trlib/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trlib/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..79319053dee9af0589844a6d59257ca8bd82cbe4 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trlib/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trlib/_trlib.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trlib/_trlib.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..0e76c61bbe93a5017ed6dab5172b0972e9f885a3 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trlib/_trlib.cpython-310-x86_64-linux-gnu.so differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion.py new file mode 100644 index 0000000000000000000000000000000000000000..f2355cf68ac8e1cac7e2688a9b91364ff2b2dcee --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion.py @@ -0,0 +1,304 @@ +"""Trust-region optimization.""" +import math +import warnings + +import numpy as np +import scipy.linalg +from ._optimize import (_check_unknown_options, _status_message, + OptimizeResult, _prepare_scalar_function, + _call_callback_maybe_halt) +from scipy.optimize._hessian_update_strategy import HessianUpdateStrategy +from scipy.optimize._differentiable_functions import FD_METHODS +__all__ = [] + + +def _wrap_function(function, args): + # wraps a minimizer function to count number of evaluations + # and to easily provide an args kwd. + ncalls = [0] + if function is None: + return ncalls, None + + def function_wrapper(x, *wrapper_args): + ncalls[0] += 1 + # A copy of x is sent to the user function (gh13740) + return function(np.copy(x), *(wrapper_args + args)) + + return ncalls, function_wrapper + + +class BaseQuadraticSubproblem: + """ + Base/abstract class defining the quadratic model for trust-region + minimization. Child classes must implement the ``solve`` method. + + Values of the objective function, Jacobian and Hessian (if provided) at + the current iterate ``x`` are evaluated on demand and then stored as + attributes ``fun``, ``jac``, ``hess``. 
+ """ + + def __init__(self, x, fun, jac, hess=None, hessp=None): + self._x = x + self._f = None + self._g = None + self._h = None + self._g_mag = None + self._cauchy_point = None + self._newton_point = None + self._fun = fun + self._jac = jac + self._hess = hess + self._hessp = hessp + + def __call__(self, p): + return self.fun + np.dot(self.jac, p) + 0.5 * np.dot(p, self.hessp(p)) + + @property + def fun(self): + """Value of objective function at current iteration.""" + if self._f is None: + self._f = self._fun(self._x) + return self._f + + @property + def jac(self): + """Value of Jacobian of objective function at current iteration.""" + if self._g is None: + self._g = self._jac(self._x) + return self._g + + @property + def hess(self): + """Value of Hessian of objective function at current iteration.""" + if self._h is None: + self._h = self._hess(self._x) + return self._h + + def hessp(self, p): + if self._hessp is not None: + return self._hessp(self._x, p) + else: + return np.dot(self.hess, p) + + @property + def jac_mag(self): + """Magnitude of jacobian of objective function at current iteration.""" + if self._g_mag is None: + self._g_mag = scipy.linalg.norm(self.jac) + return self._g_mag + + def get_boundaries_intersections(self, z, d, trust_radius): + """ + Solve the scalar quadratic equation ``||z + t d|| == trust_radius``. + This is like a line-sphere intersection. + Return the two values of t, sorted from low to high. + """ + a = np.dot(d, d) + b = 2 * np.dot(z, d) + c = np.dot(z, z) - trust_radius**2 + sqrt_discriminant = math.sqrt(b*b - 4*a*c) + + # The following calculation is mathematically + # equivalent to: + # ta = (-b - sqrt_discriminant) / (2*a) + # tb = (-b + sqrt_discriminant) / (2*a) + # but produce smaller round off errors. + # Look at Matrix Computation p.97 + # for a better justification. + aux = b + math.copysign(sqrt_discriminant, b) + ta = -aux / (2*a) + tb = -2*c / aux + return sorted([ta, tb]) + + def solve(self, trust_radius): + raise NotImplementedError('The solve method should be implemented by ' + 'the child class') + + +def _minimize_trust_region(fun, x0, args=(), jac=None, hess=None, hessp=None, + subproblem=None, initial_trust_radius=1.0, + max_trust_radius=1000.0, eta=0.15, gtol=1e-4, + maxiter=None, disp=False, return_all=False, + callback=None, inexact=True, **unknown_options): + """ + Minimization of scalar function of one or more variables using a + trust-region algorithm. + + Options for the trust-region algorithm are: + initial_trust_radius : float + Initial trust radius. + max_trust_radius : float + Never propose steps that are longer than this value. + eta : float + Trust region related acceptance stringency for proposed steps. + gtol : float + Gradient norm must be less than `gtol` + before successful termination. + maxiter : int + Maximum number of iterations to perform. + disp : bool + If True, print convergence message. + inexact : bool + Accuracy to solve subproblems. If True requires less nonlinear + iterations, but more vector products. Only effective for method + trust-krylov. + + This function is called by the `minimize` function. + It is not supposed to be called directly. 
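+    A minimal sketch of how this routine is reached in practice, through
+    `minimize` (the Rosenbrock helpers are used purely for illustration):
+
+        >>> from scipy.optimize import minimize, rosen, rosen_der, rosen_hess
+        >>> res = minimize(rosen, [2.0, 2.0], method='trust-ncg',
+        ...                jac=rosen_der, hess=rosen_hess,
+        ...                options={'gtol': 1e-6})
+
+    Methods such as 'trust-ncg', 'trust-krylov' and 'trust-exact' all call
+    this function with different `subproblem` implementations.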
+ """ + _check_unknown_options(unknown_options) + + if jac is None: + raise ValueError('Jacobian is currently required for trust-region ' + 'methods') + if hess is None and hessp is None: + raise ValueError('Either the Hessian or the Hessian-vector product ' + 'is currently required for trust-region methods') + if subproblem is None: + raise ValueError('A subproblem solving strategy is required for ' + 'trust-region methods') + if not (0 <= eta < 0.25): + raise Exception('invalid acceptance stringency') + if max_trust_radius <= 0: + raise Exception('the max trust radius must be positive') + if initial_trust_radius <= 0: + raise ValueError('the initial trust radius must be positive') + if initial_trust_radius >= max_trust_radius: + raise ValueError('the initial trust radius must be less than the ' + 'max trust radius') + + # force the initial guess into a nice format + x0 = np.asarray(x0).flatten() + + # A ScalarFunction representing the problem. This caches calls to fun, jac, + # hess. + sf = _prepare_scalar_function(fun, x0, jac=jac, hess=hess, args=args) + fun = sf.fun + jac = sf.grad + if callable(hess): + hess = sf.hess + elif callable(hessp): + # this elif statement must come before examining whether hess + # is estimated by FD methods or a HessianUpdateStrategy + pass + elif (hess in FD_METHODS or isinstance(hess, HessianUpdateStrategy)): + # If the Hessian is being estimated by finite differences or a + # Hessian update strategy then ScalarFunction.hess returns a + # LinearOperator or a HessianUpdateStrategy. This enables the + # calculation/creation of a hessp. BUT you only want to do this + # if the user *hasn't* provided a callable(hessp) function. + hess = None + + def hessp(x, p, *args): + return sf.hess(x).dot(p) + else: + raise ValueError('Either the Hessian or the Hessian-vector product ' + 'is currently required for trust-region methods') + + # ScalarFunction doesn't represent hessp + nhessp, hessp = _wrap_function(hessp, args) + + # limit the number of iterations + if maxiter is None: + maxiter = len(x0)*200 + + # init the search status + warnflag = 0 + + # initialize the search + trust_radius = initial_trust_radius + x = x0 + if return_all: + allvecs = [x] + m = subproblem(x, fun, jac, hess, hessp) + k = 0 + + # search for the function min + # do not even start if the gradient is small enough + while m.jac_mag >= gtol: + + # Solve the sub-problem. + # This gives us the proposed step relative to the current position + # and it tells us whether the proposed step + # has reached the trust region boundary or not. 
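+        # For intuition: ``BaseQuadraticSubproblem`` provides
+        # ``get_boundaries_intersections(z, d, trust_radius)`` for exactly this
+        # clipping step; it solves ||z + t*d|| == trust_radius for t. For
+        # example, with z = [0, 0], d = [3, 4] and trust_radius = 10 the two
+        # roots are t = -2 and t = 2, because ||t*[3, 4]|| = 5*|t|. The
+        # `hits_boundary` flag returned by ``solve`` records whether the
+        # proposed step ended up on that boundary.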
+ try: + p, hits_boundary = m.solve(trust_radius) + except np.linalg.LinAlgError: + warnflag = 3 + break + + # calculate the predicted value at the proposed point + predicted_value = m(p) + + # define the local approximation at the proposed point + x_proposed = x + p + m_proposed = subproblem(x_proposed, fun, jac, hess, hessp) + + # evaluate the ratio defined in equation (4.4) + actual_reduction = m.fun - m_proposed.fun + predicted_reduction = m.fun - predicted_value + if predicted_reduction <= 0: + warnflag = 2 + break + rho = actual_reduction / predicted_reduction + + # update the trust radius according to the actual/predicted ratio + if rho < 0.25: + trust_radius *= 0.25 + elif rho > 0.75 and hits_boundary: + trust_radius = min(2*trust_radius, max_trust_radius) + + # if the ratio is high enough then accept the proposed step + if rho > eta: + x = x_proposed + m = m_proposed + + # append the best guess, call back, increment the iteration count + if return_all: + allvecs.append(np.copy(x)) + k += 1 + + intermediate_result = OptimizeResult(x=x, fun=m.fun) + if _call_callback_maybe_halt(callback, intermediate_result): + break + + # check if the gradient is small enough to stop + if m.jac_mag < gtol: + warnflag = 0 + break + + # check if we have looked at enough iterations + if k >= maxiter: + warnflag = 1 + break + + # print some stuff if requested + status_messages = ( + _status_message['success'], + _status_message['maxiter'], + 'A bad approximation caused failure to predict improvement.', + 'A linalg error occurred, such as a non-psd Hessian.', + ) + if disp: + if warnflag == 0: + print(status_messages[warnflag]) + else: + warnings.warn(status_messages[warnflag], RuntimeWarning, stacklevel=3) + print(" Current function value: %f" % m.fun) + print(" Iterations: %d" % k) + print(" Function evaluations: %d" % sf.nfev) + print(" Gradient evaluations: %d" % sf.ngev) + print(" Hessian evaluations: %d" % (sf.nhev + nhessp[0])) + + result = OptimizeResult(x=x, success=(warnflag == 0), status=warnflag, + fun=m.fun, jac=m.jac, nfev=sf.nfev, njev=sf.ngev, + nhev=sf.nhev + nhessp[0], nit=k, + message=status_messages[warnflag]) + + if hess is not None: + result['hess'] = m.hess + + if return_all: + result['allvecs'] = allvecs + + return result diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__init__.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..549cfb9760dda474cb858b7b36d236af48111067 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__init__.py @@ -0,0 +1,6 @@ +"""This module contains the equality constrained SQP solver.""" + + +from .minimize_trustregion_constr import _minimize_trustregion_constr + +__all__ = ['_minimize_trustregion_constr'] diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/canonical_constraint.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/canonical_constraint.py new file mode 100644 index 0000000000000000000000000000000000000000..e1ad583bb8eee524d35c2e5bb16934f78629cd69 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/canonical_constraint.py @@ -0,0 +1,390 @@ +import numpy as np +import scipy.sparse as sps + + +class CanonicalConstraint: + """Canonical constraint to use with trust-constr algorithm. 
+ + It represents the set of constraints of the form:: + + f_eq(x) = 0 + f_ineq(x) <= 0 + + where ``f_eq`` and ``f_ineq`` are evaluated by a single function, see + below. + + The class is supposed to be instantiated by factory methods, which + should prepare the parameters listed below. + + Parameters + ---------- + n_eq, n_ineq : int + Number of equality and inequality constraints respectively. + fun : callable + Function defining the constraints. The signature is + ``fun(x) -> c_eq, c_ineq``, where ``c_eq`` is ndarray with `n_eq` + components and ``c_ineq`` is ndarray with `n_ineq` components. + jac : callable + Function to evaluate the Jacobian of the constraint. The signature + is ``jac(x) -> J_eq, J_ineq``, where ``J_eq`` and ``J_ineq`` are + either ndarray of csr_matrix of shapes (n_eq, n) and (n_ineq, n), + respectively. + hess : callable + Function to evaluate the Hessian of the constraints multiplied + by Lagrange multipliers, that is + ``dot(f_eq, v_eq) + dot(f_ineq, v_ineq)``. The signature is + ``hess(x, v_eq, v_ineq) -> H``, where ``H`` has an implied + shape (n, n) and provide a matrix-vector product operation + ``H.dot(p)``. + keep_feasible : ndarray, shape (n_ineq,) + Mask indicating which inequality constraints should be kept feasible. + """ + def __init__(self, n_eq, n_ineq, fun, jac, hess, keep_feasible): + self.n_eq = n_eq + self.n_ineq = n_ineq + self.fun = fun + self.jac = jac + self.hess = hess + self.keep_feasible = keep_feasible + + @classmethod + def from_PreparedConstraint(cls, constraint): + """Create an instance from `PreparedConstrained` object.""" + lb, ub = constraint.bounds + cfun = constraint.fun + keep_feasible = constraint.keep_feasible + + if np.all(lb == -np.inf) and np.all(ub == np.inf): + return cls.empty(cfun.n) + + if np.all(lb == -np.inf) and np.all(ub == np.inf): + return cls.empty(cfun.n) + elif np.all(lb == ub): + return cls._equal_to_canonical(cfun, lb) + elif np.all(lb == -np.inf): + return cls._less_to_canonical(cfun, ub, keep_feasible) + elif np.all(ub == np.inf): + return cls._greater_to_canonical(cfun, lb, keep_feasible) + else: + return cls._interval_to_canonical(cfun, lb, ub, keep_feasible) + + @classmethod + def empty(cls, n): + """Create an "empty" instance. + + This "empty" instance is required to allow working with unconstrained + problems as if they have some constraints. + """ + empty_fun = np.empty(0) + empty_jac = np.empty((0, n)) + empty_hess = sps.csr_matrix((n, n)) + + def fun(x): + return empty_fun, empty_fun + + def jac(x): + return empty_jac, empty_jac + + def hess(x, v_eq, v_ineq): + return empty_hess + + return cls(0, 0, fun, jac, hess, np.empty(0, dtype=np.bool_)) + + @classmethod + def concatenate(cls, canonical_constraints, sparse_jacobian): + """Concatenate multiple `CanonicalConstraint` into one. + + `sparse_jacobian` (bool) determines the Jacobian format of the + concatenated constraint. Note that items in `canonical_constraints` + must have their Jacobians in the same format. 
+ """ + def fun(x): + if canonical_constraints: + eq_all, ineq_all = zip( + *[c.fun(x) for c in canonical_constraints]) + else: + eq_all, ineq_all = [], [] + + return np.hstack(eq_all), np.hstack(ineq_all) + + if sparse_jacobian: + vstack = sps.vstack + else: + vstack = np.vstack + + def jac(x): + if canonical_constraints: + eq_all, ineq_all = zip( + *[c.jac(x) for c in canonical_constraints]) + else: + eq_all, ineq_all = [], [] + + return vstack(eq_all), vstack(ineq_all) + + def hess(x, v_eq, v_ineq): + hess_all = [] + index_eq = 0 + index_ineq = 0 + for c in canonical_constraints: + vc_eq = v_eq[index_eq:index_eq + c.n_eq] + vc_ineq = v_ineq[index_ineq:index_ineq + c.n_ineq] + hess_all.append(c.hess(x, vc_eq, vc_ineq)) + index_eq += c.n_eq + index_ineq += c.n_ineq + + def matvec(p): + result = np.zeros_like(p) + for h in hess_all: + result += h.dot(p) + return result + + n = x.shape[0] + return sps.linalg.LinearOperator((n, n), matvec, dtype=float) + + n_eq = sum(c.n_eq for c in canonical_constraints) + n_ineq = sum(c.n_ineq for c in canonical_constraints) + keep_feasible = np.hstack([c.keep_feasible for c in + canonical_constraints]) + + return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible) + + @classmethod + def _equal_to_canonical(cls, cfun, value): + empty_fun = np.empty(0) + n = cfun.n + + n_eq = value.shape[0] + n_ineq = 0 + keep_feasible = np.empty(0, dtype=bool) + + if cfun.sparse_jacobian: + empty_jac = sps.csr_matrix((0, n)) + else: + empty_jac = np.empty((0, n)) + + def fun(x): + return cfun.fun(x) - value, empty_fun + + def jac(x): + return cfun.jac(x), empty_jac + + def hess(x, v_eq, v_ineq): + return cfun.hess(x, v_eq) + + empty_fun = np.empty(0) + n = cfun.n + if cfun.sparse_jacobian: + empty_jac = sps.csr_matrix((0, n)) + else: + empty_jac = np.empty((0, n)) + + return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible) + + @classmethod + def _less_to_canonical(cls, cfun, ub, keep_feasible): + empty_fun = np.empty(0) + n = cfun.n + if cfun.sparse_jacobian: + empty_jac = sps.csr_matrix((0, n)) + else: + empty_jac = np.empty((0, n)) + + finite_ub = ub < np.inf + n_eq = 0 + n_ineq = np.sum(finite_ub) + + if np.all(finite_ub): + def fun(x): + return empty_fun, cfun.fun(x) - ub + + def jac(x): + return empty_jac, cfun.jac(x) + + def hess(x, v_eq, v_ineq): + return cfun.hess(x, v_ineq) + else: + finite_ub = np.nonzero(finite_ub)[0] + keep_feasible = keep_feasible[finite_ub] + ub = ub[finite_ub] + + def fun(x): + return empty_fun, cfun.fun(x)[finite_ub] - ub + + def jac(x): + return empty_jac, cfun.jac(x)[finite_ub] + + def hess(x, v_eq, v_ineq): + v = np.zeros(cfun.m) + v[finite_ub] = v_ineq + return cfun.hess(x, v) + + return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible) + + @classmethod + def _greater_to_canonical(cls, cfun, lb, keep_feasible): + empty_fun = np.empty(0) + n = cfun.n + if cfun.sparse_jacobian: + empty_jac = sps.csr_matrix((0, n)) + else: + empty_jac = np.empty((0, n)) + + finite_lb = lb > -np.inf + n_eq = 0 + n_ineq = np.sum(finite_lb) + + if np.all(finite_lb): + def fun(x): + return empty_fun, lb - cfun.fun(x) + + def jac(x): + return empty_jac, -cfun.jac(x) + + def hess(x, v_eq, v_ineq): + return cfun.hess(x, -v_ineq) + else: + finite_lb = np.nonzero(finite_lb)[0] + keep_feasible = keep_feasible[finite_lb] + lb = lb[finite_lb] + + def fun(x): + return empty_fun, lb - cfun.fun(x)[finite_lb] + + def jac(x): + return empty_jac, -cfun.jac(x)[finite_lb] + + def hess(x, v_eq, v_ineq): + v = np.zeros(cfun.m) + v[finite_lb] = -v_ineq + return cfun.hess(x, v) + + 
return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible) + + @classmethod + def _interval_to_canonical(cls, cfun, lb, ub, keep_feasible): + lb_inf = lb == -np.inf + ub_inf = ub == np.inf + equal = lb == ub + less = lb_inf & ~ub_inf + greater = ub_inf & ~lb_inf + interval = ~equal & ~lb_inf & ~ub_inf + + equal = np.nonzero(equal)[0] + less = np.nonzero(less)[0] + greater = np.nonzero(greater)[0] + interval = np.nonzero(interval)[0] + n_less = less.shape[0] + n_greater = greater.shape[0] + n_interval = interval.shape[0] + n_ineq = n_less + n_greater + 2 * n_interval + n_eq = equal.shape[0] + + keep_feasible = np.hstack((keep_feasible[less], + keep_feasible[greater], + keep_feasible[interval], + keep_feasible[interval])) + + def fun(x): + f = cfun.fun(x) + eq = f[equal] - lb[equal] + le = f[less] - ub[less] + ge = lb[greater] - f[greater] + il = f[interval] - ub[interval] + ig = lb[interval] - f[interval] + return eq, np.hstack((le, ge, il, ig)) + + def jac(x): + J = cfun.jac(x) + eq = J[equal] + le = J[less] + ge = -J[greater] + il = J[interval] + ig = -il + if sps.issparse(J): + ineq = sps.vstack((le, ge, il, ig)) + else: + ineq = np.vstack((le, ge, il, ig)) + return eq, ineq + + def hess(x, v_eq, v_ineq): + n_start = 0 + v_l = v_ineq[n_start:n_start + n_less] + n_start += n_less + v_g = v_ineq[n_start:n_start + n_greater] + n_start += n_greater + v_il = v_ineq[n_start:n_start + n_interval] + n_start += n_interval + v_ig = v_ineq[n_start:n_start + n_interval] + + v = np.zeros_like(lb) + v[equal] = v_eq + v[less] = v_l + v[greater] = -v_g + v[interval] = v_il - v_ig + + return cfun.hess(x, v) + + return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible) + + +def initial_constraints_as_canonical(n, prepared_constraints, sparse_jacobian): + """Convert initial values of the constraints to the canonical format. + + The purpose to avoid one additional call to the constraints at the initial + point. It takes saved values in `PreparedConstraint`, modififies and + concatenates them to the canonical constraint format. 
+ """ + c_eq = [] + c_ineq = [] + J_eq = [] + J_ineq = [] + + for c in prepared_constraints: + f = c.fun.f + J = c.fun.J + lb, ub = c.bounds + if np.all(lb == ub): + c_eq.append(f - lb) + J_eq.append(J) + elif np.all(lb == -np.inf): + finite_ub = ub < np.inf + c_ineq.append(f[finite_ub] - ub[finite_ub]) + J_ineq.append(J[finite_ub]) + elif np.all(ub == np.inf): + finite_lb = lb > -np.inf + c_ineq.append(lb[finite_lb] - f[finite_lb]) + J_ineq.append(-J[finite_lb]) + else: + lb_inf = lb == -np.inf + ub_inf = ub == np.inf + equal = lb == ub + less = lb_inf & ~ub_inf + greater = ub_inf & ~lb_inf + interval = ~equal & ~lb_inf & ~ub_inf + + c_eq.append(f[equal] - lb[equal]) + c_ineq.append(f[less] - ub[less]) + c_ineq.append(lb[greater] - f[greater]) + c_ineq.append(f[interval] - ub[interval]) + c_ineq.append(lb[interval] - f[interval]) + + J_eq.append(J[equal]) + J_ineq.append(J[less]) + J_ineq.append(-J[greater]) + J_ineq.append(J[interval]) + J_ineq.append(-J[interval]) + + c_eq = np.hstack(c_eq) if c_eq else np.empty(0) + c_ineq = np.hstack(c_ineq) if c_ineq else np.empty(0) + + if sparse_jacobian: + vstack = sps.vstack + empty = sps.csr_matrix((0, n)) + else: + vstack = np.vstack + empty = np.empty((0, n)) + + J_eq = vstack(J_eq) if J_eq else empty + J_ineq = vstack(J_ineq) if J_ineq else empty + + return c_eq, c_ineq, J_eq, J_ineq diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/equality_constrained_sqp.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/equality_constrained_sqp.py new file mode 100644 index 0000000000000000000000000000000000000000..d50e1e792bef91207aa20447bd36386925d38d28 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/equality_constrained_sqp.py @@ -0,0 +1,217 @@ +"""Byrd-Omojokun Trust-Region SQP method.""" + +from scipy.sparse import eye as speye +from .projections import projections +from .qp_subproblem import modified_dogleg, projected_cg, box_intersections +import numpy as np +from numpy.linalg import norm + +__all__ = ['equality_constrained_sqp'] + + +def default_scaling(x): + n, = np.shape(x) + return speye(n) + + +def equality_constrained_sqp(fun_and_constr, grad_and_jac, lagr_hess, + x0, fun0, grad0, constr0, + jac0, stop_criteria, + state, + initial_penalty, + initial_trust_radius, + factorization_method, + trust_lb=None, + trust_ub=None, + scaling=default_scaling): + """Solve nonlinear equality-constrained problem using trust-region SQP. + + Solve optimization problem: + + minimize fun(x) + subject to: constr(x) = 0 + + using Byrd-Omojokun Trust-Region SQP method described in [1]_. Several + implementation details are based on [2]_ and [3]_, p. 549. + + References + ---------- + .. [1] Lalee, Marucha, Jorge Nocedal, and Todd Plantenga. "On the + implementation of an algorithm for large-scale equality + constrained optimization." SIAM Journal on + Optimization 8.3 (1998): 682-706. + .. [2] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal. + "An interior point algorithm for large-scale nonlinear + programming." SIAM Journal on Optimization 9.4 (1999): 877-900. + .. [3] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization" + Second Edition (2006). + """ + PENALTY_FACTOR = 0.3 # Rho from formula (3.51), reference [2]_, p.891. + LARGE_REDUCTION_RATIO = 0.9 + INTERMEDIARY_REDUCTION_RATIO = 0.3 + SUFFICIENT_REDUCTION_RATIO = 1e-8 # Eta from reference [2]_, p.892. 
+ TRUST_ENLARGEMENT_FACTOR_L = 7.0 + TRUST_ENLARGEMENT_FACTOR_S = 2.0 + MAX_TRUST_REDUCTION = 0.5 + MIN_TRUST_REDUCTION = 0.1 + SOC_THRESHOLD = 0.1 + TR_FACTOR = 0.8 # Zeta from formula (3.21), reference [2]_, p.885. + BOX_FACTOR = 0.5 + + n, = np.shape(x0) # Number of parameters + + # Set default lower and upper bounds. + if trust_lb is None: + trust_lb = np.full(n, -np.inf) + if trust_ub is None: + trust_ub = np.full(n, np.inf) + + # Initial values + x = np.copy(x0) + trust_radius = initial_trust_radius + penalty = initial_penalty + # Compute Values + f = fun0 + c = grad0 + b = constr0 + A = jac0 + S = scaling(x) + # Get projections + Z, LS, Y = projections(A, factorization_method) + # Compute least-square lagrange multipliers + v = -LS.dot(c) + # Compute Hessian + H = lagr_hess(x, v) + + # Update state parameters + optimality = norm(c + A.T.dot(v), np.inf) + constr_violation = norm(b, np.inf) if len(b) > 0 else 0 + cg_info = {'niter': 0, 'stop_cond': 0, + 'hits_boundary': False} + + last_iteration_failed = False + while not stop_criteria(state, x, last_iteration_failed, + optimality, constr_violation, + trust_radius, penalty, cg_info): + # Normal Step - `dn` + # minimize 1/2*||A dn + b||^2 + # subject to: + # ||dn|| <= TR_FACTOR * trust_radius + # BOX_FACTOR * lb <= dn <= BOX_FACTOR * ub. + dn = modified_dogleg(A, Y, b, + TR_FACTOR*trust_radius, + BOX_FACTOR*trust_lb, + BOX_FACTOR*trust_ub) + + # Tangential Step - `dt` + # Solve the QP problem: + # minimize 1/2 dt.T H dt + dt.T (H dn + c) + # subject to: + # A dt = 0 + # ||dt|| <= sqrt(trust_radius**2 - ||dn||**2) + # lb - dn <= dt <= ub - dn + c_t = H.dot(dn) + c + b_t = np.zeros_like(b) + trust_radius_t = np.sqrt(trust_radius**2 - np.linalg.norm(dn)**2) + lb_t = trust_lb - dn + ub_t = trust_ub - dn + dt, cg_info = projected_cg(H, c_t, Z, Y, b_t, + trust_radius_t, + lb_t, ub_t) + + # Compute update (normal + tangential steps). + d = dn + dt + + # Compute second order model: 1/2 d H d + c.T d + f. + quadratic_model = 1/2*(H.dot(d)).dot(d) + c.T.dot(d) + # Compute linearized constraint: l = A d + b. + linearized_constr = A.dot(d)+b + # Compute new penalty parameter according to formula (3.52), + # reference [2]_, p.891. + vpred = norm(b) - norm(linearized_constr) + # Guarantee `vpred` always positive, + # regardless of roundoff errors. + vpred = max(1e-16, vpred) + previous_penalty = penalty + if quadratic_model > 0: + new_penalty = quadratic_model / ((1-PENALTY_FACTOR)*vpred) + penalty = max(penalty, new_penalty) + # Compute predicted reduction according to formula (3.52), + # reference [2]_, p.891. + predicted_reduction = -quadratic_model + penalty*vpred + + # Compute merit function at current point + merit_function = f + penalty*norm(b) + # Evaluate function and constraints at trial point + x_next = x + S.dot(d) + f_next, b_next = fun_and_constr(x_next) + # Compute merit function at trial point + merit_function_next = f_next + penalty*norm(b_next) + # Compute actual reduction according to formula (3.54), + # reference [2]_, p.892. + actual_reduction = merit_function - merit_function_next + # Compute reduction ratio + reduction_ratio = actual_reduction / predicted_reduction + + # Second order correction (SOC), reference [2]_, p.892. 
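+        # The correction below reuses the constraint values already computed
+        # at the trial point: the extra step y = -Y b_next removes the
+        # linearized infeasibility. It is only attempted when the normal step
+        # is small next to the tangential step (norm(dn) <= SOC_THRESHOLD *
+        # norm(dt)), i.e. when a poor reduction ratio is more plausibly caused
+        # by constraint curvature than by a genuinely bad step.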
+ if reduction_ratio < SUFFICIENT_REDUCTION_RATIO and \ + norm(dn) <= SOC_THRESHOLD * norm(dt): + # Compute second order correction + y = -Y.dot(b_next) + # Make sure increment is inside box constraints + _, t, intersect = box_intersections(d, y, trust_lb, trust_ub) + # Compute tentative point + x_soc = x + S.dot(d + t*y) + f_soc, b_soc = fun_and_constr(x_soc) + # Recompute actual reduction + merit_function_soc = f_soc + penalty*norm(b_soc) + actual_reduction_soc = merit_function - merit_function_soc + # Recompute reduction ratio + reduction_ratio_soc = actual_reduction_soc / predicted_reduction + if intersect and reduction_ratio_soc >= SUFFICIENT_REDUCTION_RATIO: + x_next = x_soc + f_next = f_soc + b_next = b_soc + reduction_ratio = reduction_ratio_soc + + # Readjust trust region step, formula (3.55), reference [2]_, p.892. + if reduction_ratio >= LARGE_REDUCTION_RATIO: + trust_radius = max(TRUST_ENLARGEMENT_FACTOR_L * norm(d), + trust_radius) + elif reduction_ratio >= INTERMEDIARY_REDUCTION_RATIO: + trust_radius = max(TRUST_ENLARGEMENT_FACTOR_S * norm(d), + trust_radius) + # Reduce trust region step, according to reference [3]_, p.696. + elif reduction_ratio < SUFFICIENT_REDUCTION_RATIO: + trust_reduction = ((1-SUFFICIENT_REDUCTION_RATIO) / + (1-reduction_ratio)) + new_trust_radius = trust_reduction * norm(d) + if new_trust_radius >= MAX_TRUST_REDUCTION * trust_radius: + trust_radius *= MAX_TRUST_REDUCTION + elif new_trust_radius >= MIN_TRUST_REDUCTION * trust_radius: + trust_radius = new_trust_radius + else: + trust_radius *= MIN_TRUST_REDUCTION + + # Update iteration + if reduction_ratio >= SUFFICIENT_REDUCTION_RATIO: + x = x_next + f, b = f_next, b_next + c, A = grad_and_jac(x) + S = scaling(x) + # Get projections + Z, LS, Y = projections(A, factorization_method) + # Compute least-square lagrange multipliers + v = -LS.dot(c) + # Compute Hessian + H = lagr_hess(x, v) + # Set Flag + last_iteration_failed = False + # Otimality values + optimality = norm(c + A.T.dot(v), np.inf) + constr_violation = norm(b, np.inf) if len(b) > 0 else 0 + else: + penalty = previous_penalty + last_iteration_failed = True + + return x, state diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/minimize_trustregion_constr.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/minimize_trustregion_constr.py new file mode 100644 index 0000000000000000000000000000000000000000..2835ea5445c0eafc303f0cb1ab8543f48b7e3bb9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/minimize_trustregion_constr.py @@ -0,0 +1,564 @@ +import time +import numpy as np +from scipy.sparse.linalg import LinearOperator +from .._differentiable_functions import VectorFunction +from .._constraints import ( + NonlinearConstraint, LinearConstraint, PreparedConstraint, Bounds, strict_bounds) +from .._hessian_update_strategy import BFGS +from .._optimize import OptimizeResult +from .._differentiable_functions import ScalarFunction +from .equality_constrained_sqp import equality_constrained_sqp +from .canonical_constraint import (CanonicalConstraint, + initial_constraints_as_canonical) +from .tr_interior_point import tr_interior_point +from .report import BasicReport, SQPReport, IPReport + + +TERMINATION_MESSAGES = { + 0: "The maximum number of function evaluations is exceeded.", + 1: "`gtol` termination condition is satisfied.", + 2: "`xtol` termination condition is satisfied.", + 3: "`callback` function requested termination." 
+} + + +class HessianLinearOperator: + """Build LinearOperator from hessp""" + def __init__(self, hessp, n): + self.hessp = hessp + self.n = n + + def __call__(self, x, *args): + def matvec(p): + return self.hessp(x, p, *args) + + return LinearOperator((self.n, self.n), matvec=matvec) + + +class LagrangianHessian: + """The Hessian of the Lagrangian as LinearOperator. + + The Lagrangian is computed as the objective function plus all the + constraints multiplied with some numbers (Lagrange multipliers). + """ + def __init__(self, n, objective_hess, constraints_hess): + self.n = n + self.objective_hess = objective_hess + self.constraints_hess = constraints_hess + + def __call__(self, x, v_eq=np.empty(0), v_ineq=np.empty(0)): + H_objective = self.objective_hess(x) + H_constraints = self.constraints_hess(x, v_eq, v_ineq) + + def matvec(p): + return H_objective.dot(p) + H_constraints.dot(p) + + return LinearOperator((self.n, self.n), matvec) + + +def update_state_sqp(state, x, last_iteration_failed, objective, prepared_constraints, + start_time, tr_radius, constr_penalty, cg_info): + state.nit += 1 + state.nfev = objective.nfev + state.njev = objective.ngev + state.nhev = objective.nhev + state.constr_nfev = [c.fun.nfev if isinstance(c.fun, VectorFunction) else 0 + for c in prepared_constraints] + state.constr_njev = [c.fun.njev if isinstance(c.fun, VectorFunction) else 0 + for c in prepared_constraints] + state.constr_nhev = [c.fun.nhev if isinstance(c.fun, VectorFunction) else 0 + for c in prepared_constraints] + + if not last_iteration_failed: + state.x = x + state.fun = objective.f + state.grad = objective.g + state.v = [c.fun.v for c in prepared_constraints] + state.constr = [c.fun.f for c in prepared_constraints] + state.jac = [c.fun.J for c in prepared_constraints] + # Compute Lagrangian Gradient + state.lagrangian_grad = np.copy(state.grad) + for c in prepared_constraints: + state.lagrangian_grad += c.fun.J.T.dot(c.fun.v) + state.optimality = np.linalg.norm(state.lagrangian_grad, np.inf) + # Compute maximum constraint violation + state.constr_violation = 0 + for i in range(len(prepared_constraints)): + lb, ub = prepared_constraints[i].bounds + c = state.constr[i] + state.constr_violation = np.max([state.constr_violation, + np.max(lb - c), + np.max(c - ub)]) + + state.execution_time = time.time() - start_time + state.tr_radius = tr_radius + state.constr_penalty = constr_penalty + state.cg_niter += cg_info["niter"] + state.cg_stop_cond = cg_info["stop_cond"] + + return state + + +def update_state_ip(state, x, last_iteration_failed, objective, + prepared_constraints, start_time, + tr_radius, constr_penalty, cg_info, + barrier_parameter, barrier_tolerance): + state = update_state_sqp(state, x, last_iteration_failed, objective, + prepared_constraints, start_time, tr_radius, + constr_penalty, cg_info) + state.barrier_parameter = barrier_parameter + state.barrier_tolerance = barrier_tolerance + return state + + +def _minimize_trustregion_constr(fun, x0, args, grad, + hess, hessp, bounds, constraints, + xtol=1e-8, gtol=1e-8, + barrier_tol=1e-8, + sparse_jacobian=None, + callback=None, maxiter=1000, + verbose=0, finite_diff_rel_step=None, + initial_constr_penalty=1.0, initial_tr_radius=1.0, + initial_barrier_parameter=0.1, + initial_barrier_tolerance=0.1, + factorization_method=None, + disp=False): + """Minimize a scalar function subject to constraints. + + Parameters + ---------- + gtol : float, optional + Tolerance for termination by the norm of the Lagrangian gradient. 
+ The algorithm will terminate when both the infinity norm (i.e., max + abs value) of the Lagrangian gradient and the constraint violation + are smaller than ``gtol``. Default is 1e-8. + xtol : float, optional + Tolerance for termination by the change of the independent variable. + The algorithm will terminate when ``tr_radius < xtol``, where + ``tr_radius`` is the radius of the trust region used in the algorithm. + Default is 1e-8. + barrier_tol : float, optional + Threshold on the barrier parameter for the algorithm termination. + When inequality constraints are present, the algorithm will terminate + only when the barrier parameter is less than `barrier_tol`. + Default is 1e-8. + sparse_jacobian : {bool, None}, optional + Determines how to represent Jacobians of the constraints. If bool, + then Jacobians of all the constraints will be converted to the + corresponding format. If None (default), then Jacobians won't be + converted, but the algorithm can proceed only if they all have the + same format. + initial_tr_radius: float, optional + Initial trust radius. The trust radius gives the maximum distance + between solution points in consecutive iterations. It reflects the + trust the algorithm puts in the local approximation of the optimization + problem. For an accurate local approximation the trust-region should be + large and for an approximation valid only close to the current point it + should be a small one. The trust radius is automatically updated throughout + the optimization process, with ``initial_tr_radius`` being its initial value. + Default is 1 (recommended in [1]_, p. 19). + initial_constr_penalty : float, optional + Initial constraints penalty parameter. The penalty parameter is used for + balancing the requirements of decreasing the objective function + and satisfying the constraints. It is used for defining the merit function: + ``merit_function(x) = fun(x) + constr_penalty * constr_norm_l2(x)``, + where ``constr_norm_l2(x)`` is the l2 norm of a vector containing all + the constraints. The merit function is used for accepting or rejecting + trial points and ``constr_penalty`` weights the two conflicting goals + of reducing objective function and constraints. The penalty is automatically + updated throughout the optimization process, with + ``initial_constr_penalty`` being its initial value. Default is 1 + (recommended in [1]_, p 19). + initial_barrier_parameter, initial_barrier_tolerance: float, optional + Initial barrier parameter and initial tolerance for the barrier subproblem. + Both are used only when inequality constraints are present. For dealing with + optimization problems ``min_x f(x)`` subject to inequality constraints + ``c(x) <= 0`` the algorithm introduces slack variables, solving the problem + ``min_(x,s) f(x) + barrier_parameter*sum(ln(s))`` subject to the equality + constraints ``c(x) + s = 0`` instead of the original problem. This subproblem + is solved for decreasing values of ``barrier_parameter`` and with decreasing + tolerances for the termination, starting with ``initial_barrier_parameter`` + for the barrier parameter and ``initial_barrier_tolerance`` for the + barrier tolerance. Default is 0.1 for both values (recommended in [1]_ p. 19). + Also note that ``barrier_parameter`` and ``barrier_tolerance`` are updated + with the same prefactor. + factorization_method : string or None, optional + Method to factorize the Jacobian of the constraints. 
Use None (default) + for the auto selection or one of: + + - 'NormalEquation' (requires scikit-sparse) + - 'AugmentedSystem' + - 'QRFactorization' + - 'SVDFactorization' + + The methods 'NormalEquation' and 'AugmentedSystem' can be used only + with sparse constraints. The projections required by the algorithm + will be computed using, respectively, the normal equation and the + augmented system approaches explained in [1]_. 'NormalEquation' + computes the Cholesky factorization of ``A A.T`` and 'AugmentedSystem' + performs the LU factorization of an augmented system. They usually + provide similar results. 'AugmentedSystem' is used by default for + sparse matrices. + + The methods 'QRFactorization' and 'SVDFactorization' can be used + only with dense constraints. They compute the required projections + using, respectively, QR and SVD factorizations. The 'SVDFactorization' + method can cope with Jacobian matrices with deficient row rank and will + be used whenever other factorization methods fail (which may imply the + conversion of sparse matrices to a dense format when required). + By default, 'QRFactorization' is used for dense matrices. + finite_diff_rel_step : None or array_like, optional + Relative step size for the finite difference approximation. + maxiter : int, optional + Maximum number of algorithm iterations. Default is 1000. + verbose : {0, 1, 2}, optional + Level of algorithm's verbosity: + + * 0 (default) : work silently. + * 1 : display a termination report. + * 2 : display progress during iterations. + * 3 : display progress during iterations (more complete report). + + disp : bool, optional + If True (default), then `verbose` will be set to 1 if it was 0. + + Returns + ------- + `OptimizeResult` with the fields documented below. Note the following: + + 1. All values corresponding to the constraints are ordered as they + were passed to the solver. And values corresponding to `bounds` + constraints are put *after* other constraints. + 2. All numbers of function, Jacobian or Hessian evaluations correspond + to numbers of actual Python function calls. It means, for example, + that if a Jacobian is estimated by finite differences, then the + number of Jacobian evaluations will be zero and the number of + function evaluations will be incremented by all calls during the + finite difference estimation. + + x : ndarray, shape (n,) + Solution found. + optimality : float + Infinity norm of the Lagrangian gradient at the solution. + constr_violation : float + Maximum constraint violation at the solution. + fun : float + Objective function at the solution. + grad : ndarray, shape (n,) + Gradient of the objective function at the solution. + lagrangian_grad : ndarray, shape (n,) + Gradient of the Lagrangian function at the solution. + nit : int + Total number of iterations. + nfev : integer + Number of the objective function evaluations. + njev : integer + Number of the objective function gradient evaluations. + nhev : integer + Number of the objective function Hessian evaluations. + cg_niter : int + Total number of the conjugate gradient method iterations. + method : {'equality_constrained_sqp', 'tr_interior_point'} + Optimization method used. + constr : list of ndarray + List of constraint values at the solution. + jac : list of {ndarray, sparse matrix} + List of the Jacobian matrices of the constraints at the solution. + v : list of ndarray + List of the Lagrange multipliers for the constraints at the solution. 
+ For an inequality constraint a positive multiplier means that the upper + bound is active, a negative multiplier means that the lower bound is + active and if a multiplier is zero it means the constraint is not + active. + constr_nfev : list of int + Number of constraint evaluations for each of the constraints. + constr_njev : list of int + Number of Jacobian matrix evaluations for each of the constraints. + constr_nhev : list of int + Number of Hessian evaluations for each of the constraints. + tr_radius : float + Radius of the trust region at the last iteration. + constr_penalty : float + Penalty parameter at the last iteration, see `initial_constr_penalty`. + barrier_tolerance : float + Tolerance for the barrier subproblem at the last iteration. + Only for problems with inequality constraints. + barrier_parameter : float + Barrier parameter at the last iteration. Only for problems + with inequality constraints. + execution_time : float + Total execution time. + message : str + Termination message. + status : {0, 1, 2, 3} + Termination status: + + * 0 : The maximum number of function evaluations is exceeded. + * 1 : `gtol` termination condition is satisfied. + * 2 : `xtol` termination condition is satisfied. + * 3 : `callback` function requested termination. + + cg_stop_cond : int + Reason for CG subproblem termination at the last iteration: + + * 0 : CG subproblem not evaluated. + * 1 : Iteration limit was reached. + * 2 : Reached the trust-region boundary. + * 3 : Negative curvature detected. + * 4 : Tolerance was satisfied. + + References + ---------- + .. [1] Conn, A. R., Gould, N. I., & Toint, P. L. + Trust region methods. 2000. Siam. pp. 19. + """ + x0 = np.atleast_1d(x0).astype(float) + n_vars = np.size(x0) + if hess is None: + if callable(hessp): + hess = HessianLinearOperator(hessp, n_vars) + else: + hess = BFGS() + if disp and verbose == 0: + verbose = 1 + + if bounds is not None: + modified_lb = np.nextafter(bounds.lb, -np.inf, where=bounds.lb > -np.inf) + modified_ub = np.nextafter(bounds.ub, np.inf, where=bounds.ub < np.inf) + modified_lb = np.where(np.isfinite(bounds.lb), modified_lb, bounds.lb) + modified_ub = np.where(np.isfinite(bounds.ub), modified_ub, bounds.ub) + bounds = Bounds(modified_lb, modified_ub, keep_feasible=bounds.keep_feasible) + finite_diff_bounds = strict_bounds(bounds.lb, bounds.ub, + bounds.keep_feasible, n_vars) + else: + finite_diff_bounds = (-np.inf, np.inf) + + # Define Objective Function + objective = ScalarFunction(fun, x0, args, grad, hess, + finite_diff_rel_step, finite_diff_bounds) + + # Put constraints in list format when needed. + if isinstance(constraints, (NonlinearConstraint, LinearConstraint)): + constraints = [constraints] + + # Prepare constraints. + prepared_constraints = [ + PreparedConstraint(c, x0, sparse_jacobian, finite_diff_bounds) + for c in constraints] + + # Check that all constraints are either sparse or dense. + n_sparse = sum(c.fun.sparse_jacobian for c in prepared_constraints) + if 0 < n_sparse < len(prepared_constraints): + raise ValueError("All constraints must have the same kind of the " + "Jacobian --- either all sparse or all dense. " + "You can set the sparsity globally by setting " + "`sparse_jacobian` to either True of False.") + if prepared_constraints: + sparse_jacobian = n_sparse > 0 + + if bounds is not None: + if sparse_jacobian is None: + sparse_jacobian = True + prepared_constraints.append(PreparedConstraint(bounds, x0, + sparse_jacobian)) + + # Concatenate initial constraints to the canonical form. 
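+    # Bounds, linear and nonlinear constraints are all reduced here to the
+    # common ``f_eq(x) = 0`` / ``f_ineq(x) <= 0`` form. A minimal sketch,
+    # using only the public API, of the kind of call that reaches this point:
+    #
+    #     >>> import numpy as np
+    #     >>> from scipy.optimize import minimize, LinearConstraint, Bounds, rosen
+    #     >>> con = LinearConstraint([[1.0, 1.0]], -np.inf, 1.0)  # x0 + x1 <= 1
+    #     >>> res = minimize(rosen, [0.4, 0.2], method='trust-constr',
+    #     ...                constraints=[con], bounds=Bounds([0, 0], [2, 2]))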
+ c_eq0, c_ineq0, J_eq0, J_ineq0 = initial_constraints_as_canonical( + n_vars, prepared_constraints, sparse_jacobian) + + # Prepare all canonical constraints and concatenate it into one. + canonical_all = [CanonicalConstraint.from_PreparedConstraint(c) + for c in prepared_constraints] + + if len(canonical_all) == 0: + canonical = CanonicalConstraint.empty(n_vars) + elif len(canonical_all) == 1: + canonical = canonical_all[0] + else: + canonical = CanonicalConstraint.concatenate(canonical_all, + sparse_jacobian) + + # Generate the Hessian of the Lagrangian. + lagrangian_hess = LagrangianHessian(n_vars, objective.hess, canonical.hess) + + # Choose appropriate method + if canonical.n_ineq == 0: + method = 'equality_constrained_sqp' + else: + method = 'tr_interior_point' + + # Construct OptimizeResult + state = OptimizeResult( + nit=0, nfev=0, njev=0, nhev=0, + cg_niter=0, cg_stop_cond=0, + fun=objective.f, grad=objective.g, + lagrangian_grad=np.copy(objective.g), + constr=[c.fun.f for c in prepared_constraints], + jac=[c.fun.J for c in prepared_constraints], + constr_nfev=[0 for c in prepared_constraints], + constr_njev=[0 for c in prepared_constraints], + constr_nhev=[0 for c in prepared_constraints], + v=[c.fun.v for c in prepared_constraints], + method=method) + + # Start counting + start_time = time.time() + + # Define stop criteria + if method == 'equality_constrained_sqp': + def stop_criteria(state, x, last_iteration_failed, + optimality, constr_violation, + tr_radius, constr_penalty, cg_info): + state = update_state_sqp(state, x, last_iteration_failed, + objective, prepared_constraints, + start_time, tr_radius, constr_penalty, + cg_info) + if verbose == 2: + BasicReport.print_iteration(state.nit, + state.nfev, + state.cg_niter, + state.fun, + state.tr_radius, + state.optimality, + state.constr_violation) + elif verbose > 2: + SQPReport.print_iteration(state.nit, + state.nfev, + state.cg_niter, + state.fun, + state.tr_radius, + state.optimality, + state.constr_violation, + state.constr_penalty, + state.cg_stop_cond) + state.status = None + state.niter = state.nit # Alias for callback (backward-compatibility) + if callback is not None: + callback_stop = False + try: + callback_stop = callback(state) + except StopIteration: + callback_stop = True + if callback_stop: + state.status = 3 + return True + if state.optimality < gtol and state.constr_violation < gtol: + state.status = 1 + elif state.tr_radius < xtol: + state.status = 2 + elif state.nit >= maxiter: + state.status = 0 + return state.status in (0, 1, 2, 3) + elif method == 'tr_interior_point': + def stop_criteria(state, x, last_iteration_failed, tr_radius, + constr_penalty, cg_info, barrier_parameter, + barrier_tolerance): + state = update_state_ip(state, x, last_iteration_failed, + objective, prepared_constraints, + start_time, tr_radius, constr_penalty, + cg_info, barrier_parameter, barrier_tolerance) + if verbose == 2: + BasicReport.print_iteration(state.nit, + state.nfev, + state.cg_niter, + state.fun, + state.tr_radius, + state.optimality, + state.constr_violation) + elif verbose > 2: + IPReport.print_iteration(state.nit, + state.nfev, + state.cg_niter, + state.fun, + state.tr_radius, + state.optimality, + state.constr_violation, + state.constr_penalty, + state.barrier_parameter, + state.cg_stop_cond) + state.status = None + state.niter = state.nit # Alias for callback (backward compatibility) + if callback is not None: + callback_stop = False + try: + callback_stop = callback(state) + except StopIteration: + callback_stop = 
True + if callback_stop: + state.status = 3 + return True + if state.optimality < gtol and state.constr_violation < gtol: + state.status = 1 + elif (state.tr_radius < xtol + and state.barrier_parameter < barrier_tol): + state.status = 2 + elif state.nit >= maxiter: + state.status = 0 + return state.status in (0, 1, 2, 3) + + if verbose == 2: + BasicReport.print_header() + elif verbose > 2: + if method == 'equality_constrained_sqp': + SQPReport.print_header() + elif method == 'tr_interior_point': + IPReport.print_header() + + # Call inferior function to do the optimization + if method == 'equality_constrained_sqp': + def fun_and_constr(x): + f = objective.fun(x) + c_eq, _ = canonical.fun(x) + return f, c_eq + + def grad_and_jac(x): + g = objective.grad(x) + J_eq, _ = canonical.jac(x) + return g, J_eq + + _, result = equality_constrained_sqp( + fun_and_constr, grad_and_jac, lagrangian_hess, + x0, objective.f, objective.g, + c_eq0, J_eq0, + stop_criteria, state, + initial_constr_penalty, initial_tr_radius, + factorization_method) + + elif method == 'tr_interior_point': + _, result = tr_interior_point( + objective.fun, objective.grad, lagrangian_hess, + n_vars, canonical.n_ineq, canonical.n_eq, + canonical.fun, canonical.jac, + x0, objective.f, objective.g, + c_ineq0, J_ineq0, c_eq0, J_eq0, + stop_criteria, + canonical.keep_feasible, + xtol, state, initial_barrier_parameter, + initial_barrier_tolerance, + initial_constr_penalty, initial_tr_radius, + factorization_method) + + # Status 3 occurs when the callback function requests termination, + # this is assumed to not be a success. + result.success = True if result.status in (1, 2) else False + result.message = TERMINATION_MESSAGES[result.status] + + # Alias (for backward compatibility with 1.1.0) + result.niter = result.nit + + if verbose == 2: + BasicReport.print_footer() + elif verbose > 2: + if method == 'equality_constrained_sqp': + SQPReport.print_footer() + elif method == 'tr_interior_point': + IPReport.print_footer() + if verbose >= 1: + print(result.message) + print("Number of iterations: {}, function evaluations: {}, " + "CG iterations: {}, optimality: {:.2e}, " + "constraint violation: {:.2e}, execution time: {:4.2} s." + .format(result.nit, result.nfev, result.cg_niter, + result.optimality, result.constr_violation, + result.execution_time)) + return result diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/projections.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/projections.py new file mode 100644 index 0000000000000000000000000000000000000000..a07b836bdbad688a265ae34ce91a361fd5050eb1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/projections.py @@ -0,0 +1,407 @@ +"""Basic linear factorizations needed by the solver.""" + +from scipy.sparse import (bmat, csc_matrix, eye, issparse) +from scipy.sparse.linalg import LinearOperator +import scipy.linalg +import scipy.sparse.linalg +try: + from sksparse.cholmod import cholesky_AAt + sksparse_available = True +except ImportError: + import warnings + sksparse_available = False +import numpy as np +from warnings import warn + +__all__ = [ + 'orthogonality', + 'projections', +] + + +def orthogonality(A, g): + """Measure orthogonality between a vector and the null space of a matrix. + + Compute a measure of orthogonality between the null space + of the (possibly sparse) matrix ``A`` and a given vector ``g``. 
+ + The formula is a simplified (and cheaper) version of formula (3.13) + from [1]_. + ``orth = norm(A g, ord=2)/(norm(A, ord='fro')*norm(g, ord=2))``. + + References + ---------- + .. [1] Gould, Nicholas IM, Mary E. Hribar, and Jorge Nocedal. + "On the solution of equality constrained quadratic + programming problems arising in optimization." + SIAM Journal on Scientific Computing 23.4 (2001): 1376-1395. + """ + # Compute vector norms + norm_g = np.linalg.norm(g) + # Compute Froebnius norm of the matrix A + if issparse(A): + norm_A = scipy.sparse.linalg.norm(A, ord='fro') + else: + norm_A = np.linalg.norm(A, ord='fro') + + # Check if norms are zero + if norm_g == 0 or norm_A == 0: + return 0 + + norm_A_g = np.linalg.norm(A.dot(g)) + # Orthogonality measure + orth = norm_A_g / (norm_A*norm_g) + return orth + + +def normal_equation_projections(A, m, n, orth_tol, max_refin, tol): + """Return linear operators for matrix A using ``NormalEquation`` approach. + """ + # Cholesky factorization + factor = cholesky_AAt(A) + + # z = x - A.T inv(A A.T) A x + def null_space(x): + v = factor(A.dot(x)) + z = x - A.T.dot(v) + + # Iterative refinement to improve roundoff + # errors described in [2]_, algorithm 5.1. + k = 0 + while orthogonality(A, z) > orth_tol: + if k >= max_refin: + break + # z_next = z - A.T inv(A A.T) A z + v = factor(A.dot(z)) + z = z - A.T.dot(v) + k += 1 + + return z + + # z = inv(A A.T) A x + def least_squares(x): + return factor(A.dot(x)) + + # z = A.T inv(A A.T) x + def row_space(x): + return A.T.dot(factor(x)) + + return null_space, least_squares, row_space + + +def augmented_system_projections(A, m, n, orth_tol, max_refin, tol): + """Return linear operators for matrix A - ``AugmentedSystem``.""" + # Form augmented system + K = csc_matrix(bmat([[eye(n), A.T], [A, None]])) + # LU factorization + # TODO: Use a symmetric indefinite factorization + # to solve the system twice as fast (because + # of the symmetry). + try: + solve = scipy.sparse.linalg.factorized(K) + except RuntimeError: + warn("Singular Jacobian matrix. Using dense SVD decomposition to " + "perform the factorizations.", + stacklevel=3) + return svd_factorization_projections(A.toarray(), + m, n, orth_tol, + max_refin, tol) + + # z = x - A.T inv(A A.T) A x + # is computed solving the extended system: + # [I A.T] * [ z ] = [x] + # [A O ] [aux] [0] + def null_space(x): + # v = [x] + # [0] + v = np.hstack([x, np.zeros(m)]) + # lu_sol = [ z ] + # [aux] + lu_sol = solve(v) + z = lu_sol[:n] + + # Iterative refinement to improve roundoff + # errors described in [2]_, algorithm 5.2. 
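+        # ``orthogonality(A, z)`` is norm(A z) / (norm(A, 'fro') * norm(z)):
+        # it is 0 when z lies exactly in the null space of A and approaches 1
+        # as z acquires a row-space component (e.g. for A = [[1, 0]] it gives
+        # 0 for z = [0, 1] and 1 for z = [1, 0]). The loop repeats the
+        # projection until this measure falls below `orth_tol` or `max_refin`
+        # passes have been made.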
+ k = 0 + while orthogonality(A, z) > orth_tol: + if k >= max_refin: + break + # new_v = [x] - [I A.T] * [ z ] + # [0] [A O ] [aux] + new_v = v - K.dot(lu_sol) + # [I A.T] * [delta z ] = new_v + # [A O ] [delta aux] + lu_update = solve(new_v) + # [ z ] += [delta z ] + # [aux] [delta aux] + lu_sol += lu_update + z = lu_sol[:n] + k += 1 + + # return z = x - A.T inv(A A.T) A x + return z + + # z = inv(A A.T) A x + # is computed solving the extended system: + # [I A.T] * [aux] = [x] + # [A O ] [ z ] [0] + def least_squares(x): + # v = [x] + # [0] + v = np.hstack([x, np.zeros(m)]) + # lu_sol = [aux] + # [ z ] + lu_sol = solve(v) + # return z = inv(A A.T) A x + return lu_sol[n:m+n] + + # z = A.T inv(A A.T) x + # is computed solving the extended system: + # [I A.T] * [ z ] = [0] + # [A O ] [aux] [x] + def row_space(x): + # v = [0] + # [x] + v = np.hstack([np.zeros(n), x]) + # lu_sol = [ z ] + # [aux] + lu_sol = solve(v) + # return z = A.T inv(A A.T) x + return lu_sol[:n] + + return null_space, least_squares, row_space + + +def qr_factorization_projections(A, m, n, orth_tol, max_refin, tol): + """Return linear operators for matrix A using ``QRFactorization`` approach. + """ + # QRFactorization + Q, R, P = scipy.linalg.qr(A.T, pivoting=True, mode='economic') + + if np.linalg.norm(R[-1, :], np.inf) < tol: + warn('Singular Jacobian matrix. Using SVD decomposition to ' + + 'perform the factorizations.', + stacklevel=3) + return svd_factorization_projections(A, m, n, + orth_tol, + max_refin, + tol) + + # z = x - A.T inv(A A.T) A x + def null_space(x): + # v = P inv(R) Q.T x + aux1 = Q.T.dot(x) + aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False) + v = np.zeros(m) + v[P] = aux2 + z = x - A.T.dot(v) + + # Iterative refinement to improve roundoff + # errors described in [2]_, algorithm 5.1. + k = 0 + while orthogonality(A, z) > orth_tol: + if k >= max_refin: + break + # v = P inv(R) Q.T x + aux1 = Q.T.dot(z) + aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False) + v[P] = aux2 + # z_next = z - A.T v + z = z - A.T.dot(v) + k += 1 + + return z + + # z = inv(A A.T) A x + def least_squares(x): + # z = P inv(R) Q.T x + aux1 = Q.T.dot(x) + aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False) + z = np.zeros(m) + z[P] = aux2 + return z + + # z = A.T inv(A A.T) x + def row_space(x): + # z = Q inv(R.T) P.T x + aux1 = x[P] + aux2 = scipy.linalg.solve_triangular(R, aux1, + lower=False, + trans='T') + z = Q.dot(aux2) + return z + + return null_space, least_squares, row_space + + +def svd_factorization_projections(A, m, n, orth_tol, max_refin, tol): + """Return linear operators for matrix A using ``SVDFactorization`` approach. + """ + # SVD Factorization + U, s, Vt = scipy.linalg.svd(A, full_matrices=False) + + # Remove dimensions related with very small singular values + U = U[:, s > tol] + Vt = Vt[s > tol, :] + s = s[s > tol] + + # z = x - A.T inv(A A.T) A x + def null_space(x): + # v = U 1/s V.T x = inv(A A.T) A x + aux1 = Vt.dot(x) + aux2 = 1/s*aux1 + v = U.dot(aux2) + z = x - A.T.dot(v) + + # Iterative refinement to improve roundoff + # errors described in [2]_, algorithm 5.1. 
+ k = 0 + while orthogonality(A, z) > orth_tol: + if k >= max_refin: + break + # v = U 1/s V.T x = inv(A A.T) A x + aux1 = Vt.dot(z) + aux2 = 1/s*aux1 + v = U.dot(aux2) + # z_next = z - A.T v + z = z - A.T.dot(v) + k += 1 + + return z + + # z = inv(A A.T) A x + def least_squares(x): + # z = U 1/s V.T x = inv(A A.T) A x + aux1 = Vt.dot(x) + aux2 = 1/s*aux1 + z = U.dot(aux2) + return z + + # z = A.T inv(A A.T) x + def row_space(x): + # z = V 1/s U.T x + aux1 = U.T.dot(x) + aux2 = 1/s*aux1 + z = Vt.T.dot(aux2) + return z + + return null_space, least_squares, row_space + + +def projections(A, method=None, orth_tol=1e-12, max_refin=3, tol=1e-15): + """Return three linear operators related with a given matrix A. + + Parameters + ---------- + A : sparse matrix (or ndarray), shape (m, n) + Matrix ``A`` used in the projection. + method : string, optional + Method used for compute the given linear + operators. Should be one of: + + - 'NormalEquation': The operators + will be computed using the + so-called normal equation approach + explained in [1]_. In order to do + so the Cholesky factorization of + ``(A A.T)`` is computed. Exclusive + for sparse matrices. + - 'AugmentedSystem': The operators + will be computed using the + so-called augmented system approach + explained in [1]_. Exclusive + for sparse matrices. + - 'QRFactorization': Compute projections + using QR factorization. Exclusive for + dense matrices. + - 'SVDFactorization': Compute projections + using SVD factorization. Exclusive for + dense matrices. + + orth_tol : float, optional + Tolerance for iterative refinements. + max_refin : int, optional + Maximum number of iterative refinements. + tol : float, optional + Tolerance for singular values. + + Returns + ------- + Z : LinearOperator, shape (n, n) + Null-space operator. For a given vector ``x``, + the null space operator is equivalent to apply + a projection matrix ``P = I - A.T inv(A A.T) A`` + to the vector. It can be shown that this is + equivalent to project ``x`` into the null space + of A. + LS : LinearOperator, shape (m, n) + Least-squares operator. For a given vector ``x``, + the least-squares operator is equivalent to apply a + pseudoinverse matrix ``pinv(A.T) = inv(A A.T) A`` + to the vector. It can be shown that this vector + ``pinv(A.T) x`` is the least_square solution to + ``A.T y = x``. + Y : LinearOperator, shape (n, m) + Row-space operator. For a given vector ``x``, + the row-space operator is equivalent to apply a + projection matrix ``Q = A.T inv(A A.T)`` + to the vector. It can be shown that this + vector ``y = Q x`` the minimum norm solution + of ``A y = x``. + + Notes + ----- + Uses iterative refinements described in [1] + during the computation of ``Z`` in order to + cope with the possibility of large roundoff errors. + + References + ---------- + .. [1] Gould, Nicholas IM, Mary E. Hribar, and Jorge Nocedal. + "On the solution of equality constrained quadratic + programming problems arising in optimization." + SIAM Journal on Scientific Computing 23.4 (2001): 1376-1395. + """ + m, n = np.shape(A) + + # The factorization of an empty matrix + # only works for the sparse representation. 
+ if m*n == 0: + A = csc_matrix(A) + + # Check Argument + if issparse(A): + if method is None: + method = "AugmentedSystem" + if method not in ("NormalEquation", "AugmentedSystem"): + raise ValueError("Method not allowed for sparse matrix.") + if method == "NormalEquation" and not sksparse_available: + warnings.warn("Only accepts 'NormalEquation' option when " + "scikit-sparse is available. Using " + "'AugmentedSystem' option instead.", + ImportWarning, stacklevel=3) + method = 'AugmentedSystem' + else: + if method is None: + method = "QRFactorization" + if method not in ("QRFactorization", "SVDFactorization"): + raise ValueError("Method not allowed for dense array.") + + if method == 'NormalEquation': + null_space, least_squares, row_space \ + = normal_equation_projections(A, m, n, orth_tol, max_refin, tol) + elif method == 'AugmentedSystem': + null_space, least_squares, row_space \ + = augmented_system_projections(A, m, n, orth_tol, max_refin, tol) + elif method == "QRFactorization": + null_space, least_squares, row_space \ + = qr_factorization_projections(A, m, n, orth_tol, max_refin, tol) + elif method == "SVDFactorization": + null_space, least_squares, row_space \ + = svd_factorization_projections(A, m, n, orth_tol, max_refin, tol) + + Z = LinearOperator((n, n), null_space) + LS = LinearOperator((m, n), least_squares) + Y = LinearOperator((n, m), row_space) + + return Z, LS, Y diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/qp_subproblem.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/qp_subproblem.py new file mode 100644 index 0000000000000000000000000000000000000000..a039a7738c283f90f30fd7c4583bf9e1a8f559d5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/qp_subproblem.py @@ -0,0 +1,637 @@ +"""Equality-constrained quadratic programming solvers.""" + +from scipy.sparse import (linalg, bmat, csc_matrix) +from math import copysign +import numpy as np +from numpy.linalg import norm + +__all__ = [ + 'eqp_kktfact', + 'sphere_intersections', + 'box_intersections', + 'box_sphere_intersections', + 'inside_box_boundaries', + 'modified_dogleg', + 'projected_cg' +] + + +# For comparison with the projected CG +def eqp_kktfact(H, c, A, b): + """Solve equality-constrained quadratic programming (EQP) problem. + + Solve ``min 1/2 x.T H x + x.t c`` subject to ``A x + b = 0`` + using direct factorization of the KKT system. + + Parameters + ---------- + H : sparse matrix, shape (n, n) + Hessian matrix of the EQP problem. + c : array_like, shape (n,) + Gradient of the quadratic objective function. + A : sparse matrix + Jacobian matrix of the EQP problem. + b : array_like, shape (m,) + Right-hand side of the constraint equation. + + Returns + ------- + x : array_like, shape (n,) + Solution of the KKT problem. + lagrange_multipliers : ndarray, shape (m,) + Lagrange multipliers of the KKT problem. + """ + n, = np.shape(c) # Number of parameters + m, = np.shape(b) # Number of constraints + + # Karush-Kuhn-Tucker matrix of coefficients. + # Defined as in Nocedal/Wright "Numerical + # Optimization" p.452 in Eq. (16.4). + kkt_matrix = csc_matrix(bmat([[H, A.T], [A, None]])) + # Vector of coefficients. + kkt_vec = np.hstack([-c, -b]) + + # TODO: Use a symmetric indefinite factorization + # to solve the system twice as fast (because + # of the symmetry). 
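+    # (added note) The assembled system factorized below is
+    #     [ H  A.T ] [ x ]   [ -c ]
+    #     [ A   0  ] [ y ] = [ -b ]
+    # and the Lagrange multipliers returned are ``-y``.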
+ lu = linalg.splu(kkt_matrix) + kkt_sol = lu.solve(kkt_vec) + x = kkt_sol[:n] + lagrange_multipliers = -kkt_sol[n:n+m] + + return x, lagrange_multipliers + + +def sphere_intersections(z, d, trust_radius, + entire_line=False): + """Find the intersection between segment (or line) and spherical constraints. + + Find the intersection between the segment (or line) defined by the + parametric equation ``x(t) = z + t*d`` and the ball + ``||x|| <= trust_radius``. + + Parameters + ---------- + z : array_like, shape (n,) + Initial point. + d : array_like, shape (n,) + Direction. + trust_radius : float + Ball radius. + entire_line : bool, optional + When ``True``, the function returns the intersection between the line + ``x(t) = z + t*d`` (``t`` can assume any value) and the ball + ``||x|| <= trust_radius``. When ``False``, the function returns the intersection + between the segment ``x(t) = z + t*d``, ``0 <= t <= 1``, and the ball. + + Returns + ------- + ta, tb : float + The line/segment ``x(t) = z + t*d`` is inside the ball for + for ``ta <= t <= tb``. + intersect : bool + When ``True``, there is a intersection between the line/segment + and the sphere. On the other hand, when ``False``, there is no + intersection. + """ + # Special case when d=0 + if norm(d) == 0: + return 0, 0, False + # Check for inf trust_radius + if np.isinf(trust_radius): + if entire_line: + ta = -np.inf + tb = np.inf + else: + ta = 0 + tb = 1 + intersect = True + return ta, tb, intersect + + a = np.dot(d, d) + b = 2 * np.dot(z, d) + c = np.dot(z, z) - trust_radius**2 + discriminant = b*b - 4*a*c + if discriminant < 0: + intersect = False + return 0, 0, intersect + sqrt_discriminant = np.sqrt(discriminant) + + # The following calculation is mathematically + # equivalent to: + # ta = (-b - sqrt_discriminant) / (2*a) + # tb = (-b + sqrt_discriminant) / (2*a) + # but produce smaller round off errors. + # Look at Matrix Computation p.97 + # for a better justification. + aux = b + copysign(sqrt_discriminant, b) + ta = -aux / (2*a) + tb = -2*c / aux + ta, tb = sorted([ta, tb]) + + if entire_line: + intersect = True + else: + # Checks to see if intersection happens + # within vectors length. + if tb < 0 or ta > 1: + intersect = False + ta = 0 + tb = 0 + else: + intersect = True + # Restrict intersection interval + # between 0 and 1. + ta = max(0, ta) + tb = min(1, tb) + + return ta, tb, intersect + + +def box_intersections(z, d, lb, ub, + entire_line=False): + """Find the intersection between segment (or line) and box constraints. + + Find the intersection between the segment (or line) defined by the + parametric equation ``x(t) = z + t*d`` and the rectangular box + ``lb <= x <= ub``. + + Parameters + ---------- + z : array_like, shape (n,) + Initial point. + d : array_like, shape (n,) + Direction. + lb : array_like, shape (n,) + Lower bounds to each one of the components of ``x``. Used + to delimit the rectangular box. + ub : array_like, shape (n, ) + Upper bounds to each one of the components of ``x``. Used + to delimit the rectangular box. + entire_line : bool, optional + When ``True``, the function returns the intersection between the line + ``x(t) = z + t*d`` (``t`` can assume any value) and the rectangular + box. When ``False``, the function returns the intersection between the segment + ``x(t) = z + t*d``, ``0 <= t <= 1``, and the rectangular box. + + Returns + ------- + ta, tb : float + The line/segment ``x(t) = z + t*d`` is inside the box for + for ``ta <= t <= tb``. 
+ intersect : bool + When ``True``, there is a intersection between the line (or segment) + and the rectangular box. On the other hand, when ``False``, there is no + intersection. + """ + # Make sure it is a numpy array + z = np.asarray(z) + d = np.asarray(d) + lb = np.asarray(lb) + ub = np.asarray(ub) + # Special case when d=0 + if norm(d) == 0: + return 0, 0, False + + # Get values for which d==0 + zero_d = (d == 0) + # If the boundaries are not satisfied for some coordinate + # for which "d" is zero, there is no box-line intersection. + if (z[zero_d] < lb[zero_d]).any() or (z[zero_d] > ub[zero_d]).any(): + intersect = False + return 0, 0, intersect + # Remove values for which d is zero + not_zero_d = np.logical_not(zero_d) + z = z[not_zero_d] + d = d[not_zero_d] + lb = lb[not_zero_d] + ub = ub[not_zero_d] + + # Find a series of intervals (t_lb[i], t_ub[i]). + t_lb = (lb-z) / d + t_ub = (ub-z) / d + # Get the intersection of all those intervals. + ta = max(np.minimum(t_lb, t_ub)) + tb = min(np.maximum(t_lb, t_ub)) + + # Check if intersection is feasible + if ta <= tb: + intersect = True + else: + intersect = False + # Checks to see if intersection happens within vectors length. + if not entire_line: + if tb < 0 or ta > 1: + intersect = False + ta = 0 + tb = 0 + else: + # Restrict intersection interval between 0 and 1. + ta = max(0, ta) + tb = min(1, tb) + + return ta, tb, intersect + + +def box_sphere_intersections(z, d, lb, ub, trust_radius, + entire_line=False, + extra_info=False): + """Find the intersection between segment (or line) and box/sphere constraints. + + Find the intersection between the segment (or line) defined by the + parametric equation ``x(t) = z + t*d``, the rectangular box + ``lb <= x <= ub`` and the ball ``||x|| <= trust_radius``. + + Parameters + ---------- + z : array_like, shape (n,) + Initial point. + d : array_like, shape (n,) + Direction. + lb : array_like, shape (n,) + Lower bounds to each one of the components of ``x``. Used + to delimit the rectangular box. + ub : array_like, shape (n, ) + Upper bounds to each one of the components of ``x``. Used + to delimit the rectangular box. + trust_radius : float + Ball radius. + entire_line : bool, optional + When ``True``, the function returns the intersection between the line + ``x(t) = z + t*d`` (``t`` can assume any value) and the constraints. + When ``False``, the function returns the intersection between the segment + ``x(t) = z + t*d``, ``0 <= t <= 1`` and the constraints. + extra_info : bool, optional + When ``True``, the function returns ``intersect_sphere`` and ``intersect_box``. + + Returns + ------- + ta, tb : float + The line/segment ``x(t) = z + t*d`` is inside the rectangular box and + inside the ball for ``ta <= t <= tb``. + intersect : bool + When ``True``, there is a intersection between the line (or segment) + and both constraints. On the other hand, when ``False``, there is no + intersection. + sphere_info : dict, optional + Dictionary ``{ta, tb, intersect}`` containing the interval ``[ta, tb]`` + for which the line intercepts the ball. And a boolean value indicating + whether the sphere is intersected by the line. + box_info : dict, optional + Dictionary ``{ta, tb, intersect}`` containing the interval ``[ta, tb]`` + for which the line intercepts the box. And a boolean value indicating + whether the box is intersected by the line. 
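+
+    Examples
+    --------
+    Hypothetical illustration (added here; not part of the original
+    docstring). For a segment starting at the origin along the first axis,
+    inside the unit box and a ball of radius 2, the whole segment
+    ``0 <= t <= 1`` is feasible::
+
+        ta, tb, intersect = box_sphere_intersections(
+            z=[0, 0], d=[1, 0], lb=[-1, -1], ub=[1, 1], trust_radius=2)
+        # ta == 0, tb == 1, intersect is True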
+ """ + ta_b, tb_b, intersect_b = box_intersections(z, d, lb, ub, + entire_line) + ta_s, tb_s, intersect_s = sphere_intersections(z, d, + trust_radius, + entire_line) + ta = np.maximum(ta_b, ta_s) + tb = np.minimum(tb_b, tb_s) + if intersect_b and intersect_s and ta <= tb: + intersect = True + else: + intersect = False + + if extra_info: + sphere_info = {'ta': ta_s, 'tb': tb_s, 'intersect': intersect_s} + box_info = {'ta': ta_b, 'tb': tb_b, 'intersect': intersect_b} + return ta, tb, intersect, sphere_info, box_info + else: + return ta, tb, intersect + + +def inside_box_boundaries(x, lb, ub): + """Check if lb <= x <= ub.""" + return (lb <= x).all() and (x <= ub).all() + + +def reinforce_box_boundaries(x, lb, ub): + """Return clipped value of x""" + return np.minimum(np.maximum(x, lb), ub) + + +def modified_dogleg(A, Y, b, trust_radius, lb, ub): + """Approximately minimize ``1/2*|| A x + b ||^2`` inside trust-region. + + Approximately solve the problem of minimizing ``1/2*|| A x + b ||^2`` + subject to ``||x|| < Delta`` and ``lb <= x <= ub`` using a modification + of the classical dogleg approach. + + Parameters + ---------- + A : LinearOperator (or sparse matrix or ndarray), shape (m, n) + Matrix ``A`` in the minimization problem. It should have + dimension ``(m, n)`` such that ``m < n``. + Y : LinearOperator (or sparse matrix or ndarray), shape (n, m) + LinearOperator that apply the projection matrix + ``Q = A.T inv(A A.T)`` to the vector. The obtained vector + ``y = Q x`` being the minimum norm solution of ``A y = x``. + b : array_like, shape (m,) + Vector ``b``in the minimization problem. + trust_radius: float + Trust radius to be considered. Delimits a sphere boundary + to the problem. + lb : array_like, shape (n,) + Lower bounds to each one of the components of ``x``. + It is expected that ``lb <= 0``, otherwise the algorithm + may fail. If ``lb[i] = -Inf``, the lower + bound for the ith component is just ignored. + ub : array_like, shape (n, ) + Upper bounds to each one of the components of ``x``. + It is expected that ``ub >= 0``, otherwise the algorithm + may fail. If ``ub[i] = Inf``, the upper bound for the ith + component is just ignored. + + Returns + ------- + x : array_like, shape (n,) + Solution to the problem. + + Notes + ----- + Based on implementations described in pp. 885-886 from [1]_. + + References + ---------- + .. [1] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal. + "An interior point algorithm for large-scale nonlinear + programming." SIAM Journal on Optimization 9.4 (1999): 877-900. + """ + # Compute minimum norm minimizer of 1/2*|| A x + b ||^2. + newton_point = -Y.dot(b) + # Check for interior point + if inside_box_boundaries(newton_point, lb, ub) \ + and norm(newton_point) <= trust_radius: + x = newton_point + return x + + # Compute gradient vector ``g = A.T b`` + g = A.T.dot(b) + # Compute Cauchy point + # `cauchy_point = g.T g / (g.T A.T A g)``. + A_g = A.dot(g) + cauchy_point = -np.dot(g, g) / np.dot(A_g, A_g) * g + # Origin + origin_point = np.zeros_like(cauchy_point) + + # Check the segment between cauchy_point and newton_point + # for a possible solution. + z = cauchy_point + p = newton_point - cauchy_point + _, alpha, intersect = box_sphere_intersections(z, p, lb, ub, + trust_radius) + if intersect: + x1 = z + alpha*p + else: + # Check the segment between the origin and cauchy_point + # for a possible solution. 
+    z = origin_point
+    p = cauchy_point
+    _, alpha, _ = box_sphere_intersections(z, p, lb, ub,
+                                           trust_radius)
+    x1 = z + alpha*p
+
+    # Check the segment between origin and newton_point
+    # for a possible solution.
+    z = origin_point
+    p = newton_point
+    _, alpha, _ = box_sphere_intersections(z, p, lb, ub,
+                                           trust_radius)
+    x2 = z + alpha*p
+
+    # Return the best solution among x1 and x2.
+    if norm(A.dot(x1) + b) < norm(A.dot(x2) + b):
+        return x1
+    else:
+        return x2
+
+
+def projected_cg(H, c, Z, Y, b, trust_radius=np.inf,
+                 lb=None, ub=None, tol=None,
+                 max_iter=None, max_infeasible_iter=None,
+                 return_all=False):
+    """Solve EQP problem with projected CG method.
+
+    Solve equality-constrained quadratic programming problem
+    ``min 1/2 x.T H x + x.T c`` subject to ``A x + b = 0`` and,
+    possibly, to trust region constraints ``||x|| < trust_radius``
+    and box constraints ``lb <= x <= ub``.
+
+    Parameters
+    ----------
+    H : LinearOperator (or sparse matrix or ndarray), shape (n, n)
+        Operator for computing ``H v``.
+    c : array_like, shape (n,)
+        Gradient of the quadratic objective function.
+    Z : LinearOperator (or sparse matrix or ndarray), shape (n, n)
+        Operator for projecting ``x`` into the null space of A.
+    Y : LinearOperator (or sparse matrix or ndarray), shape (n, m)
+        Operator that, for a given vector ``b``, computes the smallest
+        norm solution of ``A x + b = 0``.
+    b : array_like, shape (m,)
+        Right-hand side of the constraint equation.
+    trust_radius : float, optional
+        Trust radius to be considered. By default, uses ``trust_radius=inf``,
+        which means no trust radius at all.
+    lb : array_like, shape (n,), optional
+        Lower bounds to each one of the components of ``x``.
+        If ``lb[i] = -Inf`` the lower bound for the i-th
+        component is just ignored (default).
+    ub : array_like, shape (n,), optional
+        Upper bounds to each one of the components of ``x``.
+        If ``ub[i] = Inf`` the upper bound for the i-th
+        component is just ignored (default).
+    tol : float, optional
+        Tolerance used to interrupt the algorithm.
+    max_iter : int, optional
+        Maximum number of iterations. Must satisfy ``max_iter <= n-m``.
+        By default, uses ``max_iter = n-m``.
+    max_infeasible_iter : int, optional
+        Maximum number of infeasible (regarding box constraints) iterations
+        the algorithm is allowed to take.
+        By default, uses ``max_infeasible_iter = n-m``.
+    return_all : bool, optional
+        When ``True``, return the list of all vectors through the iterations.
+
+    Returns
+    -------
+    x : array_like, shape (n,)
+        Solution of the EQP problem.
+    info : Dict
+        Dictionary containing the following:
+
+            - niter : Number of iterations.
+            - stop_cond : Reason for algorithm termination:
+                1. Iteration limit was reached;
+                2. Reached the trust-region boundary;
+                3. Negative curvature detected;
+                4. Tolerance was satisfied.
+            - allvecs : List containing all intermediary vectors (optional).
+            - hits_boundary : True if the proposed step is on the boundary
+              of the trust region.
+
+    Notes
+    -----
+    Implementation of Algorithm 6.2 of [1]_.
+
+    In the absence of spherical and box constraints, and given enough
+    iterations, the method returns a truly optimal result.
+    In the presence of those constraints, the value returned is only
+    an inexpensive approximation of the optimal value.
+
+    References
+    ----------
+    .. [1] Gould, Nicholas IM, Mary E. Hribar, and Jorge Nocedal.
+           "On the solution of equality constrained quadratic
+           programming problems arising in optimization."
+           SIAM Journal on Scientific Computing 23.4 (2001): 1376-1395.
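+
+    Examples
+    --------
+    Sketch of the intended usage (added for illustration; not part of the
+    original docstring). ``Z`` and ``Y`` would typically come from the
+    ``projections`` helper::
+
+        import numpy as np
+        from scipy.sparse import csc_matrix
+        # ``projections`` lives in the companion module
+        # scipy.optimize._trustregion_constr.projections (assumed path).
+        from scipy.optimize._trustregion_constr.projections import projections
+
+        H = csc_matrix(np.eye(2))               # simple SPD Hessian
+        A = csc_matrix(np.array([[1., 1.]]))    # one equality constraint
+        c = np.array([-1., 0.])
+        b = np.array([-1.])                     # encodes x0 + x1 - 1 = 0
+        Z, LS, Y = projections(A)
+        x, info = projected_cg(H, c, Z, Y, b)
+        # x approximately minimizes 1/2 x.T H x + c.T x with A x + b = 0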
+ """ + CLOSE_TO_ZERO = 1e-25 + + n, = np.shape(c) # Number of parameters + m, = np.shape(b) # Number of constraints + + # Initial Values + x = Y.dot(-b) + r = Z.dot(H.dot(x) + c) + g = Z.dot(r) + p = -g + + # Store ``x`` value + if return_all: + allvecs = [x] + # Values for the first iteration + H_p = H.dot(p) + rt_g = norm(g)**2 # g.T g = r.T Z g = r.T g (ref [1]_ p.1389) + + # If x > trust-region the problem does not have a solution. + tr_distance = trust_radius - norm(x) + if tr_distance < 0: + raise ValueError("Trust region problem does not have a solution.") + # If x == trust_radius, then x is the solution + # to the optimization problem, since x is the + # minimum norm solution to Ax=b. + elif tr_distance < CLOSE_TO_ZERO: + info = {'niter': 0, 'stop_cond': 2, 'hits_boundary': True} + if return_all: + allvecs.append(x) + info['allvecs'] = allvecs + return x, info + + # Set default tolerance + if tol is None: + tol = max(min(0.01 * np.sqrt(rt_g), 0.1 * rt_g), CLOSE_TO_ZERO) + # Set default lower and upper bounds + if lb is None: + lb = np.full(n, -np.inf) + if ub is None: + ub = np.full(n, np.inf) + # Set maximum iterations + if max_iter is None: + max_iter = n-m + max_iter = min(max_iter, n-m) + # Set maximum infeasible iterations + if max_infeasible_iter is None: + max_infeasible_iter = n-m + + hits_boundary = False + stop_cond = 1 + counter = 0 + last_feasible_x = np.zeros_like(x) + k = 0 + for i in range(max_iter): + # Stop criteria - Tolerance : r.T g < tol + if rt_g < tol: + stop_cond = 4 + break + k += 1 + # Compute curvature + pt_H_p = H_p.dot(p) + # Stop criteria - Negative curvature + if pt_H_p <= 0: + if np.isinf(trust_radius): + raise ValueError("Negative curvature not allowed " + "for unrestricted problems.") + else: + # Find intersection with constraints + _, alpha, intersect = box_sphere_intersections( + x, p, lb, ub, trust_radius, entire_line=True) + # Update solution + if intersect: + x = x + alpha*p + # Reinforce variables are inside box constraints. + # This is only necessary because of roundoff errors. + x = reinforce_box_boundaries(x, lb, ub) + # Attribute information + stop_cond = 3 + hits_boundary = True + break + + # Get next step + alpha = rt_g / pt_H_p + x_next = x + alpha*p + + # Stop criteria - Hits boundary + if np.linalg.norm(x_next) >= trust_radius: + # Find intersection with box constraints + _, theta, intersect = box_sphere_intersections(x, alpha*p, lb, ub, + trust_radius) + # Update solution + if intersect: + x = x + theta*alpha*p + # Reinforce variables are inside box constraints. + # This is only necessary because of roundoff errors. + x = reinforce_box_boundaries(x, lb, ub) + # Attribute information + stop_cond = 2 + hits_boundary = True + break + + # Check if ``x`` is inside the box and start counter if it is not. + if inside_box_boundaries(x_next, lb, ub): + counter = 0 + else: + counter += 1 + # Whenever outside box constraints keep looking for intersections. + if counter > 0: + _, theta, intersect = box_sphere_intersections(x, alpha*p, lb, ub, + trust_radius) + if intersect: + last_feasible_x = x + theta*alpha*p + # Reinforce variables are inside box constraints. + # This is only necessary because of roundoff errors. + last_feasible_x = reinforce_box_boundaries(last_feasible_x, + lb, ub) + counter = 0 + # Stop after too many infeasible (regarding box constraints) iteration. 
+ if counter > max_infeasible_iter: + break + # Store ``x_next`` value + if return_all: + allvecs.append(x_next) + + # Update residual + r_next = r + alpha*H_p + # Project residual g+ = Z r+ + g_next = Z.dot(r_next) + # Compute conjugate direction step d + rt_g_next = norm(g_next)**2 # g.T g = r.T g (ref [1]_ p.1389) + beta = rt_g_next / rt_g + p = - g_next + beta*p + # Prepare for next iteration + x = x_next + g = g_next + r = g_next + rt_g = norm(g)**2 # g.T g = r.T Z g = r.T g (ref [1]_ p.1389) + H_p = H.dot(p) + + if not inside_box_boundaries(x, lb, ub): + x = last_feasible_x + hits_boundary = True + info = {'niter': k, 'stop_cond': stop_cond, + 'hits_boundary': hits_boundary} + if return_all: + info['allvecs'] = allvecs + return x, info diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/report.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/report.py new file mode 100644 index 0000000000000000000000000000000000000000..5c3b2fb4ef5c90da78ae3f181159140e87393dcf --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/report.py @@ -0,0 +1,51 @@ +"""Progress report printers.""" + +from __future__ import annotations + +class ReportBase: + COLUMN_NAMES: list[str] = NotImplemented + COLUMN_WIDTHS: list[int] = NotImplemented + ITERATION_FORMATS: list[str] = NotImplemented + + @classmethod + def print_header(cls): + fmt = ("|" + + "|".join([f"{{:^{x}}}" for x in cls.COLUMN_WIDTHS]) + + "|") + separators = ['-' * x for x in cls.COLUMN_WIDTHS] + print(fmt.format(*cls.COLUMN_NAMES)) + print(fmt.format(*separators)) + + @classmethod + def print_iteration(cls, *args): + iteration_format = [f"{{:{x}}}" for x in cls.ITERATION_FORMATS] + fmt = "|" + "|".join(iteration_format) + "|" + print(fmt.format(*args)) + + @classmethod + def print_footer(cls): + print() + + +class BasicReport(ReportBase): + COLUMN_NAMES = ["niter", "f evals", "CG iter", "obj func", "tr radius", + "opt", "c viol"] + COLUMN_WIDTHS = [7, 7, 7, 13, 10, 10, 10] + ITERATION_FORMATS = ["^7", "^7", "^7", "^+13.4e", + "^10.2e", "^10.2e", "^10.2e"] + + +class SQPReport(ReportBase): + COLUMN_NAMES = ["niter", "f evals", "CG iter", "obj func", "tr radius", + "opt", "c viol", "penalty", "CG stop"] + COLUMN_WIDTHS = [7, 7, 7, 13, 10, 10, 10, 10, 7] + ITERATION_FORMATS = ["^7", "^7", "^7", "^+13.4e", "^10.2e", "^10.2e", + "^10.2e", "^10.2e", "^7"] + + +class IPReport(ReportBase): + COLUMN_NAMES = ["niter", "f evals", "CG iter", "obj func", "tr radius", + "opt", "c viol", "penalty", "barrier param", "CG stop"] + COLUMN_WIDTHS = [7, 7, 7, 13, 10, 10, 10, 10, 13, 7] + ITERATION_FORMATS = ["^7", "^7", "^7", "^+13.4e", "^10.2e", "^10.2e", + "^10.2e", "^10.2e", "^13.2e", "^7"] diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_canonical_constraint.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_canonical_constraint.py new file mode 100644 index 0000000000000000000000000000000000000000..452b327d02da3b3bd3fab9592bdef4d56d6aff57 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tests/test_canonical_constraint.py @@ -0,0 +1,296 @@ +import numpy as np +from numpy.testing import assert_array_equal, assert_equal +from scipy.optimize._constraints import (NonlinearConstraint, Bounds, + PreparedConstraint) +from scipy.optimize._trustregion_constr.canonical_constraint \ + import CanonicalConstraint, 
initial_constraints_as_canonical + + +def create_quadratic_function(n, m, rng): + a = rng.rand(m) + A = rng.rand(m, n) + H = rng.rand(m, n, n) + HT = np.transpose(H, (1, 2, 0)) + + def fun(x): + return a + A.dot(x) + 0.5 * H.dot(x).dot(x) + + def jac(x): + return A + H.dot(x) + + def hess(x, v): + return HT.dot(v) + + return fun, jac, hess + + +def test_bounds_cases(): + # Test 1: no constraints. + user_constraint = Bounds(-np.inf, np.inf) + x0 = np.array([-1, 2]) + prepared_constraint = PreparedConstraint(user_constraint, x0, False) + c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint) + + assert_equal(c.n_eq, 0) + assert_equal(c.n_ineq, 0) + + c_eq, c_ineq = c.fun(x0) + assert_array_equal(c_eq, []) + assert_array_equal(c_ineq, []) + + J_eq, J_ineq = c.jac(x0) + assert_array_equal(J_eq, np.empty((0, 2))) + assert_array_equal(J_ineq, np.empty((0, 2))) + + assert_array_equal(c.keep_feasible, []) + + # Test 2: infinite lower bound. + user_constraint = Bounds(-np.inf, [0, np.inf, 1], [False, True, True]) + x0 = np.array([-1, -2, -3], dtype=float) + prepared_constraint = PreparedConstraint(user_constraint, x0, False) + c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint) + + assert_equal(c.n_eq, 0) + assert_equal(c.n_ineq, 2) + + c_eq, c_ineq = c.fun(x0) + assert_array_equal(c_eq, []) + assert_array_equal(c_ineq, [-1, -4]) + + J_eq, J_ineq = c.jac(x0) + assert_array_equal(J_eq, np.empty((0, 3))) + assert_array_equal(J_ineq, np.array([[1, 0, 0], [0, 0, 1]])) + + assert_array_equal(c.keep_feasible, [False, True]) + + # Test 3: infinite upper bound. + user_constraint = Bounds([0, 1, -np.inf], np.inf, [True, False, True]) + x0 = np.array([1, 2, 3], dtype=float) + prepared_constraint = PreparedConstraint(user_constraint, x0, False) + c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint) + + assert_equal(c.n_eq, 0) + assert_equal(c.n_ineq, 2) + + c_eq, c_ineq = c.fun(x0) + assert_array_equal(c_eq, []) + assert_array_equal(c_ineq, [-1, -1]) + + J_eq, J_ineq = c.jac(x0) + assert_array_equal(J_eq, np.empty((0, 3))) + assert_array_equal(J_ineq, np.array([[-1, 0, 0], [0, -1, 0]])) + + assert_array_equal(c.keep_feasible, [True, False]) + + # Test 4: interval constraint. 
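+    # (added note) A finite two-sided bound contributes two inequalities,
+    # while a component with lb == ub (the last one here) becomes an
+    # equality constraint.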
+ user_constraint = Bounds([-1, -np.inf, 2, 3], [1, np.inf, 10, 3], + [False, True, True, True]) + x0 = np.array([0, 10, 8, 5]) + prepared_constraint = PreparedConstraint(user_constraint, x0, False) + c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint) + + assert_equal(c.n_eq, 1) + assert_equal(c.n_ineq, 4) + + c_eq, c_ineq = c.fun(x0) + assert_array_equal(c_eq, [2]) + assert_array_equal(c_ineq, [-1, -2, -1, -6]) + + J_eq, J_ineq = c.jac(x0) + assert_array_equal(J_eq, [[0, 0, 0, 1]]) + assert_array_equal(J_ineq, [[1, 0, 0, 0], + [0, 0, 1, 0], + [-1, 0, 0, 0], + [0, 0, -1, 0]]) + + assert_array_equal(c.keep_feasible, [False, True, False, True]) + + +def test_nonlinear_constraint(): + n = 3 + m = 5 + rng = np.random.RandomState(0) + x0 = rng.rand(n) + + fun, jac, hess = create_quadratic_function(n, m, rng) + f = fun(x0) + J = jac(x0) + + lb = [-10, 3, -np.inf, -np.inf, -5] + ub = [10, 3, np.inf, 3, np.inf] + user_constraint = NonlinearConstraint( + fun, lb, ub, jac, hess, [True, False, False, True, False]) + + for sparse_jacobian in [False, True]: + prepared_constraint = PreparedConstraint(user_constraint, x0, + sparse_jacobian) + c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint) + + assert_array_equal(c.n_eq, 1) + assert_array_equal(c.n_ineq, 4) + + c_eq, c_ineq = c.fun(x0) + assert_array_equal(c_eq, [f[1] - lb[1]]) + assert_array_equal(c_ineq, [f[3] - ub[3], lb[4] - f[4], + f[0] - ub[0], lb[0] - f[0]]) + + J_eq, J_ineq = c.jac(x0) + if sparse_jacobian: + J_eq = J_eq.toarray() + J_ineq = J_ineq.toarray() + + assert_array_equal(J_eq, J[1, None]) + assert_array_equal(J_ineq, np.vstack((J[3], -J[4], J[0], -J[0]))) + + v_eq = rng.rand(c.n_eq) + v_ineq = rng.rand(c.n_ineq) + v = np.zeros(m) + v[1] = v_eq[0] + v[3] = v_ineq[0] + v[4] = -v_ineq[1] + v[0] = v_ineq[2] - v_ineq[3] + assert_array_equal(c.hess(x0, v_eq, v_ineq), hess(x0, v)) + + assert_array_equal(c.keep_feasible, [True, False, True, True]) + + +def test_concatenation(): + rng = np.random.RandomState(0) + n = 4 + x0 = rng.rand(n) + + f1 = x0 + J1 = np.eye(n) + lb1 = [-1, -np.inf, -2, 3] + ub1 = [1, np.inf, np.inf, 3] + bounds = Bounds(lb1, ub1, [False, False, True, False]) + + fun, jac, hess = create_quadratic_function(n, 5, rng) + f2 = fun(x0) + J2 = jac(x0) + lb2 = [-10, 3, -np.inf, -np.inf, -5] + ub2 = [10, 3, np.inf, 5, np.inf] + nonlinear = NonlinearConstraint( + fun, lb2, ub2, jac, hess, [True, False, False, True, False]) + + for sparse_jacobian in [False, True]: + bounds_prepared = PreparedConstraint(bounds, x0, sparse_jacobian) + nonlinear_prepared = PreparedConstraint(nonlinear, x0, sparse_jacobian) + + c1 = CanonicalConstraint.from_PreparedConstraint(bounds_prepared) + c2 = CanonicalConstraint.from_PreparedConstraint(nonlinear_prepared) + c = CanonicalConstraint.concatenate([c1, c2], sparse_jacobian) + + assert_equal(c.n_eq, 2) + assert_equal(c.n_ineq, 7) + + c_eq, c_ineq = c.fun(x0) + assert_array_equal(c_eq, [f1[3] - lb1[3], f2[1] - lb2[1]]) + assert_array_equal(c_ineq, [lb1[2] - f1[2], f1[0] - ub1[0], + lb1[0] - f1[0], f2[3] - ub2[3], + lb2[4] - f2[4], f2[0] - ub2[0], + lb2[0] - f2[0]]) + + J_eq, J_ineq = c.jac(x0) + if sparse_jacobian: + J_eq = J_eq.toarray() + J_ineq = J_ineq.toarray() + + assert_array_equal(J_eq, np.vstack((J1[3], J2[1]))) + assert_array_equal(J_ineq, np.vstack((-J1[2], J1[0], -J1[0], J2[3], + -J2[4], J2[0], -J2[0]))) + + v_eq = rng.rand(c.n_eq) + v_ineq = rng.rand(c.n_ineq) + v = np.zeros(5) + v[1] = v_eq[1] + v[3] = v_ineq[3] + v[4] = -v_ineq[4] + v[0] = v_ineq[5] - 
v_ineq[6] + H = c.hess(x0, v_eq, v_ineq).dot(np.eye(n)) + assert_array_equal(H, hess(x0, v)) + + assert_array_equal(c.keep_feasible, + [True, False, False, True, False, True, True]) + + +def test_empty(): + x = np.array([1, 2, 3]) + c = CanonicalConstraint.empty(3) + assert_equal(c.n_eq, 0) + assert_equal(c.n_ineq, 0) + + c_eq, c_ineq = c.fun(x) + assert_array_equal(c_eq, []) + assert_array_equal(c_ineq, []) + + J_eq, J_ineq = c.jac(x) + assert_array_equal(J_eq, np.empty((0, 3))) + assert_array_equal(J_ineq, np.empty((0, 3))) + + H = c.hess(x, None, None).toarray() + assert_array_equal(H, np.zeros((3, 3))) + + +def test_initial_constraints_as_canonical(): + # rng is only used to generate the coefficients of the quadratic + # function that is used by the nonlinear constraint. + rng = np.random.RandomState(0) + + x0 = np.array([0.5, 0.4, 0.3, 0.2]) + n = len(x0) + + lb1 = [-1, -np.inf, -2, 3] + ub1 = [1, np.inf, np.inf, 3] + bounds = Bounds(lb1, ub1, [False, False, True, False]) + + fun, jac, hess = create_quadratic_function(n, 5, rng) + lb2 = [-10, 3, -np.inf, -np.inf, -5] + ub2 = [10, 3, np.inf, 5, np.inf] + nonlinear = NonlinearConstraint( + fun, lb2, ub2, jac, hess, [True, False, False, True, False]) + + for sparse_jacobian in [False, True]: + bounds_prepared = PreparedConstraint(bounds, x0, sparse_jacobian) + nonlinear_prepared = PreparedConstraint(nonlinear, x0, sparse_jacobian) + + f1 = bounds_prepared.fun.f + J1 = bounds_prepared.fun.J + f2 = nonlinear_prepared.fun.f + J2 = nonlinear_prepared.fun.J + + c_eq, c_ineq, J_eq, J_ineq = initial_constraints_as_canonical( + n, [bounds_prepared, nonlinear_prepared], sparse_jacobian) + + assert_array_equal(c_eq, [f1[3] - lb1[3], f2[1] - lb2[1]]) + assert_array_equal(c_ineq, [lb1[2] - f1[2], f1[0] - ub1[0], + lb1[0] - f1[0], f2[3] - ub2[3], + lb2[4] - f2[4], f2[0] - ub2[0], + lb2[0] - f2[0]]) + + if sparse_jacobian: + J1 = J1.toarray() + J2 = J2.toarray() + J_eq = J_eq.toarray() + J_ineq = J_ineq.toarray() + + assert_array_equal(J_eq, np.vstack((J1[3], J2[1]))) + assert_array_equal(J_ineq, np.vstack((-J1[2], J1[0], -J1[0], J2[3], + -J2[4], J2[0], -J2[0]))) + + +def test_initial_constraints_as_canonical_empty(): + n = 3 + for sparse_jacobian in [False, True]: + c_eq, c_ineq, J_eq, J_ineq = initial_constraints_as_canonical( + n, [], sparse_jacobian) + + assert_array_equal(c_eq, []) + assert_array_equal(c_ineq, []) + + if sparse_jacobian: + J_eq = J_eq.toarray() + J_ineq = J_ineq.toarray() + + assert_array_equal(J_eq, np.empty((0, n))) + assert_array_equal(J_ineq, np.empty((0, n))) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tr_interior_point.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tr_interior_point.py new file mode 100644 index 0000000000000000000000000000000000000000..121143fad2a8df3a8986beffc5043622d9ace993 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/tr_interior_point.py @@ -0,0 +1,346 @@ +"""Trust-region interior point method. + +References +---------- +.. [1] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal. + "An interior point algorithm for large-scale nonlinear + programming." SIAM Journal on Optimization 9.4 (1999): 877-900. +.. [2] Byrd, Richard H., Guanghui Liu, and Jorge Nocedal. + "On the local behavior of an interior point method for + nonlinear programming." Numerical analysis 1997 (1997): 37-56. +.. [3] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization" + Second Edition (2006). 
+""" + +import scipy.sparse as sps +import numpy as np +from .equality_constrained_sqp import equality_constrained_sqp +from scipy.sparse.linalg import LinearOperator + +__all__ = ['tr_interior_point'] + + +class BarrierSubproblem: + """ + Barrier optimization problem: + minimize fun(x) - barrier_parameter*sum(log(s)) + subject to: constr_eq(x) = 0 + constr_ineq(x) + s = 0 + """ + + def __init__(self, x0, s0, fun, grad, lagr_hess, n_vars, n_ineq, n_eq, + constr, jac, barrier_parameter, tolerance, + enforce_feasibility, global_stop_criteria, + xtol, fun0, grad0, constr_ineq0, jac_ineq0, constr_eq0, + jac_eq0): + # Store parameters + self.n_vars = n_vars + self.x0 = x0 + self.s0 = s0 + self.fun = fun + self.grad = grad + self.lagr_hess = lagr_hess + self.constr = constr + self.jac = jac + self.barrier_parameter = barrier_parameter + self.tolerance = tolerance + self.n_eq = n_eq + self.n_ineq = n_ineq + self.enforce_feasibility = enforce_feasibility + self.global_stop_criteria = global_stop_criteria + self.xtol = xtol + self.fun0 = self._compute_function(fun0, constr_ineq0, s0) + self.grad0 = self._compute_gradient(grad0) + self.constr0 = self._compute_constr(constr_ineq0, constr_eq0, s0) + self.jac0 = self._compute_jacobian(jac_eq0, jac_ineq0, s0) + self.terminate = False + + def update(self, barrier_parameter, tolerance): + self.barrier_parameter = barrier_parameter + self.tolerance = tolerance + + def get_slack(self, z): + return z[self.n_vars:self.n_vars+self.n_ineq] + + def get_variables(self, z): + return z[:self.n_vars] + + def function_and_constraints(self, z): + """Returns barrier function and constraints at given point. + + For z = [x, s], returns barrier function: + function(z) = fun(x) - barrier_parameter*sum(log(s)) + and barrier constraints: + constraints(z) = [ constr_eq(x) ] + [ constr_ineq(x) + s ] + + """ + # Get variables and slack variables + x = self.get_variables(z) + s = self.get_slack(z) + # Compute function and constraints + f = self.fun(x) + c_eq, c_ineq = self.constr(x) + # Return objective function and constraints + return (self._compute_function(f, c_ineq, s), + self._compute_constr(c_ineq, c_eq, s)) + + def _compute_function(self, f, c_ineq, s): + # Use technique from Nocedal and Wright book, ref [3]_, p.576, + # to guarantee constraints from `enforce_feasibility` + # stay feasible along iterations. + s[self.enforce_feasibility] = -c_ineq[self.enforce_feasibility] + log_s = [np.log(s_i) if s_i > 0 else -np.inf for s_i in s] + # Compute barrier objective function + return f - self.barrier_parameter*np.sum(log_s) + + def _compute_constr(self, c_ineq, c_eq, s): + # Compute barrier constraint + return np.hstack((c_eq, + c_ineq + s)) + + def scaling(self, z): + """Returns scaling vector. + Given by: + scaling = [ones(n_vars), s] + """ + s = self.get_slack(z) + diag_elements = np.hstack((np.ones(self.n_vars), s)) + + # Diagonal matrix + def matvec(vec): + return diag_elements*vec + return LinearOperator((self.n_vars+self.n_ineq, + self.n_vars+self.n_ineq), + matvec) + + def gradient_and_jacobian(self, z): + """Returns scaled gradient. + + Return scaled gradient: + gradient = [ grad(x) ] + [ -barrier_parameter*ones(n_ineq) ] + and scaled Jacobian matrix: + jacobian = [ jac_eq(x) 0 ] + [ jac_ineq(x) S ] + Both of them scaled by the previously defined scaling factor. 
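+        Here ``S = diag(s)``, with ``s`` the current slack vector: multiplying
+        the unscaled Jacobian on the right by ``diag([ones(n_vars), s])``
+        turns the identity block of the slack columns into ``S``.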
+ """ + # Get variables and slack variables + x = self.get_variables(z) + s = self.get_slack(z) + # Compute first derivatives + g = self.grad(x) + J_eq, J_ineq = self.jac(x) + # Return gradient and Jacobian + return (self._compute_gradient(g), + self._compute_jacobian(J_eq, J_ineq, s)) + + def _compute_gradient(self, g): + return np.hstack((g, -self.barrier_parameter*np.ones(self.n_ineq))) + + def _compute_jacobian(self, J_eq, J_ineq, s): + if self.n_ineq == 0: + return J_eq + else: + if sps.issparse(J_eq) or sps.issparse(J_ineq): + # It is expected that J_eq and J_ineq + # are already `csr_matrix` because of + # the way ``BoxConstraint``, ``NonlinearConstraint`` + # and ``LinearConstraint`` are defined. + J_eq = sps.csr_matrix(J_eq) + J_ineq = sps.csr_matrix(J_ineq) + return self._assemble_sparse_jacobian(J_eq, J_ineq, s) + else: + S = np.diag(s) + zeros = np.zeros((self.n_eq, self.n_ineq)) + # Convert to matrix + if sps.issparse(J_ineq): + J_ineq = J_ineq.toarray() + if sps.issparse(J_eq): + J_eq = J_eq.toarray() + # Concatenate matrices + return np.block([[J_eq, zeros], + [J_ineq, S]]) + + def _assemble_sparse_jacobian(self, J_eq, J_ineq, s): + """Assemble sparse Jacobian given its components. + + Given ``J_eq``, ``J_ineq`` and ``s`` returns: + jacobian = [ J_eq, 0 ] + [ J_ineq, diag(s) ] + + It is equivalent to: + sps.bmat([[ J_eq, None ], + [ J_ineq, diag(s) ]], "csr") + but significantly more efficient for this + given structure. + """ + n_vars, n_ineq, n_eq = self.n_vars, self.n_ineq, self.n_eq + J_aux = sps.vstack([J_eq, J_ineq], "csr") + indptr, indices, data = J_aux.indptr, J_aux.indices, J_aux.data + new_indptr = indptr + np.hstack((np.zeros(n_eq, dtype=int), + np.arange(n_ineq+1, dtype=int))) + size = indices.size+n_ineq + new_indices = np.empty(size) + new_data = np.empty(size) + mask = np.full(size, False, bool) + mask[new_indptr[-n_ineq:]-1] = True + new_indices[mask] = n_vars+np.arange(n_ineq) + new_indices[~mask] = indices + new_data[mask] = s + new_data[~mask] = data + J = sps.csr_matrix((new_data, new_indices, new_indptr), + (n_eq + n_ineq, n_vars + n_ineq)) + return J + + def lagrangian_hessian_x(self, z, v): + """Returns Lagrangian Hessian (in relation to `x`) -> Hx""" + x = self.get_variables(z) + # Get lagrange multipliers related to nonlinear equality constraints + v_eq = v[:self.n_eq] + # Get lagrange multipliers related to nonlinear ineq. constraints + v_ineq = v[self.n_eq:self.n_eq+self.n_ineq] + lagr_hess = self.lagr_hess + return lagr_hess(x, v_eq, v_ineq) + + def lagrangian_hessian_s(self, z, v): + """Returns scaled Lagrangian Hessian (in relation to`s`) -> S Hs S""" + s = self.get_slack(z) + # Using the primal formulation: + # S Hs S = diag(s)*diag(barrier_parameter/s**2)*diag(s). + # Reference [1]_ p. 882, formula (3.1) + primal = self.barrier_parameter + # Using the primal-dual formulation + # S Hs S = diag(s)*diag(v/s)*diag(s) + # Reference [1]_ p. 883, formula (3.11) + primal_dual = v[-self.n_ineq:]*s + # Uses the primal-dual formulation for + # positives values of v_ineq, and primal + # formulation for the remaining ones. 
+ return np.where(v[-self.n_ineq:] > 0, primal_dual, primal) + + def lagrangian_hessian(self, z, v): + """Returns scaled Lagrangian Hessian""" + # Compute Hessian in relation to x and s + Hx = self.lagrangian_hessian_x(z, v) + if self.n_ineq > 0: + S_Hs_S = self.lagrangian_hessian_s(z, v) + + # The scaled Lagragian Hessian is: + # [ Hx 0 ] + # [ 0 S Hs S ] + def matvec(vec): + vec_x = self.get_variables(vec) + vec_s = self.get_slack(vec) + if self.n_ineq > 0: + return np.hstack((Hx.dot(vec_x), S_Hs_S*vec_s)) + else: + return Hx.dot(vec_x) + return LinearOperator((self.n_vars+self.n_ineq, + self.n_vars+self.n_ineq), + matvec) + + def stop_criteria(self, state, z, last_iteration_failed, + optimality, constr_violation, + trust_radius, penalty, cg_info): + """Stop criteria to the barrier problem. + The criteria here proposed is similar to formula (2.3) + from [1]_, p.879. + """ + x = self.get_variables(z) + if self.global_stop_criteria(state, x, + last_iteration_failed, + trust_radius, penalty, + cg_info, + self.barrier_parameter, + self.tolerance): + self.terminate = True + return True + else: + g_cond = (optimality < self.tolerance and + constr_violation < self.tolerance) + x_cond = trust_radius < self.xtol + return g_cond or x_cond + + +def tr_interior_point(fun, grad, lagr_hess, n_vars, n_ineq, n_eq, + constr, jac, x0, fun0, grad0, + constr_ineq0, jac_ineq0, constr_eq0, + jac_eq0, stop_criteria, + enforce_feasibility, xtol, state, + initial_barrier_parameter, + initial_tolerance, + initial_penalty, + initial_trust_radius, + factorization_method): + """Trust-region interior points method. + + Solve problem: + minimize fun(x) + subject to: constr_ineq(x) <= 0 + constr_eq(x) = 0 + using trust-region interior point method described in [1]_. + """ + # BOUNDARY_PARAMETER controls the decrease on the slack + # variables. Represents ``tau`` from [1]_ p.885, formula (3.18). + BOUNDARY_PARAMETER = 0.995 + # BARRIER_DECAY_RATIO controls the decay of the barrier parameter + # and of the subproblem toloerance. Represents ``theta`` from [1]_ p.879. + BARRIER_DECAY_RATIO = 0.2 + # TRUST_ENLARGEMENT controls the enlargement on trust radius + # after each iteration + TRUST_ENLARGEMENT = 5 + + # Default enforce_feasibility + if enforce_feasibility is None: + enforce_feasibility = np.zeros(n_ineq, bool) + # Initial Values + barrier_parameter = initial_barrier_parameter + tolerance = initial_tolerance + trust_radius = initial_trust_radius + # Define initial value for the slack variables + s0 = np.maximum(-1.5*constr_ineq0, np.ones(n_ineq)) + # Define barrier subproblem + subprob = BarrierSubproblem( + x0, s0, fun, grad, lagr_hess, n_vars, n_ineq, n_eq, constr, jac, + barrier_parameter, tolerance, enforce_feasibility, + stop_criteria, xtol, fun0, grad0, constr_ineq0, jac_ineq0, + constr_eq0, jac_eq0) + # Define initial parameter for the first iteration. 
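+    # (added note) The composite iterate stacks primal variables and slack
+    # variables: z = [x, s].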
+ z = np.hstack((x0, s0)) + fun0_subprob, constr0_subprob = subprob.fun0, subprob.constr0 + grad0_subprob, jac0_subprob = subprob.grad0, subprob.jac0 + # Define trust region bounds + trust_lb = np.hstack((np.full(subprob.n_vars, -np.inf), + np.full(subprob.n_ineq, -BOUNDARY_PARAMETER))) + trust_ub = np.full(subprob.n_vars+subprob.n_ineq, np.inf) + + # Solves a sequence of barrier problems + while True: + # Solve SQP subproblem + z, state = equality_constrained_sqp( + subprob.function_and_constraints, + subprob.gradient_and_jacobian, + subprob.lagrangian_hessian, + z, fun0_subprob, grad0_subprob, + constr0_subprob, jac0_subprob, subprob.stop_criteria, + state, initial_penalty, trust_radius, + factorization_method, trust_lb, trust_ub, subprob.scaling) + if subprob.terminate: + break + # Update parameters + trust_radius = max(initial_trust_radius, + TRUST_ENLARGEMENT*state.tr_radius) + # TODO: Use more advanced strategies from [2]_ + # to update this parameters. + barrier_parameter *= BARRIER_DECAY_RATIO + tolerance *= BARRIER_DECAY_RATIO + # Update Barrier Problem + subprob.update(barrier_parameter, tolerance) + # Compute initial values for next iteration + fun0_subprob, constr0_subprob = subprob.function_and_constraints(z) + grad0_subprob, jac0_subprob = subprob.gradient_and_jacobian(z) + + # Get x and s + x = subprob.get_variables(z) + return x, state diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_dogleg.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_dogleg.py new file mode 100644 index 0000000000000000000000000000000000000000..a54abd60c703408d6c87cb5020d6781fdf0213c7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_dogleg.py @@ -0,0 +1,122 @@ +"""Dog-leg trust-region optimization.""" +import numpy as np +import scipy.linalg +from ._trustregion import (_minimize_trust_region, BaseQuadraticSubproblem) + +__all__ = [] + + +def _minimize_dogleg(fun, x0, args=(), jac=None, hess=None, + **trust_region_options): + """ + Minimization of scalar function of one or more variables using + the dog-leg trust-region algorithm. + + Options + ------- + initial_trust_radius : float + Initial trust-region radius. + max_trust_radius : float + Maximum value of the trust-region radius. No steps that are longer + than this value will be proposed. + eta : float + Trust region related acceptance stringency for proposed steps. + gtol : float + Gradient norm must be less than `gtol` before successful + termination. + + """ + if jac is None: + raise ValueError('Jacobian is required for dogleg minimization') + if not callable(hess): + raise ValueError('Hessian is required for dogleg minimization') + return _minimize_trust_region(fun, x0, args=args, jac=jac, hess=hess, + subproblem=DoglegSubproblem, + **trust_region_options) + + +class DoglegSubproblem(BaseQuadraticSubproblem): + """Quadratic subproblem solved by the dogleg method""" + + def cauchy_point(self): + """ + The Cauchy point is minimal along the direction of steepest descent. + """ + if self._cauchy_point is None: + g = self.jac + Bg = self.hessp(g) + self._cauchy_point = -(np.dot(g, g) / np.dot(g, Bg)) * g + return self._cauchy_point + + def newton_point(self): + """ + The Newton point is a global minimum of the approximate function. 
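+
+        It is obtained here by solving ``B p = -g`` through a Cholesky
+        factorization of the (assumed positive definite) Hessian ``B``.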
+ """ + if self._newton_point is None: + g = self.jac + B = self.hess + cho_info = scipy.linalg.cho_factor(B) + self._newton_point = -scipy.linalg.cho_solve(cho_info, g) + return self._newton_point + + def solve(self, trust_radius): + """ + Minimize a function using the dog-leg trust-region algorithm. + + This algorithm requires function values and first and second derivatives. + It also performs a costly Hessian decomposition for most iterations, + and the Hessian is required to be positive definite. + + Parameters + ---------- + trust_radius : float + We are allowed to wander only this far away from the origin. + + Returns + ------- + p : ndarray + The proposed step. + hits_boundary : bool + True if the proposed step is on the boundary of the trust region. + + Notes + ----- + The Hessian is required to be positive definite. + + References + ---------- + .. [1] Jorge Nocedal and Stephen Wright, + Numerical Optimization, second edition, + Springer-Verlag, 2006, page 73. + """ + + # Compute the Newton point. + # This is the optimum for the quadratic model function. + # If it is inside the trust radius then return this point. + p_best = self.newton_point() + if scipy.linalg.norm(p_best) < trust_radius: + hits_boundary = False + return p_best, hits_boundary + + # Compute the Cauchy point. + # This is the predicted optimum along the direction of steepest descent. + p_u = self.cauchy_point() + + # If the Cauchy point is outside the trust region, + # then return the point where the path intersects the boundary. + p_u_norm = scipy.linalg.norm(p_u) + if p_u_norm >= trust_radius: + p_boundary = p_u * (trust_radius / p_u_norm) + hits_boundary = True + return p_boundary, hits_boundary + + # Compute the intersection of the trust region boundary + # and the line segment connecting the Cauchy and Newton points. + # This requires solving a quadratic equation. + # ||p_u + t*(p_best - p_u)||**2 == trust_radius**2 + # Solve this for positive time t using the quadratic formula. + _, tb = self.get_boundaries_intersections(p_u, p_best - p_u, + trust_radius) + p_boundary = p_u + tb * (p_best - p_u) + hits_boundary = True + return p_boundary, hits_boundary diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_exact.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_exact.py new file mode 100644 index 0000000000000000000000000000000000000000..21fc3d5609d2b41eb5b5ad840ef464522565054c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_exact.py @@ -0,0 +1,438 @@ +"""Nearly exact trust-region optimization subproblem.""" +import numpy as np +from scipy.linalg import (norm, get_lapack_funcs, solve_triangular, + cho_solve) +from ._trustregion import (_minimize_trust_region, BaseQuadraticSubproblem) + +__all__ = ['_minimize_trustregion_exact', + 'estimate_smallest_singular_value', + 'singular_leading_submatrix', + 'IterativeSubproblem'] + + +def _minimize_trustregion_exact(fun, x0, args=(), jac=None, hess=None, + **trust_region_options): + """ + Minimization of scalar function of one or more variables using + a nearly exact trust-region algorithm. + + Options + ------- + initial_trust_radius : float + Initial trust-region radius. + max_trust_radius : float + Maximum value of the trust-region radius. No steps that are longer + than this value will be proposed. + eta : float + Trust region related acceptance stringency for proposed steps. + gtol : float + Gradient norm must be less than ``gtol`` before successful + termination. 
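+
+    Notes
+    -----
+    This solver is normally reached through ``scipy.optimize.minimize`` with
+    ``method='trust-exact'`` rather than being called directly. A minimal
+    sketch (added for illustration)::
+
+        from scipy.optimize import minimize, rosen, rosen_der, rosen_hess
+        res = minimize(rosen, [2., 2.], method='trust-exact',
+                       jac=rosen_der, hess=rosen_hess)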
+    """
+
+    if jac is None:
+        raise ValueError('Jacobian is required for trust region '
+                         'exact minimization.')
+    if not callable(hess):
+        raise ValueError('Hessian matrix is required for trust region '
+                         'exact minimization.')
+    return _minimize_trust_region(fun, x0, args=args, jac=jac, hess=hess,
+                                  subproblem=IterativeSubproblem,
+                                  **trust_region_options)
+
+
+def estimate_smallest_singular_value(U):
+    """Given an upper triangular matrix ``U``, estimate its smallest singular
+    value and the corresponding right singular vector in O(n**2) operations.
+
+    Parameters
+    ----------
+    U : ndarray
+        Square upper triangular matrix.
+
+    Returns
+    -------
+    s_min : float
+        Estimated smallest singular value of the provided matrix.
+    z_min : ndarray
+        Estimated right singular vector.
+
+    Notes
+    -----
+    The procedure is based on [1]_ and is done in two steps. First, it finds
+    a vector ``e`` with components selected from {+1, -1} such that the
+    solution ``w`` of the system ``U.T w = e`` is as large as possible.
+    Next, the system ``U v = w`` is solved. The smallest singular value is
+    close to ``norm(w)/norm(v)`` and the right singular vector is close
+    to ``v/norm(v)``.
+
+    The estimate becomes better the more ill-conditioned the matrix is.
+
+    References
+    ----------
+    .. [1] Cline, A. K., Moler, C. B., Stewart, G. W., Wilkinson, J. H.
+           An estimate for the condition number of a matrix. 1979.
+           SIAM Journal on Numerical Analysis, 16(2), 368-375.
+    """
+
+    U = np.atleast_2d(U)
+    m, n = U.shape
+
+    if m != n:
+        raise ValueError("A square triangular matrix should be provided.")
+
+    # A vector `e` with components selected from {+1, -1}
+    # is chosen so that the solution `w` to the system
+    # `U.T w = e` is as large as possible. Implementation
+    # based on algorithm 3.5.1, p. 142, from Golub and Van Loan
+    # (cited below), adapted for a lower triangular matrix.
+
+    p = np.zeros(n)
+    w = np.empty(n)
+
+    # Implemented according to: Golub, G. H., Van Loan, C. F. (2013).
+    # "Matrix computations". Fourth Edition. JHU Press. pp. 140-142.
+    for k in range(n):
+        wp = (1-p[k]) / U.T[k, k]
+        wm = (-1-p[k]) / U.T[k, k]
+        pp = p[k+1:] + U.T[k+1:, k]*wp
+        pm = p[k+1:] + U.T[k+1:, k]*wm
+
+        if abs(wp) + norm(pp, 1) >= abs(wm) + norm(pm, 1):
+            w[k] = wp
+            p[k+1:] = pp
+        else:
+            w[k] = wm
+            p[k+1:] = pm
+
+    # The system `U v = w` is solved using backward substitution.
+    v = solve_triangular(U, w)
+
+    v_norm = norm(v)
+    w_norm = norm(w)
+
+    # Smallest singular value
+    s_min = w_norm / v_norm
+
+    # Associated vector
+    z_min = v / v_norm
+
+    return s_min, z_min
+
+
+def gershgorin_bounds(H):
+    """
+    Given a square matrix ``H``, compute upper
+    and lower bounds for its eigenvalues (Gershgorin bounds).
+    Defined in ref. [1].
+
+    References
+    ----------
+    .. [1] Conn, A. R., Gould, N. I., & Toint, P. L.
+           Trust region methods. 2000. Siam. pp. 19.
+    """
+
+    H_diag = np.diag(H)
+    H_diag_abs = np.abs(H_diag)
+    H_row_sums = np.sum(np.abs(H), axis=1)
+    lb = np.min(H_diag + H_diag_abs - H_row_sums)
+    ub = np.max(H_diag - H_diag_abs + H_row_sums)
+
+    return lb, ub
+
+
+def singular_leading_submatrix(A, U, k):
+    """
+    Compute a term that makes the leading ``k`` by ``k``
+    submatrix of ``A`` singular.
+
+    Parameters
+    ----------
+    A : ndarray
+        Symmetric matrix that is not positive definite.
+    U : ndarray
+        Upper triangular matrix resulting from an incomplete
+        Cholesky decomposition of matrix ``A``.
+    k : int
+        Positive integer such that the leading k by k submatrix of
+        ``A`` is the first non-positive definite leading submatrix.
+
+    Returns
+    -------
+    delta : float
+        Amount that should be added to the element (k, k) of the
+        leading k by k submatrix of ``A`` to make it singular.
+    v : ndarray
+        A vector such that ``v.T B v = 0``, where ``B`` is the matrix ``A``
+        after ``delta`` is added to its element (k, k).
+    """
+
+    # Compute delta
+    delta = np.sum(U[:k-1, k-1]**2) - A[k-1, k-1]
+
+    n = len(A)
+
+    # Initialize v
+    v = np.zeros(n)
+    v[k-1] = 1
+
+    # Compute the remaining values of v by solving a triangular system.
+    if k != 1:
+        v[:k-1] = solve_triangular(U[:k-1, :k-1], -U[:k-1, k-1])
+
+    return delta, v
+
+
+class IterativeSubproblem(BaseQuadraticSubproblem):
+    """Quadratic subproblem solved by nearly exact iterative method.
+
+    Notes
+    -----
+    This subproblem solver was based on [1]_, [2]_ and [3]_,
+    which implement similar algorithms. The algorithm is basically
+    that of [1]_ but ideas from [2]_ and [3]_ were also used.
+
+    References
+    ----------
+    .. [1] A.R. Conn, N.I. Gould, and P.L. Toint, "Trust region methods",
+           Siam, pp. 169-200, 2000.
+    .. [2] J. Nocedal and S. Wright, "Numerical optimization",
+           Springer Science & Business Media. pp. 83-91, 2006.
+    .. [3] J.J. More and D.C. Sorensen, "Computing a trust region step",
+           SIAM Journal on Scientific and Statistical Computing, vol. 4(3),
+           pp. 553-572, 1983.
+    """
+
+    # UPDATE_COEFF appears in reference [1]_
+    # in formula 7.3.14 (p. 190) named "theta".
+    # As recommended there, its value is fixed at 0.01.
+    UPDATE_COEFF = 0.01
+
+    EPS = np.finfo(float).eps
+
+    def __init__(self, x, fun, jac, hess, hessp=None,
+                 k_easy=0.1, k_hard=0.2):
+
+        super().__init__(x, fun, jac, hess)
+
+        # When the trust-region shrinks in two consecutive
+        # calculations (``tr_radius < previous_tr_radius``)
+        # the lower bound ``lambda_lb`` may be reused,
+        # facilitating convergence. To indicate that no
+        # previous value is known at first, ``previous_tr_radius``
+        # is set to -1 and ``lambda_lb`` to None.
+        self.previous_tr_radius = -1
+        self.lambda_lb = None
+
+        self.niter = 0
+
+        # ``k_easy`` and ``k_hard`` are parameters used
+        # to determine the stop criteria of the iterative
+        # subproblem solver. Take a look at pp. 194-197
+        # of reference [1]_ for a more detailed description.
+        self.k_easy = k_easy
+        self.k_hard = k_hard
+
+        # Get LAPACK function for Cholesky decomposition.
+        # The implemented SciPy wrapper does not return
+        # the incomplete factorization needed by the method.
+        self.cholesky, = get_lapack_funcs(('potrf',), (self.hess,))
+
+        # Get info about Hessian
+        self.dimension = len(self.hess)
+        self.hess_gershgorin_lb,\
+            self.hess_gershgorin_ub = gershgorin_bounds(self.hess)
+        self.hess_inf = norm(self.hess, np.inf)
+        self.hess_fro = norm(self.hess, 'fro')
+
+        # A constant such that, for vectors smaller than it,
+        # backward substitution is not reliable. It was established
+        # based on Golub, G. H., Van Loan, C. F. (2013).
+        # "Matrix computations". Fourth Edition. JHU Press, p. 165.
+        self.CLOSE_TO_ZERO = self.dimension * self.EPS * self.hess_inf
+
+    def _initial_values(self, tr_radius):
+        """Given a trust radius, return a good initial guess for
+        the damping factor, the lower bound and the upper bound.
+        The values were chosen according to the guidelines of
+        section 7.3.8 (p. 192) from [1]_.
+        """
+
+        # Upper bound for the damping factor
+        lambda_ub = max(0, self.jac_mag/tr_radius + min(-self.hess_gershgorin_lb,
+                                                        self.hess_fro,
+                                                        self.hess_inf))
+
+        # Lower bound for the damping factor
+        lambda_lb = max(0, -min(self.hess.diagonal()),
+                        self.jac_mag/tr_radius - min(self.hess_gershgorin_ub,
+                                                     self.hess_fro,
+                                                     self.hess_inf))
+
+        # Improve bounds with previous info
+        if tr_radius < self.previous_tr_radius:
+            lambda_lb = max(self.lambda_lb, lambda_lb)
+
+        # Initial guess for the damping factor
+        if lambda_lb == 0:
+            lambda_initial = 0
+        else:
+            lambda_initial = max(np.sqrt(lambda_lb * lambda_ub),
+                                 lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb))
+
+        return lambda_initial, lambda_lb, lambda_ub
+
+    def solve(self, tr_radius):
+        """Solve quadratic subproblem"""
+
+        lambda_current, lambda_lb, lambda_ub = self._initial_values(tr_radius)
+        n = self.dimension
+        hits_boundary = True
+        already_factorized = False
+        self.niter = 0
+
+        while True:
+
+            # Compute Cholesky factorization
+            if already_factorized:
+                already_factorized = False
+            else:
+                H = self.hess+lambda_current*np.eye(n)
+                U, info = self.cholesky(H, lower=False,
+                                        overwrite_a=False,
+                                        clean=True)
+
+            self.niter += 1
+
+            # Check if factorization succeeded
+            if info == 0 and self.jac_mag > self.CLOSE_TO_ZERO:
+                # Successful factorization
+
+                # Solve `U.T U p = -g` for the step `p`
+                p = cho_solve((U, False), -self.jac)
+
+                p_norm = norm(p)
+
+                # Check for interior convergence
+                if p_norm <= tr_radius and lambda_current == 0:
+                    hits_boundary = False
+                    break
+
+                # Solve `U.T w = p`
+                w = solve_triangular(U, p, trans='T')
+
+                w_norm = norm(w)
+
+                # Compute the Newton update of the damping factor
+                # according to formula (4.44) p.87 from ref [2]_.
+                delta_lambda = (p_norm/w_norm)**2 * (p_norm-tr_radius)/tr_radius
+                lambda_new = lambda_current + delta_lambda
+
+                if p_norm < tr_radius:  # Inside boundary
+                    s_min, z_min = estimate_smallest_singular_value(U)
+
+                    ta, tb = self.get_boundaries_intersections(p, z_min,
+                                                               tr_radius)
+
+                    # Choose `step_len` with the smallest magnitude.
+                    # The reason for this choice is explained at
+                    # ref [3]_, p. 6 (immediately before the formula
+                    # for `tau`).
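+                    # `ta` and `tb` are the two roots of
+                    # ||p + t*z_min|| = tr_radius, so the corrected step
+                    # `p + step_len*z_min` lies exactly on the trust-region
+                    # boundary. This handles the (nearly) hard case, in which
+                    # the Newton step is interior and the step must be
+                    # completed along an approximate eigenvector of the
+                    # smallest eigenvalue of ``H + lambda*I`` (here, the
+                    # estimated smallest singular vector of `U`).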
+                    step_len = min([ta, tb], key=abs)
+
+                    # Compute the quadratic term  (p.T*H*p)
+                    quadratic_term = np.dot(p, np.dot(H, p))
+
+                    # Check stop criteria
+                    relative_error = ((step_len**2 * s_min**2)
+                                      / (quadratic_term + lambda_current*tr_radius**2))
+                    if relative_error <= self.k_hard:
+                        p += step_len * z_min
+                        break
+
+                    # Update uncertainty bounds
+                    lambda_ub = lambda_current
+                    lambda_lb = max(lambda_lb, lambda_current - s_min**2)
+
+                    # Compute Cholesky factorization
+                    H = self.hess + lambda_new*np.eye(n)
+                    c, info = self.cholesky(H, lower=False,
+                                            overwrite_a=False,
+                                            clean=True)
+
+                    # Check if the factorization has succeeded
+                    if info == 0:  # Successful factorization
+                        # Update damping factor
+                        lambda_current = lambda_new
+                        already_factorized = True
+                    else:  # Unsuccessful factorization
+                        # Update uncertainty bounds
+                        lambda_lb = max(lambda_lb, lambda_new)
+
+                        # Update damping factor
+                        lambda_current = max(
+                            np.sqrt(lambda_lb * lambda_ub),
+                            lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb)
+                        )
+
+                else:  # Outside boundary
+                    # Check stop criteria
+                    relative_error = abs(p_norm - tr_radius) / tr_radius
+                    if relative_error <= self.k_easy:
+                        break
+
+                    # Update uncertainty bounds
+                    lambda_lb = lambda_current
+
+                    # Update damping factor
+                    lambda_current = lambda_new
+
+            elif info == 0 and self.jac_mag <= self.CLOSE_TO_ZERO:
+                # jac_mag very close to zero
+
+                # Check for interior convergence
+                if lambda_current == 0:
+                    p = np.zeros(n)
+                    hits_boundary = False
+                    break
+
+                s_min, z_min = estimate_smallest_singular_value(U)
+                step_len = tr_radius
+
+                # Check stop criteria
+                if (step_len**2 * s_min**2
+                        <= self.k_hard * lambda_current * tr_radius**2):
+                    p = step_len * z_min
+                    break
+
+                # Update uncertainty bounds
+                lambda_ub = lambda_current
+                lambda_lb = max(lambda_lb, lambda_current - s_min**2)
+
+                # Update damping factor
+                lambda_current = max(
+                    np.sqrt(lambda_lb * lambda_ub),
+                    lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb)
+                )
+
+            else:  # Unsuccessful factorization
+
+                # Compute auxiliary terms
+                delta, v = singular_leading_submatrix(H, U, info)
+                v_norm = norm(v)
+
+                # Update uncertainty interval
+                lambda_lb = max(lambda_lb, lambda_current + delta/v_norm**2)
+
+                # Update damping factor
+                lambda_current = max(
+                    np.sqrt(lambda_lb * lambda_ub),
+                    lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb)
+                )
+
+        self.lambda_lb = lambda_lb
+        self.lambda_current = lambda_current
+        self.previous_tr_radius = tr_radius
+
+        return p, hits_boundary
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_zeros.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_zeros.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..7af3971d3d8f44903d9da2be40e4e414cbf6463d
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_zeros.cpython-310-x86_64-linux-gnu.so differ
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/cobyla.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/cobyla.py
new file mode 100644
index 0000000000000000000000000000000000000000..10e2b6a101a386f318a39e00ab2a46eae5219066
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/cobyla.py
@@ -0,0 +1,23 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.optimize` namespace for importing the functions
+# included below.
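+#
+# Usage note (illustrative sketch, not part of the upstream file): accessing
+# any name listed in ``__all__`` through this legacy module is expected to
+# emit a DeprecationWarning and forward to the private implementation
+# module, e.g.::
+#
+#     import warnings
+#     with warnings.catch_warnings(record=True) as w:
+#         warnings.simplefilter("always")
+#         from scipy.optimize.cobyla import fmin_cobyla  # DeprecationWarning
+#
+# New code should import from ``scipy.optimize`` directly, e.g.
+# ``from scipy.optimize import fmin_cobyla``.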
+ +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'OptimizeResult', + 'RLock', + 'fmin_cobyla', + 'functools', + 'izip', + 'synchronized', +] + +def __dir__(): + return __all__ + +def __getattr__(name): + return _sub_module_deprecation(sub_package="optimize", module="cobyla", + private_modules=["_cobyla_py"], all=__all__, + attribute=name) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/cython_optimize/__init__.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/cython_optimize/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a07250bbeb06542721480c42005307992558fced --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/cython_optimize/__init__.py @@ -0,0 +1,133 @@ +""" +Cython optimize root finding API +================================ +The underlying C functions for the following root finders can be accessed +directly using Cython: + +- `~scipy.optimize.bisect` +- `~scipy.optimize.ridder` +- `~scipy.optimize.brenth` +- `~scipy.optimize.brentq` + +The Cython API for the root finding functions is similar except there is no +``disp`` argument. Import the root finding functions using ``cimport`` from +`scipy.optimize.cython_optimize`. :: + + from scipy.optimize.cython_optimize cimport bisect, ridder, brentq, brenth + + +Callback signature +------------------ +The zeros functions in `~scipy.optimize.cython_optimize` expect a callback that +takes a double for the scalar independent variable as the 1st argument and a +user defined ``struct`` with any extra parameters as the 2nd argument. :: + + double (*callback_type)(double, void*) noexcept + + +Examples +-------- +Usage of `~scipy.optimize.cython_optimize` requires Cython to write callbacks +that are compiled into C. For more information on compiling Cython, see the +`Cython Documentation `_. + +These are the basic steps: + +1. Create a Cython ``.pyx`` file, for example: ``myexample.pyx``. +2. Import the desired root finder from `~scipy.optimize.cython_optimize`. +3. Write the callback function, and call the selected root finding function + passing the callback, any extra arguments, and the other solver + parameters. :: + + from scipy.optimize.cython_optimize cimport brentq + + # import math from Cython + from libc cimport math + + myargs = {'C0': 1.0, 'C1': 0.7} # a dictionary of extra arguments + XLO, XHI = 0.5, 1.0 # lower and upper search boundaries + XTOL, RTOL, MITR = 1e-3, 1e-3, 10 # other solver parameters + + # user-defined struct for extra parameters + ctypedef struct test_params: + double C0 + double C1 + + + # user-defined callback + cdef double f(double x, void *args) noexcept: + cdef test_params *myargs = args + return myargs.C0 - math.exp(-(x - myargs.C1)) + + + # Cython wrapper function + cdef double brentq_wrapper_example(dict args, double xa, double xb, + double xtol, double rtol, int mitr): + # Cython automatically casts dictionary to struct + cdef test_params myargs = args + return brentq( + f, xa, xb, &myargs, xtol, rtol, mitr, NULL) + + + # Python function + def brentq_example(args=myargs, xa=XLO, xb=XHI, xtol=XTOL, rtol=RTOL, + mitr=MITR): + '''Calls Cython wrapper from Python.''' + return brentq_wrapper_example(args, xa, xb, xtol, rtol, mitr) + +4. If you want to call your function from Python, create a Cython wrapper, and + a Python function that calls the wrapper, or use ``cpdef``. Then, in Python, + you can import and run the example. 
:: + + from myexample import brentq_example + + x = brentq_example() + # 0.6999942848231314 + +5. Create a Cython ``.pxd`` file if you need to export any Cython functions. + + +Full output +----------- +The functions in `~scipy.optimize.cython_optimize` can also copy the full +output from the solver to a C ``struct`` that is passed as its last argument. +If you don't want the full output, just pass ``NULL``. The full output +``struct`` must be type ``zeros_full_output``, which is defined in +`scipy.optimize.cython_optimize` with the following fields: + +- ``int funcalls``: number of function calls +- ``int iterations``: number of iterations +- ``int error_num``: error number +- ``double root``: root of function + +The root is copied by `~scipy.optimize.cython_optimize` to the full output +``struct``. An error number of -1 means a sign error, -2 means a convergence +error, and 0 means the solver converged. Continuing from the previous example:: + + from scipy.optimize.cython_optimize cimport zeros_full_output + + + # cython brentq solver with full output + cdef zeros_full_output brentq_full_output_wrapper_example( + dict args, double xa, double xb, double xtol, double rtol, + int mitr): + cdef test_params myargs = args + cdef zeros_full_output my_full_output + # use my_full_output instead of NULL + brentq(f, xa, xb, &myargs, xtol, rtol, mitr, &my_full_output) + return my_full_output + + + # Python function + def brent_full_output_example(args=myargs, xa=XLO, xb=XHI, xtol=XTOL, + rtol=RTOL, mitr=MITR): + '''Returns full output''' + return brentq_full_output_wrapper_example(args, xa, xb, xtol, rtol, + mitr) + + result = brent_full_output_example() + # {'error_num': 0, + # 'funcalls': 6, + # 'iterations': 5, + # 'root': 0.6999942848231314} +""" diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/cython_optimize/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/cython_optimize/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c7f20bbab00d54c047a7d0ffa110b8758738eb48 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/cython_optimize/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/lbfgsb.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/lbfgsb.py new file mode 100644 index 0000000000000000000000000000000000000000..75b395d27396d22c8cbf50a229e5f04c40237171 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/lbfgsb.py @@ -0,0 +1,29 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.optimize` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'LbfgsInvHessProduct', + 'LinearOperator', + 'MemoizeJac', + 'OptimizeResult', + 'array', + 'asarray', + 'float64', + 'fmin_l_bfgs_b', + 'old_bound_to_new', + 'zeros', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="optimize", module="lbfgsb", + private_modules=["_lbfgsb_py"], all=__all__, + attribute=name) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/linesearch.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/linesearch.py new file mode 100644 index 0000000000000000000000000000000000000000..0d1a04d83ba603cf9162e2cae1a6b8853a538c5f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/linesearch.py @@ -0,0 +1,30 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.optimize` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'LineSearchWarning', + 'line_search', + 'line_search_BFGS', + 'line_search_armijo', + 'line_search_wolfe1', + 'line_search_wolfe2', + 'minpack2', + 'scalar_search_armijo', + 'scalar_search_wolfe1', + 'scalar_search_wolfe2', + 'warn', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="optimize", module="linesearch", + private_modules=["_linesearch"], all=__all__, + attribute=name) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/minpack.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/minpack.py new file mode 100644 index 0000000000000000000000000000000000000000..b815dec171af576a84cb5b334d118fda7b576210 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/minpack.py @@ -0,0 +1,52 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.optimize` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'LEASTSQ_FAILURE', + 'LEASTSQ_SUCCESS', + 'LinAlgError', + 'OptimizeResult', + 'OptimizeWarning', + 'asarray', + 'atleast_1d', + 'check_gradient', + 'cholesky', + 'curve_fit', + 'dot', + 'dtype', + 'error', + 'eye', + 'finfo', + 'fixed_point', + 'fsolve', + 'greater', + 'inexact', + 'inf', + 'inv', + 'issubdtype', + 'least_squares', + 'leastsq', + 'prepare_bounds', + 'prod', + 'shape', + 'solve_triangular', + 'svd', + 'take', + 'transpose', + 'triu', + 'zeros', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="optimize", module="minpack", + private_modules=["_minpack_py"], all=__all__, + attribute=name) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/minpack2.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/minpack2.py new file mode 100644 index 0000000000000000000000000000000000000000..6e961f42403a43c39fbf1670827ecd648b8b3987 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/minpack2.py @@ -0,0 +1,21 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.optimize` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'dcsrch', + 'dcstep', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="optimize", module="minpack2", + private_modules=["_minpack2"], all=__all__, + attribute=name) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/moduleTNC.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/moduleTNC.py new file mode 100644 index 0000000000000000000000000000000000000000..3fc5884ed5c39437b7681395419d641443a1fdb8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/moduleTNC.py @@ -0,0 +1,19 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.optimize` namespace for importing the functions +# included below. + + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="optimize", module="moduleTNC", + private_modules=["_moduleTNC"], all=__all__, + attribute=name) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/optimize.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/optimize.py new file mode 100644 index 0000000000000000000000000000000000000000..81e78d097a752e4ab4db404ed2741495c395981c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/optimize.py @@ -0,0 +1,60 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.optimize` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'Brent', + 'FD_METHODS', + 'LineSearchWarning', + 'MapWrapper', + 'MemoizeJac', + 'OptimizeResult', + 'OptimizeWarning', + 'ScalarFunction', + 'approx_derivative', + 'approx_fhess_p', + 'approx_fprime', + 'argmin', + 'asarray', + 'atleast_1d', + 'bracket', + 'brent', + 'brute', + 'check_grad', + 'check_random_state', + 'eye', + 'fmin', + 'fmin_bfgs', + 'fmin_cg', + 'fmin_ncg', + 'fmin_powell', + 'fminbound', + 'golden', + 'line_search', + 'line_search_wolfe1', + 'line_search_wolfe2', + 'rosen', + 'rosen_der', + 'rosen_hess', + 'rosen_hess_prod', + 'shape', + 'show_options', + 'sqrt', + 'squeeze', + 'sys', + 'vecnorm', + 'zeros', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="optimize", module="optimize", + private_modules=["_optimize"], all=__all__, + attribute=name) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/slsqp.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/slsqp.py new file mode 100644 index 0000000000000000000000000000000000000000..c225c3cbef7e5bdb733fc9aef878507e957b2a9d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/slsqp.py @@ -0,0 +1,37 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.optimize` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'OptimizeResult', + 'append', + 'approx_derivative', + 'approx_jacobian', + 'array', + 'atleast_1d', + 'concatenate', + 'exp', + 'finfo', + 'fmin_slsqp', + 'inf', + 'isfinite', + 'linalg', + 'old_bound_to_new', + 'slsqp', + 'sqrt', + 'vstack', + 'zeros', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="optimize", module="slsqp", + private_modules=["_slsqp_py"], all=__all__, + attribute=name) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/stats/_sobol_direction_numbers.npz b/llmeval-env/lib/python3.10/site-packages/scipy/stats/_sobol_direction_numbers.npz new file mode 100644 index 0000000000000000000000000000000000000000..44f1f1e9ebd1eb188289ca9adb8027855c1a23b6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/stats/_sobol_direction_numbers.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4859931147d42ce465b8605cb277f957d98b839d03194fdf06579357906d193b +size 589334 diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/data/jf_skew_t_gamlss_pdf_data.npy b/llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/data/jf_skew_t_gamlss_pdf_data.npy new file mode 100644 index 0000000000000000000000000000000000000000..721749bcd853fa5c5efe5a1f5ba6e105658395dc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/data/jf_skew_t_gamlss_pdf_data.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:254d2dee4a4d547b9331c60243c6fcfcaffd26c8b104d08d4f6045a7645b3bba +size 4064 diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/data/levy_stable/stable-Z1-cdf-sample-data.npy b/llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/data/levy_stable/stable-Z1-cdf-sample-data.npy new file mode 100644 index 0000000000000000000000000000000000000000..adda664a7b5442fc0977ddbaa572c864ddd31f08 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/data/levy_stable/stable-Z1-cdf-sample-data.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cf18c1f2d65a232bf2c7121282df31bf2a8be827afafc4ed810ed37457ee898a +size 183728 diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/data/rel_breitwigner_pdf_sample_data_ROOT.npy b/llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/data/rel_breitwigner_pdf_sample_data_ROOT.npy new file mode 100644 index 0000000000000000000000000000000000000000..80dde74dcda9a23dcdbf9a2f677eb9c98337b0a7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/data/rel_breitwigner_pdf_sample_data_ROOT.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eef4dc702dd8c6e31c18c74e1f81284c3e9ca2ab50282de39c9ad30b7bb8e76d +size 38624
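For reference, the ``IterativeSubproblem`` solver added in ``_trustregion_exact.py`` above is reached through `scipy.optimize.minimize` with ``method='trust-exact'``, which requires both ``jac`` and ``hess``. A minimal sketch using the Rosenbrock helpers shipped with SciPy (illustrative only, not part of the diff above)::

    import numpy as np
    from scipy.optimize import minimize, rosen, rosen_der, rosen_hess

    x0 = np.array([-1.2, 1.0])
    res = minimize(rosen, x0, method='trust-exact',
                   jac=rosen_der, hess=rosen_hess)
    print(res.x)  # expected to converge near [1., 1.]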