Each record below lists six fields: code (string), signature (string), docstring (string), loss_without_docstring (float64), loss_with_docstring (float64), factor (float64).
return numpy.log10(evaluation.evaluate_bound( dist, 10**xloc, cache=cache))
def _bnd(self, xloc, dist, cache)
Distribution bounds.
11.143881
10.871576
1.025047
dim = len(dist)
if poly.dim < dim:
    poly = chaospy.poly.setdim(poly, len(dist))

zero = [1]*dim
out = numpy.zeros((dim,) + poly.shape, dtype=float)
V = Var(poly, dist, **kws)
for i in range(dim):
    zero[i] = 0
    out[i] = ((V-Var(E_cond(poly, zero, dist, **kws), dist, **kws))
              / (V+(V == 0))**(V != 0))
    zero[i] = 1
return out
def Sens_t(poly, dist, **kws)
Variance-based decomposition AKA Sobol' indices. Total effect sensitivity index.

Args:
    poly (Poly):
        Polynomial to find total effect sensitivity indices on.
    dist (Dist):
        The distributions of the input used in ``poly``.

Returns:
    (numpy.ndarray):
        Total effect sensitivity indices for each parameter in ``poly``,
        with shape ``(len(dist),) + poly.shape``.

Examples:
    >>> x, y = chaospy.variable(2)
    >>> poly = chaospy.Poly([1, x, y, 10*x*y])
    >>> dist = chaospy.Iid(chaospy.Uniform(0, 1), 2)
    >>> indices = chaospy.Sens_t(poly, dist)
    >>> print(indices)
    [[0.         1.         0.         0.57142857]
     [0.         0.         1.         0.57142857]]
4.725328
5.008917
0.943383
for key in kwargs:
    assert key in LEGAL_ATTRS, "{} is not legal input".format(key)

if parent is not None:
    for key, value in LEGAL_ATTRS.items():
        if key not in kwargs and hasattr(parent, value):
            kwargs[key] = getattr(parent, value)

assert "cdf" in kwargs, "cdf function must be defined"
assert "bnd" in kwargs, "bnd function must be defined"

if "str" in kwargs and isinstance(kwargs["str"], str):
    string = kwargs.pop("str")
    kwargs["str"] = lambda *args, **kwargs: string

defaults = defaults if defaults else {}
for key in defaults:
    assert key in LEGAL_ATTRS, "invalid default value {}".format(key)

def custom_distribution(**kws):
    prm = defaults.copy()
    prm.update(kws)
    dist = Dist(**prm)
    for key, function in kwargs.items():
        attr_name = LEGAL_ATTRS[key]
        setattr(dist, attr_name, types.MethodType(function, dist))
    return dist

if "doc" in kwargs:
    custom_distribution.__doc__ = kwargs["doc"]

return custom_distribution
def construct(parent=None, defaults=None, **kwargs)
Random variable constructor.

Args:
    cdf:
        Cumulative distribution function. Optional if ``parent`` is used.
    bnd:
        Boundary interval. Optional if ``parent`` is used.
    parent (Dist):
        Distribution used as basis for new distribution. Any other
        argument that is omitted will instead take its function from
        ``parent``.
    doc (str):
        Documentation for the distribution.
    str (str, :py:data:typing.Callable):
        Pretty print of the variable.
    pdf:
        Probability density function.
    ppf:
        Point percentile function.
    mom:
        Raw moment generator.
    ttr:
        Three terms recursion coefficient generator.
    init:
        Custom initialiser method.
    defaults (dict):
        Default values to provide to initialiser.

Returns:
    (Dist): New custom distribution.
2.917506
2.807802
1.039071
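A rough, untested usage sketch of the constructor above, assuming it is exposed as ``chaospy.construct`` (the uniform cdf/bnd pair here is purely illustrative):

    # hypothetical custom uniform-on-[0, 1] distribution
    Uniform01 = chaospy.construct(
        cdf=lambda self, x_data: x_data,   # F(x) = x on [0, 1]
        bnd=lambda self, x_data: (0, 1),   # (lower, upper) support bounds
    )
    dist = Uniform01()
    dist.fwd([0.25, 0.75])  # should evaluate the cdf at the given locations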
return evaluation.evaluate_density( dist, numpy.arctan(x), cache=cache)/(1+x*x)
def _pdf(self, x, dist, cache)
Probability density function.
11.441137
11.465957
0.997835
orth = chaospy.poly.Poly(orth)
nodes = numpy.asfarray(nodes)
weights = numpy.asfarray(weights)

if callable(solves):
    solves = [solves(node) for node in nodes.T]
solves = numpy.asfarray(solves)

shape = solves.shape
solves = solves.reshape(weights.size, int(solves.size/weights.size))

ovals = orth(*nodes)
vals1 = [(val*solves.T*weights).T for val in ovals]

if norms is None:
    norms = numpy.sum(ovals**2*weights, -1)
else:
    norms = numpy.array(norms).flatten()
    assert len(norms) == len(orth)

coefs = (numpy.sum(vals1, 1).T/norms).T
coefs = coefs.reshape(len(coefs), *shape[1:])
approx_model = chaospy.poly.transpose(chaospy.poly.sum(orth*coefs.T, -1))

if retall:
    return approx_model, coefs
return approx_model
def fit_quadrature(orth, nodes, weights, solves, retall=False, norms=None, **kws)
Using spectral projection to create a polynomial approximation over
distribution space.

Args:
    orth (chaospy.poly.base.Poly):
        Orthogonal polynomial expansion. Must be orthogonal for the
        approximation to be accurate.
    nodes (numpy.ndarray):
        Where to evaluate the polynomial expansion and model to
        approximate. ``nodes.shape == (D, K)`` where ``D`` is the number
        of dimensions and ``K`` is the number of nodes.
    weights (numpy.ndarray):
        Weights when doing numerical integration.
        ``weights.shape == (K,)`` must hold.
    solves (numpy.ndarray, :py:data:typing.Callable):
        The model evaluation to approximate. If ``numpy.ndarray`` is
        provided, it must have ``len(solves) == K``. If callable, it must
        take a single argument ``X`` with ``len(X) == D``, and return a
        consistent numpy-compatible shape.
    norms (numpy.ndarray):
        In the case of TTR, using the coefficients to estimate the
        polynomial norm is more stable than manual calculation.
        Calculated using quadrature if not provided.
        ``norms.shape == (len(orth),)`` must hold.

Returns:
    (chaospy.poly.base.Poly):
        Fitted model approximation in the form of a polynomial.
3.146378
2.938619
1.070699
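A short end-to-end sketch of the spectral-projection workflow above, using the same ``orth_ttr``/``generate_quadrature``/``fit_quadrature`` chain as the tutorial script later in this section (the model and orders are illustrative):

    distribution = chaospy.Normal(0, 1)
    orth = chaospy.orth_ttr(4, distribution)
    nodes, weights = chaospy.generate_quadrature(4, distribution, rule="G")
    evals = [numpy.sin(node[0]) for node in nodes.T]   # hypothetical model
    approx = chaospy.fit_quadrature(orth, nodes, weights, evals)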
if not isinstance(order, int):
    orders = numpy.array(order).flatten()
    dim = orders.size
    m_order = int(numpy.min(orders))
    skew = [order-m_order for order in orders]
    return sparse_grid(func, m_order, dim, skew)

abscissas, weights = [], []
bindex = chaospy.bertran.bindex(order-dim+1, order, dim)

if skew is None:
    skew = numpy.zeros(dim, dtype=int)
else:
    skew = numpy.array(skew, dtype=int)
    assert len(skew) == dim

for idx in range(
        chaospy.bertran.terms(order, dim)
        - chaospy.bertran.terms(order-dim, dim)):
    idb = bindex[idx]
    abscissa, weight = func(skew+idb)
    weight *= (-1)**(order-sum(idb))*comb(dim-1, order-sum(idb))
    abscissas.append(abscissa)
    weights.append(weight)

abscissas = numpy.concatenate(abscissas, 1)
weights = numpy.concatenate(weights, 0)

abscissas = numpy.around(abscissas, 15)
order = numpy.lexsort(tuple(abscissas))
abscissas = abscissas.T[order].T
weights = weights[order]

# identify non-unique terms
diff = numpy.diff(abscissas.T, axis=0)
unique = numpy.ones(len(abscissas.T), bool)
unique[1:] = (diff != 0).any(axis=1)

# merge duplicate nodes
length = len(weights)
idx = 1
while idx < length:
    while idx < length and unique[idx]:
        idx += 1
    idy = idx+1
    while idy < length and not unique[idy]:
        idy += 1
    if idy-idx > 1:
        weights[idx-1] = numpy.sum(weights[idx-1:idy])
    idx = idy+1

abscissas = abscissas[:, unique]
weights = weights[unique]

return abscissas, weights
def sparse_grid(func, order, dim=None, skew=None)
Smolyak sparse grid constructor.

Args:
    func (:py:data:typing.Callable):
        Function that takes a single argument ``order`` of type
        ``numpy.ndarray`` with ``order.shape == (dim,)``.
    order (int, numpy.ndarray):
        The order of the grid. If ``numpy.ndarray``, it overrides both
        ``dim`` and ``skew``.
    dim (int):
        Number of dimensions.
    skew (list):
        Order skewness.
2.760751
2.802157
0.985224
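For illustration, a hedged sketch of a ``func`` argument that the constructor above would accept: a per-dimension Gauss-Legendre tensor rule on [0, 1]. The helper name ``gauss_rule`` is hypothetical, not part of the library:

    def gauss_rule(order):
        # per-dimension Gauss-Legendre nodes/weights, mapped from [-1, 1] to [0, 1]
        rules = [numpy.polynomial.legendre.leggauss(int(o)+1) for o in order]
        grids = numpy.meshgrid(*[0.5*(absc+1) for absc, _ in rules], indexing="ij")
        abscissas = numpy.array([grid.flatten() for grid in grids])
        weights = numpy.prod(numpy.meshgrid(
            *[0.5*wgts for _, wgts in rules], indexing="ij"), 0).flatten()
        return abscissas, weights

    abscissas, weights = sparse_grid(gauss_rule, order=3, dim=2)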
assert len(x_data) == len(distribution)
assert len(x_data.shape) == 2

cache = cache if cache is not None else {}

parameters = load_parameters(
    distribution, "_bnd", parameters=parameters, cache=cache)

out = numpy.zeros((2,) + x_data.shape)
lower, upper = distribution._bnd(x_data.copy(), **parameters)
out.T[:, :, 0] = numpy.asfarray(lower).T
out.T[:, :, 1] = numpy.asfarray(upper).T

cache[distribution] = out

return out
def evaluate_bound( distribution, x_data, parameters=None, cache=None, )
Evaluate lower and upper bounds.

Args:
    distribution (Dist):
        Distribution to evaluate.
    x_data (numpy.ndarray):
        Locations where the bounds are evaluated. Relevant in the case of
        multivariate distributions where the bounds are affected by the
        output of other distributions.
    parameters (:py:data:typing.Any):
        Collection of parameters to override the default ones in the
        distribution.
    cache (:py:data:typing.Any):
        A collection of previous calculations in case the same
        distribution turns up on more than one occasion.

Returns:
    The lower and upper bounds of ``distribution`` at location ``x_data``
    using parameters ``parameters``.
3.554734
3.421045
1.039079
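The public ``Dist.range`` method is the user-facing entry to this evaluation; the example below mirrors the ``Pow`` docstring further down in this section:

    >>> print(chaospy.Uniform().range([-2, 0, 2, 4]))
    [[0. 0. 0. 0.]
     [1. 1. 1. 1.]]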
haspoly = sum([isinstance(arg, Poly) for arg in args])

# Numpy
if not haspoly:
    return numpy.sum(numpy.prod(args, 0), 0)

# Poly
out = args[0]
for arg in args[1:]:
    out = out * arg
return sum(out)
def inner(*args)
Inner product of a polynomial set.

Args:
    args (chaospy.poly.base.Poly):
        The polynomials to perform inner product on.

Returns:
    (chaospy.poly.base.Poly):
        Resulting polynomial.

Examples:
    >>> x, y = cp.variable(2)
    >>> P = cp.Poly([x-1, y])
    >>> Q = cp.Poly([x+1, x*y])
    >>> print(cp.inner(P, Q))
    q0^2+q0q1^2-1
    >>> x = numpy.arange(4)
    >>> print(cp.inner(x, x))
    14
4.71994
7.181394
0.657246
if len(args) > 2:
    part1 = args[0]
    part2 = outer(*args[1:])
elif len(args) == 2:
    part1, part2 = args
else:
    return args[0]

dtype = chaospy.poly.typing.dtyping(part1, part2)

if dtype in (list, tuple, numpy.ndarray):
    part1 = numpy.array(part1)
    part2 = numpy.array(part2)
    shape = part1.shape + part2.shape
    return numpy.outer(
        chaospy.poly.shaping.flatten(part1),
        chaospy.poly.shaping.flatten(part2),
    )

if dtype == Poly:
    if isinstance(part1, Poly) and isinstance(part2, Poly):
        if (1,) in (part1.shape, part2.shape):
            return part1*part2
        shape = part1.shape+part2.shape
        out = []
        for _ in chaospy.poly.shaping.flatten(part1):
            out.append(part2*_)
        return chaospy.poly.shaping.reshape(Poly(out), shape)

    if isinstance(part1, (int, float, list, tuple)):
        part2, part1 = numpy.array(part1), part2
    else:
        part2 = numpy.array(part2)

    core_old = part1.A
    core_new = {}
    for key in part1.keys:
        core_new[key] = outer(core_old[key], part2)

    shape = part1.shape+part2.shape
    dtype = chaospy.poly.typing.dtyping(part1.dtype, part2.dtype)
    return Poly(core_new, part1.dim, shape, dtype)

raise NotImplementedError
def outer(*args)
Polynomial outer product.

Args:
    P1 (chaospy.poly.base.Poly, numpy.ndarray):
        First term in outer product.
    P2 (chaospy.poly.base.Poly, numpy.ndarray):
        Second term in outer product.

Returns:
    (chaospy.poly.base.Poly):
        Poly set with same dimensions as the input.

Examples:
    >>> x = cp.variable()
    >>> P = cp.prange(3)
    >>> print(P)
    [1, q0, q0^2]
    >>> print(cp.outer(x, P))
    [q0, q0^2, q0^3]
    >>> print(cp.outer(P, P))
    [[1, q0, q0^2], [q0, q0^2, q0^3], [q0^2, q0^3, q0^4]]
2.777348
2.770744
1.002383
if not isinstance(poly1, Poly) and not isinstance(poly2, Poly):
    return numpy.dot(poly1, poly2)

poly1 = Poly(poly1)
poly2 = Poly(poly2)

poly = poly1*poly2
if numpy.prod(poly1.shape) <= 1 or numpy.prod(poly2.shape) <= 1:
    return poly
return chaospy.poly.sum(poly, 0)
def dot(poly1, poly2)
Dot product of polynomial vectors.

Args:
    poly1 (Poly):
        Left part of product.
    poly2 (Poly):
        Right part of product.

Returns:
    (Poly):
        Product of poly1 and poly2.

Examples:
    >>> poly = cp.prange(3, 1)
    >>> print(poly)
    [1, q0, q0^2]
    >>> print(cp.dot(poly, numpy.arange(3)))
    2q0^2+q0
    >>> print(cp.dot(poly, poly))
    q0^4+q0^2+1
2.772937
3.216313
0.862148
order = sorted(GENZ_KEISTER_16.keys())[order]
abscissas, weights = GENZ_KEISTER_16[order]
abscissas = numpy.array(abscissas)
weights = numpy.array(weights)

weights /= numpy.sum(weights)
abscissas *= numpy.sqrt(2)

return abscissas, weights
def quad_genz_keister_16(order)
Hermite Genz-Keister 16 rule.

Args:
    order (int):
        The quadrature order. Must be in the interval (0, 8).

Returns:
    (:py:data:typing.Tuple[numpy.ndarray, numpy.ndarray]):
        Abscissas and weights.

Examples:
    >>> abscissas, weights = quad_genz_keister_16(1)
    >>> print(numpy.around(abscissas, 4))
    [-1.7321  0.      1.7321]
    >>> print(numpy.around(weights, 4))
    [0.1667 0.6667 0.1667]
2.614681
3.60144
0.72601
output = evaluation.evaluate_density(dist, numpy.arccosh(x), cache=cache)
output /= numpy.where(x != 1, numpy.sqrt(x*x-1), numpy.inf)
return output
def _pdf(self, x, dist, cache)
Probability density function.
6.18156
6.430818
0.96124
logger = logging.getLogger(__name__)
dim = len(dist)

if isinstance(order, int):
    if order == 0:
        return chaospy.poly.Poly(1, dim=dim)
    basis = chaospy.poly.basis(
        0, order, dim, sort, cross_truncation=cross_truncation)
else:
    basis = order

basis = list(basis)
polynomials = [basis[0]]

if normed:
    for idx in range(1, len(basis)):

        # orthogonalize polynomial:
        for idy in range(idx):
            orth = chaospy.descriptives.E(
                basis[idx]*polynomials[idy], dist, **kws)
            basis[idx] = basis[idx] - polynomials[idy]*orth

        # normalize:
        norms = chaospy.descriptives.E(polynomials[-1]**2, dist, **kws)
        if norms <= 0:
            logger.warning("Warning: Polynomial cutoff at term %d", idx)
            break
        basis[idx] = basis[idx] / numpy.sqrt(norms)

        polynomials.append(basis[idx])

else:
    norms = [1.]
    for idx in range(1, len(basis)):

        # orthogonalize polynomial:
        for idy in range(idx):
            orth = chaospy.descriptives.E(
                basis[idx]*polynomials[idy], dist, **kws)
            basis[idx] = basis[idx] - polynomials[idy] * orth / norms[idy]

        norms.append(
            chaospy.descriptives.E(polynomials[-1]**2, dist, **kws))
        if norms[-1] <= 0:
            logger.warning("Warning: Polynomial cutoff at term %d", idx)
            break

        polynomials.append(basis[idx])

return chaospy.poly.Poly(polynomials, dim=dim, shape=(len(polynomials),))
def orth_gs(order, dist, normed=False, sort="GR", cross_truncation=1., **kws)
Gram-Schmidt process for generating orthogonal polynomials.

Args:
    order (int, Poly):
        The upper polynomial order. Alternatively, a custom polynomial
        basis can be used.
    dist (Dist):
        Weighting distribution(s) defining orthogonality.
    normed (bool):
        If True, orthonormal polynomials will be used instead of monic.
    sort (str):
        Ordering argument passed to poly.basis. If custom basis is used,
        argument is ignored.
    cross_truncation (float):
        Use hyperbolic cross truncation scheme to reduce the number of
        terms in expansion.

Returns:
    (Poly):
        The orthogonal polynomial expansion.

Examples:
    >>> Z = chaospy.J(chaospy.Normal(), chaospy.Normal())
    >>> print(chaospy.around(chaospy.orth_gs(2, Z), 4))
    [1.0, q1, q0, q1^2-1.0, q0q1, q0^2-1.0]
2.43097
2.45714
0.989349
from .. import baseclass

if cache is None:
    cache = {}
if parameters is None:
    parameters = {}

parameters_ = distribution.prm.copy()
parameters_.update(**parameters)
parameters = parameters_

# self aware and should handle things itself:
if contains_call_signature(getattr(distribution, method_name), "cache"):
    parameters["cache"] = cache

# dumb distribution and just wants to evaluate:
else:
    for key, value in parameters.items():
        if isinstance(value, baseclass.Dist):
            value = cache_key(value)
            if value in cache:
                parameters[key] = cache[value]
            else:
                raise baseclass.StochasticallyDependentError(
                    "evaluating under-defined distribution {}.".format(
                        distribution))

return parameters
def load_parameters( distribution, method_name, parameters=None, cache=None, cache_key=lambda x:x, )
Load parameter values by filling them in from cache.

Args:
    distribution (Dist):
        The distribution to load parameters from.
    method_name (str):
        Name of the method for where the parameters should be used.
        Typically ``"_pdf"``, ``"_cdf"`` or the like.
    parameters (:py:data:typing.Any):
        Default parameters to use if there is no cache to retrieve from.
        Use the distribution's internal parameters, if not provided.
    cache (:py:data:typing.Any):
        A dictionary containing previous evaluations from the stack. If a
        parameter contains a distribution that exists in the cache, it
        will be replaced with the cache value. If omitted, a new one will
        be created.
    cache_key (:py:data:typing.Any):
        Redefine the keys of the cache to suit other purposes.

Returns:
    Same as ``parameters``, if provided. The ``distribution`` parameters
    if not. In either case, parameters may be updated with cache values
    (if provided) or by ``cache`` if the call signature of
    ``method_name`` (on ``distribution``) contains a ``cache`` argument.
6.18969
5.913178
1.046762
order = sorted(GENZ_KEISTER_18.keys())[order]
abscissas, weights = GENZ_KEISTER_18[order]
abscissas = numpy.array(abscissas)
weights = numpy.array(weights)

weights /= numpy.sum(weights)
abscissas *= numpy.sqrt(2)

return abscissas, weights
def quad_genz_keister_18(order)
Hermite Genz-Keister 18 rule.

Args:
    order (int):
        The quadrature order. Must be in the interval (0, 8).

Returns:
    (:py:data:typing.Tuple[numpy.ndarray, numpy.ndarray]):
        Abscissas and weights.

Examples:
    >>> abscissas, weights = quad_genz_keister_18(1)
    >>> print(numpy.around(abscissas, 4))
    [-1.7321  0.      1.7321]
    >>> print(numpy.around(weights, 4))
    [0.1667 0.6667 0.1667]
2.628624
3.663012
0.717613
args = list(args)

for idx, arg in enumerate(args):
    if isinstance(arg, Poly):
        args[idx] = Poly
    elif isinstance(arg, numpy.generic):
        args[idx] = numpy.asarray(arg).dtype
    elif isinstance(arg, (float, int)):
        args[idx] = type(arg)

for type_ in DATATYPES:
    if type_ in args:
        return type_

raise ValueError(
    "dtypes not recognised " + str([str(_) for _ in args]))
def dtyping(*args)
Find least common denominator dtype.

Examples:
    >>> str(dtyping(int, float)) in ("<class 'float'>", "<type 'float'>")
    True
    >>> print(dtyping(int, Poly))
    <class 'chaospy.poly.base.Poly'>
3.590744
3.664485
0.979877
if limit is None:
    limit = 10**300

if isinstance(vari, Poly):
    core = vari.A.copy()
    for key in vari.keys:
        core[key] = core[key]*1.
    return Poly(core, vari.dim, vari.shape, float)

return numpy.asfarray(vari)
def asfloat(vari, limit=None)
Convert dtype of polynomial coefficients to float.

Example:
    >>> poly = 2*cp.variable()+1
    >>> print(poly)
    2q0+1
    >>> print(cp.asfloat(poly))
    2.0q0+1.0
5.470446
6.028703
0.9074
if isinstance(vari, Poly):
    core = vari.A.copy()
    for key in vari.keys:
        core[key] = numpy.asarray(core[key], dtype=int)
    return Poly(core, vari.dim, vari.shape, int)

return numpy.asarray(vari, dtype=int)
def asint(vari)
Convert dtype of polynomial coefficients to int.

Example:
    >>> poly = 1.5*cp.variable()+2.25
    >>> print(poly)
    1.5q0+2.25
    >>> print(cp.asint(poly))
    q0+2
5.087651
5.956754
0.854098
if isinstance(vari, Poly):
    shape = vari.shape
    out = numpy.asarray(
        [{} for _ in range(numpy.prod(shape))],
        dtype=object
    )
    core = vari.A.copy()
    for key in core.keys():
        core[key] = core[key].flatten()
        for i in range(numpy.prod(shape)):
            if not numpy.all(core[key][i] == 0):
                out[i][key] = core[key][i]

    for i in range(numpy.prod(shape)):
        out[i] = Poly(out[i], vari.dim, (), vari.dtype)

    out = out.reshape(shape)
    return out

return numpy.asarray(vari)
def toarray(vari)
Convert polynomial array into a numpy.asarray of polynomials.

Args:
    vari (Poly, numpy.ndarray):
        Input data.

Returns:
    (numpy.ndarray):
        A numpy array with ``Q.shape == A.shape``.

Examples:
    >>> poly = cp.prange(3)
    >>> print(poly)
    [1, q0, q0^2]
    >>> array = cp.toarray(poly)
    >>> print(isinstance(array, numpy.ndarray))
    True
    >>> print(array[1])
    q0
3.279006
3.255975
1.007074
output = evaluation.evaluate_density(
    dist, xloc.reshape(1, -1)).reshape(length, -1)
assert xloc.shape == output.shape
return output
def _pdf(self, xloc, dist, length, cache)
Probability density function.

Example:
    >>> print(chaospy.Iid(chaospy.Uniform(), 2).pdf(
    ...     [[0.5, 1.5], [0.5, 0.5]]))
    [1. 0.]
6.091562
13.068237
0.466135
output = evaluation.evaluate_forward(
    dist, xloc.reshape(1, -1)).reshape(length, -1)
assert xloc.shape == output.shape
return output
def _cdf(self, xloc, dist, length, cache)
Cumulative distribution function.

Example:
    >>> print(chaospy.Iid(chaospy.Uniform(0, 2), 2).fwd(
    ...     [[0.1, 0.2, 0.3], [0.2, 0.2, 0.3]]))
    [[0.05 0.1  0.15]
     [0.1  0.1  0.15]]
6.629615
10.454229
0.634156
output = evaluation.evaluate_inverse(
    dist, uloc.reshape(1, -1)).reshape(length, -1)
assert uloc.shape == output.shape
return output
def _ppf(self, uloc, dist, length, cache)
Point percentile function.

Example:
    >>> print(chaospy.Iid(chaospy.Uniform(0, 2), 2).inv(
    ...     [[0.1, 0.2, 0.3], [0.2, 0.2, 0.3]]))
    [[0.2 0.4 0.6]
     [0.4 0.4 0.6]]
6.276289
10.344861
0.606706
lower, upper = evaluation.evaluate_bound(
    dist, xloc.reshape(1, -1))
lower = lower.reshape(length, -1)
upper = upper.reshape(length, -1)
assert lower.shape == xloc.shape, (lower.shape, xloc.shape)
assert upper.shape == xloc.shape
return lower, upper
def _bnd(self, xloc, dist, length, cache)
Boundary function.

Example:
    >>> print(chaospy.Iid(chaospy.Uniform(0, 2), 2).range(
    ...     [[0.1, 0.2, 0.3], [0.2, 0.2, 0.3]]))
    [[[0. 0. 0.]
      [0. 0. 0.]]
    <BLANKLINE>
     [[2. 2. 2.]
      [2. 2. 2.]]]
3.047548
3.599135
0.846744
return numpy.prod(dist.mom(k), 0)
def _mom(self, k, dist, length, cache)
Moment generating function.

Example:
    >>> print(chaospy.Iid(chaospy.Uniform(), 2).mom(
    ...     [[0, 0, 1], [0, 1, 1]]))
    [1.   0.5  0.25]
11.222154
13.542467
0.828664
assert len(x_data) == len(distribution)
assert len(x_data.shape) == 2

cache = cache if cache is not None else {}

out = numpy.zeros(x_data.shape)

# distribution knows how to handle density evaluation itself:
if hasattr(distribution, "_pdf"):
    parameters = load_parameters(
        distribution, "_pdf", parameters=parameters, cache=cache)
    out[:] = distribution._pdf(x_data, **parameters)

# approximate density evaluation based on the cumulative distribution function:
else:
    from .. import approximation
    parameters = load_parameters(
        distribution, "_cdf", parameters=parameters, cache=cache)
    out[:] = approximation.approximate_density(
        distribution, x_data, parameters=parameters, cache=cache)

# dependency handling:
if distribution in cache:
    out = numpy.where(x_data == cache[distribution], out, 0)
else:
    cache[distribution] = x_data

return out
def evaluate_density( distribution, x_data, parameters=None, cache=None, )
Evaluate probability density function (PDF).

Args:
    distribution (Dist):
        Distribution to evaluate.
    x_data (numpy.ndarray):
        Locations where the density is evaluated.
    parameters (:py:data:typing.Any):
        Collection of parameters to override the default ones in the
        distribution.
    cache (:py:data:typing.Any):
        A collection of previous calculations in case the same
        distribution turns up on more than one occasion.

Returns:
    The probability density values of ``distribution`` at location
    ``x_data`` using parameters ``parameters``.
3.325184
3.353857
0.991451
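The public entry point is ``Dist.pdf``, which presumably routes through this helper; a small example (the output matches the ``Pow._pdf`` docstring further down):

    >>> print(chaospy.Uniform().pdf([-0.5, 0.5, 1.5, 2.5]))
    [0. 1. 0. 0.]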
if isinstance(vari, Poly):
    core = vari.A.copy()
    for key in vari.keys:
        core[key] = sum(core[key], axis)
    return Poly(core, vari.dim, None, vari.dtype)

return np.sum(vari, axis)
def sum(vari, axis=None)  # pylint: disable=redefined-builtin
Sum the components of a shapeable quantity along a given axis.

Args:
    vari (chaospy.poly.base.Poly, numpy.ndarray):
        Input data.
    axis (int):
        Axis over which the sum is taken. By default ``axis`` is None,
        and all elements are summed.

Returns:
    (chaospy.poly.base.Poly, numpy.ndarray):
        Polynomial array with same shape as ``vari``, with the specified
        axis removed. If ``vari`` is an 0-d array, or ``axis`` is None,
        a (non-iterable) component is returned.

Examples:
    >>> vari = cp.prange(3)
    >>> print(vari)
    [1, q0, q0^2]
    >>> print(cp.sum(vari))
    q0^2+q0+1
3.848225
10.085525
0.381559
if isinstance(vari, Poly):
    core = vari.A.copy()
    for key, val in core.items():
        core[key] = cumsum(val, axis)
    return Poly(core, vari.dim, None, vari.dtype)

return np.cumsum(vari, axis)
def cumsum(vari, axis=None)
Cumulative sum the components of a shapeable quantity along a given axis.

Args:
    vari (chaospy.poly.base.Poly, numpy.ndarray):
        Input data.
    axis (int):
        Axis over which the sum is taken. By default ``axis`` is None,
        and all elements are summed.

Returns:
    (chaospy.poly.base.Poly, numpy.ndarray):
        Polynomial array with same shape as ``vari``.

Examples:
    >>> poly = cp.prange(3)
    >>> print(poly)
    [1, q0, q0^2]
    >>> print(cp.cumsum(poly))
    [1, q0+1, q0^2+q0+1]
3.995687
6.065583
0.658748
if isinstance(vari, Poly):
    if axis is None:
        vari = chaospy.poly.shaping.flatten(vari)
        axis = 0

    vari = chaospy.poly.shaping.rollaxis(vari, axis)
    out = vari[0]
    for poly in vari[1:]:
        out = out*poly
    return out

return np.prod(vari, axis)
def prod(vari, axis=None)
Product of the components of a shapeable quantity along a given axis.

Args:
    vari (chaospy.poly.base.Poly, numpy.ndarray):
        Input data.
    axis (int):
        Axis over which the product is taken. By default ``axis`` is
        None, and all elements are multiplied.

Returns:
    (chaospy.poly.base.Poly, numpy.ndarray):
        Polynomial array with same shape as ``vari``, with the specified
        axis removed. If ``vari`` is an 0-d array, or ``axis`` is None,
        a (non-iterable) component is returned.

Examples:
    >>> vari = cp.prange(3)
    >>> print(vari)
    [1, q0, q0^2]
    >>> print(cp.prod(vari))
    q0^3
3.44522
3.754272
0.91768
if isinstance(vari, Poly):
    if np.prod(vari.shape) == 1:
        return vari.copy()
    if axis is None:
        vari = chaospy.poly.shaping.flatten(vari)
        axis = 0

    vari = chaospy.poly.shaping.rollaxis(vari, axis)
    out = [vari[0]]
    for poly in vari[1:]:
        out.append(out[-1]*poly)
    return Poly(out, vari.dim, vari.shape, vari.dtype)

return np.cumprod(vari, axis)
def cumprod(vari, axis=None)
Perform the cumulative product of a shapeable quantity over a given axis.

Args:
    vari (chaospy.poly.base.Poly, numpy.ndarray):
        Input data.
    axis (int):
        Axis over which the product is taken. By default ``axis`` is
        None, and all elements are multiplied.

Returns:
    (chaospy.poly.base.Poly):
        An array shaped as ``vari`` but with the specified axis removed.

Examples:
    >>> vari = cp.prange(4)
    >>> print(vari)
    [1, q0, q0^2, q0^3]
    >>> print(cp.cumprod(vari))
    [1, q0, q0^3, q0^6]
3.446298
3.655376
0.942802
from chaospy.distributions import evaluation

if len(dist) > 1 and evaluation.get_dependencies(*list(dist)):
    raise evaluation.DependencyError(
        "Leja quadrature does not support distributions with dependencies.")

if len(dist) > 1:
    if isinstance(order, int):
        out = [quad_leja(order, _) for _ in dist]
    else:
        out = [quad_leja(order[_], dist[_]) for _ in range(len(dist))]

    abscissas = [_[0][0] for _ in out]
    weights = [_[1] for _ in out]
    abscissas = chaospy.quad.combine(abscissas).T
    weights = chaospy.quad.combine(weights)
    weights = numpy.prod(weights, -1)

    return abscissas, weights

lower, upper = dist.range()
abscissas = [lower, dist.mom(1), upper]
for _ in range(int(order)):
    obj = create_objective(dist, abscissas)
    opts, vals = zip(
        *[fminbound(
            obj, abscissas[idx], abscissas[idx+1], full_output=1)[:2]
          for idx in range(len(abscissas)-1)]
    )
    index = numpy.argmin(vals)
    abscissas.insert(index+1, opts[index])

abscissas = numpy.asfarray(abscissas).flatten()[1:-1]
weights = create_weights(abscissas, dist)
abscissas = abscissas.reshape(1, abscissas.size)

return numpy.array(abscissas), numpy.array(weights)
def quad_leja(order, dist)
Generate Leja quadrature node.

Example:
    >>> abscisas, weights = quad_leja(3, chaospy.Normal(0, 1))
    >>> print(numpy.around(abscisas, 4))
    [[-2.7173 -1.4142  0.      1.7635]]
    >>> print(numpy.around(weights, 4))
    [0.022  0.1629 0.6506 0.1645]
3.07805
3.063303
1.004814
abscissas_ = numpy.array(abscissas[1:-1])

def obj(absisa):
    out = -numpy.sqrt(dist.pdf(absisa))
    out *= numpy.prod(numpy.abs(abscissas_ - absisa))
    return out

return obj
def create_objective(dist, abscissas)
Create objective function.
4.990664
4.995146
0.999103
poly = chaospy.quad.generate_stieltjes(dist, len(nodes)-1, retall=True)[0]
poly = chaospy.poly.flatten(chaospy.poly.Poly(poly))
weights_inverse = poly(nodes)
weights = numpy.linalg.inv(weights_inverse)
return weights[:, 0]
def create_weights(nodes, dist)
Create weights for the Leja method.
5.279099
5.383077
0.980684
if not scipy.sparse.issparse(mat):
    mat = numpy.asfarray(mat)
assert numpy.allclose(mat, mat.T)

size = mat.shape[0]
mat_diag = mat.diagonal()
gamma = abs(mat_diag).max()
off_diag = abs(mat - numpy.diag(mat_diag)).max()

delta = eps*max(gamma + off_diag, 1)
beta = numpy.sqrt(max(gamma, off_diag/size, eps))

lowtri = _gill_king(mat, beta, delta)

return lowtri
def gill_king(mat, eps=1e-16)
Gill-King algorithm for modified cholesky decomposition.

Args:
    mat (numpy.ndarray):
        Must be a non-singular and symmetric matrix. If sparse, the
        result will also be sparse.
    eps (float):
        Error tolerance used in algorithm.

Returns:
    (numpy.ndarray):
        Lower triangular Cholesky factor.

Examples:
    >>> mat = [[4, 2, 1], [2, 6, 3], [1, 3, -.004]]
    >>> lowtri = gill_king(mat)
    >>> print(numpy.around(lowtri, 4))
    [[2.     0.     0.    ]
     [1.     2.2361 0.    ]
     [0.5    1.118  1.2264]]
    >>> print(numpy.around(numpy.dot(lowtri, lowtri.T), 4))
    [[4.    2.    1.   ]
     [2.    6.    3.   ]
     [1.    3.    3.004]]
4.01831
4.075337
0.986007
size = mat.shape[0]

# initialize d_vec and lowtri
if scipy.sparse.issparse(mat):
    lowtri = scipy.sparse.eye(*mat.shape)
else:
    lowtri = numpy.eye(size)
d_vec = numpy.zeros(size, dtype=float)

# there are no inner for loops, everything implemented with
# vector operations for a reasonable level of efficiency
for idx in range(size):

    if idx == 0:
        # column index: all columns to left of diagonal;
        # d_vec(idz) doesn't work in case idz is empty
        idz = []
    else:
        idz = numpy.s_[:idx]

    djtemp = mat[idx, idx] - numpy.dot(
        lowtri[idx, idz], d_vec[idz]*lowtri[idx, idz].T)  # C(idx, idx) in book

    if idx < size - 1:
        idy = numpy.s_[idx+1:size]  # row index: all rows below diagonal
        ccol = mat[idy, idx] - numpy.dot(
            lowtri[idy, idz], d_vec[idz]*lowtri[idx, idz].T)  # C(idy, idx) in book
        theta = abs(ccol).max()

        # guarantees d_vec(idx) not too small and lowtri(idy, idx) not too
        # big in sufficiently positive definite case, d_vec(idx) = djtemp
        d_vec[idx] = max(abs(djtemp), (theta/beta)**2, delta)
        lowtri[idy, idx] = ccol/d_vec[idx]

    else:
        d_vec[idx] = max(abs(djtemp), delta)

# convert to usual output format: replace lowtri by lowtri*sqrt(D) and
# transpose
for idx in range(size):
    # lowtri = lowtri*diag(sqrt(d_vec)) bad in sparse case
    lowtri[:, idx] = lowtri[:, idx]*numpy.sqrt(d_vec[idx])

return lowtri
def _gill_king(mat, beta, delta)
Backend function for the Gill-King algorithm.
4.82156
4.814476
1.001471
random_state = numpy.random.get_state()
numpy.random.seed(seed)

forward = partial(evaluation.evaluate_forward, cache=cache,
                  distribution=distribution, parameters=parameters)

dim = len(distribution)
upper = numpy.ones((dim, 1))
for _ in range(100):
    indices = forward(x_data=upper) < 1
    if not numpy.any(indices):
        break
    upper[indices] *= 2

lower = -numpy.ones((dim, 1))
for _ in range(100):
    indices = forward(x_data=lower) > 0
    if not numpy.any(indices):
        break
    lower[indices] *= 2

for _ in range(iterations):
    rand = numpy.random.random(dim)
    proposal = (rand*lower.T + (1-rand)*upper.T).T
    evals = forward(x_data=proposal)
    indices0 = evals > 0
    indices1 = evals < 1

    range_ = numpy.random.choice(dim, size=dim, replace=False)

    upper_ = numpy.where(indices1, upper, evals)
    for idx in range_:
        if upper.flatten()[idx] == upper_.flatten()[idx]:
            continue
        if numpy.all(forward(x_data=upper_) == 1):
            upper = upper_
            break
        upper_[idx] = upper[idx]

    lower_ = numpy.where(indices0, lower, evals)
    for idx in range_:
        if lower.flatten()[idx] == lower_.flatten()[idx]:
            continue
        if numpy.all(forward(x_data=lower_) == 0):
            lower = lower_
            break
        lower_[idx] = lower[idx]

    if numpy.all(indices0 & indices1):
        break

else:
    raise evaluation.DependencyError(
        "Too many iterations required to find interior point.")

numpy.random.set_state(random_state)
if retall:
    return proposal, lower, upper
return proposal
def find_interior_point( distribution, parameters=None, cache=None, iterations=1000, retall=False, seed=None, )
Find interior point of the distribution where forward evaluation is
guaranteed to be both ``distribution.fwd(xloc) > 0`` and
``distribution.fwd(xloc) < 1``.

Args:
    distribution (Dist):
        Distribution to find interior point on.
    parameters (Optional[Dict[Dist, numpy.ndarray]]):
        Parameters for the distribution.
    cache (Optional[Dict[Dist, numpy.ndarray]]):
        Memory cache for the location in the evaluation so far.
    iterations (int):
        The number of iterations allowed to be performed.
    retall (bool):
        If provided, lower and upper bounds which guarantee that
        ``distribution.fwd(lower) == 0`` and
        ``distribution.fwd(upper) == 1`` are returned as well.
    seed (Optional[int]):
        Fix random seed.

Returns:
    numpy.ndarray:
        An input array with shape ``(len(distribution),)`` which is
        guaranteed to be on the interior of the probability distribution.

Example:
    >>> distribution = chaospy.MvNormal([1, 2, 3], numpy.eye(3)+.03)
    >>> midpoint, lower, upper = find_interior_point(
    ...     distribution, retall=True, seed=1234)
    >>> print(lower.T)
    [[-64. -64. -64.]]
    >>> print(numpy.around(midpoint, 4).T)
    [[  0.6784 -33.7687 -19.0182]]
    >>> print(upper.T)
    [[16. 16. 16.]]
    >>> distribution = chaospy.Uniform(1000, 1010)
    >>> midpoint, lower, upper = find_interior_point(
    ...     distribution, retall=True, seed=1234)
    >>> print(numpy.around(lower, 4))
    [[-1.]]
    >>> print(numpy.around(midpoint, 4))
    [[1009.8873]]
    >>> print(numpy.around(upper, 4))
    [[1024.]]
2.581609
2.690463
0.959541
dim = len(dist)
shape = K.shape
size = int(K.size/dim)
K = K.reshape(dim, size)

if dim > 1:
    shape = shape[1:]

X, W = quad.generate_quadrature(order, dist, rule=rule, normalize=True, **kws)

grid = numpy.mgrid[:len(X[0]), :size]
X = X.T[grid[0]].T
K = K.T[grid[1]].T
out = numpy.prod(X**K, 0)*W

if control_var is not None:
    Y = control_var.ppf(dist.fwd(X))
    mu = control_var.mom(numpy.eye(len(control_var)))

    if (mu.size == 1) and (dim > 1):
        mu = mu.repeat(dim)

    for d in range(dim):
        alpha = numpy.cov(out, Y[d])[0, 1]/numpy.var(Y[d])
        out -= alpha*(Y[d]-mu)

out = numpy.sum(out, -1)

return out
def approximate_moment( dist, K, retall=False, control_var=None, rule="F", order=1000, **kws )
Approximation method for estimation of raw statistical moments.

Args:
    dist (Dist):
        Distribution domain with dim=len(dist).
    K (numpy.ndarray):
        The exponents of the moments of interest with shape (dim, K).
    control_var (Dist):
        If provided, will be used as a control variable to try to reduce
        the error.
    acc (:py:data:typing.Optional[int]):
        The order of quadrature/MCI.
    sparse (bool):
        If True, use Smolyak's sparse grid instead of normal tensor
        product grid in numerical integration.
    rule (str):
        Quadrature rule.

        Key  Description
        ---- -----------
        "G"  Optimal Gaussian quadrature from Golub-Welsch. Slow for high
             order, and composite is ignored.
        "E"  Gauss-Legendre quadrature.
        "C"  Clenshaw-Curtis quadrature. Exponential growth rule is used
             when sparse is True to make the rule nested.

        Monte Carlo Integration

        Key  Description
        ---- -----------
        "H"  Halton sequence
        "K"  Korobov set
        "L"  Latin hypercube sampling
        "M"  Hammersley sequence
        "R"  (Pseudo-)Random sampling
        "S"  Sobol sequence
    composite (:py:data:typing.Optional[int, numpy.ndarray]):
        If provided, composite quadrature will be used. Ignored in the
        case if gaussian=True.

        If int provided, determines number of even domain splits.
        If array of ints, determines number of even domain splits along
        each axis.
        If array of arrays/floats, determines location of splits.
    antithetic (:py:data:typing.Optional[numpy.ndarray]):
        List of bool. Represents the axes to mirror using antithetic
        variable during MCI.
4.123574
4.03844
1.021081
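A hedged sketch of estimating a raw moment with the routine above (the quadrature order needed in practice is problem dependent):

    dist = chaospy.Normal(0, 1)
    second_moment = approximate_moment(dist, numpy.array([[2]]))
    # should be close to 1.0, the second raw moment of the standard normal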
if parameters is None:
    parameters = dist.prm.copy()
if cache is None:
    cache = {}

xloc = numpy.asfarray(xloc)
lo, up = numpy.min(xloc), numpy.max(xloc)
mu = .5*(lo+up)
eps = numpy.where(xloc < mu, eps, -eps)*xloc

floc = evaluation.evaluate_forward(
    dist, xloc, parameters=parameters.copy(), cache=cache.copy())
for d in range(len(dist)):
    xloc[d] += eps[d]
    tmp = evaluation.evaluate_forward(
        dist, xloc, parameters=parameters.copy(), cache=cache.copy())
    floc[d] -= tmp[d]
    xloc[d] -= eps[d]

floc = numpy.abs(floc / eps)
return floc
def approximate_density( dist, xloc, parameters=None, cache=None, eps=1.e-7 )
Approximate the probability density function.

Args:
    dist (Dist):
        Distribution in question. May not be an advanced variable.
    xloc (numpy.ndarray):
        Location coordinates. Requires that xloc.shape == (len(dist), K).
    eps (float):
        Acceptable error level for the approximations.
    retall (bool):
        If True, return Graph with the next calculation state with the
        approximation.

Returns:
    numpy.ndarray:
        Local probability density function with
        ``out.shape == xloc.shape``. To calculate actual density
        function, evaluate ``numpy.prod(out, 0)``.

Example:
    >>> distribution = chaospy.Normal(1000, 10)
    >>> xloc = numpy.array([[990, 1000, 1010]])
    >>> print(numpy.around(approximate_density(distribution, xloc), 4))
    [[0.0242 0.0399 0.0242]]
    >>> print(numpy.around(distribution.pdf(xloc), 4))
    [[0.0242 0.0399 0.0242]]
2.804732
3.227944
0.868891
assert number_base > 1

idx = numpy.asarray(idx).flatten() + 1
out = numpy.zeros(len(idx), dtype=float)

base = float(number_base)
active = numpy.ones(len(idx), dtype=bool)
while numpy.any(active):
    out[active] += (idx[active] % number_base)/base
    idx //= number_base
    base *= number_base
    active = idx > 0

return out
def create_van_der_corput_samples(idx, number_base=2)
Van der Corput samples.

Args:
    idx (int, numpy.ndarray):
        The index of the sequence. If array is provided, all values in
        the array are returned.
    number_base (int):
        The numerical base from where to create the samples from.

Returns (float, numpy.ndarray):
    Van der Corput samples.
3.379241
3.658727
0.923611
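Worked from the code above, the first four base-2 indices reproduce the classic van der Corput sequence:

    print(create_van_der_corput_samples(numpy.arange(4), number_base=2))
    # [0.5   0.25  0.75  0.125]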
if len(args) > 2:
    # fold the remaining terms recursively
    return add(args[0], add(*args[1:]))
if len(args) == 1:
    return args[0]

part1, part2 = args

if isinstance(part2, Poly):
    if part2.dim > part1.dim:
        part1 = chaospy.dimension.setdim(part1, part2.dim)
    elif part2.dim < part1.dim:
        part2 = chaospy.dimension.setdim(part2, part1.dim)

    dtype = chaospy.poly.typing.dtyping(part1.dtype, part2.dtype)

    core1 = part1.A.copy()
    core2 = part2.A.copy()

    if np.prod(part2.shape) > np.prod(part1.shape):
        shape = part2.shape
        ones = np.ones(shape, dtype=int)
        for key in core1:
            core1[key] = core1[key]*ones
    else:
        shape = part1.shape
        ones = np.ones(shape, dtype=int)
        for key in core2:
            core2[key] = core2[key]*ones

    for idx in core1:
        if idx in core2:
            core2[idx] = core2[idx] + core1[idx]
        else:
            core2[idx] = core1[idx]
    out = core2

    return Poly(out, part1.dim, shape, dtype)

part2 = np.asarray(part2)
core = part1.A.copy()
dtype = chaospy.poly.typing.dtyping(part1.dtype, part2.dtype)

zero = (0,)*part1.dim
if zero not in core:
    core[zero] = np.zeros(part1.shape, dtype=int)
core[zero] = core[zero] + part2

if np.prod(part2.shape) > np.prod(part1.shape):
    ones = np.ones(part2.shape, dtype=dtype)
    for key in core:
        core[key] = core[key]*ones

return Poly(core, part1.dim, None, dtype)
def add(*args)
Polynomial addition.
2.23145
2.16917
1.028711
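The ``+`` operator on ``Poly`` objects presumably dispatches to this backend; a tiny sketch:

    x, y = chaospy.variable(2)
    poly = chaospy.Poly([1, x]) + y   # scalar polynomial broadcast over the array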
if len(args) > 2:
    # fold the remaining factors recursively
    return mul(args[0], mul(*args[1:]))
if len(args) == 1:
    return args[0]

part1, part2 = args

if not isinstance(part2, Poly):
    if isinstance(part2, (float, int)):
        part2 = np.asarray(part2)

    if not part2.shape:
        core = part1.A.copy()
        dtype = chaospy.poly.typing.dtyping(
            part1.dtype, part2.dtype)
        for key in part1.keys:
            core[key] = np.asarray(core[key]*part2, dtype)
        return Poly(core, part1.dim, part1.shape, dtype)

    part2 = Poly(part2)

if part2.dim > part1.dim:
    part1 = chaospy.dimension.setdim(part1, part2.dim)
elif part2.dim < part1.dim:
    part2 = chaospy.dimension.setdim(part2, part1.dim)

if np.prod(part1.shape) >= np.prod(part2.shape):
    shape = part1.shape
else:
    shape = part2.shape

dtype = chaospy.poly.typing.dtyping(part1.dtype, part2.dtype)
if part1.dtype != part2.dtype:
    if part1.dtype == dtype:
        part2 = chaospy.poly.typing.asfloat(part2)
    else:
        part1 = chaospy.poly.typing.asfloat(part1)

core = {}
for idx1 in part2.A:
    for idx2 in part1.A:
        key = tuple(np.array(idx1) + np.array(idx2))
        core[key] = np.asarray(
            core.get(key, 0) + part2.A[idx1]*part1.A[idx2])

core = {key: value for key, value in core.items() if np.any(value)}

out = Poly(core, part1.dim, shape, dtype)
return out
def mul(*args)
Polynomial multiplication.
2.389755
2.35039
1.016748
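And likewise for ``*``; for example:

    x, y = chaospy.variable(2)
    poly = (x + 1)*(y - 1)   # expands to x*y - x + y - 1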
if self.eps == 0:
    return [xmin, center, xmax], [0, 0, 1]
else:
    n = float(resolution_inside)/self.eps
    x = np.concatenate((
        np.linspace(xmin, center-self.eps, resolution_outside+1),
        np.linspace(center-self.eps, center+self.eps, int(n)+1),
        np.linspace(center+self.eps, xmax, resolution_outside+1)))
    y = self(x)
    return x, y
def plot(self, xmin=-1, xmax=1, center=0, resolution_outside=20, resolution_inside=200)
Return arrays x, y for plotting the Heaviside function H(x-`center`)
on [`xmin`, `xmax`]. For the exact Heaviside function,
``x = [xmin, center, xmax]; y = [0, 0, 1]``, while for the smoothed
version, the ``x`` array is computed on the basis of the `eps`
parameter, with `resolution_outside` intervals on each side of the
smoothed region and `resolution_inside` intervals in the smoothed
region.
2.648593
2.383379
1.111277
if xmin > self.L or xmax < self.R:
    raise ValueError(
        'xmin=%g > L=%g or xmax=%g < R=%g is meaningless for plot'
        % (xmin, self.L, xmax, self.R))

if self.eps_L == 0 and self.eps_R == 0:
    return ([xmin, self.L, self.L, self.R, self.R, xmax],
            [0, 0, 1, 1, 0, 0])
else:
    n = float(resolution_inside)/(0.5*(self.eps_L + self.eps_R))
    x = np.concatenate((
        np.linspace(xmin, self.L-self.eps_L, resolution_outside+1),
        np.linspace(self.L-self.eps_L, self.R+self.eps_R,
                    resolution_inside+1),
        np.linspace(self.R+self.eps_R, xmax, resolution_outside+1)))
    y = self(x)
    return x, y
def plot(self, xmin=-1, xmax=1, resolution_outside=20, resolution_inside=200)
Return arrays x, y for plotting IndicatorFunction on [`xmin`, `xmax`].
For the exact discontinuous indicator function, we typically have
``x = [xmin, L, L, R, R, xmax]; y = [0, 0, 1, 1, 0, 0]``, while for the
smoothed version, the density of coordinates in the ``x`` array is
computed on the basis of the `eps` parameter, with `resolution_outside`
plotting intervals outside the smoothed regions and `resolution_inside`
intervals inside the smoothed regions.
2.367666
2.128472
1.112378
if self.eps == 0:
    x = []
    y = []
    for I, value in zip(self._indicator_functions, self._values):
        x.append(I.L)
        y.append(value)
        x.append(I.R)
        y.append(value)
    return x, y
else:
    n = float(resolution_smooth_regions)/self.eps
    if len(self.data) == 1:
        return [self.L, self.R], [self._values[0], self._values[0]]
    else:
        x = [np.linspace(self.data[0][0], self.data[1][0]-self.eps,
                         resolution_constant_regions+1)]
        # iterate over all internal discontinuities
        for I in self._indicator_functions[1:]:
            x.append(np.linspace(I.L-self.eps, I.L+self.eps,
                                 resolution_smooth_regions+1))
            x.append(np.linspace(I.L+self.eps, I.R-self.eps,
                                 resolution_constant_regions+1))
        # last part
        x.append(np.linspace(I.R-self.eps, I.R, 3))
        x = np.concatenate(x)
        y = self(x)
        return x, y
def plot(self, resolution_constant_regions=20, resolution_smooth_regions=200)
Return arrays x, y for plotting the piecewise constant function. Just
the minimum number of straight lines are returned if ``eps=0``,
otherwise `resolution_constant_regions` plotting intervals are used in
the constant regions and `resolution_smooth_regions` plotting intervals
in the smoothed regions.
2.725188
2.528022
1.077992
poly = Poly(poly)
diffvar = Poly(diffvar)

if not chaospy.poly.caller.is_decomposed(diffvar):
    return sum(differential(poly, chaospy.poly.caller.decompose(diffvar)))

if diffvar.shape:
    return Poly([differential(poly, pol) for pol in diffvar])

if diffvar.dim > poly.dim:
    poly = chaospy.poly.dimension.setdim(poly, diffvar.dim)
else:
    diffvar = chaospy.poly.dimension.setdim(diffvar, poly.dim)

qkey = diffvar.keys[0]
core = {}
for key in poly.keys:
    newkey = np.array(key) - np.array(qkey)
    if np.any(newkey < 0):
        continue
    newkey = tuple(newkey)
    core[newkey] = poly.A[key] * np.prod(
        [fac(key[idx], exact=True) / fac(newkey[idx], exact=True)
         for idx in range(poly.dim)])

return Poly(core, poly.dim, poly.shape, poly.dtype)
def differential(poly, diffvar)
Polynomial differential operator.

Args:
    poly (Poly):
        Polynomial to be differentiated.
    diffvar (Poly):
        Polynomial to differentiate by. Must be decomposed. If polynomial
        array, the output is the Jacobian matrix.

Examples:
    >>> q0, q1 = chaospy.variable(2)
    >>> poly = chaospy.Poly([1, q0, q0*q1**2+1])
    >>> print(poly)
    [1, q0, q0q1^2+1]
    >>> print(differential(poly, q0))
    [0, 1, q1^2]
    >>> print(differential(poly, q1))
    [0, 0, 2q0q1]
3.685789
3.64904
1.010071
return differential(poly, chaospy.poly.collection.basis(1, 1, poly.dim))
def gradient(poly)
Gradient of a polynomial.

Args:
    poly (Poly):
        Polynomial to take gradient of.

Returns:
    (Poly):
        The resulting gradient.

Examples:
    >>> q0, q1, q2 = chaospy.variable(3)
    >>> poly = 2*q0 + q1*q2
    >>> print(chaospy.gradient(poly))
    [2, q2, q1]
20.786161
30.576591
0.679806
left = evaluation.get_forward_cache(left, cache)
right = evaluation.get_forward_cache(right, cache)

if isinstance(left, Dist):
    if isinstance(right, Dist):
        raise StochasticallyDependentError(
            "under-defined distribution {} or {}".format(left, right))

elif not isinstance(right, Dist):
    return left**right, left**right

else:
    output = numpy.ones(xloc.shape)
    left = left * output
    assert numpy.all(left >= 0), "root of negative number"

    indices = xloc > 0
    output[indices] = numpy.log(xloc[indices])
    output[~indices] = -numpy.inf

    indices = left != 1
    output[indices] /= numpy.log(left[indices])
    output = evaluation.evaluate_bound(right, output, cache=cache)
    output = left**output
    output[:] = (
        numpy.where(output[0] < output[1], output[0], output[1]),
        numpy.where(output[0] < output[1], output[1], output[0]),
    )
    return output

output = numpy.zeros(xloc.shape)
right = right + output

indices = right > 0
output[indices] = numpy.abs(xloc[indices])**(1/right[indices])
output[indices] *= numpy.sign(xloc[indices])
output[right == 0] = 1
output[(xloc == 0) & (right < 0)] = numpy.inf

output = evaluation.evaluate_bound(left, output, cache=cache)

pair = right % 2 == 0
bnd_ = numpy.empty(output.shape)
bnd_[0] = numpy.where(pair*(output[0]*output[1] < 0), 0, output[0])
bnd_[0] = numpy.where(
    pair*(output[0]*output[1] > 0),
    numpy.min(numpy.abs(output), 0), bnd_[0])**right
bnd_[1] = numpy.where(
    pair, numpy.max(numpy.abs(output), 0), output[1])**right
bnd_[0], bnd_[1] = (
    numpy.where(bnd_[0] < bnd_[1], bnd_[0], bnd_[1]),
    numpy.where(bnd_[0] < bnd_[1], bnd_[1], bnd_[0]),
)
return bnd_
def _bnd(self, xloc, left, right, cache)
Distribution bounds.

Example:
    >>> print(chaospy.Uniform().range([-2, 0, 2, 4]))
    [[0. 0. 0. 0.]
     [1. 1. 1. 1.]]
    >>> print(chaospy.Pow(chaospy.Uniform(), 2).range([-2, 0, 2, 4]))
    [[0. 0. 0. 0.]
     [1. 1. 1. 1.]]
    >>> print(chaospy.Pow(chaospy.Uniform(1, 2), -1).range([-2, 0, 2, 4]))
    [[0.5 0.5 0.5 0.5]
     [1.  1.  1.  1. ]]
    >>> print(chaospy.Pow(2, chaospy.Uniform()).range([-2, 0, 2, 4]))
    [[1. 1. 1. 1.]
     [2. 2. 2. 2.]]
    >>> print(chaospy.Pow(2, chaospy.Uniform(-1, 0)).range([-2, 0, 2, 4]))
    [[0.5 0.5 0.5 0.5]
     [1.  1.  1.  1. ]]
    >>> print(chaospy.Pow(2, 3).range([-2, 0, 2, 4]))
    [[8. 8. 8. 8.]
     [8. 8. 8. 8.]]
2.671868
2.72842
0.979273
left = evaluation.get_forward_cache(left, cache)
right = evaluation.get_forward_cache(right, cache)

if isinstance(left, Dist):
    if isinstance(right, Dist):
        raise StochasticallyDependentError(
            "under-defined distribution {} or {}".format(left, right))

elif not isinstance(right, Dist):
    return numpy.inf

else:
    assert numpy.all(left > 0), "imaginary result"

    y = (numpy.log(numpy.abs(xloc) + 1.*(xloc <= 0)) /
         numpy.log(numpy.abs(left)+1.*(left == 1)))

    out = evaluation.evaluate_forward(right, y)
    out = numpy.where(xloc <= 0, 0., out)
    return out

y = numpy.sign(xloc)*numpy.abs(xloc)**(1./right)
pairs = numpy.sign(xloc**right) != -1

out1, out2 = (
    evaluation.evaluate_forward(left, y, cache=cache),
    evaluation.evaluate_forward(left, -y, cache=cache),
)
out = numpy.where(right < 0, 1-out1, out1-pairs*out2)
return out
def _cdf(self, xloc, left, right, cache)
Cumulative distribution function.

Example:
    >>> print(chaospy.Uniform().fwd([-0.5, 0.5, 1.5, 2.5]))
    [0.  0.5 1.  1. ]
    >>> print(chaospy.Pow(chaospy.Uniform(), 2).fwd([-0.5, 0.5, 1.5, 2.5]))
    [0.         0.70710678 1.         1.        ]
    >>> print(chaospy.Pow(chaospy.Uniform(1, 2), -1).fwd([0.4, 0.6, 0.8, 1.2]))
    [0.         0.33333333 0.75       1.        ]
    >>> print(chaospy.Pow(2, chaospy.Uniform()).fwd([-0.5, 0.5, 1.5, 2.5]))
    [0.        0.        0.5849625 1.       ]
    >>> print(chaospy.Pow(2, chaospy.Uniform(-1, 0)).fwd([0.4, 0.6, 0.8, 1.2]))
    [0.         0.26303441 0.67807191 1.        ]
    >>> print(chaospy.Pow(2, 3).fwd([7, 8, 9]))
    [0. 1. 1.]
4.120614
4.318824
0.954106
left = evaluation.get_inverse_cache(left, cache)
right = evaluation.get_inverse_cache(right, cache)

if isinstance(left, Dist):
    if isinstance(right, Dist):
        raise StochasticallyDependentError(
            "under-defined distribution {} or {}".format(left, right))

elif not isinstance(right, Dist):
    return left**right

else:
    out = evaluation.evaluate_inverse(right, q, cache=cache)
    out = numpy.where(left < 0, 1-out, out)
    out = left**out
    return out

right = right + numpy.zeros(q.shape)
q = numpy.where(right < 0, 1-q, q)
out = evaluation.evaluate_inverse(left, q, cache=cache)**right
return out
def _ppf(self, q, left, right, cache)
Point percentile function.

Example:
    >>> print(chaospy.Uniform().inv([0.1, 0.2, 0.9]))
    [0.1 0.2 0.9]
    >>> print(chaospy.Pow(chaospy.Uniform(), 2).inv([0.1, 0.2, 0.9]))
    [0.01 0.04 0.81]
    >>> print(chaospy.Pow(chaospy.Uniform(1, 2), -1).inv([0.1, 0.2, 0.9]))
    [0.52631579 0.55555556 0.90909091]
    >>> print(chaospy.Pow(2, chaospy.Uniform()).inv([0.1, 0.2, 0.9]))
    [1.07177346 1.14869835 1.86606598]
    >>> print(chaospy.Pow(2, chaospy.Uniform(-1, 0)).inv([0.1, 0.2, 0.9]))
    [0.53588673 0.57434918 0.93303299]
    >>> print(chaospy.Pow(2, 3).inv([0.1, 0.2, 0.9]))
    [8. 8. 8.]
3.688947
3.934699
0.937542
left = evaluation.get_forward_cache(left, cache)
right = evaluation.get_forward_cache(right, cache)

if isinstance(left, Dist):
    if isinstance(right, Dist):
        raise StochasticallyDependentError(
            "under-defined distribution {} or {}".format(left, right))

elif not isinstance(right, Dist):
    return numpy.inf

else:
    assert numpy.all(left > 0), "imaginary result"
    x_ = numpy.where(xloc <= 0, -numpy.inf,
                     numpy.log(xloc + 1.*(xloc <= 0)) /
                     numpy.log(left+1.*(left == 1)))
    num_ = numpy.log(left+1.*(left == 1))*xloc
    num_ = num_ + 1.*(num_ == 0)
    out = evaluation.evaluate_density(right, x_, cache=cache)/num_
    return out

x_ = numpy.sign(xloc)*numpy.abs(xloc)**(1./right - 1)
xloc = numpy.sign(xloc)*numpy.abs(xloc)**(1./right)
pairs = numpy.sign(xloc**right) == 1

out = evaluation.evaluate_density(left, xloc, cache=cache)
if numpy.any(pairs):
    out = out + pairs*evaluation.evaluate_density(left, -xloc, cache=cache)

out = numpy.sign(right)*out * x_ / right
out[numpy.isnan(out)] = numpy.inf

return out
def _pdf(self, xloc, left, right, cache)
Probability density function.

Example:
    >>> print(chaospy.Uniform().pdf([-0.5, 0.5, 1.5, 2.5]))
    [0. 1. 0. 0.]
    >>> print(chaospy.Pow(chaospy.Uniform(), 2).pdf([-0.5, 0.5, 1.5, 2.5]))
    [0.         0.70710678 0.         0.        ]
    >>> print(chaospy.Pow(chaospy.Uniform(1, 2), -1).pdf([0.4, 0.6, 0.8, 1.2]))
    [0.         2.77777778 1.5625     0.        ]
    >>> print(chaospy.Pow(2, chaospy.Uniform()).pdf([-0.5, 0.5, 1.5, 2.5]))
    [0.         0.         0.96179669 0.        ]
    >>> print(chaospy.Pow(2, chaospy.Uniform(-1, 0)).pdf([0.4, 0.6, 0.8, 1.2]))
    [0.         2.40449173 1.8033688  0.        ]
    >>> print(chaospy.Pow(2, 3).pdf([7, 8, 9]))
    [ 0. inf  0.]
4.009871
4.158485
0.964262
if isinstance(right, Dist):
    raise StochasticallyDependentError(
        "distribution as exponent not supported.")
if not isinstance(left, Dist):
    return left**(right*k)
if numpy.any(right < 0):
    raise StochasticallyDependentError(
        "distribution to negative power not supported.")
if not numpy.allclose(right, numpy.array(right, dtype=int)):
    raise StochasticallyDependentError(
        "distribution to fractional power not supported.")
return evaluation.evaluate_moment(left, k*right, cache=cache)
def _mom(self, k, left, right, cache)
Statistical moments.

Example:
    >>> print(numpy.around(chaospy.Uniform().mom([0, 1, 2, 3]), 4))
    [1.     0.5    0.3333 0.25  ]
    >>> print(numpy.around(chaospy.Pow(chaospy.Uniform(), 2).mom([0, 1, 2, 3]), 4))
    [1.     0.3333 0.2    0.1429]
    >>> print(numpy.around(chaospy.Pow(chaospy.Uniform(1, 2), -1).mom([0, 1, 2, 3]), 4))
    [1.     0.6931 0.5    0.375 ]
    >>> print(numpy.around(chaospy.Pow(2, chaospy.Uniform()).mom([0, 1, 2, 3]), 4))
    [1.     1.4427 2.164  3.3663]
    >>> print(numpy.around(chaospy.Pow(2, chaospy.Uniform(-1, 0)).mom([0, 1, 2, 3]), 4))
    [1.     0.7213 0.541  0.4208]
    >>> print(numpy.around(chaospy.Pow(2, 1).mom([0, 1, 2, 3]), 4))
    [1. 2. 4. 8.]
4.560004
4.315637
1.056624
assert len(k_data) == len(distribution), (
    "distribution %s is not of length %d" % (distribution, len(k_data)))
assert len(k_data.shape) == 1

def cache_key(distribution):
    return (tuple(k_data), distribution)

if cache is None:
    cache = {}
else:
    if cache_key(distribution) in cache:
        return cache[cache_key(distribution)]

try:
    parameters = load_parameters(
        distribution, "_ttr", parameters, cache, cache_key)
    coeff1, coeff2 = distribution._ttr(k_data, **parameters)

except NotImplementedError:
    from ... import quad
    _, _, coeff1, coeff2 = quad.stieltjes._stieltjes_approx(
        distribution, order=numpy.max(k_data), accuracy=100, normed=False)
    range_ = numpy.arange(len(distribution), dtype=int)
    coeff1 = coeff1[range_, k_data]
    coeff2 = coeff2[range_, k_data]

out = numpy.zeros((2,) + k_data.shape)
out.T[:, 0] = numpy.asarray(coeff1).T
out.T[:, 1] = numpy.asarray(coeff2).T

if len(distribution) == 1:
    out = out[:, 0]

return out
def evaluate_recurrence_coefficients( distribution, k_data, parameters=None, cache=None, )
Evaluate three terms recurrence coefficients (TTR).

Args:
    distribution (Dist):
        Distribution to evaluate.
    k_data (numpy.ndarray):
        The orders of the recurrence coefficients to evaluate.
    parameters (:py:data:typing.Any):
        Collection of parameters to override the default ones in the
        distribution.
    cache (:py:data:typing.Any):
        A collection of previous calculations in case the same
        distribution turns up on more than one occasion.

Returns:
    The recurrence coefficients ``A`` and ``B`` of ``distribution`` at
    order ``k_data`` using parameters ``parameters``.
3.397789
3.266083
1.040325
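The public counterpart is presumably exposed as ``Dist.ttr``. For the standard normal, the Hermite recurrence gives alpha_k = 0 and beta_k = k, so a quick sanity check might look like:

    coeffs = chaospy.Normal(0, 1).ttr([0, 1, 2, 3])
    # coeffs[0] (the A row) should be all zeros;
    # coeffs[1] (the B row) should grow like k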
values = numpy.empty(dim)
values[0] = 1
for idx in range(1, dim):
    values[idx] = base*values[idx-1] % (order+1)

grid = numpy.mgrid[:dim, :order+1]
out = values[grid[0]] * (grid[1]+1) / (order+1.) % 1.
return out[:, :order]
def create_korobov_samples(order, dim, base=17797)
Create Korobov lattice samples.

Args:
    order (int):
        The order of the Korobov lattice. Defines the number of samples.
    dim (int):
        The number of dimensions in the output.
    base (int):
        The number base used to calculate the distribution of values.

Returns (numpy.ndarray):
    Korobov lattice with ``shape == (dim, order)``.
3.998649
4.228079
0.945737
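Worked from the code above with small numbers (for order=4 the generator reduces to 17797 mod 5 = 2 in the second dimension):

    print(create_korobov_samples(order=4, dim=2))
    # [[0.2 0.4 0.6 0.8]
    #  [0.4 0.8 0.2 0.6]]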
return evaluation.evaluate_forward(dist, numpy.e**xloc, cache=cache)
def _cdf(self, xloc, dist, cache)
Cumulative distribution function.
15.80227
12.797116
1.234831
return numpy.log(evaluation.evaluate_bound( dist, numpy.e**xloc, cache=cache))
def _bnd(self, xloc, dist, cache)
Distribution bounds.
14.761956
14.341515
1.029316
numpy.random.seed(1000)

def foo(coord, param):
    return param[0] * numpy.e ** (-param[1] * coord)

coord = numpy.linspace(0, 10, 200)
distribution = cp.J(cp.Uniform(1, 2), cp.Uniform(0.1, 0.2))

samples = distribution.sample(50)
evals = numpy.array([foo(coord, sample) for sample in samples.T])

plt.plot(coord, evals.T, "k-", lw=3, alpha=0.2)
plt.xlabel(r"\verb;coord;")
plt.ylabel(r"function evaluations \verb;foo;")
plt.savefig("demonstration.png")
plt.clf()

samples = distribution.sample(1000, "H")
evals = [foo(coord, sample) for sample in samples.T]
expected = numpy.mean(evals, 0)
deviation = numpy.std(evals, 0)

plt.fill_between(
    coord, expected-deviation, expected+deviation, color="k", alpha=0.3)
plt.plot(coord, expected, "k--", lw=3)
plt.xlabel(r"\verb;coord;")
plt.ylabel(r"function evaluations \verb;foo;")
plt.title("Results using Monte Carlo simulation")
plt.savefig("results_montecarlo.png")
plt.clf()

polynomial_expansion = cp.orth_ttr(8, distribution)
foo_approx = cp.fit_regression(polynomial_expansion, samples, evals)
expected = cp.E(foo_approx, distribution)
deviation = cp.Std(foo_approx, distribution)

plt.fill_between(
    coord, expected-deviation, expected+deviation, color="k", alpha=0.3)
plt.plot(coord, expected, "k--", lw=3)
plt.xlabel(r"\verb;coord;")
plt.ylabel(r"function evaluations \verb;foo;")
plt.title("Results using point collocation method")
plt.savefig("results_collocation.png")
plt.clf()

absissas, weights = cp.generate_quadrature(8, distribution, "C")
evals = [foo(coord, val) for val in absissas.T]
foo_approx = cp.fit_quadrature(
    polynomial_expansion, absissas, weights, evals)
expected = cp.E(foo_approx, distribution)
deviation = cp.Std(foo_approx, distribution)

plt.fill_between(
    coord, expected-deviation, expected+deviation, color="k", alpha=0.3)
plt.plot(coord, expected, "k--", lw=3)
plt.xlabel(r"\verb;coord;")
plt.ylabel(r"function evaluations \verb;foo;")
plt.title("Results using pseudo-spectral projection method")
plt.savefig("results_spectral.png")
plt.clf()
def plot_figures()
Plot figures for tutorial.
2.43712
2.422279
1.006127
assert isinstance(rule, int)

if len(dist) > 1:
    if isinstance(order, int):
        values = [quad_genz_keister(order, d, rule) for d in dist]
    else:
        values = [quad_genz_keister(order[i], dist[i], rule)
                  for i in range(len(dist))]

    abscissas = [_[0][0] for _ in values]
    abscissas = chaospy.quad.combine(abscissas).T
    weights = [_[1] for _ in values]
    weights = np.prod(chaospy.quad.combine(weights), -1)

    return abscissas, weights

foo = chaospy.quad.genz_keister.COLLECTION[rule]
abscissas, weights = foo(order)
abscissas = dist.inv(scipy.special.ndtr(abscissas))
abscissas = abscissas.reshape(1, abscissas.size)

return abscissas, weights
def quad_genz_keister(order, dist, rule=24)
Genz-Keister quadrature rule.

Example:
    >>> abscissas, weights = quad_genz_keister(
    ...     order=1, dist=chaospy.Uniform(0, 1))
    >>> print(numpy.around(abscissas, 4))
    [[0.0416 0.5    0.9584]]
    >>> print(numpy.around(weights, 4))
    [0.1667 0.6667 0.1667]
2.777441
2.779853
0.999132
shape = poly.shape
poly = polynomials.flatten(poly)

q = numpy.array(q)/100.
dim = len(dist)

# Interior
Z = dist.sample(sample, **kws)
if dim == 1:
    Z = (Z, )
    q = numpy.array([q])
poly1 = poly(*Z)

# Min/max
mi, ma = dist.range().reshape(2, dim)
ext = numpy.mgrid[(slice(0, 2, 1), )*dim].reshape(dim, 2**dim).T
ext = numpy.where(ext, mi, ma).T
poly2 = poly(*ext)
poly2 = numpy.array([_ for _ in poly2.T if not numpy.any(numpy.isnan(_))]).T

# Finish
if poly2.shape:
    poly1 = numpy.concatenate([poly1, poly2], -1)
samples = poly1.shape[-1]
poly1.sort()
out = poly1.T[numpy.asarray(q*(samples-1), dtype=int)]
out = out.reshape(q.shape + shape)

return out
def Perc(poly, q, dist, sample=10000, **kws)
Percentile function.

Note that this function is an empirical function that operates using
Monte Carlo sampling.

Args:
    poly (Poly):
        Polynomial of interest.
    q (numpy.ndarray):
        Positions where percentiles are taken. Must be a number or an
        array, where all values are on the interval ``[0, 100]``.
    dist (Dist):
        Defines the space where the percentiles are taken.
    sample (int):
        Number of samples used in estimation.

Returns:
    (numpy.ndarray):
        Percentiles of ``poly`` with ``Q.shape == q.shape + poly.shape``.

Examples:
    >>> dist = chaospy.J(chaospy.Gamma(1, 1), chaospy.Normal(0, 2))
    >>> x, y = chaospy.variable(2)
    >>> poly = chaospy.Poly([0.05*x, 0.2*y, 0.01*x*y])
    >>> print(numpy.around(chaospy.Perc(poly, [0, 5, 50, 95, 100], dist), 2))
    [[ 0.   -3.   -6.3 ]
     [ 0.   -0.64 -0.04]
     [ 0.03 -0.01 -0.  ]
     [ 0.15  0.66  0.04]
     [ 2.1   3.    6.3 ]]
4.790602
5.190289
0.922993
abscissas, weights = chaospy.quad.collection.golub_welsch(order, dist) likelihood = dist.pdf(abscissas) alpha = numpy.random.random(len(weights)) alpha = likelihood > alpha*subset*numpy.max(likelihood) abscissas = abscissas.T[alpha].T weights = weights[alpha] return abscissas, weights
def probabilistic_collocation(order, dist, subset=.1)
Probabilistic collocation method.

Args:
    order (int, numpy.ndarray):
        Quadrature order along each axis.
    dist (Dist):
        Distribution to generate samples from.
    subset (float):
        Rate at which samples are removed.
5.002216
5.687972
0.879438
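A minimal usage sketch for the thinning behaviour above; the distribution choice and the seed are assumptions, not part of the source. Since nodes are dropped at random, the number of kept nodes varies between runs.

import numpy
import chaospy

numpy.random.seed(123)  # assumed seed, only to make the run repeatable
dist = chaospy.Normal(0, 1)
abscissas, weights = probabilistic_collocation(order=4, dist=dist, subset=0.5)
# The underlying Golub-Welsch rule typically yields order+1 nodes; the
# random likelihood test above can only remove nodes, never add them:
assert abscissas.shape[-1] <= 5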
from ...distributions.baseclass import Dist if isinstance(domain, Dist): lower, upper = domain.range() parameters["dist"] = domain else: lower, upper = numpy.array(domain) parameters["lower"] = lower parameters["upper"] = upper quad_function = QUAD_FUNCTIONS[rule] parameters_spec = inspect.getargspec(quad_function)[0] parameters_spec = {key: None for key in parameters_spec} del parameters_spec["order"] for key in parameters_spec: if key in parameters: parameters_spec[key] = parameters[key] def _quad_function(order, *args, **kws): params = parameters_spec.copy() params.update(kws) abscissas, weights = quad_function(order, *args, **params) # normalize if prudent: if rule in UNORMALIZED_QUADRATURE_RULES and normalize: if isinstance(domain, Dist): if len(domain) == 1: weights *= domain.pdf(abscissas).flatten() else: weights *= domain.pdf(abscissas) weights /= numpy.sum(weights) return abscissas, weights return _quad_function
def get_function(rule, domain, normalize, **parameters)
Create a quadrature function and set default parameter values.

Args:
    rule (str):
        Name of quadrature rule defined in ``QUAD_FUNCTIONS``.
    domain (Dist, numpy.ndarray):
        Defines ``lower`` and ``upper`` that are passed to the
        quadrature rule. If ``Dist``, ``domain`` is renamed to
        ``dist`` and also passed.
    normalize (bool):
        In the case of distributions, the abscissas and weights are
        not tailored to a distribution beyond matching the bounds. If
        True, the weights are multiplied by the density of the
        distribution evaluated at the abscissas, and afterwards
        normalized to sum to one.
    parameters (:py:data:typing.Any):
        Redefining of the parameter defaults. Only add parameters
        that the quadrature rule expects.

Returns:
    (:py:data:typing.Callable):
        Function that can be called only using argument ``order``.
3.286551
2.693012
1.2204
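A hedged sketch of how the returned closure might be used. The rule key "clenshaw_curtis" is an assumption about what ``QUAD_FUNCTIONS`` contains; only ``order`` needs to be supplied at call time.

import chaospy

dist = chaospy.Uniform(0, 1)
# "clenshaw_curtis" is a hypothetical key; any name in QUAD_FUNCTIONS works.
quad = get_function("clenshaw_curtis", dist, normalize=True)
abscissas, weights = quad(3)
# If the rule is listed in UNORMALIZED_QUADRATURE_RULES, the weights are
# rescaled against the density and should sum to one:
print(float(weights.sum()))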
abscissas = numpy.asarray(abscissas) if len(abscissas.shape) == 1: abscissas = abscissas.reshape(1, *abscissas.shape) evals = numpy.array(evals) poly_evals = polynomials(*abscissas).T shape = evals.shape[1:] evals = evals.reshape(evals.shape[0], int(numpy.prod(evals.shape[1:]))) if isinstance(rule, str): rule = rule.upper() if rule == "LS": uhat = linalg.lstsq(poly_evals, evals)[0] elif rule == "T": uhat = rlstsq(poly_evals, evals, order=order, alpha=alpha, cross=False) elif rule == "TC": uhat = rlstsq(poly_evals, evals, order=order, alpha=alpha, cross=True) else: from sklearn.linear_model.base import LinearModel assert isinstance(rule, LinearModel) uhat = rule.fit(poly_evals, evals).coef_.T evals = evals.reshape(evals.shape[0], *shape) approx_model = chaospy.poly.sum((polynomials*uhat.T), -1) approx_model = chaospy.poly.reshape(approx_model, shape) if retall == 1: return approx_model, uhat elif retall == 2: return approx_model, uhat, poly_evals return approx_model
def fit_regression( polynomials, abscissas, evals, rule="LS", retall=False, order=0, alpha=-1, )
Fit a polynomial chaos expansion using linear regression.

Args:
    polynomials (chaospy.poly.base.Poly):
        Polynomial expansion with ``polynomials.shape == (M,)`` and
        ``polynomials.dim == D``.
    abscissas (numpy.ndarray):
        Collocation nodes with ``abscissas.shape == (D, K)``.
    evals (numpy.ndarray):
        Model evaluations with ``len(evals) == K``.
    retall (bool):
        If True, return Fourier coefficients in addition to the
        fitted polynomial.
    order (int):
        Tikhonov regularization order.
    alpha (float):
        Damping parameter for the Tikhonov regularization. Calculated
        automatically if negative.

Returns:
    (Poly, numpy.ndarray):
        Fitted polynomial with ``R.shape == evals.shape[1:]`` and
        ``R.dim == D``, and the Fourier coefficients in the
        estimation.

Examples:
    >>> x, y = chaospy.variable(2)
    >>> polynomials = chaospy.Poly([1, x, y])
    >>> abscissas = [[-1,-1,1,1], [-1,1,-1,1]]
    >>> evals = [0,1,1,2]
    >>> print(chaospy.around(chaospy.fit_regression(
    ...     polynomials, abscissas, evals), 14))
    0.5q0+0.5q1+1.0
2.425124
2.573086
0.942496
coef_mat = numpy.array(coef_mat) ordinate = numpy.array(ordinate) dim1, dim2 = coef_mat.shape if cross: out = numpy.empty((dim1, dim2) + ordinate.shape[1:]) coef_mat_ = numpy.empty((dim1-1, dim2)) ordinate_ = numpy.empty((dim1-1,) + ordinate.shape[1:]) for i in range(dim1): coef_mat_[:i] = coef_mat[:i] coef_mat_[i:] = coef_mat[i+1:] ordinate_[:i] = ordinate[:i] ordinate_[i:] = ordinate[i+1:] out[i] = rlstsq(coef_mat_, ordinate_, order, alpha, False) return numpy.median(out, 0) if order == 0: tikhmat = numpy.eye(dim2) elif order == 1: tikhmat = numpy.zeros((dim2-1, dim2)) tikhmat[:, :-1] -= numpy.eye(dim2-1) tikhmat[:, 1:] += numpy.eye(dim2-1) elif order == 2: tikhmat = numpy.zeros((dim2-2, dim2)) tikhmat[:, :-2] += numpy.eye(dim2-2) tikhmat[:, 1:-1] -= 2*numpy.eye(dim2-2) tikhmat[:, 2:] += numpy.eye(dim2-2) elif order is None: tikhmat = numpy.zeros(1) else: tikhmat = numpy.array(order) assert tikhmat.shape[-1] == dim2 or tikhmat.shape in ((), (1,)) if alpha < 0 and order is not None: gamma = 0.1 def rgcv_error(alpha): if alpha <= 0: return numpy.inf coef_mat_ = numpy.dot( coef_mat.T, coef_mat)+alpha*(numpy.dot(tikhmat.T, tikhmat)) try: coef_mat_ = numpy.dot(linalg.inv(coef_mat_), coef_mat.T) except linalg.LinAlgError: return numpy.inf abscissas = numpy.dot(coef_mat_, ordinate) res2 = numpy.sum((numpy.dot(coef_mat, abscissas)-ordinate)**2) coef_mat_2 = numpy.dot(coef_mat, coef_mat_) skew = dim1*res2/numpy.trace(numpy.eye(dim1)-coef_mat_2)**2 mu2 = numpy.sum(coef_mat_2*coef_mat_2.T)/dim1 return (gamma + (1-gamma)*mu2)*skew alphas = 10.**-numpy.arange(0, 16) evals = numpy.array([rgcv_error(alpha) for alpha in alphas]) alpha = alphas[numpy.argmin(evals)] out = linalg.inv( numpy.dot(coef_mat.T, coef_mat) + alpha*numpy.dot(tikhmat.T, tikhmat)) out = numpy.dot(out, numpy.dot(coef_mat.T, ordinate)) return out
def rlstsq(coef_mat, ordinate, order=1, alpha=-1, cross=False)
Least squares minimization using Tikhonov regularization.

Includes method for robust generalized cross-validation.

Args:
    coef_mat (numpy.ndarray):
        Coefficient matrix with shape ``(M, N)``.
    ordinate (numpy.ndarray):
        Ordinate or "dependent variable" values with shape ``(M,)``
        or ``(M, K)``. If ``ordinate`` is two-dimensional, the
        least-squares solution is calculated for each of the ``K``
        columns of ``ordinate``.
    order (int, numpy.ndarray):
        If int, it is the order of Tikhonov regularization. If
        ``numpy.ndarray``, it will be used as regularization matrix.
    alpha (float):
        Lower threshold for the damping parameter. The real value is
        calculated using generalised cross validation.
    cross (bool):
        Use cross validation to estimate alpha value.
2.235733
2.274507
0.982953
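A minimal sketch, assuming only numpy, of the cross-validated damping described above: a negative ``alpha`` triggers the robust GCV search over ``10.**-numpy.arange(0, 16)``.

import numpy

numpy.random.seed(0)
coef_mat = numpy.random.random((20, 5))
truth = numpy.arange(1., 6.)
ordinate = coef_mat.dot(truth) + 0.01*numpy.random.standard_normal(20)

# order=2 penalizes the discrete second derivative of the solution;
# alpha=-1 lets the generalized cross-validation pick the damping.
solution = rlstsq(coef_mat, ordinate, order=2, alpha=-1)
print(solution.shape)  # (5,)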
order = sorted(GENZ_KEISTER_24.keys())[order] abscissas, weights = GENZ_KEISTER_24[order] abscissas = numpy.array(abscissas) weights = numpy.array(weights) weights /= numpy.sum(weights) abscissas *= numpy.sqrt(2) return abscissas, weights
def quad_genz_keister_24(order)
Hermite Genz-Keister 24 rule. Args: order (int): The quadrature order. Must be in the interval (0, 8). Returns: (:py:data:typing.Tuple[numpy.ndarray, numpy.ndarray]): Abscissas and weights Examples: >>> abscissas, weights = quad_genz_keister_24(1) >>> print(numpy.around(abscissas, 4)) [-1.7321 0. 1.7321] >>> print(numpy.around(weights, 4)) [0.1667 0.6667 0.1667]
2.768613
3.528265
0.784695
try: args = inspect.signature(caller).parameters except AttributeError: args = inspect.getargspec(caller).args return key in args
def contains_call_signature(caller, key)
Check if a function or method call signature contains a specific argument. Args: caller (Callable): Method or function to check if signature is contain in. key (str): Signature to look for. Returns: True if ``key`` exits in ``caller`` call signature. Examples: >>> def foo(param): pass >>> contains_call_signature(foo, "param") True >>> contains_call_signature(foo, "not_param") False >>> class Bar: ... def baz(self, param): pass >>> bar = Bar() >>> contains_call_signature(bar.baz, "param") True >>> contains_call_signature(bar.baz, "not_param") False
3.316486
4.966848
0.667724
dim = len(dist) generator = Saltelli(dist, samples, poly, rule=rule) zeros = [0]*dim ones = [1]*dim index = [0]*dim variance = numpy.var(generator[zeros], -1) matrix_0 = generator[zeros] matrix_1 = generator[ones] mean = .5*(numpy.mean(matrix_1) + numpy.mean(matrix_0)) matrix_0 -= mean matrix_1 -= mean out = [ numpy.mean(matrix_1*((generator[index]-mean)-matrix_0), -1) / numpy.where(variance, variance, 1) for index in numpy.eye(dim, dtype=bool) ] return numpy.array(out)
def Sens_m_sample(poly, dist, samples, rule="R")
First order sensitivity indices estimated using Saltelli's method.

Args:
    poly (chaospy.Poly):
        If provided, samples are evaluated through the polynomials
        before being returned.
    dist (chaospy.Dist):
        Distribution to sample from.
    samples (int):
        The number of samples to draw for each matrix.
    rule (str):
        Scheme for generating random samples.

Return:
    (numpy.ndarray):
        Array with `shape == (len(dist), len(poly))` where
        `sens[dim][pol]` is the first order sensitivity index for
        distribution dimension `dim` and polynomial index `pol`.

Examples:
    >>> dist = chaospy.Iid(chaospy.Uniform(), 2)
    >>> poly = chaospy.basis(2, 2, dim=2)
    >>> print(poly)
    [q0^2, q0q1, q1^2]
    >>> print(numpy.around(Sens_m_sample(poly, dist, 10000, rule="M"), 4))
    [[0.008  0.0026 0.    ]
     [0.     0.6464 2.1321]]
4.492112
5.032916
0.892547
dim = len(dist) generator = Saltelli(dist, samples, poly, rule=rule) zeros = [0]*dim ones = [1]*dim index = [0]*dim variance = numpy.var(generator[zeros], -1) matrix_0 = generator[zeros] matrix_1 = generator[ones] mean = .5*(numpy.mean(matrix_1) + numpy.mean(matrix_0)) matrix_0 -= mean matrix_1 -= mean out = numpy.empty((dim, dim)+poly.shape) for dim1 in range(dim): index[dim1] = 1 matrix = generator[index]-mean out[dim1, dim1] = numpy.mean( matrix_1*(matrix-matrix_0), -1, ) / numpy.where(variance, variance, 1) for dim2 in range(dim1+1, dim): index[dim2] = 1 matrix = generator[index]-mean out[dim1, dim2] = out[dim2, dim1] = numpy.mean( matrix_1*(matrix-matrix_0), -1, ) / numpy.where(variance, variance, 1) index[dim2] = 0 index[dim1] = 0 return out
def Sens_m2_sample(poly, dist, samples, rule="R")
Second order sensitivity indices estimated using Saltelli's method.

Args:
    poly (chaospy.Poly):
        If provided, samples are evaluated through the polynomials
        before being returned.
    dist (chaospy.Dist):
        Distribution to sample from.
    samples (int):
        The number of samples to draw for each matrix.
    rule (str):
        Scheme for generating random samples.

Return:
    (numpy.ndarray):
        Array with `shape == (len(dist), len(dist), len(poly))` where
        `sens[dim1][dim2][pol]` is the second order sensitivity index
        between dimensions `dim1` and `dim2` for polynomial index
        `pol`.

Examples:
    >>> dist = chaospy.Iid(chaospy.Uniform(), 2)
    >>> poly = chaospy.basis(2, 2, dim=2)
    >>> print(poly)
    [q0^2, q0q1, q1^2]
    >>> print(numpy.around(Sens_m2_sample(poly, dist, 10000, rule="H"), 4))
    [[[ 0.008   0.0026  0.    ]
      [-0.0871  1.1516  1.2851]]
    <BLANKLINE>
     [[-0.0871  1.1516  1.2851]
      [ 0.      0.7981  1.38  ]]]
2.957716
3.024245
0.978001
generator = Saltelli(dist, samples, poly, rule=rule) dim = len(dist) zeros = [0]*dim variance = numpy.var(generator[zeros], -1) return numpy.array([ 1-numpy.mean((generator[~index]-generator[zeros])**2, -1,) / (2*numpy.where(variance, variance, 1)) for index in numpy.eye(dim, dtype=bool) ])
def Sens_t_sample(poly, dist, samples, rule="R")
Total order sensitivity indices estimated using Saltelli's method.

Args:
    poly (chaospy.Poly):
        If provided, samples are evaluated through the polynomials
        before being returned.
    dist (chaospy.Dist):
        Distribution to sample from.
    samples (int):
        The number of samples to draw for each matrix.
    rule (str):
        Scheme for generating random samples.

Return:
    (numpy.ndarray):
        Array with `shape == (len(dist), len(poly))` where
        `sens[dim][pol]` is the total order sensitivity index for
        distribution dimension `dim` and polynomial index `pol`.

Examples:
    >>> dist = chaospy.Iid(chaospy.Uniform(0, 1), 2)
    >>> poly = chaospy.basis(2, 2, dim=2)
    >>> print(poly)
    [q0^2, q0q1, q1^2]
    >>> print(numpy.around(Sens_t_sample(poly, dist, 10000, rule="H"), 4))
    [[ 1.      0.2    -0.3807]
     [ 0.9916  0.9962  1.    ]]
7.236266
8.502597
0.851065
new = numpy.empty(self.samples1.shape) for idx in range(len(indices)): if indices[idx]: new[idx] = self.samples1[idx] else: new[idx] = self.samples2[idx] if self.poly: new = self.poly(*new) return new
def get_matrix(self, indices)
Retrieve Saltelli matrix.
3.690748
3.636377
1.014952
left = evaluation.get_forward_cache(left, cache) right = evaluation.get_forward_cache(right, cache) if isinstance(left, Dist): if isinstance(right, Dist): raise evaluation.DependencyError( "under-defined distribution {} or {}".format(left, right)) elif not isinstance(right, Dist): return left+right, left+right else: left, right = right, left right = numpy.asfarray(right) if len(right.shape) == 3: xloc_ = (xloc.T-right[0].T).T lower, upper = evaluation.evaluate_bound(left, xloc_, cache=cache.copy()) lower0, upper0 = (lower.T+right[0].T).T, (upper.T+right[0].T).T xloc_ = (xloc.T-right[1].T).T lower, upper = evaluation.evaluate_bound(left, xloc_, cache=cache) lower1, upper1 = (lower.T+right[1].T).T, (upper.T+right[1].T).T lower = numpy.min([lower0, lower1], 0) upper = numpy.max([upper0, upper1], 0) else: xloc_ = (xloc.T-right.T).T lower, upper = evaluation.evaluate_bound(left, xloc_, cache=cache.copy()) lower, upper = (lower.T+right.T).T, (upper.T+right.T).T assert lower.shape == xloc.shape assert upper.shape == xloc.shape return lower, upper
def _bnd(self, xloc, left, right, cache)
Distribution bounds. Example: >>> print(chaospy.Uniform().range([-2, 0, 2, 4])) [[0. 0. 0. 0.] [1. 1. 1. 1.]] >>> print(chaospy.Add(chaospy.Uniform(), 2).range([-2, 0, 2, 4])) [[2. 2. 2. 2.] [3. 3. 3. 3.]] >>> print(chaospy.Add(2, chaospy.Uniform()).range([-2, 0, 2, 4])) [[2. 2. 2. 2.] [3. 3. 3. 3.]] >>> print(chaospy.Add(1, 1).range([-2, 0, 2, 4])) [[2. 2. 2. 2.] [2. 2. 2. 2.]]
2.246402
2.302068
0.975819
left = evaluation.get_forward_cache(left, cache) right = evaluation.get_forward_cache(right, cache) if isinstance(left, Dist): if isinstance(right, Dist): raise evaluation.DependencyError( "under-defined distribution {} or {}".format(left, right)) elif not isinstance(right, Dist): return numpy.asfarray(left+right <= xloc) else: left, right = right, left xloc = (xloc.T-numpy.asfarray(right).T).T output = evaluation.evaluate_forward(left, xloc, cache=cache) assert output.shape == xloc.shape return output
def _cdf(self, xloc, left, right, cache)
Cumulative distribution function. Example: >>> print(chaospy.Uniform().fwd([-0.5, 0.5, 1.5, 2.5])) [0. 0.5 1. 1. ] >>> print(chaospy.Add(chaospy.Uniform(), 1).fwd([-0.5, 0.5, 1.5, 2.5])) [0. 0. 0.5 1. ] >>> print(chaospy.Add(1, chaospy.Uniform()).fwd([-0.5, 0.5, 1.5, 2.5])) [0. 0. 0.5 1. ] >>> print(chaospy.Add(1, 1).fwd([-0.5, 0.5, 1.5, 2.5])) [0. 0. 0. 1.]
4.178553
4.541179
0.920147
left = evaluation.get_forward_cache(left, cache) right = evaluation.get_forward_cache(right, cache) if isinstance(left, Dist): if isinstance(right, Dist): raise evaluation.DependencyError( "under-defined distribution {} or {}".format(left, right)) elif not isinstance(right, Dist): return numpy.inf else: left, right = right, left xloc = (xloc.T-numpy.asfarray(right).T).T output = evaluation.evaluate_density(left, xloc, cache=cache) assert output.shape == xloc.shape return output
def _pdf(self, xloc, left, right, cache)
Probability density function. Example: >>> print(chaospy.Uniform().pdf([-2, 0, 2, 4])) [0. 1. 0. 0.] >>> print(chaospy.Add(chaospy.Uniform(), 2).pdf([-2, 0, 2, 4])) [0. 0. 1. 0.] >>> print(chaospy.Add(2, chaospy.Uniform()).pdf([-2, 0, 2, 4])) [0. 0. 1. 0.] >>> print(chaospy.Add(1, 1).pdf([-2, 0, 2, 4])) # Dirac logic [ 0. 0. inf 0.]
4.129701
4.577256
0.902222
left = evaluation.get_inverse_cache(left, cache) right = evaluation.get_inverse_cache(right, cache) if isinstance(left, Dist): if isinstance(right, Dist): raise evaluation.DependencyError( "under-defined distribution {} or {}".format(left, right)) elif not isinstance(right, Dist): return left+right else: left, right = right, left xloc = evaluation.evaluate_inverse(left, uloc, cache=cache) output = (xloc.T + numpy.asfarray(right).T).T return output
def _ppf(self, uloc, left, right, cache)
Point percentile function. Example: >>> print(chaospy.Uniform().inv([0.1, 0.2, 0.9])) [0.1 0.2 0.9] >>> print(chaospy.Add(chaospy.Uniform(), 2).inv([0.1, 0.2, 0.9])) [2.1 2.2 2.9] >>> print(chaospy.Add(2, chaospy.Uniform()).inv([0.1, 0.2, 0.9])) [2.1 2.2 2.9] >>> print(chaospy.Add(1, 1).inv([0.1, 0.2, 0.9])) [2. 2. 2.]
4.383789
4.991502
0.878251
if evaluation.get_dependencies(left, right): raise evaluation.DependencyError( "sum of dependent distributions not feasible: " "{} and {}".format(left, right) ) keys_ = numpy.mgrid[tuple(slice(0, key+1, 1) for key in keys)] keys_ = keys_.reshape(len(self), -1) if isinstance(left, Dist): left = [ evaluation.evaluate_moment(left, key, cache=cache) for key in keys_.T ] else: left = list(reversed(numpy.array(left).T**keys_.T)) if isinstance(right, Dist): right = [ evaluation.evaluate_moment(right, key, cache=cache) for key in keys_.T ] else: right = list(reversed(numpy.array(right).T**keys_.T)) out = numpy.zeros(keys.shape) for idx in range(keys_.shape[1]): key = keys_.T[idx] coef = comb(keys.T, key) out += coef*left[idx]*right[idx]*(key <= keys.T) if len(self) > 1: out = numpy.prod(out, 1) return out
def _mom(self, keys, left, right, cache)
Statistical moments. Example: >>> print(numpy.around(chaospy.Uniform().mom([0, 1, 2, 3]), 4)) [1. 0.5 0.3333 0.25 ] >>> print(numpy.around(chaospy.Add(chaospy.Uniform(), 2).mom([0, 1, 2, 3]), 4)) [ 1. 2.5 6.3333 16.25 ] >>> print(numpy.around(chaospy.Add(2, chaospy.Uniform()).mom([0, 1, 2, 3]), 4)) [ 1. 2.5 6.3333 16.25 ] >>> print(numpy.around(chaospy.Add(1, 1).mom([0, 1, 2, 3]), 4)) [1. 2. 4. 8.]
3.365453
3.448684
0.975866
if isinstance(left, Dist): if isinstance(right, Dist): raise StochasticallyDependentError( "sum of distributions not feasible: " "{} and {}".format(left, right) ) else: if not isinstance(right, Dist): raise StochasticallyDependentError( "recurrence coefficients for constants not feasible: " "{}".format(left+right) ) left, right = right, left coeff0, coeff1 = evaluation.evaluate_recurrence_coefficients( left, kloc, cache=cache) return coeff0 + numpy.asarray(right), coeff1
def _ttr(self, kloc, left, right, cache)
Three terms recursion coefficients. Example: >>> print(numpy.around(chaospy.Uniform().ttr([0, 1, 2, 3]), 4)) [[ 0.5 0.5 0.5 0.5 ] [-0. 0.0833 0.0667 0.0643]] >>> print(numpy.around(chaospy.Add(chaospy.Uniform(), 2).ttr([0, 1, 2, 3]), 4)) [[ 2.5 2.5 2.5 2.5 ] [-0. 0.0833 0.0667 0.0643]] >>> print(numpy.around(chaospy.Add(2, chaospy.Uniform()).ttr([0, 1, 2, 3]), 4)) [[ 2.5 2.5 2.5 2.5 ] [-0. 0.0833 0.0667 0.0643]] >>> print(numpy.around(chaospy.Add(1, 1).ttr([0, 1, 2, 3]), 4)) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... chaospy.distributions.baseclass.StochasticallyDependentError: recurrence ...
4.772212
4.011666
1.189584
from .. import distributions assert not distributions.evaluation.get_dependencies(dist) if len(dist) > 1: # one for each dimension: orth, norms, coeff1, coeff2 = zip(*[generate_stieltjes( _, order, accuracy, normed, retall=True, **kws) for _ in dist]) # ensure each polynomial has its own dimension: orth = [[chaospy.setdim(_, len(orth)) for _ in poly] for poly in orth] orth = [[chaospy.rolldim(_, len(dist)-idx) for _ in poly] for idx, poly in enumerate(orth)] orth = [chaospy.poly.base.Poly(_) for _ in zip(*orth)] if not retall: return orth # stack results: norms = numpy.vstack(norms) coeff1 = numpy.vstack(coeff1) coeff2 = numpy.vstack(coeff2) return orth, norms, coeff1, coeff2 try: orth, norms, coeff1, coeff2 = _stieltjes_analytical( dist, order, normed) except NotImplementedError: orth, norms, coeff1, coeff2 = _stieltjes_approx( dist, order, accuracy, normed, **kws) if retall: assert not numpy.any(numpy.isnan(coeff1)) assert not numpy.any(numpy.isnan(coeff2)) return orth, norms, coeff1, coeff2 return orth
def generate_stieltjes( dist, order, accuracy=100, normed=False, retall=False, **kws)
Discretized Stieltjes' method.

Args:
    dist (Dist):
        Distribution defining the space to create weights for.
    order (int):
        The polynomial order to create.
    accuracy (int):
        The quadrature order of the Clenshaw-Curtis nodes to use at
        each step, if approximation is used.
    retall (bool):
        If true, also return the polynomial norms and the three terms
        coefficients.

Returns:
    (list):
        List of polynomials, norms of polynomials and three terms
        coefficients. The list created from the method with
        ``len(orth) == order+1``. If ``len(dist) > 1``, then each
        polynomial is multivariate.
    (numpy.ndarray, numpy.ndarray, numpy.ndarray):
        If ``retall`` is true, also return polynomial norms and the
        three term coefficients. The norms of the polynomials have
        ``norms.shape == (dim, order+1)`` where ``dim`` is the number
        of dimensions in ``dist``. The coefficients have
        ``shape == (dim, order+1)``.

Examples:
    >>> dist = chaospy.J(chaospy.Normal(), chaospy.Weibull())
    >>> orth, norms, coeffs1, coeffs2 = chaospy.generate_stieltjes(
    ...     dist, 2, retall=True)
    >>> print(chaospy.around(orth[2], 5))
    [q0^2-1.0, q1^2-4.0q1+2.0]
    >>> print(numpy.around(norms, 5))
    [[1. 1. 2.]
     [1. 1. 4.]]
    >>> print(numpy.around(coeffs1, 5))
    [[0. 0. 0.]
     [1. 3. 5.]]
    >>> print(numpy.around(coeffs2, 5))
    [[1. 1. 2.]
     [1. 1. 4.]]
    >>> dist = chaospy.Uniform()
    >>> orth, norms, coeffs1, coeffs2 = chaospy.generate_stieltjes(
    ...     dist, 2, retall=True)
    >>> print(chaospy.around(orth[2], 8))
    q0^2-q0+0.16666667
    >>> print(numpy.around(norms, 4))
    [[1.     0.0833 0.0056]]
3.121947
2.791192
1.1185
dimensions = len(dist) mom_order = numpy.arange(order+1).repeat(dimensions) mom_order = mom_order.reshape(order+1, dimensions).T coeff1, coeff2 = dist.ttr(mom_order) coeff2[:, 0] = 1. poly = chaospy.poly.collection.core.variable(dimensions) if normed: orth = [ poly**0*numpy.ones(dimensions), (poly-coeff1[:, 0])/numpy.sqrt(coeff2[:, 1]), ] for order_ in range(1, order): orth.append( (orth[-1]*(poly-coeff1[:, order_]) -orth[-2]*numpy.sqrt(coeff2[:, order_])) /numpy.sqrt(coeff2[:, order_+1]) ) norms = numpy.ones(coeff2.shape) else: orth = [poly-poly, poly**0*numpy.ones(dimensions)] for order_ in range(order): orth.append( orth[-1]*(poly-coeff1[:, order_]) - orth[-2]*coeff2[:, order_] ) orth = orth[1:] norms = numpy.cumprod(coeff2, 1) return orth, norms, coeff1, coeff2
def _stieltjes_analytical(dist, order, normed)
Stieltjes' method with analytical recurrence coefficients.
3.352756
3.357859
0.99848
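To make the recurrence above concrete, here is a hedged illustration using plain numpy of the monic three-term recursion phi_{n+1}(x) = (x - A_n) phi_n(x) - B_n phi_{n-1}(x), with the Uniform(0, 1) coefficients A_n = 0.5 and B_1 = 1/12.

import numpy

x = numpy.polynomial.Polynomial([0.0, 1.0])
phi0 = numpy.polynomial.Polynomial([1.0])
phi1 = (x - 0.5)*phi0
phi2 = (x - 0.5)*phi1 - (1.0/12.0)*phi0
# phi2 has coefficients (1/6, -1, 1), i.e. q0^2-q0+0.16666667, matching
# the generate_stieltjes doctest above.
print(phi2.coef)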
kws["rule"] = kws.get("rule", "C") assert kws["rule"].upper() != "G" absisas, weights = chaospy.quad.generate_quadrature( accuracy, dist.range(), **kws) weights = weights*dist.pdf(absisas) poly = chaospy.poly.variable(len(dist)) orth = [poly*0, poly**0] inner = numpy.sum(absisas*weights, -1) norms = [numpy.ones(len(dist)), numpy.ones(len(dist))] coeff1 = [] coeff2 = [] for _ in range(order): coeff1.append(inner/norms[-1]) coeff2.append(norms[-1]/norms[-2]) orth.append((poly-coeff1[-1])*orth[-1] - orth[-2]*coeff2[-1]) raw_nodes = orth[-1](*absisas)**2*weights inner = numpy.sum(absisas*raw_nodes, -1) norms.append(numpy.sum(raw_nodes, -1)) if normed: orth[-1] = orth[-1]/numpy.sqrt(norms[-1]) coeff1.append(inner/norms[-1]) coeff2.append(norms[-1]/norms[-2]) coeff1 = numpy.transpose(coeff1) coeff2 = numpy.transpose(coeff2) norms = numpy.array(norms[1:]).T orth = orth[1:] return orth, norms, coeff1, coeff2
def _stieltjes_approx(dist, order, accuracy, normed, **kws)
Stieltjes' method with approximative recurrence coefficients.
3.391399
3.344496
1.014024
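A hedged illustration, with stand-in nodes and weights, of the discretized inner products the approximation relies on: each coefficient A_n = <x phi_n, phi_n> / <phi_n, phi_n> is a ratio of weighted sums over the quadrature nodes.

import numpy

nodes = numpy.linspace(0.005, 0.995, 100)   # stand-in quadrature nodes
weights = numpy.full(100, 1.0/100)          # stand-in Uniform(0, 1) weights

phi = numpy.ones_like(nodes)                # phi_0(x) = 1
norm = numpy.sum(phi**2 * weights)
coeff1_0 = numpy.sum(nodes * phi**2 * weights) / norm
print(round(float(coeff1_0), 3))            # ~0.5, the uniform mean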
global RANDOM_SEED # pylint: disable=global-statement if seed_value is not None: RANDOM_SEED = seed_value if step is not None: RANDOM_SEED += step
def set_state(seed_value=None, step=None)
Set random seed.
2.277779
2.031771
1.12108
assert 0 < dim < DIM_MAX, "dim in [1, 40]" # global RANDOM_SEED # pylint: disable=global-statement # if seed is None: # seed = RANDOM_SEED # RANDOM_SEED += order set_state(seed_value=seed) seed = RANDOM_SEED set_state(step=order) # Initialize row 1 of V. samples = SOURCE_SAMPLES.copy() maxcol = int(math.log(2**LOG_MAX-1, 2))+1 samples[0, 0:maxcol] = 1 # Initialize the remaining rows of V. for idx in range(1, dim): # The bits of the integer POLY(I) gives the form of polynomial: degree = int(math.log(POLY[idx], 2)) #Expand this bit pattern to separate components: includ = numpy.array([val == "1" for val in bin(POLY[idx])[-degree:]]) #Calculate the remaining elements of row I as explained #in Bratley and Fox, section 2. for idy in range(degree+1, maxcol+1): newv = samples[idx, idy-degree-1].item() base = 1 for idz in range(1, degree+1): base *= 2 if includ[idz-1]: newv = newv ^ base * samples[idx, idy-idz-1].item() samples[idx, idy-1] = newv samples = samples[:dim] # Multiply columns of V by appropriate power of 2. samples *= 2**(numpy.arange(maxcol, 0, -1, dtype=int)) #RECIPD is 1/(common denominator of the elements in V). recipd = 0.5**(maxcol+1) lastq = numpy.zeros(dim, dtype=int) seed = int(seed) if seed > 1 else 1 for seed_ in range(seed): lowbit = len(bin(seed_)[2:].split("0")[-1]) lastq[:] = lastq ^ samples[:, lowbit] #Calculate the new components of QUASI. quasi = numpy.empty((dim, order)) for idx in range(order): lowbit = len(bin(seed+idx)[2:].split("0")[-1]) quasi[:, idx] = lastq * recipd lastq[:] = lastq ^ samples[:, lowbit] return quasi
def create_sobol_samples(order, dim, seed=1)
Create samples from the Sobol low-discrepancy sequence.

Args:
    order (int):
        Number of unique samples to generate.
    dim (int):
        Number of spacial dimensions. Must satisfy ``0 < dim < 41``.
    seed (int):
        Starting seed. Non-positive values are treated as 1. If
        omitted, consecutive samples are used.

Returns:
    (numpy.ndarray):
        Quasi-random vector with ``shape == (dim, order)``.
5.386804
5.369176
1.003283
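A small usage sketch; the exact values depend on the direction numbers in ``SOURCE_SAMPLES`` and ``POLY``, so only the shape and range are checked here.

samples = create_sobol_samples(order=4, dim=2, seed=1)
print(samples.shape)  # (2, 4)
# Sobol points lie in the half-open unit interval:
assert ((samples >= 0) & (samples < 1)).all()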
dim = len(funcs) tensprod_rule = create_tensorprod_function(funcs) assert hasattr(tensprod_rule, "__call__") mv_rule = create_mv_rule(tensprod_rule, dim) assert hasattr(mv_rule, "__call__") return mv_rule
def rule_generator(*funcs)
Constructor for creating multivariate quadrature generator.

Args:
    funcs (:py:data:typing.Callable):
        One dimensional integration rule where each rule returns
        ``abscissas`` and ``weights`` as one dimensional arrays. They
        must take one positional argument ``order``.

Returns:
    (:py:data:typing.Callable):
        Multidimensional integration quadrature function that takes
        the arguments ``order`` and ``sparse``, and an optional
        ``part``. The argument ``sparse`` is used to select for if
        Smolyak sparse grid is used, and ``part`` defines if subset
        of rule should be generated (for parallelization).

Example:
    >>> clenshaw_curtis = lambda order: chaospy.quad_clenshaw_curtis(
    ...     order, lower=-1, upper=1, growth=True)
    >>> gauss_legendre = lambda order: chaospy.quad_gauss_legendre(
    ...     order, lower=0, upper=1)
    >>> quad_func = chaospy.rule_generator(clenshaw_curtis, gauss_legendre)
    >>> abscissas, weights = quad_func(1)
    >>> print(numpy.around(abscissas, 4))
    [[-1.     -1.      0.      0.      1.      1.    ]
     [ 0.2113  0.7887  0.2113  0.7887  0.2113  0.7887]]
    >>> print(numpy.around(weights, 4))
    [0.1667 0.1667 0.6667 0.6667 0.1667 0.1667]
4.869684
7.647551
0.636764
dim = len(funcs) def tensprod_rule(order, part=None): order = order*numpy.ones(dim, int) values = [funcs[idx](order[idx]) for idx in range(dim)] abscissas = [numpy.array(_[0]).flatten() for _ in values] abscissas = chaospy.quad.combine(abscissas, part=part).T weights = [numpy.array(_[1]).flatten() for _ in values] weights = numpy.prod(chaospy.quad.combine(weights, part=part), -1) return abscissas, weights return tensprod_rule
def create_tensorprod_function(funcs)
Combine 1-D rules into multivariate rule using tensor product.
3.760374
3.510577
1.071155
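A hedged sketch of the tensor product: two one-point stand-in rules combine into a single two-dimensional node whose weight is the product of the one-dimensional weights. It assumes ``chaospy.quad.combine`` is importable as used in the body above.

import numpy

one_point = lambda order: (numpy.array([0.5]), numpy.array([1.0]))
rule = create_tensorprod_function([one_point, one_point])
abscissas, weights = rule(0)
print(abscissas.shape)  # (2, 1): one tensor-product node in two dimensions
print(weights)          # [1.]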
def mv_rule(order, sparse=False, part=None): if sparse: order = numpy.ones(dim, dtype=int)*order tensorprod_rule_ = lambda order, part=part:\ tensorprod_rule(order, part=part) return chaospy.quad.sparse_grid(tensorprod_rule_, order) return tensorprod_rule(order, part=part) return mv_rule
def create_mv_rule(tensorprod_rule, dim)
Convert tensor product rule into a multivariate quadrature generator.
5.065554
4.809331
1.053276
order = numpy.asarray(order, dtype=int).flatten() lower = numpy.asarray(lower).flatten() upper = numpy.asarray(upper).flatten() dim = max(lower.size, upper.size, order.size) order = numpy.ones(dim, dtype=int)*order lower = numpy.ones(dim)*lower upper = numpy.ones(dim)*upper composite = numpy.array([numpy.arange(2)]*dim) if growth: results = [ _fejer(numpy.where(order[i] == 0, 0, 2.**(order[i]+1)-2)) for i in range(dim) ] else: results = [ _fejer(order[i], composite[i]) for i in range(dim) ] abscis = [_[0] for _ in results] weight = [_[1] for _ in results] abscis = chaospy.quad.combine(abscis, part=part).T weight = chaospy.quad.combine(weight, part=part) abscis = ((upper-lower)*abscis.T + lower).T weight = numpy.prod(weight*(upper-lower), -1) assert len(abscis) == dim assert len(weight) == len(abscis.T) return abscis, weight
def quad_fejer(order, lower=0, upper=1, growth=False, part=None)
Generate the quadrature abscissas and weights in Fejer quadrature. Example: >>> abscissas, weights = quad_fejer(3, 0, 1) >>> print(numpy.around(abscissas, 4)) [[0.0955 0.3455 0.6545 0.9045]] >>> print(numpy.around(weights, 4)) [0.1804 0.2996 0.2996 0.1804]
2.864366
2.882109
0.993844
order = int(order)
if order == 0:
    return numpy.array([.5]), numpy.array([1.])
order += 2

theta = (order-numpy.arange(order+1))*numpy.pi/order
abscissas = 0.5*numpy.cos(theta) + 0.5

N, K = numpy.mgrid[:order+1, :order//2]
weights = 2*numpy.cos(2*(K+1)*theta[N])/(4*K*(K+2)+3)
if order % 2 == 0:
    weights[:, -1] *= 0.5
weights = (1-numpy.sum(weights, -1)) / order

return abscissas[1:-1], weights[1:-1]
def _fejer(order, composite=None)
r""" Backend method. Examples: >>> abscissas, weights = _fejer(0) >>> print(abscissas) [0.5] >>> print(weights) [1.] >>> abscissas, weights = _fejer(1) >>> print(abscissas) [0.25 0.75] >>> print(weights) [0.44444444 0.44444444] >>> abscissas, weights = _fejer(2) >>> print(abscissas) [0.14644661 0.5 0.85355339] >>> print(weights) [0.26666667 0.4 0.26666667] >>> abscissas, weights = _fejer(3) >>> print(abscissas) [0.0954915 0.3454915 0.6545085 0.9045085] >>> print(weights) [0.18037152 0.29962848 0.29962848 0.18037152] >>> abscissas, weights = _fejer(4) >>> print(abscissas) [0.0669873 0.25 0.5 0.75 0.9330127] >>> print(weights) [0.12698413 0.22857143 0.26031746 0.22857143 0.12698413] >>> abscissas, weights = _fejer(5) >>> print(abscissas) [0.04951557 0.1882551 0.38873953 0.61126047 0.8117449 0.95048443] >>> print(weights) [0.0950705 0.17612121 0.2186042 0.2186042 0.17612121 0.0950705 ]
4.569037
4.336087
1.053724
randoms = numpy.random.random(order*dim).reshape((dim, order)) for dim_ in range(dim): perm = numpy.random.permutation(order) # pylint: disable=no-member randoms[dim_] = (perm + randoms[dim_])/order return randoms
def create_latin_hypercube_samples(order, dim=1)
Latin Hypercube sampling. Args: order (int): The order of the latin hyper-cube. Defines the number of samples. dim (int): The number of dimensions in the latin hyper-cube. Returns (numpy.ndarray): Latin hyper-cube with ``shape == (dim, order)``.
3.999766
4.679789
0.85469
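A usage sketch of the stratification property: after sorting, each dimension holds exactly one sample in each of the ``order`` equal-width strata.

import numpy

numpy.random.seed(1234)  # assumed seed, for repeatability only
samples = create_latin_hypercube_samples(order=5, dim=2)
strata = numpy.sort(numpy.floor(samples*5), -1)
print(strata)  # each row is [0. 1. 2. 3. 4.]: one point per stratum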
if dim == 1: return create_halton_samples( order=order, dim=1, burnin=burnin, primes=primes) out = numpy.empty((dim, order), dtype=float) out[:dim-1] = create_halton_samples( order=order, dim=dim-1, burnin=burnin, primes=primes) out[dim-1] = numpy.linspace(0, 1, order+2)[1:-1] return out
def create_hammersley_samples(order, dim=1, burnin=-1, primes=())
Create samples from the Hammersley set. For ``dim == 1`` the sequence falls back to Van Der Corput sequence. Args: order (int): The order of the Hammersley sequence. Defines the number of samples. dim (int): The number of dimensions in the Hammersley sequence. burnin (int): Skip the first ``burnin`` samples. If negative, the maximum of ``primes`` is used. primes (tuple): The (non-)prime base to calculate values along each axis. If empty, growing prime values starting from 2 will be used. Returns: (numpy.ndarray): Hammersley set with ``shape == (dim, order)``.
2.15474
2.248474
0.958312
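A small sketch of the construction above: the last axis is the regular grid ``linspace(0, 1, order+2)[1:-1]`` while the remaining axes fall back to the Halton sequence.

samples = create_hammersley_samples(order=3, dim=2)
print(samples.shape)  # (2, 3)
print(samples[-1])    # [0.25 0.5  0.75]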
if threshold == 2: return [2] elif threshold < 2: return [] numbers = list(range(3, threshold+1, 2)) root_of_threshold = threshold ** 0.5 half = int((threshold+1)/2-1) idx = 0 counter = 3 while counter <= root_of_threshold: if numbers[idx]: idy = int((counter*counter-3)/2) numbers[idy] = 0 while idy < half: numbers[idy] = 0 idy += counter idx += 1 counter = 2*idx+3 return [2] + [number for number in numbers if number]
def create_primes(threshold)
Generate prime values using sieve of Eratosthenes method. Args: threshold (int): The upper bound for the size of the prime values. Returns (List[int]): All primes from 2 and up to ``threshold``.
2.975603
2.904883
1.024345
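Usage sketch for the sieve:

print(create_primes(20))  # [2, 3, 5, 7, 11, 13, 17, 19]
print(create_primes(2))   # [2]
print(create_primes(1))   # []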
if cache is None: cache = {} out = numpy.zeros(u_data.shape) # Distribution self know how to handle inverse Rosenblatt. if hasattr(distribution, "_ppf"): parameters = load_parameters( distribution, "_ppf", parameters=parameters, cache=cache) out[:] = distribution._ppf(u_data.copy(), **parameters) # Approximate inverse Rosenblatt based on cumulative distribution function. else: from .. import approximation parameters = load_parameters( distribution, "_cdf", parameters=parameters, cache=cache) out[:] = approximation.approximate_inverse( distribution, u_data.copy(), cache=cache.copy(), parameters=parameters) # Store cache. cache[distribution] = out return out
def evaluate_inverse( distribution, u_data, cache=None, parameters=None )
Evaluate inverse Rosenblatt transformation.

Args:
    distribution (Dist):
        Distribution to evaluate.
    u_data (numpy.ndarray):
        Locations where the inverse transformation is evaluated.
    parameters (:py:data:typing.Any):
        Collection of parameters to override the default ones in the
        distribution.
    cache (:py:data:typing.Any):
        A collection of previous calculations in case the same
        distribution turns up on more than one occasion.

Returns:
    The inverse Rosenblatt transformation values of ``distribution``
    at location ``u_data`` using parameters ``parameters``.
4.090717
3.725384
1.098066
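A hedged sketch: for a distribution that implements ``_ppf``, the call reduces to a point percentile transform; the input follows the usual one-row-per-dimension layout.

import numpy
import chaospy

u_data = numpy.array([[0.1, 0.5, 0.9]])
print(evaluate_inverse(chaospy.Uniform(0, 2), u_data))  # expected [[0.2 1.  1.8]]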
if (idxi, idxj, idxk) in self.hist: return self.hist[idxi, idxj, idxk] if idxi == idxk == 0 or idxi == idxj == 0: out = 0 elif chaospy.bertran.add(idxi, idxk, self.dim) < idxj \ or chaospy.bertran.add(idxi, idxj, self.dim) < idxk: out = 0 elif chaospy.bertran.add(idxi, idxj, self.dim) == idxk: out = 1 elif idxj == idxk == 0: out = self.dist.mom(chaospy.bertran.multi_index(idxi, self.dim)) elif idxk == 0: out = self.mom_110(idxi, idxj, idxk) else: out = self.mom_recurse(idxi, idxj, idxk) self.hist[idxi, idxj, idxk] = out return out
def mom_111(self, idxi, idxj, idxk)
Backend moment for i, j, k == 1, 1, 1.
2.429499
2.405323
1.010051
rank_ = min( chaospy.bertran.rank(idxi, self.dim), chaospy.bertran.rank(idxj, self.dim), chaospy.bertran.rank(idxk, self.dim) ) par, axis0 = chaospy.bertran.parent(idxj, self.dim) gpar, _ = chaospy.bertran.parent(par, self.dim, axis0) idxi_child = chaospy.bertran.child(idxi, self.dim, axis0) oneup = chaospy.bertran.child(0, self.dim, axis0) out = self(idxi_child, par, 0) for k in range(gpar, idxj): if chaospy.bertran.rank(k, self.dim) >= rank_: out -= self.mom_111(oneup, par, k) * self.mom_111(idxi, k, 0) return out
def mom_110(self, idxi, idxj, idxk)
Backend moment for i, j, k == 1, 1, 0.
3.399202
3.388896
1.003041
rank_ = min( chaospy.bertran.rank(idxi, self.dim), chaospy.bertran.rank(idxj, self.dim), chaospy.bertran.rank(idxk, self.dim) ) par, axis0 = chaospy.bertran.parent(idxk, self.dim) gpar, _ = chaospy.bertran.parent(par, self.dim, axis0) idxi_child = chaospy.bertran.child(idxi, self.dim, axis0) oneup = chaospy.bertran.child(0, self.dim, axis0) out1 = self.mom_111(idxi_child, idxj, par) out2 = self.mom_111( chaospy.bertran.child(oneup, self.dim, axis0), par, par) for k in range(gpar, idxk): if chaospy.bertran.rank(k, self.dim) >= rank_: out1 -= self.mom_111(oneup, k, par) \ * self.mom_111(idxi, idxj, k) out2 -= self.mom_111(oneup, par, k) \ * self(oneup, k, par) return out1 / out2
def mom_recurse(self, idxi, idxj, idxk)
Backend moment main loop.
2.939461
2.914077
1.008711
assert dist.__class__.__name__ == "Copula"
trans = dist.prm["trans"]
assert trans.__class__.__name__ == "nataf"
vals = numpy.array(vals)

cov = trans.prm["C"]
cov = numpy.dot(cov, cov.T)

marginal = dist.prm["dist"]
dim = len(dist)

orth = chaospy.orthogonal.orth_ttr(order, marginal, sort="GR")

r = list(range(dim))  # list so the rotation below works in Python 3

index = [1] + [0]*(dim-1)

nataf = chaospy.dist.Nataf(marginal, cov, r)
samples_ = marginal.inv(nataf.fwd(samples))
poly, coeffs = chaospy.collocation.fit_regression(
    orth, samples_, vals, retall=1)

V = Var(poly, marginal, **kws)

out = numpy.zeros((dim,) + poly.shape)
out[0] = Var(E_cond(poly, index, marginal, **kws),
             marginal, **kws)/(V+(V == 0))*(V != 0)

for i in range(1, dim):
    r = r[1:] + r[:1]
    index = index[-1:] + index[:-1]

    nataf = chaospy.dist.Nataf(marginal, cov, r)
    samples_ = marginal.inv(nataf.fwd(samples))
    poly, coeffs = chaospy.collocation.fit_regression(
        orth, samples_, vals, retall=1)

    out[i] = Var(E_cond(poly, index, marginal, **kws),
                 marginal, **kws)/(V+(V == 0))*(V != 0)

return out
def Sens_m_nataf(order, dist, samples, vals, **kws)
Variance-based decomposition through the Nataf distribution.

Generates first order sensitivity indices.

Args:
    order (int):
        Polynomial order used in ``orth_ttr``.
    dist (Copula):
        Assumed to be a Nataf copula with independent components.
    samples (numpy.ndarray):
        Samples used for evaluation (typically generated from
        ``dist``).
    vals (numpy.ndarray):
        Evaluations of the model for given samples.

Returns:
    (numpy.ndarray):
        Sensitivity indices with shape
        ``(len(dist),) + vals.shape[1:]``.
3.477565
3.419044
1.017116
order = numpy.array(order)*numpy.ones(len(dist), dtype=int)+1
_, _, coeff1, coeff2 = chaospy.quad.generate_stieltjes(
    dist, numpy.max(order), accuracy=accuracy, retall=True, **kws)

dimensions = len(dist)
abscissas, weights = _golub_welsch(order, coeff1, coeff2)

if dimensions == 1:
    abscissa = numpy.reshape(abscissas, (1, order[0]))
    weight = numpy.reshape(weights, (order[0],))
else:
    abscissa = chaospy.quad.combine(abscissas).T
    weight = numpy.prod(chaospy.quad.combine(weights), -1)

assert len(abscissa) == dimensions
assert len(weight) == len(abscissa.T)
return abscissa, weight
def quad_golub_welsch(order, dist, accuracy=100, **kws)
Golub-Welsch algorithm for creating quadrature nodes and weights. Args: order (int): Quadrature order dist (Dist): Distribution nodes and weights are found for with `dim=len(dist)` accuracy (int): Accuracy used in discretized Stieltjes procedure. Will be increased by one for each iteration. Returns: (numpy.ndarray, numpy.ndarray): Optimal collocation nodes with `x.shape=(dim, order+1)` and weights with `w.shape=(order+1,)`. Examples: >>> Z = chaospy.Normal() >>> x, w = chaospy.quad_golub_welsch(3, Z) >>> print(numpy.around(x, 4)) [[-2.3344 -0.742 0.742 2.3344]] >>> print(numpy.around(w, 4)) [0.0459 0.4541 0.4541 0.0459] >>> Z = chaospy.J(chaospy.Uniform(), chaospy.Uniform()) >>> x, w = chaospy.quad_golub_welsch(1, Z) >>> print(numpy.around(x, 4)) [[0.2113 0.2113 0.7887 0.7887] [0.2113 0.7887 0.2113 0.7887]] >>> print(numpy.around(w, 4)) [0.25 0.25 0.25 0.25]
3.542701
3.990164
0.887859
abscissas, weights = [], []
for dim, order in enumerate(orders):

    if order:
        bands = numpy.zeros((2, order))
        bands[0] = coeff1[dim, :order]
        bands[1, :-1] = numpy.sqrt(coeff2[dim, 1:order])
        vals, vecs = scipy.linalg.eig_banded(bands, lower=True)

        abscissa, weight = vals.real, vecs[0, :]**2
        indices = numpy.argsort(abscissa)
        abscissa, weight = abscissa[indices], weight[indices]

    else:
        abscissa, weight = numpy.array([coeff1[dim, 0]]), numpy.array([1.])

    abscissas.append(abscissa)
    weights.append(weight)
return abscissas, weights
def _golub_welsch(orders, coeff1, coeff2)
Recurrence coefficients to abscissas and weights.
2.735854
2.531318
1.080802
if isinstance(left, Dist): if left in cache: left = cache[left] else: left = evaluation.evaluate_bound(left, xloc, cache=cache) else: left = (numpy.array(left).T * numpy.ones((2,)+xloc.shape).T).T if isinstance(right, Dist): if right in cache: right = cache[right] else: right = evaluation.evaluate_bound(right, xloc, cache=cache) else: right = (numpy.array(right).T * numpy.ones((2,)+xloc.shape).T).T return left[0], right[1]
def _bnd(self, xloc, left, right, cache)
Distribution bounds. Example: >>> print(chaospy.Uniform().range([-2, 0, 2, 4])) [[0. 0. 0. 0.] [1. 1. 1. 1.]] >>> print(chaospy.Trunc(chaospy.Uniform(), 0.6).range([-2, 0, 2, 4])) [[0. 0. 0. 0. ] [0.6 0.6 0.6 0.6]] >>> print(chaospy.Trunc(0.4, chaospy.Uniform()).range([-2, 0, 2, 4])) [[0.4 0.4 0.4 0.4] [1. 1. 1. 1. ]]
2.274262
2.2921
0.992218
if isinstance(left, Dist) and left in cache: left = cache[left] if isinstance(right, Dist) and right in cache: right = cache[right] if isinstance(left, Dist): if isinstance(right, Dist): raise StochasticallyDependentError( "under-defined distribution {} or {}".format(left, right)) else: left = (numpy.array(left).T*numpy.ones(xloc.shape).T).T uloc1 = evaluation.evaluate_forward(right, left, cache=cache.copy()) uloc2 = evaluation.evaluate_forward(right, xloc, cache=cache) return (uloc2-uloc1)/(1-uloc1) right = (numpy.array(right).T*numpy.ones(xloc.shape).T).T uloc1 = evaluation.evaluate_forward(left, right, cache=cache.copy()) uloc2 = evaluation.evaluate_forward(left, xloc, cache=cache) return uloc2/uloc1
def _cdf(self, xloc, left, right, cache)
Cumulative distribution function. Example: >>> print(chaospy.Uniform().fwd([-0.5, 0.3, 0.7, 1.2])) [0. 0.3 0.7 1. ] >>> print(chaospy.Trunc(chaospy.Uniform(), 0.4).fwd([-0.5, 0.2, 0.8, 1.2])) [0. 0.5 1. 1. ] >>> print(chaospy.Trunc(0.6, chaospy.Uniform()).fwd([-0.5, 0.2, 0.8, 1.2])) [0. 0. 0.5 1. ]
2.537047
2.541621
0.9982
if isinstance(left, Dist) and left in cache: left = cache[left] if isinstance(right, Dist) and right in cache: right = cache[right] if isinstance(left, Dist): if isinstance(right, Dist): raise StochasticallyDependentError( "under-defined distribution {} or {}".format(left, right)) elif not isinstance(right, Dist): raise StochasticallyDependentError( "truncated variable indirectly depends on underlying variable") else: left = (numpy.array(left).T*numpy.ones(q.shape).T).T uloc = evaluation.evaluate_forward(right, left) return evaluation.evaluate_inverse(right, q*(1-uloc)+uloc, cache=cache) right = (numpy.array(right).T*numpy.ones(q.shape).T).T uloc = evaluation.evaluate_forward(left, right, cache=cache.copy()) return evaluation.evaluate_inverse(left, q*uloc, cache=cache)
def _ppf(self, q, left, right, cache)
Point percentile function. Example: >>> print(chaospy.Uniform().inv([0.1, 0.2, 0.9])) [0.1 0.2 0.9] >>> print(chaospy.Trunc(chaospy.Uniform(), 0.4).inv([0.1, 0.2, 0.9])) [0.04 0.08 0.36] >>> print(chaospy.Trunc(0.6, chaospy.Uniform()).inv([0.1, 0.2, 0.9])) [0.64 0.68 0.96]
3.401288
3.280346
1.036869
if isinstance(poly, distributions.Dist): x = polynomials.variable(len(poly)) poly, dist = x, poly else: poly = polynomials.Poly(poly) if fisher: adjust = 3 else: adjust = 0 shape = poly.shape poly = polynomials.flatten(poly) m1 = E(poly, dist) m2 = E(poly**2, dist) m3 = E(poly**3, dist) m4 = E(poly**4, dist) out = (m4-4*m3*m1 + 6*m2*m1**2 - 3*m1**4) /\ (m2**2-2*m2*m1**2+m1**4) - adjust out = numpy.reshape(out, shape) return out
def Kurt(poly, dist=None, fisher=True, **kws)
Kurtosis operator.

Element by element 4th order statistics of a distribution or
polynomial.

Args:
    poly (Poly, Dist):
        Input to take kurtosis on.
    dist (Dist):
        Defines the space the kurtosis is taken on. It is ignored if
        ``poly`` is a distribution.
    fisher (bool):
        If True, Fisher's definition is used (normal -> 0.0). If
        False, Pearson's definition is used (normal -> 3.0).

Returns:
    (numpy.ndarray):
        Element for element kurtosis along ``poly``, where
        ``kurtosis.shape == poly.shape``.

Examples:
    >>> dist = chaospy.J(chaospy.Gamma(1, 1), chaospy.Normal(0, 2))
    >>> print(numpy.around(chaospy.Kurt(dist), 4))
    [6. 0.]
    >>> print(numpy.around(chaospy.Kurt(dist, fisher=False), 4))
    [9. 3.]
    >>> x, y = chaospy.variable(2)
    >>> poly = chaospy.Poly([1, x, y, 10*x*y])
    >>> print(numpy.around(chaospy.Kurt(poly, dist), 4))
    [nan  6.  0. 15.]
3.144041
3.239966
0.970393
if stop is None:
    start, stop = numpy.array(0), start

start = numpy.array(start, dtype=int)
stop = numpy.array(stop, dtype=int)
dim = max(start.size, stop.size, dim)

indices = numpy.array(chaospy.bertran.bindex(
    numpy.min(start), 2*numpy.max(stop), dim, sort, cross_truncation))

if start.size == 1:
    below = numpy.sum(indices, -1) >= start
else:
    start = numpy.ones(dim, dtype=int)*start
    below = numpy.all(indices-start >= 0, -1)

if stop.size == 1:
    above = numpy.sum(indices, -1) <= stop.item()
else:
    stop = numpy.ones(dim, dtype=int)*stop
    above = numpy.all(stop-indices >= 0, -1)

pool = list(indices[above*below])

x = numpy.zeros(len(pool), dtype=int)
x[0] = 1
A = {}
for I in pool:
    I = tuple(I)
    A[I] = x
    x = numpy.roll(x, 1)

return Poly(A, dim)
def basis(start, stop=None, dim=1, sort="G", cross_truncation=1.)
Create an N-dimensional unit polynomial basis. Args: start (int, numpy.ndarray): the minimum polynomial to include. If int is provided, set as lowest total order. If array of int, set as lower order along each axis. stop (int, numpy.ndarray): the maximum shape included. If omitted: ``stop <- start; start <- 0`` If int is provided, set as largest total order. If array of int, set as largest order along each axis. dim (int): dim of the basis. Ignored if array is provided in either start or stop. sort (str): The polynomial ordering where the letters ``G``, ``I`` and ``R`` can be used to set grade, inverse and reverse to the ordering. For ``basis(start=0, stop=2, dim=2, order=order)`` we get: ====== ================== order output ====== ================== "" [1 y y^2 x xy x^2] "G" [1 y x y^2 xy x^2] "I" [x^2 xy x y^2 y 1] "R" [1 x x^2 y xy y^2] "GIR" [y^2 xy x^2 y x 1] ====== ================== cross_truncation (float): Use hyperbolic cross truncation scheme to reduce the number of terms in expansion. Returns: (Poly) : Polynomial array. Examples: >>> print(chaospy.basis(4, 4, 2)) [q0^4, q0^3q1, q0^2q1^2, q0q1^3, q1^4] >>> print(chaospy.basis([1, 1], [2, 2])) [q0q1, q0^2q1, q0q1^2, q0^2q1^2]
2.988404
3.141381
0.951302
if len(args) == 1: low, high = 0, args[0] else: low, high = args[:2] core_old = poly.A core_new = {} for key in poly.keys: if low <= numpy.sum(key) < high: core_new[key] = core_old[key] return Poly(core_new, poly.dim, poly.shape, poly.dtype)
def cutoff(poly, *args)
Remove polynomial components with order outside a given interval. Args: poly (Poly): Input data. low (int): The lowest order that is allowed to be included. Defaults to 0. high (int): The upper threshold for the cutoff range. Returns: (Poly): The same as `P`, except that all terms that have a order not within the bound `low <= order < high` are removed. Examples: >>> poly = chaospy.prange(4, 1) + chaospy.prange(4, 2)[::-1] >>> print(poly) # doctest: +SKIP [q1^3+1, q0+q1^2, q0^2+q1, q0^3+1] >>> print(chaospy.cutoff(poly, 3)) # doctest: +SKIP [1, q0+q1^2, q0^2+q1, 1] >>> print(chaospy.cutoff(poly, 1, 3)) # doctest: +SKIP [0, q0+q1^2, q0^2+q1, 0]
3.537257
3.744237
0.94472
P, Q = Poly(P), Poly(Q)

if not chaospy.poly.is_decomposed(Q):
    return differential(P, chaospy.poly.decompose(Q)).sum(0)

if Q.shape:
    return Poly([differential(P, q) for q in Q])

if Q.dim > P.dim:
    P = chaospy.poly.setdim(P, Q.dim)
else:
    Q = chaospy.poly.setdim(Q, P.dim)

qkey = Q.keys[0]
A = {}
for key in P.keys:
    newkey = numpy.array(key) - numpy.array(qkey)
    if numpy.any(newkey < 0):
        continue
    A[tuple(newkey)] = P.A[key]*numpy.prod(
        [fac(key[i], exact=True)/fac(newkey[i], exact=True)
         for i in range(P.dim)])
return Poly(A, P.dim, P.shape, P.dtype)
def differential(P, Q)
Polynomial differential operator.

Args:
    P (Poly):
        Polynomial to be differentiated.
    Q (Poly):
        Polynomial to differentiate by. Decomposed into monomial
        terms if it is not already decomposed. If polynomial array,
        the output is the Jacobian matrix.
4.384303
4.235562
1.035117
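A hedged usage sketch of the operator (term ordering in the printout may differ):

import chaospy

x, y = chaospy.variable(2)
poly = x**2 + x*y
print(differential(poly, x))  # expected 2q0+q1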