code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
if unit not in ('kelvin', 'celsius', 'fahrenheit'):
    raise ValueError("Invalid value for parameter 'unit'")
minimum = min(self._purge_none_samples(self.temperature_series()),
              key=itemgetter(1))
if unit == 'kelvin':
    result = minimum
if unit == 'celsius':
    result = (minimum[0], temputils.kelvin_to_celsius(minimum[1]))
if unit == 'fahrenheit':
    result = (minimum[0], temputils.kelvin_to_fahrenheit(minimum[1]))
return result
def min_temperature(self, unit='kelvin')
Returns a tuple containing the min value in the temperature series preceded by its timestamp
:param unit: the unit of measure for the temperature values. May be among: '*kelvin*' (default), '*celsius*' or '*fahrenheit*'
:type unit: str
:returns: a tuple
:raises: ValueError when invalid values are provided for the unit of measure or the measurement series is empty
2.998776
2.682125
1.11806
if unit not in ('kelvin', 'celsius', 'fahrenheit'):
    raise ValueError("Invalid value for parameter 'unit'")
average = self._average(self._purge_none_samples(
    self.temperature_series()))
if unit == 'kelvin':
    result = average
if unit == 'celsius':
    result = temputils.kelvin_to_celsius(average)
if unit == 'fahrenheit':
    result = temputils.kelvin_to_fahrenheit(average)
return result
def average_temperature(self, unit='kelvin')
Returns the average value in the temperature series
:param unit: the unit of measure for the temperature values. May be among: '*kelvin*' (default), '*celsius*' or '*fahrenheit*'
:type unit: str
:returns: a float
:raises: ValueError when invalid values are provided for the unit of measure or the measurement series is empty
2.982603
2.959298
1.007875
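A minimal usage sketch for the two temperature accessors above; `series` stands in for whatever object exposes this API (the holder object name is an assumption, not part of the source):

    # 'series' is a hypothetical station/historian object exposing the API above.
    t_min = series.min_temperature(unit='celsius')      # -> (timestamp, value) tuple
    t_avg = series.average_temperature(unit='celsius')  # -> plain float
    print(t_min, t_avg)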
return max(self._purge_none_samples(self.rain_series()),
           key=lambda item: item[1])
def max_rain(self)
Returns a tuple containing the max value in the rain series preceded by its timestamp
:returns: a tuple
:raises: ValueError when the measurement series is empty
23.049131
16.254669
1.418001
return json.dumps({"reception_time": self._reception_time,
                   "Location": json.loads(self._location.to_JSON()),
                   "Weather": json.loads(self._weather.to_JSON())
                   })
def to_JSON(self)
Dumps object fields into a JSON formatted string
:returns: the JSON string
4.321323
5.516061
0.783407
root_node = self._to_DOM()
if xmlns:
    xmlutils.annotate_with_XMLNS(root_node,
                                 OBSERVATION_XMLNS_PREFIX,
                                 OBSERVATION_XMLNS_URL)
return xmlutils.DOM_node_to_XML(root_node, xml_declaration)
def to_XML(self, xml_declaration=True, xmlns=True)
Dumps object fields to an XML-formatted string. The 'xml_declaration' switch enables printing of a leading standard XML line containing XML version and encoding. The 'xmlns' switch enables printing of qualified XMLNS prefixes.
:param xml_declaration: if ``True`` (default) prints a leading XML declaration line
:type xml_declaration: bool
:param xmlns: if ``True`` (default) prints full XMLNS prefixes
:type xmlns: bool
:returns: an XML-formatted string
4.193828
5.447153
0.769912
root_node = ET.Element("observation")
reception_time_node = ET.SubElement(root_node, "reception_time")
reception_time_node.text = str(self._reception_time)
root_node.append(self._location._to_DOM())
root_node.append(self._weather._to_DOM())
return root_node
def _to_DOM(self)
Dumps object data to a fully traversable DOM representation of the object.
:returns: a ``xml.etree.Element`` object
2.587985
2.922067
0.885669
lat = str(params_dict['lat'])
lon = str(params_dict['lon'])
params = dict(lat=lat, lon=lon)

# build request URL
uri = http_client.HttpClient.to_url(UV_INDEX_URL,
                                    self._API_key,
                                    None)
_, json_data = self._client.cacheable_get_json(uri, params=params)
return json_data
def get_uvi(self, params_dict)
Invokes the UV Index endpoint
:param params_dict: dict of parameters
:returns: a string containing raw JSON data
:raises: *ValueError*, *APICallError*
5.583093
6.19362
0.901426
lat = str(params_dict['lat'])
lon = str(params_dict['lon'])
start = str(params_dict['start'])
end = str(params_dict['end'])
params = dict(lat=lat, lon=lon, start=start, end=end)

# build request URL
uri = http_client.HttpClient.to_url(UV_INDEX_HISTORY_URL,
                                    self._API_key,
                                    None)
_, json_data = self._client.cacheable_get_json(uri, params=params)
return json_data
def get_uvi_history(self, params_dict)
Invokes the UV Index History endpoint
:param params_dict: dict of parameters
:returns: a string containing raw JSON data
:raises: *ValueError*, *APICallError*
4.073776
4.277472
0.952379
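A hedged sketch of driving the two UV endpoints above; the client instance name and the epoch timestamps are illustrative assumptions:

    # 'uv_client' is a hypothetical instance of the class defining get_uvi/get_uvi_history.
    current = uv_client.get_uvi({'lat': '43.75', 'lon': '11.25'})
    history = uv_client.get_uvi_history({'lat': '43.75', 'lon': '11.25',
                                         'start': '1498049953',  # epoch seconds (assumed)
                                         'end': '1498653753'})
    # Both calls return raw JSON strings, per the docstrings.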
return [
    cls.TEMPERATURE,
    cls.PRESSURE,
    cls.HUMIDITY,
    cls.WIND_SPEED,
    cls.WIND_DIRECTION,
    cls.CLOUDS
]
def items(cls)
All values for this enum
:return: list of str
3.368231
3.342222
1.007782
return [
    cls.GREATER_THAN,
    cls.GREATER_THAN_EQUAL,
    cls.LESS_THAN,
    cls.LESS_THAN_EQUAL,
    cls.EQUAL,
    cls.NOT_EQUAL
]
def items(cls)
All values for this enum
:return: list of str
2.615578
2.641977
0.990008
if JSON_string is None:
    raise parse_response_error.ParseResponseError('JSON data is None')
d = json.loads(JSON_string)
# Check if server returned errors: this check overcomes the lack of use
# of HTTP error status codes by the OWM API 2.5. This mechanism is
# supposed to be deprecated as soon as the API fully adopts HTTP for
# conveying errors to the clients
if 'message' in d and 'cod' in d:
    if d['cod'] == "404":
        print("OWM API: data not found - response payload: " +
              json.dumps(d), d['cod'])
        return None
    elif d['cod'] != "200":
        raise api_response_error.APIResponseError(
            "OWM API: error - response payload: " + json.dumps(d),
            d['cod'])
try:
    place = location.location_from_dictionary(d)
except KeyError:
    raise parse_response_error.ParseResponseError(
        ''.join([__name__, ': impossible to read location info from JSON data']))
# Handle the case when no results are found
if 'count' in d and d['count'] == "0":
    weathers = []
elif 'cnt' in d and d['cnt'] == 0:
    weathers = []
else:
    if 'list' in d:
        try:
            weathers = [weather.weather_from_dictionary(item)
                        for item in d['list']]
        except KeyError:
            raise parse_response_error.ParseResponseError(
                ''.join([__name__, ': impossible to read weather '
                         'info from JSON data']))
    else:
        raise parse_response_error.ParseResponseError(
            ''.join([__name__, ': impossible to read weather '
                     'list from JSON data']))
current_time = int(round(time.time()))
return forecast.Forecast(None, current_time, place, weathers)
def parse_JSON(self, JSON_string)
Parses a *Forecast* instance out of raw JSON data. Only certain properties of the data are used: if these properties are not found or cannot be parsed, an error is issued.
:param JSON_string: a raw JSON string
:type JSON_string: str
:returns: a *Forecast* instance or ``None`` if no data is available
:raises: *ParseResponseError* if it is impossible to find or parse the data needed to build the result, *APIResponseError* if the JSON string embeds an HTTP status error
3.809572
3.649621
1.043827
return evaluation.evaluate_density(
    dist, numpy.arcsinh(x), cache=cache)/numpy.sqrt(1+x*x)
def _pdf(self, x, dist, cache)
Probability density function.
8.854574
9.185193
0.964005
args = list(args)

# expand args to match dim
if len(args) < poly.dim:
    args = args + [np.nan]*(poly.dim-len(args))
elif len(args) > poly.dim:
    raise ValueError("too many arguments")

# Find and perform substitutions, if any
x0, x1 = [], []
for idx, arg in enumerate(args):
    if isinstance(arg, Poly):
        poly_ = Poly({
            tuple(np.eye(poly.dim)[idx]): np.array(1)
        })
        x0.append(poly_)
        x1.append(arg)
        args[idx] = np.nan
if x0:
    poly = call(poly, args)
    return substitute(poly, x0, x1)

# Create masks
masks = np.zeros(len(args), dtype=bool)
for idx, arg in enumerate(args):
    if np.ma.is_masked(arg) or np.any(np.isnan(arg)):
        masks[idx] = True
        args[idx] = 0

shape = np.array(
    args[
        np.argmax(
            [np.prod(np.array(arg).shape) for arg in args]
        )
    ]
).shape
args = np.array([np.ones(shape, dtype=int)*arg for arg in args])

A = {}
for key in poly.keys:
    key_ = np.array(key)*(1-masks)
    val = np.outer(poly.A[key], np.prod((args.T**key_).T, axis=0))
    val = np.reshape(val, poly.shape + tuple(shape))
    val = np.where(val != val, 0, val)
    mkey = tuple(np.array(key)*(masks))
    if not mkey in A:
        A[mkey] = val
    else:
        A[mkey] = A[mkey] + val
out = Poly(A, poly.dim, None, None)

if out.keys and not np.sum(out.keys):
    out = out.A[out.keys[0]]
elif not out.keys:
    out = np.zeros(out.shape, dtype=out.dtype)
return out
def call(poly, args)
Evaluate a polynomial along specified axes.

Args:
    poly (Poly): Input polynomial.
    args (numpy.ndarray): Argument to be evaluated. Masked values keep the variable intact.

Returns:
    (Poly, numpy.ndarray): If masked values are used the Poly is returned. Else a numpy array matching the polynomial's shape is returned.
3.293467
3.310491
0.994858
x0, x1 = map(Poly, [x0, x1])
dim = np.max([p.dim for p in [P, x0, x1]])
dtype = chaospy.poly.typing.dtyping(P.dtype, x0.dtype, x1.dtype)
P, x0, x1 = [chaospy.poly.dimension.setdim(p, dim) for p in [P, x0, x1]]

if x0.shape:
    x0 = [x for x in x0]
else:
    x0 = [x0]
if x1.shape:
    x1 = [x for x in x1]
else:
    x1 = [x1]

# Check if substitution is needed.
valid = False
C = [x.keys[0].index(1) for x in x0]
for key in P.keys:
    if np.any([key[c] for c in C]):
        valid = True
        break
if not valid:
    return P

dims = [tuple(np.array(x.keys[0]) != 0).index(True) for x in x0]

dec = is_decomposed(P)
if not dec:
    P = decompose(P)

P = chaospy.poly.dimension.dimsplit(P)

shape = P.shape
P = [p for p in chaospy.poly.shaping.flatten(P)]

for i in range(len(P)):
    for j in range(len(dims)):
        if P[i].keys and P[i].keys[0][dims[j]]:
            P[i] = x1[j].__pow__(P[i].keys[0][dims[j]])
            break
P = Poly(P, dim, None, dtype)
P = chaospy.poly.shaping.reshape(P, shape)
P = chaospy.poly.collection.prod(P, 0)
if not dec:
    P = chaospy.poly.collection.sum(P, 0)
return P
def substitute(P, x0, x1, V=0)
Substitute a variable in a polynomial array.

Args:
    P (Poly): Input data.
    x0 (Poly, int): The variable to substitute. Indicated with either unit variable, e.g. `x`, `y`, `z`, etc. or through an integer matching the unit variable's dimension, e.g. `x==0`, `y==1`, `z==2`, etc.
    x1 (Poly): Simple polynomial to substitute `x0` in `P`. If `x1` is a polynomial array, an error will be raised.

Returns:
    (Poly): The resulting polynomial (array) where `x0` is replaced with `x1`.

Examples:
    >>> x,y = cp.variable(2)
    >>> P = cp.Poly([y*y-1, y*x])
    >>> print(cp.substitute(P, y, x+1))
    [q0^2+2q0, q0^2+q0]

    With multiple substitutions:
    >>> print(cp.substitute(P, [x,y], [y,x]))
    [q0^2-1, q0q1]
3.113055
3.197649
0.973545
if P.shape:
    return min([is_decomposed(poly) for poly in P])
return len(P.keys) <= 1
def is_decomposed(P)
Check if a polynomial (array) is on component form.

Args:
    P (Poly): Input data.

Returns:
    (bool): True if all polynomials in ``P`` are on component form.

Examples:
    >>> x,y = cp.variable(2)
    >>> print(cp.is_decomposed(cp.Poly([1,x,x*y])))
    True
    >>> print(cp.is_decomposed(cp.Poly([x+1,x*y])))
    False
7.765771
10.465626
0.742026
P = P.copy()
if not P:
    return P
out = [Poly({key: P.A[key]}) for key in P.keys]
return Poly(out, None, None, None)
def decompose(P)
Decompose a polynomial to component form.

In arrays, missing values are padded with 0 to make the decomposition compatible with ``chaospy.sum(Q, 0)``.

Args:
    P (Poly): Input data.

Returns:
    (Poly): Decomposed polynomial with `P.shape==(M,)+Q.shape` where `M` is the number of components in `P`.

Examples:
    >>> q = cp.variable()
    >>> P = cp.Poly([q**2-1, 2])
    >>> print(P)
    [q0^2-1, 2]
    >>> print(cp.decompose(P))
    [[-1, 2], [q0^2, 0]]
    >>> print(cp.sum(cp.decompose(P), 0))
    [q0^2-1, 2]
9.090894
12.346194
0.736332
logger = logging.getLogger(__name__)
assert len(k_data) == len(distribution), (
    "distribution %s is not of length %d" % (distribution, len(k_data)))
assert len(k_data.shape) == 1

if numpy.all(k_data == 0):
    return 1.

def cache_key(distribution):
    return (tuple(k_data), distribution)

if cache is None:
    cache = {}
else:
    if cache_key(distribution) in cache:
        return cache[cache_key(distribution)]

from .. import baseclass
try:
    parameters = load_parameters(
        distribution, "_mom", parameters, cache, cache_key)
    out = distribution._mom(k_data, **parameters)

except baseclass.StochasticallyDependentError:
    logger.warning(
        "Distribution %s has stochastic dependencies; "
        "Approximating moments with quadrature.", distribution)
    from .. import approximation
    out = approximation.approximate_moment(distribution, k_data)

if isinstance(out, numpy.ndarray):
    out = out.item()

cache[cache_key(distribution)] = out
return out
def evaluate_moment(distribution, k_data, parameters=None, cache=None)
Evaluate raw statistical moments.

Args:
    distribution (Dist): Distribution to evaluate.
    k_data (numpy.ndarray): Moment orders to evaluate the moment at.
    parameters (:py:data:typing.Any): Collection of parameters to override the default ones in the distribution.
    cache (:py:data:typing.Any): A collection of previous calculations in case the same distribution turns up on more than one occasion.

Returns:
    The raw statistical moment of ``distribution`` of order ``k_data`` using parameters ``parameters``.
3.187715
3.269582
0.974961
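``evaluate_moment`` is normally reached through the public ``mom`` method documented later in this section; a small sketch using standard chaospy calls:

    import numpy
    import chaospy

    # Raw moments E[X**k] of a standard normal: E[X^0]=1, E[X^1]=0, E[X^2]=1.
    print(chaospy.Normal(0, 1).mom(numpy.array([0, 1, 2])))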
from .mv_mul import MvMul
length = max(left, right)
if length == 1:
    return Mul(left, right)
return MvMul(left, right)
def mul(left, right)
Distribution multiplication.

Args:
    left (Dist, numpy.ndarray): left hand side.
    right (Dist, numpy.ndarray): right hand side.
5.679591
6.46332
0.878742
left = evaluation.get_forward_cache(left, cache)
right = evaluation.get_forward_cache(right, cache)

if isinstance(left, Dist):
    if isinstance(right, Dist):
        raise evaluation.DependencyError(
            "under-defined distribution {} or {}".format(left, right))
elif not isinstance(right, Dist):
    if self.matrix:
        return 0.5*(numpy.dot(left, right) == xloc)
    return 0.5*(left*right == xloc)
else:
    if self.matrix:
        Ci = numpy.linalg.inv(left)
        xloc = numpy.dot(Ci, xloc)
        assert len(xloc) == len(numpy.dot(Ci, xloc))
    else:
        left = (numpy.asfarray(left).T+numpy.zeros(xloc.shape).T).T
        valids = left != 0
        xloc.T[valids.T] = xloc.T[valids.T]/left.T[valids.T]
    uloc = evaluation.evaluate_forward(right, xloc, cache=cache)
    if not self.matrix:
        uloc = numpy.where(left.T >= 0, uloc.T, 1-uloc.T).T
    assert uloc.shape == xloc.shape
    return uloc

if self.matrix:
    Ci = numpy.linalg.inv(right)
    xloc = numpy.dot(xloc.T, Ci).T
else:
    right = (numpy.asfarray(right).T+numpy.zeros(xloc.shape).T).T
    valids = right != 0
    xloc.T[valids.T] = xloc.T[valids.T]/right.T[valids.T]

uloc = evaluation.evaluate_forward(left, xloc, cache=cache)
if not self.matrix:
    uloc = numpy.where(right.T >= 0, uloc.T, 1-uloc.T).T
assert uloc.shape == xloc.shape
return uloc
def _cdf(self, xloc, left, right, cache)
Cumulative distribution function.

Example:
    >>> print(chaospy.Uniform().fwd([-0.5, 0.5, 1.5, 2.5]))
    [0.  0.5 1.  1. ]
    >>> print(Mul(chaospy.Uniform(), 2).fwd([-0.5, 0.5, 1.5, 2.5]))
    [0.   0.25 0.75 1.  ]
    >>> print(Mul(2, chaospy.Uniform()).fwd([-0.5, 0.5, 1.5, 2.5]))
    [0.   0.25 0.75 1.  ]
    >>> print(Mul(1, 1.5).fwd([-0.5, 0.5, 1.5, 2.5]))
    [0.  0.  0.5 1. ]
    >>> dist = chaospy.Mul([2, 1], chaospy.Iid(chaospy.Uniform(), 2))
    >>> print(dist.fwd([[0.5, 0.6, 1.5], [0.5, 0.6, 1.5]]))
    [[0.25 0.3  0.75]
     [0.5  0.6  1.  ]]
    >>> dist = chaospy.Mul(chaospy.Iid(chaospy.Uniform(), 2), [1, 2])
    >>> print(dist.fwd([[0.5, 0.6, 1.5], [0.5, 0.6, 1.5]]))
    [[0.5  0.6  1.  ]
     [0.25 0.3  0.75]]
2.435273
2.481699
0.981292
left = evaluation.get_inverse_cache(left, cache)
right = evaluation.get_inverse_cache(right, cache)

if isinstance(left, Dist):
    if isinstance(right, Dist):
        raise evaluation.DependencyError(
            "under-defined distribution {} or {}".format(left, right))
elif not isinstance(right, Dist):
    if self.matrix:
        return numpy.dot(left, right)
    return left*right
else:
    if not self.matrix:
        uloc = numpy.where(numpy.asfarray(left).T > 0, uloc.T, 1-uloc.T).T
    xloc = evaluation.evaluate_inverse(right, uloc, cache=cache)
    if self.matrix:
        xloc = numpy.dot(left, xloc)
    else:
        xloc *= left
    return xloc

if not self.matrix:
    uloc = numpy.where(numpy.asfarray(right).T > 0, uloc.T, 1-uloc.T).T
xloc = evaluation.evaluate_inverse(left, uloc, cache=cache)
if self.matrix:
    xloc = numpy.dot(xloc.T, right).T
else:
    xloc *= right
assert uloc.shape == xloc.shape
return xloc
def _ppf(self, uloc, left, right, cache)
Point percentile function.

Example:
    >>> print(chaospy.Uniform().inv([0.1, 0.2, 0.9]))
    [0.1 0.2 0.9]
    >>> print(Mul(chaospy.Uniform(), 2).inv([0.1, 0.2, 0.9]))
    [0.2 0.4 1.8]
    >>> print(Mul(2, chaospy.Uniform()).inv([0.1, 0.2, 0.9]))
    [0.2 0.4 1.8]
    >>> print(Mul(2, 2).inv([0.1, 0.2, 0.9]))
    [4. 4. 4.]
    >>> dist = chaospy.Mul([2, 1], chaospy.Iid(chaospy.Uniform(), 2))
    >>> print(dist.inv([[0.5, 0.6, 0.7], [0.5, 0.6, 0.7]]))
    [[1.  1.2 1.4]
     [0.5 0.6 0.7]]
    >>> dist = chaospy.Mul(chaospy.Iid(chaospy.Uniform(), 2), [1, 2])
    >>> print(dist.inv([[0.5, 0.6, 0.7], [0.5, 0.6, 0.7]]))
    [[0.5 0.6 0.7]
     [1.  1.2 1.4]]
2.616374
2.795278
0.935998
left = evaluation.get_forward_cache(left, cache)
right = evaluation.get_forward_cache(right, cache)

if isinstance(left, Dist):
    if isinstance(right, Dist):
        raise evaluation.DependencyError(
            "under-defined distribution {} or {}".format(left, right))
elif not isinstance(right, Dist):
    return numpy.inf
else:
    if self.matrix:
        Ci = numpy.linalg.inv(left)
        xloc = numpy.dot(Ci, xloc)
    else:
        left = (numpy.asfarray(left).T+numpy.zeros(xloc.shape).T).T
        valids = left != 0
        xloc.T[valids.T] = xloc.T[valids.T]/left.T[valids.T]
    pdf = evaluation.evaluate_density(right, xloc, cache=cache)
    if self.matrix:
        pdf = numpy.dot(Ci, pdf)
    else:
        pdf.T[valids.T] /= left.T[valids.T]
    return pdf

if self.matrix:
    Ci = numpy.linalg.inv(right)
    xloc = numpy.dot(xloc.T, Ci).T
else:
    right = (numpy.asfarray(right).T+numpy.zeros(xloc.shape).T).T
    valids = right != 0
    xloc.T[valids.T] = xloc.T[valids.T]/right.T[valids.T]
    xloc.T[~valids.T] = numpy.inf

pdf = evaluation.evaluate_density(left, xloc, cache=cache)
if self.matrix:
    pdf = numpy.dot(pdf.T, Ci).T
else:
    pdf.T[valids.T] /= right.T[valids.T]
assert pdf.shape == xloc.shape
return pdf
def _pdf(self, xloc, left, right, cache)
Probability density function.

Example:
    >>> print(chaospy.Uniform().pdf([-0.5, 0.5, 1.5, 2.5]))
    [0. 1. 0. 0.]
    >>> print(Mul(chaospy.Uniform(), 2).pdf([-0.5, 0.5, 1.5, 2.5]))
    [0.  0.5 0.5 0. ]
    >>> print(Mul(2, chaospy.Uniform()).pdf([-0.5, 0.5, 1.5, 2.5]))
    [0.  0.5 0.5 0. ]
    >>> print(Mul(1, 1.5).pdf([-0.5, 0.5, 1.5, 2.5]))  # Dirac logic
    [ 0.  0. inf  0.]
    >>> dist = chaospy.Mul([2, 1], chaospy.Iid(chaospy.Uniform(), 2))
    >>> print(dist.pdf([[0.5, 0.6, 1.5], [0.5, 0.6, 1.5]]))
    [0.5 0.5 0. ]
    >>> dist = chaospy.Mul(chaospy.Iid(chaospy.Uniform(), 2), [1, 2])
    >>> print(dist.pdf([[0.5, 0.6, 1.5], [0.5, 0.6, 1.5]]))
    [0.5 0.5 0. ]
2.291155
2.371123
0.966274
if evaluation.get_dependencies(left, right):
    raise evaluation.DependencyError(
        "sum of dependent distributions not feasible: "
        "{} and {}".format(left, right)
    )

if isinstance(left, Dist):
    left = evaluation.evaluate_moment(left, key, cache=cache)
else:
    left = (numpy.array(left).T**key).T
if isinstance(right, Dist):
    right = evaluation.evaluate_moment(right, key, cache=cache)
else:
    right = (numpy.array(right).T**key).T

return numpy.sum(left*right)
def _mom(self, key, left, right, cache)
Statistical moments.

Example:
    >>> print(numpy.around(chaospy.Uniform().mom([0, 1, 2, 3]), 4))
    [1.     0.5    0.3333 0.25  ]
    >>> print(numpy.around(Mul(chaospy.Uniform(), 2).mom([0, 1, 2, 3]), 4))
    [1.     1.     1.3333 2.    ]
    >>> print(numpy.around(Mul(2, chaospy.Uniform()).mom([0, 1, 2, 3]), 4))
    [1.     1.     1.3333 2.    ]
    >>> print(numpy.around(Mul(chaospy.Uniform(), chaospy.Uniform()).mom([0, 1, 2, 3]), 4))
    [1.     0.25   0.1111 0.0625]
    >>> print(numpy.around(Mul(2, 2).mom([0, 1, 2, 3]), 4))
    [ 1.  4. 16. 64.]
3.626057
3.636152
0.997224
from .. import baseclass

collection = [dist]

# create DAG as list of nodes and edges:
nodes = [dist]
edges = []
pool = [dist]
while pool:
    dist = pool.pop()
    for key in sorted(dist.prm):
        value = dist.prm[key]
        if not isinstance(value, baseclass.Dist):
            continue
        if (dist, value) not in edges:
            edges.append((dist, value))
        if value not in nodes:
            nodes.append(value)
            pool.append(value)

# temporary stores used by the depth-first algorithm.
permanent_marks = set()
temporary_marks = set()

def visit(node):
    if node in permanent_marks:
        return
    if node in temporary_marks:
        raise DependencyError("cycles in dependency structure.")
    nodes.remove(node)
    temporary_marks.add(node)
    for node1, node2 in edges:
        if node1 is node:
            visit(node2)
    temporary_marks.remove(node)
    permanent_marks.add(node)
    pool.append(node)

# kickstart algorithm.
while nodes:
    node = nodes[0]
    visit(node)

if not reverse:
    pool = list(reversed(pool))
return pool
def sorted_dependencies(dist, reverse=False)
Extract all underlying dependencies from a distribution, sorted topologically.

Uses a depth-first algorithm; see the link under "See also".

Args:
    dist (Dist): Distribution to extract dependencies from.
    reverse (bool): If True, place dependencies in reverse order.

Returns:
    dependencies (List[Dist]): All distributions that ``dist`` is dependent on, sorted topologically, including itself.

Examples:
    >>> dist1 = chaospy.Uniform()
    >>> dist2 = chaospy.Normal(dist1)
    >>> print(sorted_dependencies(dist1))
    [Uniform(lower=0, upper=1), Mul(uniform(), 0.5), uniform()]
    >>> print(sorted_dependencies(dist2))  # doctest: +NORMALIZE_WHITESPACE
    [Normal(mu=Uniform(lower=0, upper=1), sigma=1), Uniform(lower=0, upper=1), Mul(uniform(), 0.5), uniform(), Mul(normal(), 1), normal()]
    >>> dist1 in sorted_dependencies(dist2)
    True
    >>> dist2 in sorted_dependencies(dist1)
    False

Raises:
    DependencyError: If the dependency DAG is cyclic, dependency resolution is not possible.

See also:
    Depth-first algorithm: https://en.wikipedia.org/wiki/Topological_sorting
3.677789
3.512196
1.047148
from .. import baseclass
distributions = [
    sorted_dependencies(dist) for dist in distributions
    if isinstance(dist, baseclass.Dist)
]
dependencies = list()
for idx, dist1 in enumerate(distributions):
    for dist2 in distributions[idx+1:]:
        dependencies.extend([dist for dist in dist1 if dist in dist2])
return sorted(dependencies)
def get_dependencies(*distributions)
Get underlying dependencies that are shared between distributions.

If more than two distributions are provided, any pair-wise dependency between any two distributions is included, implying that an empty set is returned if and only if the distributions are i.i.d.

Args:
    distributions: Distributions to check for dependencies.

Returns:
    dependencies (List[Dist]): Distribution dependencies shared between at least one pair from ``distributions``.

Examples:
    >>> dist1 = chaospy.Uniform(1, 2)
    >>> dist2 = chaospy.Uniform(1, 2) * dist1
    >>> dist3 = chaospy.Uniform(3, 5)
    >>> print(chaospy.get_dependencies(dist1, dist2))
    [uniform(), Mul(uniform(), 0.5), Uniform(lower=1, upper=2)]
    >>> print(chaospy.get_dependencies(dist1, dist3))
    []
    >>> print(chaospy.get_dependencies(dist2, dist3))
    []
    >>> print(chaospy.get_dependencies(dist1, dist2, dist3))
    [uniform(), Mul(uniform(), 0.5), Uniform(lower=1, upper=2)]
4.06251
4.932314
0.823652
polynomials, norms, _, _ = chaospy.quad.generate_stieltjes(
    dist=dist, order=numpy.max(order), retall=True, **kws)
if normed:
    for idx, poly in enumerate(polynomials):
        polynomials[idx] = poly / numpy.sqrt(norms[:, idx])
    norms = norms**0

dim = len(dist)
if dim > 1:
    mv_polynomials = []
    mv_norms = []
    indices = chaospy.bertran.bindex(
        start=0, stop=order, dim=dim, sort=sort,
        cross_truncation=cross_truncation,
    )
    for index in indices:
        poly = polynomials[index[0]][0]
        for idx in range(1, dim):
            poly = poly * polynomials[index[idx]][idx]
        mv_polynomials.append(poly)
    if retall:
        for index in indices:
            mv_norms.append(
                numpy.prod([norms[idx, index[idx]] for idx in range(dim)]))
else:
    mv_norms = norms[0]
    mv_polynomials = polynomials

polynomials = chaospy.poly.flatten(chaospy.poly.Poly(mv_polynomials))
if retall:
    return polynomials, numpy.array(mv_norms)
return polynomials
def orth_ttr(order, dist, normed=False, sort="GR", retall=False, cross_truncation=1., **kws)
Create orthogonal polynomial expansion from three terms recursion formula.

Args:
    order (int): Order of polynomial expansion.
    dist (Dist): Distribution space where polynomials are orthogonal. If dist.ttr exists, it will be used. Must be stochastically independent.
    normed (bool): If True orthonormal polynomials will be used.
    sort (str): Polynomial sorting. Same as in basis.
    retall (bool): If true return numerically stabilized norms as well. Roughly the same as ``cp.E(orth**2, dist)``.
    cross_truncation (float): Use hyperbolic cross truncation scheme to reduce the number of terms in expansion. Only include terms where the exponents ``K`` satisfy the equation ``order >= sum(K**(1/cross_truncation))**cross_truncation``.

Returns:
    (Poly, numpy.ndarray): Orthogonal polynomial expansion and norms of the orthogonal expansion on the form ``E(orth**2, dist)``. Calculated using recurrence coefficients for stability.

Examples:
    >>> Z = chaospy.Normal()
    >>> print(chaospy.around(chaospy.orth_ttr(4, Z), 4))
    [1.0, q0, q0^2-1.0, q0^3-3.0q0, q0^4-6.0q0^2+3.0]
3.123797
3.478256
0.898093
return evaluation.evaluate_density(dist, -xloc, cache=cache)
def _pdf(self, xloc, dist, cache)
Probability density function.
12.304729
12.991741
0.947119
return -evaluation.evaluate_inverse(dist, 1-q, cache=cache)
def _ppf(self, q, dist, cache)
Point percentile function.
15.664988
15.168934
1.032702
return (-1)**numpy.sum(k)*evaluation.evaluate_moment(
    dist, k, cache=cache)
def _mom(self, k, dist, cache)
Statistical moments.
12.485264
13.14511
0.949803
a, b = evaluation.evaluate_recurrence_coefficients(dist, k)
return -a, b
def _ttr(self, k, dist, cache)
Three terms recursion coefficients.
17.083439
12.68185
1.347078
order = sorted(GENZ_KEISTER_22.keys())[order]
abscissas, weights = GENZ_KEISTER_22[order]
abscissas = numpy.array(abscissas)
weights = numpy.array(weights)
weights /= numpy.sum(weights)
abscissas *= numpy.sqrt(2)
return abscissas, weights
def quad_genz_keister_22(order)
Hermite Genz-Keister 22 rule.

Args:
    order (int): The quadrature order. Must be in the interval (0, 8).

Returns:
    (:py:data:typing.Tuple[numpy.ndarray, numpy.ndarray]): Abscissas and weights.

Examples:
    >>> abscissas, weights = quad_genz_keister_22(1)
    >>> print(numpy.around(abscissas, 4))
    [-1.7321  0.      1.7321]
    >>> print(numpy.around(weights, 4))
    [0.1667 0.6667 0.1667]
2.77165
3.531353
0.784869
for datatype, identifier in {
        int: _identify_scaler,
        numpy.int8: _identify_scaler,
        numpy.int16: _identify_scaler,
        numpy.int32: _identify_scaler,
        numpy.int64: _identify_scaler,
        float: _identify_scaler,
        numpy.float16: _identify_scaler,
        numpy.float32: _identify_scaler,
        numpy.float64: _identify_scaler,
        chaospy.poly.base.Poly: _identify_poly,
        dict: _identify_dict,
        numpy.ndarray: _identify_iterable,
        list: _identify_iterable,
        tuple: _identify_iterable,
}.items():
    if isinstance(core, datatype):
        return identifier(core)
raise TypeError(
    "Poly arg: 'core' is not a valid type " + repr(core))
def identify_core(core)
Identify the polynomial argument.
2.500088
2.374325
1.052968
return core.A, core.dim, core.shape, core.dtype
def _identify_poly(core)
Specification for a polynomial.
13.977036
13.301918
1.050753
if not core:
    return {}, 1, (), int
core = core.copy()
key = sorted(core.keys(), key=chaospy.poly.base.sort_key)[0]
shape = numpy.array(core[key]).shape
dtype = numpy.array(core[key]).dtype
dim = len(key)
return core, dim, shape, dtype
def _identify_dict(core)
Specification for a dictionary.
5.738251
5.676239
1.010925
if isinstance(core, numpy.ndarray) and not core.shape:
    return {(0,): core}, 1, (), core.dtype

core = [chaospy.poly.base.Poly(a) for a in core]
shape = (len(core),) + core[0].shape
dtype = chaospy.poly.typing.dtyping(*[_.dtype for _ in core])

dims = numpy.array([a.dim for a in core])
dim = numpy.max(dims)
if dim != numpy.min(dims):
    core = [chaospy.poly.dimension.setdim(a, dim) for a in core]

out = {}
for idx, core_ in enumerate(core):
    for key in core_.keys:
        if not key in out:
            out[key] = numpy.zeros(shape, dtype=dtype)
        out[key][idx] = core_.A[key]

return out, dim, shape, dtype
def _identify_iterable(core)
Specification for a list, tuple, numpy.ndarray.
3.891582
3.82898
1.016349
if isinstance(poly, distributions.Dist):
    poly, dist = polynomials.variable(len(poly)), poly
else:
    poly = polynomials.Poly(poly)

cov = Cov(poly, dist, **kws)
var = numpy.diag(cov)
vvar = numpy.sqrt(numpy.outer(var, var))
return numpy.where(vvar > 0, cov/vvar, 0)
def Corr(poly, dist=None, **kws)
Correlation matrix of a distribution or polynomial.

Args:
    poly (Poly, Dist): Input to take correlation on. Must have ``len(poly)>=2``.
    dist (Dist): Defines the space the correlation is taken on. It is ignored if ``poly`` is a distribution.

Returns:
    (numpy.ndarray): Correlation matrix with ``correlation.shape == poly.shape+poly.shape``.

Examples:
    >>> Z = chaospy.MvNormal([3, 4], [[2, .5], [.5, 1]])
    >>> print(numpy.around(chaospy.Corr(Z), 4))
    [[1.     0.3536]
     [0.3536 1.    ]]
    >>> x = chaospy.variable()
    >>> Z = chaospy.Normal()
    >>> print(numpy.around(chaospy.Corr([x, x**2], Z), 4))
    [[1. 0.]
     [0. 1.]]
3.934036
4.729446
0.831817
from ...quad import quad_clenshaw_curtis

q1, w1 = quad_clenshaw_curtis(int(10**3*a), 0, a)
q2, w2 = quad_clenshaw_curtis(int(10**3*(1-a)), a, 1)
q = numpy.concatenate([q1, q2], 1)
w = numpy.concatenate([w1, w2])
w = w*numpy.where(q < a, 2*q/a, 2*(1-q)/(1-a))

from chaospy.poly import variable
x = variable()

orth = [x*0, x**0]
inner = numpy.sum(q*w, -1)
norms = [1., 1.]
A, B = [], []

for n in range(k):
    A.append(inner/norms[-1])
    B.append(norms[-1]/norms[-2])
    orth.append((x-A[-1])*orth[-1]-orth[-2]*B[-1])

    y = orth[-1](*q)**2*w
    inner = numpy.sum(q*y, -1)
    norms.append(numpy.sum(y, -1))

A, B = numpy.array(A).T[0], numpy.array(B).T
return A[-1], B[-1]
def tri_ttr(k, a)
Custom TTR function.

The triangle distribution does not have an analytical TTR function, but because of its non-smooth nature, a blind integration scheme will converge very slowly. However, by splitting the integration into two parts, divided at the discontinuity in the derivative, the TTR can be made operative.
3.711617
3.795666
0.977856
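The sequence that ``tri_ttr`` builds term by term, via ``orth.append((x-A[-1])*orth[-1]-orth[-2]*B[-1])``, is the standard three-term recurrence for orthogonal polynomials, written in LaTeX for clarity:

    \phi_{n+1}(x) = (x - A_n)\,\phi_n(x) - B_n\,\phi_{n-1}(x),
    \qquad \phi_{-1}(x) = 0, \quad \phi_0(x) = 1,

where $A_n = \langle x\phi_n, \phi_n\rangle / \|\phi_n\|^2$ and $B_n = \|\phi_n\|^2 / \|\phi_{n-1}\|^2$, matching the ``inner/norms[-1]`` and ``norms[-1]/norms[-2]`` updates in the loop.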
if isinstance(poly, distributions.Dist):
    x = polynomials.variable(len(poly))
    poly, dist = x, poly
else:
    poly = polynomials.Poly(poly)

if poly.dim < len(dist):
    polynomials.setdim(poly, len(dist))

shape = poly.shape
poly = polynomials.flatten(poly)

m1 = E(poly, dist)
m2 = E(poly**2, dist)
m3 = E(poly**3, dist)
out = (m3-3*m2*m1+2*m1**3)/(m2-m1**2)**1.5

out = numpy.reshape(out, shape)
return out
def Skew(poly, dist=None, **kws)
Skewness operator.

Element by element 3rd order statistics of a distribution or polynomial.

Args:
    poly (Poly, Dist): Input to take skewness on.
    dist (Dist): Defines the space the skewness is taken on. It is ignored if ``poly`` is a distribution.

Returns:
    (numpy.ndarray): Element for element variance along ``poly``, where ``skewness.shape == poly.shape``.

Examples:
    >>> dist = chaospy.J(chaospy.Gamma(1, 1), chaospy.Normal(0, 2))
    >>> print(chaospy.Skew(dist))
    [2. 0.]
    >>> x, y = chaospy.variable(2)
    >>> poly = chaospy.Poly([1, x, y, 10*x*y])
    >>> print(chaospy.Skew(poly, dist))
    [nan  2.  0.  0.]
3.6403
3.653351
0.996428
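The closed form evaluated inside ``Skew`` is the third standardized moment expressed through raw moments $m_k = \mathrm{E}[P^k]$; expanding $\mathrm{E}[(P-m_1)^3]$ gives exactly the numerator used in the code:

    \mathrm{skew}(P)
        = \frac{\mathrm{E}\big[(P - m_1)^3\big]}{\mathrm{Var}(P)^{3/2}}
        = \frac{m_3 - 3 m_2 m_1 + 2 m_1^3}{(m_2 - m_1^2)^{3/2}}.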
assert len(x_data) == len(distribution), (
    "distribution %s is not of length %d" % (distribution, len(x_data)))
assert hasattr(distribution, "_cdf"), (
    "distribution require the `_cdf` method to function.")

cache = cache if cache is not None else {}

parameters = load_parameters(
    distribution, "_cdf", parameters=parameters, cache=cache)

# Store cache.
cache[distribution] = x_data

# Evaluate forward function.
out = numpy.zeros(x_data.shape)
out[:] = distribution._cdf(x_data, **parameters)

return out
def evaluate_forward(distribution, x_data, parameters=None, cache=None)
Evaluate forward Rosenblatt transformation.

Args:
    distribution (Dist): Distribution to evaluate.
    x_data (numpy.ndarray): Locations to evaluate the forward transformation at.
    parameters (:py:data:typing.Any): Collection of parameters to override the default ones in the distribution.
    cache (:py:data:typing.Any): A collection of previous calculations in case the same distribution turns up on more than one occasion.

Returns:
    The cumulative distribution values of ``distribution`` at location ``x_data`` using parameters ``parameters``.
4.161135
4.162122
0.999763
if isinstance(poly, distributions.Dist):
    x = polynomials.variable(len(poly))
    poly, dist = x, poly
else:
    poly = polynomials.Poly(poly)

dim = len(dist)
if poly.dim < dim:
    polynomials.setdim(poly, dim)

shape = poly.shape
poly = polynomials.flatten(poly)

keys = poly.keys
N = len(keys)
A = poly.A

keys1 = numpy.array(keys).T
if dim == 1:
    keys1 = keys1[0]
    keys2 = sum(numpy.meshgrid(keys, keys))
else:
    keys2 = numpy.empty((dim, N, N))
    for i in range(N):
        for j in range(N):
            keys2[:, i, j] = keys1[:, i]+keys1[:, j]

m1 = numpy.outer(*[dist.mom(keys1, **kws)]*2)
m2 = dist.mom(keys2, **kws)
mom = m2-m1

out = numpy.zeros(poly.shape)
for i in range(N):
    a = A[keys[i]]
    out += a*a*mom[i, i]
    for j in range(i+1, N):
        b = A[keys[j]]
        out += 2*a*b*mom[i, j]

out = out.reshape(shape)
return out
def Var(poly, dist=None, **kws)
Element by element 2nd order statistics.

Args:
    poly (Poly, Dist): Input to take variance on.
    dist (Dist): Defines the space the variance is taken on. It is ignored if ``poly`` is a distribution.

Returns:
    (numpy.ndarray): Element for element variance along ``poly``, where ``variation.shape == poly.shape``.

Examples:
    >>> dist = chaospy.J(chaospy.Gamma(1, 1), chaospy.Normal(0, 2))
    >>> print(chaospy.Var(dist))
    [1. 4.]
    >>> x, y = chaospy.variable(2)
    >>> poly = chaospy.Poly([1, x, y, 10*x*y])
    >>> print(chaospy.Var(poly, dist))
    [  0.   1.   4. 800.]
3.160985
3.203896
0.986607
if not isinstance(poly, (distributions.Dist, polynomials.Poly)):
    print(type(poly))
    print("Approximating expected value...")
    out = quadrature.quad(poly, dist, veceval=True, **kws)
    print("done")
    return out

if isinstance(poly, distributions.Dist):
    dist, poly = poly, polynomials.variable(len(poly))

if not poly.keys:
    return numpy.zeros(poly.shape, dtype=int)

if isinstance(poly, (list, tuple, numpy.ndarray)):
    return [E(_, dist, **kws) for _ in poly]

if poly.dim < len(dist):
    poly = polynomials.setdim(poly, len(dist))

shape = poly.shape
poly = polynomials.flatten(poly)

keys = poly.keys
mom = dist.mom(numpy.array(keys).T, **kws)
A = poly.A

if len(dist) == 1:
    mom = mom[0]

out = numpy.zeros(poly.shape)
for i in range(len(keys)):
    out += A[keys[i]]*mom[i]

out = numpy.reshape(out, shape)
return out
def E(poly, dist=None, **kws)
Expected value operator.

1st order statistics of a probability distribution or polynomial on a given probability space.

Args:
    poly (Poly, Dist): Input to take expected value on.
    dist (Dist): Defines the space the expected value is taken on. It is ignored if ``poly`` is a distribution.

Returns:
    (numpy.ndarray): The expected value of the polynomial or distribution, where ``expected.shape == poly.shape``.

Examples:
    >>> dist = chaospy.J(chaospy.Gamma(1, 1), chaospy.Normal(0, 2))
    >>> print(chaospy.E(dist))
    [1. 0.]
    >>> x, y = chaospy.variable(2)
    >>> poly = chaospy.Poly([1, x, y, 10*x*y])
    >>> print(chaospy.E(poly, dist))
    [1. 1. 0. 0.]
3.671103
3.671416
0.999915
dim = len(dist)
if poly.dim < dim:
    poly = chaospy.poly.setdim(poly, len(dist))

zero = [0]*dim
out = numpy.zeros((dim, dim) + poly.shape)

mean = E(poly, dist)
V_total = Var(poly, dist)

E_cond_i = [None]*dim
V_E_cond_i = [None]*dim
for i in range(dim):
    zero[i] = 1
    E_cond_i[i] = E_cond(poly, zero, dist, **kws)
    V_E_cond_i[i] = Var(E_cond_i[i], dist, **kws)
    zero[i] = 0

for i in range(dim):
    zero[i] = 1
    for j in range(i+1, dim):
        zero[j] = 1
        E_cond_ij = E_cond(poly, zero, dist, **kws)
        out[i, j] = ((Var(E_cond_ij, dist, **kws)-V_E_cond_i[i] -
                      V_E_cond_i[j]) /
                     (V_total+(V_total == 0))*(V_total != 0))
        out[j, i] = out[i, j]
        zero[j] = 0
    zero[i] = 0

return out
def Sens_m2(poly, dist, **kws)
Variance-based decomposition/Sobol' indices.

Second order sensitivity indices.

Args:
    poly (Poly): Polynomial to find second order Sobol indices on.
    dist (Dist): The distributions of the input used in ``poly``.

Returns:
    (numpy.ndarray): First order sensitivity indices for each parameters in ``poly``, with shape ``(len(dist), len(dist)) + poly.shape``.

Examples:
    >>> x, y = chaospy.variable(2)
    >>> poly = chaospy.Poly([1, x*y, x*x*y*y, x*y*y*y])
    >>> dist = chaospy.Iid(chaospy.Uniform(0, 1), 2)
    >>> indices = chaospy.Sens_m2(poly, dist)
    >>> print(indices)
    [[[0.         0.         0.         0.        ]
      [0.         0.14285714 0.28571429 0.20930233]]
    <BLANKLINE>
     [[0.         0.14285714 0.28571429 0.20930233]
      [0.         0.         0.         0.        ]]]
2.615709
2.695558
0.970378
x_data = .5*numpy.cos(numpy.arange(order, 0, -1)*numpy.pi/(order+1)) + .5
x_data = chaospy.quad.combine([x_data]*dim)
return x_data.T
def create_chebyshev_samples(order, dim=1)
Chebyshev sampling function.

Args:
    order (int): The number of samples to create along each axis.
    dim (int): The number of dimensions to create samples for.

Returns:
    samples following Chebyshev sampling scheme mapped to the ``[0, 1]^dim`` hyper-cube and ``shape == (dim, order)``.
5.207179
6.62358
0.786158
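For intuition, a self-contained sketch of the one-dimensional node formula used above (plain numpy, no chaospy required):

    import numpy

    # Chebyshev points 0.5*cos(k*pi/(order+1)) + 0.5 for k = order..1,
    # i.e. mapped from [-1, 1] onto the unit interval [0, 1].
    order = 3
    nodes = .5*numpy.cos(numpy.arange(order, 0, -1)*numpy.pi/(order+1)) + .5
    print(numpy.around(nodes, 4))  # [0.1464 0.5    0.8536]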
dim = len(dist)
basis = chaospy.poly.basis(
    start=1, stop=order, dim=dim, sort=sort,
    cross_truncation=cross_truncation,
)
length = len(basis)

cholmat = chaospy.chol.gill_king(chaospy.descriptives.Cov(basis, dist))
cholmat_inv = numpy.linalg.inv(cholmat.T).T
if not normed:
    diag_mesh = numpy.repeat(numpy.diag(cholmat_inv), len(cholmat_inv))
    cholmat_inv /= diag_mesh.reshape(cholmat_inv.shape)

coefs = numpy.empty((length+1, length+1))

coefs[1:, 1:] = cholmat_inv
coefs[0, 0] = 1
coefs[0, 1:] = 0

expected = -numpy.sum(
    cholmat_inv*chaospy.descriptives.E(basis, dist, **kws), -1)
coefs[1:, 0] = expected

coefs = coefs.T

out = {}
out[(0,)*dim] = coefs[0]
for idx in range(length):
    index = basis[idx].keys[0]
    out[index] = coefs[idx+1]

polynomials = chaospy.poly.Poly(out, dim, coefs.shape[1:], float)
return polynomials
def orth_chol(order, dist, normed=True, sort="GR", cross_truncation=1., **kws)
Create orthogonal polynomial expansion from Cholesky decomposition.

Args:
    order (int): Order of polynomial expansion.
    dist (Dist): Distribution space where polynomials are orthogonal.
    normed (bool): If True orthonormal polynomials will be used instead of monic.
    sort (str): Ordering argument passed to poly.basis. If custom basis is used, argument is ignored.
    cross_truncation (float): Use hyperbolic cross truncation scheme to reduce the number of terms in expansion.

Examples:
    >>> Z = chaospy.Normal()
    >>> print(chaospy.around(chaospy.orth_chol(3, Z), 4))
    [1.0, q0, 0.7071q0^2-0.7071, 0.4082q0^3-1.2247q0]
3.825425
4.024458
0.950544
P = P.copy()

if not chaospy.poly.caller.is_decomposed(P):
    raise TypeError("Polynomial not on component form.")

A = []
dim = P.dim
coef = P(*(1,)*dim)
M = coef != 0

zero = (0,)*dim
ones = [1]*dim
A = [{zero: coef}]

if zero in P.A:
    del P.A[zero]
    P.keys.remove(zero)

for key in P.keys:
    P.A[key] = (P.A[key] != 0)

for i in range(dim):
    A.append({})
    ones[i] = numpy.nan
    Q = P(*ones)
    ones[i] = 1

    if isinstance(Q, numpy.ndarray):
        continue

    Q = Q.A

    if zero in Q:
        del Q[zero]

    for key in Q:
        val = Q[key]
        A[-1][key] = val

A = [Poly(a, dim, None, P.dtype) for a in A]
P = Poly(A, dim, None, P.dtype)
P = P + 1*(P(*(1,)*dim) == 0)*M

return P
def dimsplit(P)
Segmentize a polynomial (on decomposed form) into its dimensions.

In arrays, missing values are padded with 1 to make dimsplit compatible with ``poly.prod(Q, 0)``.

Args:
    P (Poly): Input polynomial.

Returns:
    (Poly): Segmented polynomial array where ``Q.shape==P.shape+(P.dim+1,)``. The surplus element in ``P.dim+1`` is used for coefficients.

Examples:
    >>> x,y = chaospy.variable(2)
    >>> P = chaospy.Poly([2, x*y, 2*x])
    >>> Q = chaospy.dimsplit(P)
    >>> print(Q)
    [[2, 1, 2], [1, q0, q0], [1, q1, 1]]
    >>> print(chaospy.prod(Q, 0))
    [2, q0q1, 2q0]
5.602407
5.587273
1.002709
P = P.copy()
ldim = P.dim
if not dim:
    dim = ldim+1
if dim == ldim:
    return P

P.dim = dim
if dim > ldim:
    key = numpy.zeros(dim, dtype=int)
    for lkey in P.keys:
        key[:ldim] = lkey
        P.A[tuple(key)] = P.A.pop(lkey)
else:
    key = numpy.zeros(dim, dtype=int)
    for lkey in P.keys:
        if not sum(lkey[ldim-1:]) or not sum(lkey):
            P.A[lkey[:dim]] = P.A.pop(lkey)
        else:
            del P.A[lkey]
P.keys = sorted(P.A.keys(), key=sort_key)
return P
def setdim(P, dim=None)
Adjust the dimensions of a polynomial.

Output the results into a Poly object.

Args:
    P (Poly): Input polynomial.
    dim (int): The dimensions of the output polynomial. If omitted, increase polynomial with one dimension. If the new dim is smaller than P's dimensions, variables with cut components are all cut.

Examples:
    >>> x,y = chaospy.variable(2)
    >>> P = x*x-x*y
    >>> print(chaospy.setdim(P, 1))
    q0^2
3.095418
3.649969
0.848067
return evaluation.evaluate_density(
    dist, base**xloc, cache=cache)*base**xloc*numpy.log(base)
def _pdf(self, xloc, dist, base, cache)
Probability density function.
9.344839
9.099339
1.02698
return evaluation.evaluate_forward(dist, base**xloc, cache=cache)
def _cdf(self, xloc, dist, base, cache)
Cumulative distribution function.
14.817529
12.1355
1.221007
mat = numpy.asfarray(mat)
size = mat.shape[0]

# Calculate gamma(mat) and xi_(mat).
gamma = 0.0
xi_ = 0.0
for idy in range(size):
    gamma = max(abs(mat[idy, idy]), gamma)
    for idx in range(idy+1, size):
        xi_ = max(abs(mat[idy, idx]), xi_)

# Calculate delta and beta.
delta = eps * max(gamma + xi_, 1.0)
if size == 1:
    beta = numpy.sqrt(max(gamma, eps))
else:
    beta = numpy.sqrt(max(gamma, xi_ / numpy.sqrt(size*size - 1.0), eps))

# Initialise data structures.
mat_a = 1.0 * mat
mat_r = 0.0 * mat
perm = numpy.eye(size, dtype=int)

# Main loop.
for idx in range(size):

    # Row and column swapping, find the index > idx of the largest
    # diagonal element.
    idz = idx
    for idy in range(idx+1, size):
        if abs(mat_a[idy, idy]) >= abs(mat_a[idz, idz]):
            idz = idy

    if idz != idx:
        mat_a, mat_r, perm = swap_across(idz, idx, mat_a, mat_r, perm)

    # Calculate a_pred.
    theta_j = 0.0
    if idx < size-1:
        for idy in range(idx+1, size):
            theta_j = max(theta_j, abs(mat_a[idx, idy]))
    a_pred = max(abs(mat_a[idx, idx]), (theta_j/beta)**2, delta)

    # Calculate row idx of r and update a.
    mat_r[idx, idx] = numpy.sqrt(a_pred)
    for idy in range(idx+1, size):
        mat_r[idx, idy] = mat_a[idx, idy] / mat_r[idx, idx]
        for idz in range(idx+1, idy+1):
            # Keep matrix a symmetric:
            mat_a[idy, idz] = mat_a[idz, idy] = \
                mat_a[idz, idy] - mat_r[idx, idy] * mat_r[idx, idz]

# The Cholesky factor of mat.
return perm, mat_r.T
def gill_murray_wright(mat, eps=1e-16)
Gill-Murray-Wright algorithm for pivoting modified Cholesky decomposition.

Returns ``(perm, lowtri)`` such that ``perm.T*mat*perm = lowtri*lowtri.T`` is approximately correct.

Args:
    mat (numpy.ndarray): Must be a non-singular and symmetric matrix.
    eps (float): Error tolerance used in algorithm.

Returns:
    (:py:data:typing.Tuple[numpy.ndarray, numpy.ndarray]): Permutation matrix used for pivoting and lower triangular factor.

Examples:
    >>> mat = numpy.matrix([[4, 2, 1], [2, 6, 3], [1, 3, -.004]])
    >>> perm, lowtri = gill_murray_wright(mat)
    >>> perm, lowtri = numpy.matrix(perm), numpy.matrix(lowtri)
    >>> print(perm)
    [[0 1 0]
     [1 0 0]
     [0 0 1]]
    >>> print(numpy.around(lowtri, 4))
    [[ 2.4495  0.      0.    ]
     [ 0.8165  1.8257  0.    ]
     [ 1.2247 -0.      1.2264]]
    >>> print(numpy.around(perm*lowtri*lowtri.T*perm.T, 4))
    [[4.    2.    1.   ]
     [2.    6.    3.   ]
     [1.    3.    3.004]]
2.941397
3.018938
0.974315
# Temporary permutation matrix for swapping 2 rows or columns.
size = mat_a.shape[0]
perm_new = numpy.eye(size, dtype=int)

# Modify the permutation matrix perm by swapping columns.
perm_row = 1.0*perm[:, idx]
perm[:, idx] = perm[:, idy]
perm[:, idy] = perm_row

# Modify the permutation matrix p by swapping rows (same as
# columns because p = pT).
row_p = 1.0 * perm_new[idx]
perm_new[idx] = perm_new[idy]
perm_new[idy] = row_p

# Permute mat_a and r (p = pT).
mat_a = numpy.dot(perm_new, numpy.dot(mat_a, perm_new))
mat_r = numpy.dot(mat_r, perm_new)

return mat_a, mat_r, perm
def swap_across(idx, idy, mat_a, mat_r, perm)
Interchange rows and columns idy and idx.
3.411592
3.330359
1.024392
primes = list(primes)
if not primes:
    prime_order = 10*dim
    while len(primes) < dim:
        primes = create_primes(prime_order)
        prime_order *= 2

primes = primes[:dim]
assert len(primes) == dim, "not enough primes"

if burnin < 0:
    burnin = max(primes)

out = numpy.empty((dim, order))
indices = [idx+burnin for idx in range(order)]
for dim_ in range(dim):
    out[dim_] = create_van_der_corput_samples(
        indices, number_base=primes[dim_])

return out
def create_halton_samples(order, dim=1, burnin=-1, primes=())
Create Halton sequence.

For ``dim == 1`` the sequence falls back to Van Der Corput sequence.

Args:
    order (int): The order of the Halton sequence. Defines the number of samples.
    dim (int): The number of dimensions in the Halton sequence.
    burnin (int): Skip the first ``burnin`` samples. If negative, the maximum of ``primes`` is used.
    primes (tuple): The (non-)prime base to calculate values along each axis. If empty, growing prime values starting from 2 will be used.

Returns (numpy.ndarray):
    Halton sequence with ``shape == (dim, order)``.
3.431532
3.134362
1.09481
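The Van Der Corput building block mentioned above reverses an index's digits around the radix point; a minimal self-contained sketch (the helper name is illustrative, not the library's own):

    def van_der_corput(index, base=2):
        # Reverse the base-`base` digits of `index` around the radix point.
        value, denom = 0.0, 1.0
        while index:
            index, digit = divmod(index, base)
            denom *= base
            value += digit / denom
        return value

    print([van_der_corput(idx) for idx in range(1, 5)])  # [0.5, 0.25, 0.75, 0.125]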
if x_data is None:
    try:
        x_data = evaluation.evaluate_inverse(
            self, numpy.array([[0.5]]*len(self)))
    except StochasticallyDependentError:
        x_data = approximation.find_interior_point(self)
    shape = (len(self),)
    if hasattr(self, "_range"):
        return self._range(x_data, {})
else:
    x_data = numpy.asfarray(x_data)
    shape = x_data.shape
    x_data = x_data.reshape(len(self), -1)

q_data = evaluation.evaluate_bound(self, x_data)
q_data = q_data.reshape((2,)+shape)
return q_data
def range(self, x_data=None)
Generate the upper and lower bounds of a distribution.

Args:
    x_data (numpy.ndarray): The bounds might vary over the sample space. By providing x_data you can specify where in the space the bound should be taken. If omitted, a (pseudo-)random sample is used.

Returns:
    (numpy.ndarray): The lower (out[0]) and upper (out[1]) bound where out.shape=(2,)+x_data.shape
3.861584
3.912972
0.986867
x_data = numpy.asfarray(x_data)
shape = x_data.shape
x_data = x_data.reshape(len(self), -1)

lower, upper = evaluation.evaluate_bound(self, x_data)
q_data = numpy.zeros(x_data.shape)
indices = x_data > upper
q_data[indices] = 1
indices = ~indices & (x_data >= lower)
q_data[indices] = numpy.clip(evaluation.evaluate_forward(
    self, x_data), a_min=0, a_max=1)[indices]

q_data = q_data.reshape(shape)
return q_data
def fwd(self, x_data)
Forward Rosenblatt transformation.

Args:
    x_data (numpy.ndarray): Location for the distribution function. ``x_data.shape`` must be compatible with distribution shape.

Returns:
    (numpy.ndarray): Evaluated distribution function values, where ``out.shape==x_data.shape``.
2.994967
3.470877
0.862885
if len(self) > 1 and evaluation.get_dependencies(*self):
    raise StochasticallyDependentError(
        "Cumulative distribution does not support dependencies.")
q_data = self.fwd(x_data)
if len(self) > 1:
    q_data = numpy.prod(q_data, 0)
return q_data
def cdf(self, x_data)
Cumulative distribution function.

Note that chaospy only supports cumulative distribution functions for stochastically independent distributions.

Args:
    x_data (numpy.ndarray): Location for the distribution function. Assumes that ``len(x_data) == len(distribution)``.

Returns:
    (numpy.ndarray): Evaluated distribution function values, where output has shape ``x_data.shape`` in one dimension and ``x_data.shape[1:]`` in higher dimensions.
5.813375
5.700745
1.019757
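A sketch of the independence reduction above: for an i.i.d. pair the CDF is the product of the per-component forward transforms (standard chaospy calls):

    import chaospy

    dist = chaospy.Iid(chaospy.Uniform(0, 1), 2)
    # F(x1, x2) = x1*x2 on the unit square for independent uniforms,
    # so the two points (0.5, 0.5) and (0.5, 1.0) give 0.25 and 0.5.
    print(dist.cdf([[0.5, 0.5], [0.5, 1.0]]))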
q_data = numpy.asfarray(q_data)
assert numpy.all((q_data >= 0) & (q_data <= 1)), (
    "sanitize your inputs!")
shape = q_data.shape
q_data = q_data.reshape(len(self), -1)
x_data = evaluation.evaluate_inverse(self, q_data)
lower, upper = evaluation.evaluate_bound(self, x_data)
x_data = numpy.clip(x_data, a_min=lower, a_max=upper)
x_data = x_data.reshape(shape)
return x_data
def inv(self, q_data, max_iterations=100, tollerance=1e-5)
Inverse Rosenblatt transformation.

If possible the transformation is done analytically. If not possible, the transformation is approximated using an algorithm that alternates between Newton-Raphson and binary search.

Args:
    q_data (numpy.ndarray): Probabilities to be inverted. If any values are outside ``[0, 1]``, an error will be raised. ``q_data.shape`` must be compatible with distribution shape.
    max_iterations (int): If approximation is used, this sets the maximum number of allowed iterations in the Newton-Raphson algorithm.
    tollerance (float): If approximation is used, this sets the error tolerance level required to define a sample as converged.

Returns:
    (numpy.ndarray): Inverted probability values where ``out.shape == q_data.shape``.
2.965937
3.340713
0.887815
x_data = numpy.asfarray(x_data)
shape = x_data.shape
x_data = x_data.reshape(len(self), -1)

lower, upper = evaluation.evaluate_bound(self, x_data)
f_data = numpy.zeros(x_data.shape)
indices = (x_data <= upper) & (x_data >= lower)
f_data[indices] = evaluation.evaluate_density(self, x_data)[indices]
f_data = f_data.reshape(shape)
if len(self) > 1:
    f_data = numpy.prod(f_data, 0)
return f_data
def pdf(self, x_data, step=1e-7)
Probability density function.

If possible the density will be calculated analytically. If not possible, it will be approximated by approximating the one-dimensional derivative of the forward Rosenblatt transformation and multiplying the component parts. Note that even if the distribution is multivariate, each component of the Rosenblatt is one-dimensional.

Args:
    x_data (numpy.ndarray): Location for the density function. ``x_data.shape`` must be compatible with distribution shape.
    step (float, numpy.ndarray): If approximation is used, the step length given in the approximation of the derivative. If array provided, elements are used along each axis.

Returns:
    (numpy.ndarray): Evaluated density function values. Shapes are related through the identity ``x_data.shape == dist.shape+out.shape``.
2.732283
3.04945
0.895992
size_ = numpy.prod(size, dtype=int)
dim = len(self)
if dim > 1:
    if isinstance(size, (tuple, list, numpy.ndarray)):
        shape = (dim,) + tuple(size)
    else:
        shape = (dim, size)
else:
    shape = size

from . import sampler
out = sampler.generator.generate_samples(
    order=size_, domain=self, rule=rule, antithetic=antithetic)
try:
    out = out.reshape(shape)
except ValueError:
    # Requested shape is incompatible; fall back to a flat layout.
    if len(self) == 1:
        out = out.flatten()
    else:
        out = out.reshape(dim, int(out.size/dim))

return out
def sample(self, size=(), rule="R", antithetic=None)
Create pseudo-random generated samples.

By default, the samples are created using standard (pseudo-)random samples. However, if needed, the samples can also be created by either low-discrepancy sequences, and/or variance reduction techniques.

Changing the sampling scheme, use the following ``rule`` flag:

+-------+-------------------------------------------------+
| key   | Description                                     |
+=======+=================================================+
| ``C`` | Roots of the first order Chebyshev polynomials. |
+-------+-------------------------------------------------+
| ``NC``| Chebyshev nodes adjusted to ensure nested.      |
+-------+-------------------------------------------------+
| ``K`` | Korobov lattice.                                |
+-------+-------------------------------------------------+
| ``R`` | Classical (Pseudo-)Random samples.              |
+-------+-------------------------------------------------+
| ``RG``| Regular spaced grid.                            |
+-------+-------------------------------------------------+
| ``NG``| Nested regular spaced grid.                     |
+-------+-------------------------------------------------+
| ``L`` | Latin hypercube samples.                        |
+-------+-------------------------------------------------+
| ``S`` | Sobol low-discrepancy sequence.                 |
+-------+-------------------------------------------------+
| ``H`` | Halton low-discrepancy sequence.                |
+-------+-------------------------------------------------+
| ``M`` | Hammersley low-discrepancy sequence.            |
+-------+-------------------------------------------------+

All samples are created on the ``[0, 1]``-hypercube, which then is mapped into the domain of the distribution using the inverse Rosenblatt transformation.

Args:
    size (numpy.ndarray): The size of the samples to generate.
    rule (str): Indicator defining the sampling scheme.
    antithetic (bool, numpy.ndarray): If provided, will be used to setup antithetic variables. If array, defines the axes to mirror.

Returns:
    (numpy.ndarray): Random samples with shape ``(len(self),)+self.shape``.
3.270472
3.179294
1.028679
K = numpy.asarray(K, dtype=int)
shape = K.shape
dim = len(self)

if dim > 1:
    shape = shape[1:]

size = int(K.size/dim)
K = K.reshape(dim, size)

cache = {}
out = [evaluation.evaluate_moment(self, kdata, cache)
       for kdata in K.T]
out = numpy.array(out)
return out.reshape(shape)
def mom(self, K, **kws)
Raw statistical moments.

Creates non-centralized raw moments from the random variable. If analytical options can not be utilized, Monte Carlo integration will be used.

Args:
    K (numpy.ndarray): Index of the raw moments. k.shape must be compatible with distribution shape.
    rule (str): Sampling scheme to use when performing Monte Carlo estimation of the moment if the analytical method fails.
    composite (numpy.ndarray): If provided, composite quadrature will be used. Ignored in the case if gaussian=True. If int provided, determines number of even domain splits. If array of ints, determines number of even domain splits along each axis. If array of arrays/floats, determines location of splits.
    antithetic (numpy.ndarray): List of bool. Represents the axes to mirror using antithetic variable during MCI.

Returns:
    (numpy.ndarray): Shapes are related through the identity ``k.shape == dist.shape+k.shape``.
4.469238
4.471303
0.999538
kloc = numpy.asarray(kloc, dtype=int)
shape = kloc.shape
kloc = kloc.reshape(len(self), -1)
cache = {}
out = [evaluation.evaluate_recurrence_coefficients(self, k)
       for k in kloc.T]
out = numpy.array(out).T
return out.reshape((2,)+shape)
def ttr(self, kloc, acc=10**3, verbose=1)
Three terms relation's coefficient generator.

Args:
    kloc (numpy.ndarray, int): The order of the coefficients.
    acc (int): Accuracy of discretized Stieltjes if analytical methods are unavailable.

Returns:
    (Recurrence coefficients): Where out[0] is the first (A) and out[1] is the second (B) coefficient, with ``out.shape == (2,)+kloc.shape``.
4.659978
3.9951
1.166423
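A short usage sketch for ``ttr``; the returned rows are the $A_k$ and $B_k$ coefficients of the three-term recurrence shown earlier (shape claim follows from the docstring identity):

    import numpy
    import chaospy

    coeffs = chaospy.Normal(0, 1).ttr(numpy.arange(3))
    print(coeffs.shape)  # (2, 3): row 0 holds A_k, row 1 holds B_k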
if N is None:
    N = len(poly)//2 + 1

corr = Corr(poly, dist, **kws)
out = numpy.empty(N)

for n in range(N):
    out[n] = numpy.mean(corr.diagonal(n), 0)

return out
def Acf(poly, dist, N=None, **kws)
Auto-correlation function.

Args:
    poly (Poly): Polynomial of interest. Must have ``len(poly) > N``.
    dist (Dist): Defines the space the correlation is taken on.
    N (int): The number of time steps apart included. If omitted set to ``len(poly)/2+1``.

Returns:
    (numpy.ndarray): Auto-correlation of ``poly`` with shape ``(N,)``. Note that by definition ``Q[0]=1``.

Examples:
    >>> poly = chaospy.prange(10)[1:]
    >>> Z = chaospy.Uniform()
    >>> print(numpy.around(chaospy.Acf(poly, Z, 5), 4))
    [1.     0.9915 0.9722 0.9457 0.9127]
3.595758
4.88451
0.736155
rc("figure", figsize=[8.,4.]) rc("figure.subplot", left=.08, top=.95, right=.98) rc("image", cmap="gray") seed(1000) Q1 = cp.Gamma(2) Q2 = cp.Normal(0, Q1) Q = cp.J(Q1, Q2) #end subplot(121) s,t = meshgrid(linspace(0,5,200), linspace(-6,6,200)) contourf(s,t,Q.pdf([s,t]),50) xlabel("$q_1$") ylabel("$q_2$") subplot(122) Qr = Q.sample(500) scatter(*Qr, s=10, c="k", marker="s") xlabel("$Q_1$") ylabel("$Q_2$") axis([0,5,-6,6]) savefig("multivariate.png"); clf() Q2 = cp.Gamma(1) Q1 = cp.Normal(Q2**2, Q2+1) Q = cp.J(Q1, Q2) #end subplot(121) s,t = meshgrid(linspace(-4,7,200), linspace(0,3,200)) contourf(s,t,Q.pdf([s,t]),30) xlabel("$q_1$") ylabel("$q_2$") subplot(122) Qr = Q.sample(500) scatter(*Qr) xlabel("$Q_1$") ylabel("$Q_2$") axis([-4,7,0,3]) savefig("multivariate2.png"); clf()
def plot_figures()
Plot figures for multivariate distribution section.
2.660733
2.612596
1.018425
if isinstance(vari, Poly):
    shape = int(numpy.prod(vari.shape))
    return reshape(vari, (shape,))
return numpy.array(vari).flatten()
def flatten(vari)
Flatten a shapeable quantity.

Args:
    vari (chaospy.poly.base.Poly, numpy.ndarray): Shapeable input quantity.

Returns:
    (chaospy.poly.base.Poly, numpy.ndarray): Same type as ``vari`` with `len(Q.shape)==1`.

Examples:
    >>> P = chaospy.reshape(chaospy.prange(4), (2,2))
    >>> print(P)
    [[1, q0], [q0^2, q0^3]]
    >>> print(chaospy.flatten(P))
    [1, q0, q0^2, q0^3]
5.530801
8.068131
0.685512
if isinstance(vari, Poly):
    core = vari.A.copy()
    for key in vari.keys:
        core[key] = reshape(core[key], shape)
    out = Poly(core, vari.dim, shape, vari.dtype)
    return out
return numpy.asarray(vari).reshape(shape)
def reshape(vari, shape)
Reshape the shape of a shapeable quantity.

Args:
    vari (chaospy.poly.base.Poly, numpy.ndarray): Shapeable input quantity.
    shape (tuple): The polynomial's new shape. Must be compatible with the number of elements in ``vari``.

Returns:
    (chaospy.poly.base.Poly, numpy.ndarray): Same type as ``vari``.

Examples:
    >>> poly = chaospy.prange(6)
    >>> print(poly)
    [1, q0, q0^2, q0^3, q0^4, q0^5]
    >>> print(chaospy.reshape(poly, (2,3)))
    [[1, q0, q0^2], [q0^3, q0^4, q0^5]]
4.360572
5.256711
0.829525
if isinstance(vari, Poly):
    core_old = vari.A.copy()
    core_new = {}
    for key in vari.keys:
        core_new[key] = rollaxis(core_old[key], axis, start)
    return Poly(core_new, vari.dim, None, vari.dtype)
return numpy.rollaxis(vari, axis, start)
def rollaxis(vari, axis, start=0)
Roll the specified axis backwards, until it lies in a given position.

Args:
    vari (chaospy.poly.base.Poly, numpy.ndarray):
        Input array or polynomial.
    axis (int):
        The axis to roll backwards. The positions of the other axes do
        not change relative to one another.
    start (int):
        The axis is rolled until it lies before this position.
3.812619
3.609141
1.056378
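A quick shape check for the array branch (a sketch; it mirrors ``numpy.rollaxis`` semantics, and assumes the helper is exported at package level like ``chaospy.flatten`` above):

import numpy
import chaospy

a = numpy.ones((3, 4, 5))
# Axis 2 is rolled to the front; the others keep their relative order:
print(chaospy.rollaxis(a, 2).shape)  # (5, 3, 4)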
if isinstance(vari, Poly):
    core = vari.A.copy()
    for key in vari.keys:
        core[key] = swapaxes(core[key], ax1, ax2)
    return Poly(core, vari.dim, None, vari.dtype)
return numpy.swapaxes(vari, ax1, ax2)
def swapaxes(vari, ax1, ax2)
Interchange two axes of a polynomial.
3.792858
3.474431
1.091649
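For a two-dimensional polynomial array, swapping axes 0 and 1 matches the ``transpose`` example further down (a sketch):

import chaospy

P = chaospy.reshape(chaospy.prange(4), (2, 2))
print(chaospy.swapaxes(P, 0, 1))  # expected [[1, q0^2], [q0, q0^3]]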
if isinstance(vari, Poly):
    core = vari.A.copy()
    for key in vari.keys:
        core[key] = roll(core[key], shift, axis)
    return Poly(core, vari.dim, None, vari.dtype)
return numpy.roll(vari, shift, axis)
def roll(vari, shift, axis=None)
Roll array elements along a given axis.
3.803945
3.81055
0.998267
if isinstance(vari, Poly):
    core = vari.A.copy()
    for key in vari.keys:
        core[key] = transpose(core[key])
    return Poly(core, vari.dim, vari.shape[::-1], vari.dtype)
return numpy.transpose(vari)
def transpose(vari)
Transpose a shapeable quantity.

Args:
    vari (chaospy.poly.base.Poly, numpy.ndarray):
        Quantity of interest.

Returns:
    (chaospy.poly.base.Poly, numpy.ndarray):
        Same type as ``vari``.

Examples:
    >>> P = chaospy.reshape(chaospy.prange(4), (2,2))
    >>> print(P)
    [[1, q0], [q0^2, q0^3]]
    >>> print(chaospy.transpose(P))
    [[1, q0^2], [q0, q0^3]]
4.6151
5.578815
0.827255
samples = numpy.asfarray(samples)
assert numpy.all(samples <= 1) and numpy.all(samples >= 0), (
    "all samples assumed on interval [0, 1].")
if len(samples.shape) == 1:
    samples = samples.reshape(1, -1)
inverse_samples = 1-samples
dims = len(samples)

if not len(axes):
    axes = (True,)
axes = numpy.asarray(axes, dtype=bool).flatten()

indices = {tuple(axes*idx) for idx in numpy.ndindex((2,)*dims)}
indices = sorted(indices, reverse=True)
indices = sorted(indices, key=lambda idx: sum(idx))

out = [numpy.where(idx, inverse_samples.T, samples.T).T
       for idx in indices]
out = numpy.dstack(out).reshape(dims, -1)
return out
def create_antithetic_variates(samples, axes=())
Generate antithetic variables.

Args:
    samples (numpy.ndarray):
        The samples, assumed to be on the ``[0, 1]^D`` hyper-cube, to
        be reflected.
    axes (tuple):
        Boolean array of which axes to reflect. Use this to limit the
        number of points created in higher dimensions; if omitted, all
        axes are reflected at once.

Returns:
    (numpy.ndarray):
        Same as ``samples``, but with the samples internally reflected.
        Roughly equivalent to ``numpy.vstack([samples, 1-samples])`` in
        one dimension.
3.442806
3.357889
1.025289
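A one-dimensional sketch (the import path is an assumption; the sample generator further down pulls the function in via ``from .antithetic import create_antithetic_variates``):

import numpy
from chaospy.distributions.sampler.antithetic import create_antithetic_variates

samples = numpy.array([[0.1, 0.2]])
mirrored = create_antithetic_variates(samples)
# Each sample x is paired with its reflection 1-x, so the
# values {0.1, 0.9, 0.2, 0.8} are expected in the output.
print(mirrored)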
core, dim_, shape_, dtype_ = chaospy.poly.constructor.identify_core(core)
core, shape = chaospy.poly.constructor.ensure_shape(core, shape, shape_)
core, dtype = chaospy.poly.constructor.ensure_dtype(core, dtype, dtype_)
core, dim = chaospy.poly.constructor.ensure_dim(core, dim, dim_)

# Remove empty elements
for key in list(core.keys()):
    if np.all(core[key] == 0):
        del core[key]

assert isinstance(dim, int), \
    "not recognised type for dim: '%s'" % repr(type(dim))
assert isinstance(shape, tuple), str(shape)
assert dtype is not None, str(dtype)

# assert non-empty container
if not core:
    core = {(0,)*dim: np.zeros(shape, dtype=dtype)}
else:
    core = {key: np.asarray(value, dtype) for key, value in core.items()}

return core, dim, shape, dtype
def preprocess(core, dim, shape, dtype)
Constructor function for the Poly class.
3.077038
2.960576
1.039338
args = [cleanup(arg) for arg in args]

if part is not None:
    parts, orders = part
    if numpy.array(orders).size == 1:
        orders = [int(numpy.array(orders).item())]*len(args)
    parts = numpy.array(parts).flatten()
    for i, arg in enumerate(args):
        m, n = float(parts[i]), float(orders[i])
        l = len(arg)
        args[i] = arg[int(m/n*l):int((m+1)/n*l)]

shapes = [arg.shape for arg in args]
size = numpy.prod(shapes, 0)[0]*numpy.sum(shapes, 0)[1]
if size > 10**9:
    raise MemoryError("Too large sets")

if len(args) == 1:
    out = args[0]
elif len(args) == 2:
    out = combine_two(*args)
else:
    arg1 = combine_two(*args[:2])
    out = combine([arg1,]+args[2:])
return out
def combine(args, part=None)
All combinations of a list of lists.

Args:
    args (numpy.ndarray):
        List of input arrays. Components to combine, with
        ``args[i].shape == (N[i], M[i])``, where the N-axis is combined
        over and the M-axis is static. ``M[i]`` is set to 1 if missing.

Returns:
    (numpy.ndarray):
        Matrix of combinations with shape
        ``(numpy.prod(N), numpy.sum(M))``.

Examples:
    >>> A, B = [1,2], [[4,4],[5,6]]
    >>> print(chaospy.quad.combine([A, B]))
    [[1. 4. 4.]
     [1. 5. 6.]
     [2. 4. 4.]
     [2. 5. 6.]]
3.29407
3.484626
0.945315
arg = numpy.asarray(arg)
if len(arg.shape) <= 1:
    arg = arg.reshape(arg.size, 1)
elif len(arg.shape) > 2:
    raise ValueError("shapes must be smaller than 3")
return arg
def cleanup(arg)
Clean up the input variable.
3.377416
3.308555
1.020813
x_data = numpy.arange(1, order+1)/(order+1.)
x_data = chaospy.quad.combine([x_data]*dim)
return x_data.T
def create_grid_samples(order, dim=1)
Create samples from a regular grid.

Args:
    order (int):
        The order of the grid. Defines the number of samples along
        each axis.
    dim (int):
        The number of dimensions in the grid.

Returns:
    (numpy.ndarray):
        Regular grid with ``shape == (dim, order**dim)``.
5.953828
6.854823
0.86856
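A shape sketch (the exact import path is an assumption, chosen to match the other quadrature helpers in this section):

import chaospy

nodes = chaospy.quad.create_grid_samples(order=3, dim=2)
# order=3, dim=2 gives a 3x3 grid of interior points over (0, 1)^2:
print(nodes.shape)  # expected (2, 9)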
idxm = numpy.array(multi_index(idxi, dim))
idxn = numpy.array(multi_index(idxj, dim))
out = single_index(idxm + idxn)
return out
def add(idxi, idxj, dim)
Bertran addition.

Example
-------
>>> print(chaospy.bertran.add(3, 3, 1))
6
>>> print(chaospy.bertran.add(3, 3, 2))
10
3.458387
6.20377
0.557465
return int(scipy.special.comb(order+dim, dim, 1))
def terms(order, dim)
Count the number of polynomials in an expansion.

Parameters
----------
order : int
    The upper order for the expansion.
dim : int
    The number of dimensions of the expansion.

Returns
-------
N : int
    The number of terms in an expansion of upper order `order` and
    number of dimensions `dim`.
10.498835
17.09058
0.614305
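The count is the binomial coefficient C(order+dim, dim); for example, a second-order expansion in two dimensions has C(4, 2) = 6 terms:

import chaospy

print(chaospy.bertran.terms(2, 2))  # 6: {1, q0, q1, q0^2, q0q1, q1^2}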
def _rec(idx, dim):
    idxn = idxm = 0
    if not dim:
        return ()
    if idx == 0:
        return (0, )*dim
    while terms(idxn, dim) <= idx:
        idxn += 1
    idx -= terms(idxn-1, dim)
    if idx == 0:
        return (idxn,) + (0,)*(dim-1)
    while terms(idxm, dim-1) <= idx:
        idxm += 1
    return (int(idxn-idxm),) + _rec(idx, dim-1)

return _rec(idx, dim)
def multi_index(idx, dim)
Single to multi-index using graded reverse lexicographical notation.

Parameters
----------
idx : int
    Index in integer notation.
dim : int
    The number of dimensions in the multi-index notation.

Returns
-------
out : tuple
    Multi-index of `idx` with `len(out) == dim`.

Examples
--------
>>> for idx in range(5):
...     print(chaospy.bertran.multi_index(idx, 3))
(0, 0, 0)
(1, 0, 0)
(0, 1, 0)
(0, 0, 1)
(2, 0, 0)

See Also
--------
single_index
3.564613
3.809173
0.935797
if stop is None:
    start, stop = 0, start
start = numpy.array(start, dtype=int).flatten()
stop = numpy.array(stop, dtype=int).flatten()
sort = sort.upper()

total = numpy.mgrid[(slice(numpy.max(stop), -1, -1),)*dim]
total = numpy.array(total).reshape(dim, -1)

if start.size > 1:
    for idx, start_ in enumerate(start):
        total = total[:, total[idx] >= start_]
else:
    total = total[:, total.sum(0) >= start]

if stop.size > 1:
    for idx, stop_ in enumerate(stop):
        total = total[:, total[idx] <= stop_]

total = total.T.tolist()

if "G" in sort:
    total = sorted(total, key=sum)
else:
    def cmp_(idxi, idxj):
        if not numpy.any(idxi):
            return 0
        if idxi[0] == idxj[0]:
            # recurse on the comparator itself (the Python 2 builtin
            # `cmp` no longer exists)
            return cmp_(idxi[:-1], idxj[:-1])
        return (idxi[-1] > idxj[-1]) - (idxi[-1] < idxj[-1])
    key = functools.cmp_to_key(cmp_)
    total = sorted(total, key=key)

if "I" in sort:
    total = total[::-1]

if "R" in sort:
    total = [idx[::-1] for idx in total]

for pos, idx in reversed(list(enumerate(total))):
    idx = numpy.array(idx)
    cross_truncation = numpy.asfarray(cross_truncation)
    try:
        if numpy.any(numpy.sum(idx**(1./cross_truncation)) >
                     numpy.max(stop)**(1./cross_truncation)):
            del total[pos]
    except (OverflowError, ZeroDivisionError):
        pass

return total
def bindex(start, stop=None, dim=1, sort="G", cross_truncation=1.)
Generator for creating multi-indices.

Args:
    start (int):
        The lower order of the indices.
    stop (:py:data:`typing.Optional`[int]):
        The maximum shape included. If omitted: stop <- start and
        start <- 0. If an int is provided, it is used as the largest
        total order. If an array of int, it is the largest order along
        each axis.
    dim (int):
        The number of dimensions in the expansion.
    cross_truncation (float):
        Use hyperbolic cross truncation scheme to reduce the number of
        terms in the expansion.

Returns:
    list:
        Ordered list of indices.

Examples:
    >>> print(chaospy.bertran.bindex(2, 3, 2))
    [[2, 0], [1, 1], [0, 2], [3, 0], [2, 1], [1, 2], [0, 3]]
    >>> print(chaospy.bertran.bindex(0, 1, 3))
    [[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]]
2.634143
2.723189
0.967301
if -1 in idxm:
    return 0
order = int(sum(idxm))
dim = len(idxm)
if order == 0:
    return 0
return terms(order-1, dim) + single_index(idxm[1:])
def single_index(idxm)
Multi-index to single integer notation.

Uses graded reverse lexicographical notation.

Parameters
----------
idxm : numpy.ndarray
    Index in multi-index notation.

Returns
-------
idx : int
    Integer index of `idxm`.

Examples
--------
>>> for idx in range(3):
...     print(chaospy.bertran.single_index(numpy.eye(3)[idx]))
1
2
3
5.231637
6.171939
0.847649
idxm = multi_index(idx, dim)
out = 0
while idxm[-1:] == (0,):
    out += 1
    idxm = idxm[:-1]
return out
def rank(idx, dim)
Calculate the index rank according to Bertran's notation.
4.448659
4.485933
0.991691
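The rank counts the trailing zeros of the multi-index; a sketch grounded in the ``multi_index`` example above, where ``multi_index(1, 3) == (1, 0, 0)``:

import chaospy

print(chaospy.bertran.rank(1, 3))  # expected 2: (1, 0, 0) has two trailing zeros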
idxm = multi_index(idx, dim)
if axis is None:
    axis = dim - numpy.argmin(1*(numpy.array(idxm)[::-1] == 0)) - 1

if not idx:
    return idx, axis

if idxm[axis] == 0:
    idxi = parent(parent(idx, dim)[0], dim)[0]
    while child(idxi+1, dim, axis) < idx:
        idxi += 1
    return idxi, axis

out = numpy.array(idxm) - 1*(numpy.eye(dim)[axis])
return single_index(out), axis
def parent(idx, dim, axis=None)
Parent node according to Bertran's notation.

Parameters
----------
idx : int
    Index of the child node.
dim : int
    Dimensionality of the problem.
axis : int
    Assume axis direction.

Returns
-------
out : int
    Index of parent node with `j <= i`, and `j == i` iff `i == 0`.
axis : int
    Dimension direction the parent was found in.
5.029588
5.309032
0.947364
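``parent`` inverts ``child``; a sketch grounded in the ``child`` example below, where ``child(4, 1, 0) == 5``:

import chaospy

print(chaospy.bertran.parent(5, 1))  # expected (4, 0): index 4, found along axis 0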
idxm = multi_index(idx, dim)
out = numpy.array(idxm) + 1*(numpy.eye(len(idxm))[axis])
return single_index(out)
def child(idx, dim, axis)
Child node according to Bertran's notation.

Parameters
----------
idx : int
    Index of the parent node.
dim : int
    Dimensionality of the problem.
axis : int
    Dimension direction to define a child. Must have `0 <= axis < dim`.

Returns
-------
out : int
    Index of child node with `out > idx`.

Examples
--------
>>> print(chaospy.bertran.child(4, 1, 0))
5
>>> print(chaospy.bertran.child(4, 2, 1))
8
7.730153
15.745575
0.490941
idxm = [0]*dim
out = []

def _olindex(idx):
    if numpy.sum(idxm) == order:
        out.append(idxm[:])
        return
    if idx == dim:
        return
    idxm_sum = numpy.sum(idxm)
    idx_saved = idxm[idx]
    for idxi in range(order - numpy.sum(idxm) + 1):
        idxm[idx] = idxi
        if idxm_sum < order:
            _olindex(idx+1)
        else:
            break
    idxm[idx] = idx_saved

_olindex(0)
return numpy.array(out)
def olindex(order, dim)
Create a lexicographically sorted basis for a given order.

Examples
--------
>>> chaospy.bertran.olindex(3, 2)
array([[0, 3],
       [1, 2],
       [2, 1],
       [3, 0]])
3.22001
3.936307
0.818028
indices = [olindex(o, dim) for o in range(order+1)]
indices = numpy.vstack(indices)
return indices
def olindices(order, dim)
Create a lexicographically sorted basis for all orders up to a given order.

Examples:
    >>> chaospy.bertran.olindices(2, 2)
    array([[0, 0],
           [0, 1],
           [1, 0],
           [0, 2],
           [1, 1],
           [2, 0]])
4.073516
7.252534
0.561668
core = core.copy()
if shape is None:
    shape = shape_
elif isinstance(shape, int):
    shape = (shape,)
if tuple(shape) == tuple(shape_):
    return core, shape
ones = np.ones(shape, dtype=int)
for key, val in core.items():
    core[key] = val*ones
return core, shape
def ensure_shape(core, shape, shape_)
Ensure shape is correct.
2.958211
2.848765
1.038419
core = core.copy()
if dtype is None:
    dtype = dtype_
if dtype_ == dtype:
    return core, dtype

for key, val in {
        int: chaospy.poly.typing.asint,
        float: chaospy.poly.typing.asfloat,
        np.float32: chaospy.poly.typing.asfloat,
        np.float64: chaospy.poly.typing.asfloat,
}.items():
    if dtype == key:
        converter = val
        break
else:
    raise ValueError("dtype not recognised (%s)" % str(dtype))

for key, val in core.items():
    core[key] = converter(val)
return core, dtype
def ensure_dtype(core, dtype, dtype_)
Ensure dtype is correct.
2.844136
2.764337
1.028867
if dim is None:
    dim = dim_
if not dim:
    return core, 1
if dim_ == dim:
    return core, int(dim)

if dim > dim_:
    # growing the dimensionality: pad the exponent keys with zeros
    key_convert = lambda vari: vari + (0,)*(dim-dim_)
else:
    # shrinking the dimensionality: truncate the exponent keys
    key_convert = lambda vari: vari[:dim]

new_core = {}
for key, val in core.items():
    key_ = key_convert(key)
    if key_ in new_core:
        new_core[key_] += val
    else:
        new_core[key_] = val

return new_core, int(dim)
def ensure_dim(core, dim, dim_)
Ensure that dim is correct.
2.831138
2.789147
1.015055
return numpy.sum((max(val)+1)**numpy.arange(len(val)-1, -1, -1)*val)
def sort_key(val)
Sort key for sorting keys in grevlex order.
9.710356
9.675517
1.003601
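The key interprets the exponent tuple as digits in base ``max(val)+1``, so sorting by it orders exponent tuples grevlex-style; a standalone sketch of the same formula:

import numpy

key = lambda val: numpy.sum((max(val)+1)**numpy.arange(len(val)-1, -1, -1)*val)
print(sorted([(1, 0), (0, 1)], key=key))  # [(0, 1), (1, 0)]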
return Poly(self.A.copy(), self.dim, self.shape, self.dtype)
def copy(self)
Return a copy of the polynomial.
17.558527
11.360004
1.545644
out = numpy.array([self.A[key] for key in self.keys])
out = numpy.rollaxis(out, -1)
return out
def coefficients(self)
Polynomial coefficients.
5.197267
5.108103
1.017455
shape = poly.shape
poly = polynomials.flatten(poly)
dim = len(dist)

# sample from the input dist
samples = dist.sample(sample, **kws)

qoi_dists = []
for i in range(0, len(poly)):
    # sample the polynomial solution
    if dim == 1:
        dataset = poly[i](samples)
    else:
        dataset = poly[i](*samples)

    lo = dataset.min()
    up = dataset.max()

    # create a QoI distribution
    qoi_dist = distributions.SampleDist(dataset, lo, up)
    qoi_dists.append(qoi_dist)

# reshape the qoi_dists to match the shape of the input poly
qoi_dists = numpy.array(qoi_dists, distributions.Dist)
qoi_dists = qoi_dists.reshape(shape)

if not shape:
    qoi_dists = qoi_dists.item()

return qoi_dists
def QoI_Dist(poly, dist, sample=10000, **kws)
Constructs distributions for the quantity of interests.

The function constructs a kernel density estimator (KDE) for each
polynomial (poly) by sampling it. With the KDEs, distributions (Dists)
are constructed. The Dists can be used for e.g. plotting probability
density functions (PDF), or to make a second uncertainty quantification
simulation with the newly generated Dists.

Args:
    poly (Poly):
        Polynomial of interest.
    dist (Dist):
        Defines the space where the samples for the KDE are taken from
        the poly.
    sample (int):
        Number of samples used in estimation to construct the KDE.

Returns:
    (numpy.ndarray):
        The constructed quantity of interest (QoI) distributions, where
        ``qoi_dists.shape == poly.shape``.

Examples:
    >>> dist = chaospy.Normal(0, 1)
    >>> x = chaospy.variable(1)
    >>> poly = chaospy.Poly([x])
    >>> qoi_dist = chaospy.QoI_Dist(poly, dist)
    >>> values = qoi_dist[0].pdf([-0.75, 0., 0.75])
    >>> print(numpy.around(values, 8))
    [0.29143037 0.39931708 0.29536329]
3.640353
3.994255
0.911397
order = numpy.asarray(order, dtype=int).flatten()
lower = numpy.asarray(lower).flatten()
upper = numpy.asarray(upper).flatten()

dim = max(lower.size, upper.size, order.size)
order = numpy.ones(dim, dtype=int)*order
lower = numpy.ones(dim)*lower
upper = numpy.ones(dim)*upper

if composite is None:
    composite = numpy.array(0)
composite = numpy.asarray(composite)

if not composite.size:
    composite = numpy.array([numpy.linspace(0, 1, composite+1)]*dim)
else:
    composite = numpy.array(composite)
    if len(composite.shape) <= 1:
        composite = numpy.transpose([composite])
    composite = ((composite.T-lower)/(upper-lower)).T

results = [_gauss_legendre(order[i], composite[i]) for i in range(dim)]
abscis = numpy.array([_[0] for _ in results])
weights = numpy.array([_[1] for _ in results])

abscis = chaospy.quad.combine(abscis)
weights = chaospy.quad.combine(weights)

abscis = (upper-lower)*abscis + lower
weights = numpy.prod(weights*(upper-lower), 1)

return abscis.T, weights
def quad_gauss_legendre(order, lower=0, upper=1, composite=None)
Generate the quadrature nodes and weights in Gauss-Legendre quadrature.

Example:
    >>> abscissas, weights = quad_gauss_legendre(3)
    >>> print(numpy.around(abscissas, 4))
    [[0.0694 0.33 0.67 0.9306]]
    >>> print(numpy.around(weights, 4))
    [0.1739 0.3261 0.3261 0.1739]
2.474604
2.496356
0.991286
inner = numpy.ones(order+1)*0.5
outer = numpy.arange(order+1)**2
outer = outer/(16*outer-4.)

banded = numpy.diag(numpy.sqrt(outer[1:]), k=-1) + numpy.diag(inner) + \
    numpy.diag(numpy.sqrt(outer[1:]), k=1)
vals, vecs = numpy.linalg.eig(banded)

abscis, weight = vals.real, vecs[0, :]**2
indices = numpy.argsort(abscis)
abscis, weight = abscis[indices], weight[indices]

n_abscis = len(abscis)

composite = numpy.array(composite).flatten()
composite = list(set(composite))
composite = [comp for comp in composite if (comp < 1) and (comp > 0)]
composite.sort()
composite = [0]+composite+[1]

abscissas = numpy.empty(n_abscis*(len(composite)-1))
weights = numpy.empty(n_abscis*(len(composite)-1))
for dim in range(len(composite)-1):
    abscissas[dim*n_abscis:(dim+1)*n_abscis] = \
        abscis*(composite[dim+1]-composite[dim]) + composite[dim]
    weights[dim*n_abscis:(dim+1)*n_abscis] = \
        weight*(composite[dim+1]-composite[dim])

return abscissas, weights
def _gauss_legendre(order, composite=1)
Backend function.
2.506039
2.494658
1.004562
if len(dist) > 1:
    if isinstance(order, int):
        values = [quad_gauss_patterson(order, d) for d in dist]
    else:
        values = [quad_gauss_patterson(order[i], dist[i])
                  for i in range(len(dist))]
    abscissas = [_[0][0] for _ in values]
    weights = [_[1] for _ in values]
    abscissas = chaospy.quad.combine(abscissas).T
    weights = numpy.prod(chaospy.quad.combine(weights), -1)
    return abscissas, weights

order = sorted(PATTERSON_VALUES.keys())[order]
abscissas, weights = PATTERSON_VALUES[order]

lower, upper = dist.range()
abscissas = .5*(abscissas*(upper-lower)+upper+lower)
abscissas = abscissas.reshape(1, abscissas.size)
weights /= numpy.sum(weights)

return abscissas, weights
def quad_gauss_patterson(order, dist)
Generate abscissas and weights for Gauss-Patterson quadrature.

Args:
    order (int):
        The quadrature order. Must be in the interval (0, 8).
    dist (Dist):
        The domain to create quadrature over.

Returns:
    (numpy.ndarray, numpy.ndarray):
        Abscissas and weights.

Example:
    >>> X, W = chaospy.quad_gauss_patterson(3, chaospy.Uniform(0, 1))
    >>> print(numpy.around(X, 4))
    [[0.0031 0.0198 0.0558 0.1127 0.1894 0.2829 0.3883 0.5 0.6117 0.7171 0.8106 0.8873 0.9442 0.9802 0.9969]]
    >>> print(numpy.around(W, 4))
    [0.0085 0.0258 0.0465 0.0672 0.0858 0.1003 0.1096 0.1128 0.1096 0.1003 0.0858 0.0672 0.0465 0.0258 0.0085]

Reference:
    Prem Kythe, Michael Schaeferkotter, Handbook of Computational
    Methods for Integration, Chapman and Hall, 2004,
    ISBN: 1-58488-428-2, LC: QA299.3.K98.

    Thomas Patterson, The Optimal Addition of Points to Quadrature
    Formulae, Mathematics of Computation, Volume 22, Number 104,
    October 1968, pages 847-856.
2.665665
2.739207
0.973152
from ..distributions.baseclass import Dist
isdist = isinstance(domain, Dist)

if isdist:
    dim = len(domain)
else:
    dim = np.array(domain[0]).size

rule = rule.lower()
if len(rule) == 1:
    rule = collection.QUAD_SHORT_NAMES[rule]
quad_function = collection.get_function(
    rule,
    domain,
    normalize,
    growth=growth,
    composite=composite,
    accuracy=accuracy,
)

if sparse:
    order = np.ones(len(domain), dtype=int)*order
    abscissas, weights = sparse_grid.sparse_grid(quad_function, order, dim)
else:
    abscissas, weights = quad_function(order)

assert len(weights) == abscissas.shape[1]
assert len(abscissas.shape) == 2

return abscissas, weights
def generate_quadrature( order, domain, accuracy=100, sparse=False, rule="C", composite=1, growth=None, part=None, normalize=False, **kws )
Numerical quadrature node and weight generator.

Args:
    order (int):
        The order of the quadrature.
    domain (numpy.ndarray, Dist):
        If an array is provided, the domain is the lower and upper
        bounds ``(lo, up)``. Invalid if gaussian is set. If a Dist is
        provided, bounds and nodes are adapted to the distribution.
        This includes weighting the nodes in Clenshaw-Curtis
        quadrature.
    accuracy (int):
        If gaussian is set, but the Dist provided in domain does not
        have an analytical TTR, `accuracy` sets the approximation order
        for the discretized Stieltjes method.
    sparse (bool):
        If True, use Smolyak's sparse grid instead of a normal tensor
        product grid.
    rule (str):
        Rule for generating abscissas and weights. Either done with
        quadrature rules, or with random samples with constant weights.
    composite (int):
        If provided, composite quadrature will be used. The value
        determines the number of domains along an axis. Ignored in the
        case gaussian=True.
    normalize (bool):
        In the case of distributions, the abscissas and weights are not
        tailored to a distribution beyond matching the bounds. If True,
        the weights are multiplied by the density of the distribution
        evaluated at the abscissas, then normalized to sum to one.
    growth (bool):
        If True, sets the growth rule for the composite quadrature rule
        to exponential for Clenshaw-Curtis quadrature.
3.362618
3.381628
0.994378
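A minimal usage sketch (assuming the generator is exposed as ``chaospy.generate_quadrature``, and that the default short name "C" resolves to Clenshaw-Curtis via the short-name table):

import chaospy

dist = chaospy.Uniform(0, 1)
nodes, weights = chaospy.generate_quadrature(3, dist)
# Nodes come back as a (dim, N) array with a matching (N,) weight vector:
print(nodes.shape, weights.shape)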
@wraps(func)
def caller(*args, **kwargs):
    logger = logging.getLogger(__name__)
    instance = func(*args, **kwargs)
    logger.warning(
        "Distribution `chaospy.{}` has been renamed to ".format(name) +
        "`chaospy.{}` and will be deprecated next release.".format(
            instance.__class__.__name__))
    return instance
return caller
def deprecation_warning(func, name)
Add a deprecation warning to each distribution.
4.686034
4.255038
1.101291
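A hypothetical usage sketch (``OldName`` is illustrative, not a name from the library):

import chaospy

OldName = deprecation_warning(chaospy.Normal, "OldName")
dist = OldName(0, 1)  # logs: Distribution `chaospy.OldName` has been renamed ...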
if poly.dim < len(dist):
    poly = polynomials.setdim(poly, len(dist))

freeze = polynomials.Poly(freeze)
freeze = polynomials.setdim(freeze, len(dist))
keys = freeze.keys
if len(keys) == 1 and keys[0] == (0,)*len(dist):
    freeze = list(freeze.A.values())[0]
else:
    freeze = numpy.array(keys)
freeze = freeze.reshape(int(freeze.size/len(dist)), len(dist))

shape = poly.shape
poly = polynomials.flatten(poly)

kmax = numpy.max(poly.keys, 0) + 1
keys = [range(k) for k in kmax]

A = poly.A.copy()
keys = poly.keys

out = {}
zeros = [0]*poly.dim
for i in range(len(keys)):
    key = list(keys[i])
    a = A[tuple(key)]

    for d in range(poly.dim):
        for j in range(len(freeze)):
            if freeze[j, d]:
                key[d], zeros[d] = zeros[d], key[d]
                break

    tmp = a*dist.mom(tuple(key))
    if tuple(zeros) in out:
        out[tuple(zeros)] = out[tuple(zeros)] + tmp
    else:
        out[tuple(zeros)] = tmp

    for d in range(poly.dim):
        for j in range(len(freeze)):
            if freeze[j, d]:
                key[d], zeros[d] = zeros[d], key[d]
                break

out = polynomials.Poly(out, poly.dim, poly.shape, float)
out = polynomials.reshape(out, shape)
return out
def E_cond(poly, freeze, dist, **kws)
Conditional expected value operator.

1st order statistics of a polynomial on a given probability space,
conditioned on some of the variables.

Args:
    poly (Poly):
        Polynomial to find conditional expected value on.
    freeze (numpy.ndarray):
        Boolean values defining the conditional variables. True values
        imply that the variable is conditioned on, e.g. frozen during
        the expected value calculation.
    dist (Dist):
        The distributions of the input used in ``poly``.

Returns:
    (chaospy.poly.base.Poly):
        Same as ``poly``, but with the variables not tagged in
        ``freeze`` integrated away.

Examples:
    >>> x, y = chaospy.variable(2)
    >>> poly = chaospy.Poly([1, x, y, 10*x*y])
    >>> dist = chaospy.J(chaospy.Gamma(1, 1), chaospy.Normal(0, 2))
    >>> print(chaospy.E_cond(poly, [1, 0], dist))
    [1.0, q0, 0.0, 0.0]
    >>> print(chaospy.E_cond(poly, [0, 1], dist))
    [1.0, 1.0, q1, 10.0q1]
    >>> print(chaospy.E_cond(poly, [1, 1], dist))
    [1.0, q0, q1, 10.0q0q1]
    >>> print(chaospy.E_cond(poly, [0, 0], dist))
    [1.0, 1.0, 0.0, 0.0]
2.856781
3.044801
0.938249
logger = logging.getLogger(__name__)
logger.debug("generating random samples using rule %s", rule)

rule = rule.upper()

if isinstance(domain, int):
    dim = domain
    trans = lambda x_data: x_data
elif isinstance(domain, (tuple, list, numpy.ndarray)):
    domain = numpy.asfarray(domain)
    if len(domain.shape) < 2:
        dim = 1
    else:
        dim = len(domain[0])
    trans = lambda x_data: ((domain[1]-domain[0])*x_data.T + domain[0]).T
else:
    dist = domain
    dim = len(dist)
    trans = dist.inv

if antithetic is not None:

    from .antithetic import create_antithetic_variates
    antithetic = numpy.array(antithetic, dtype=bool).flatten()
    if antithetic.size == 1 and dim > 1:
        antithetic = numpy.repeat(antithetic, dim)

    size = numpy.sum(1*numpy.array(antithetic))
    order_saved = order
    order = int(numpy.log(order - dim))
    order = order if order > 1 else 1
    while order**dim < order_saved:
        order += 1
    trans_ = trans
    trans = lambda x_data: trans_(
        create_antithetic_variates(x_data, antithetic)[:, :order_saved])

assert rule in SAMPLERS, "rule not recognised"
sampler = SAMPLERS[rule]
x_data = trans(sampler(order=order, dim=dim))

logger.debug("order: %d, dim: %d -> shape: %s", order, dim, x_data.shape)
return x_data
def generate_samples(order, domain=1, rule="R", antithetic=None)
Sample generator.

Args:
    order (int):
        Sample order. Determines the number of samples to create.
    domain (Dist, int, numpy.ndarray):
        Defines the space where the samples are generated. If an
        integer is provided, the space ``[0, 1]^domain`` will be used.
        If an array-like object is provided, the hypercube it defines
        will be used. If a distribution, the domain it spans will be
        used.
    rule (str):
        Rule for generating samples. The various rules are listed in
        :mod:`chaospy.distributions.sampler.generator`.
    antithetic (tuple):
        Sequence of boolean values. Represents the axes to mirror using
        antithetic variables.
3.110925
2.996636
1.038139
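A usage sketch (assuming the generator is exposed at package level; the default rule "R" draws plain pseudo-random samples):

import chaospy

dist = chaospy.J(chaospy.Uniform(0, 1), chaospy.Normal(0, 1))
samples = chaospy.generate_samples(100, domain=dist, rule="R")
print(samples.shape)  # expected (2, 100): one row per dimension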
cords = np.array(cords) + 1
slices = []
for cord in cords:
    slices.append(slice(1, 2**cord+1, 2))
grid = np.mgrid[slices]
indices = grid.reshape(len(cords), np.prod(grid.shape[1:])).T
sgrid = indices*2.**-cords
return sgrid
def sparse_segment(cords)
r""" Create a segment of a sparse grid. Convert a ol-index to sparse grid coordinates on ``[0, 1]^N`` hyper-cube. A sparse grid of order ``D`` coencide with the set of sparse_segments where ``||cords||_1 <= D``. More specifically, a segment of: .. math:: \cup_{cords \in C} sparse_segment(cords) == sparse_grid(M) where: .. math:: C = {cords: M=sum(cords)} Args: cords (numpy.ndarray): The segment to extract. ``cord`` must consist of non-negative integers. Returns: Q (numpy.ndarray): Sparse segment where ``Q.shape==(K, sum(M))`` and ``K`` is segment specific. Examples: >>> print(cp.bertran.sparse_segment([0, 2])) [[0.5 0.125] [0.5 0.375] [0.5 0.625] [0.5 0.875]] >>> print(cp.bertran.sparse_segment([0, 1, 0, 0])) [[0.5 0.25 0.5 0.5 ] [0.5 0.75 0.5 0.5 ]]
4.420599
5.403036
0.81817
abscissas = numpy.asfarray(abscissas)
if len(abscissas.shape) == 1:
    abscissas = abscissas.reshape(1, abscissas.size)
dim, size = abscissas.shape

order = 1
while chaospy.bertran.terms(order, dim) <= size:
    order += 1

indices = numpy.array(chaospy.bertran.bindex(0, order-1, dim, sort)[:size])
idx, idy = numpy.mgrid[:size, :size]

matrix = numpy.prod(abscissas.T[idx]**indices[idy], -1)
det = numpy.linalg.det(matrix)
if det == 0:
    raise numpy.linalg.LinAlgError("invertible matrix required")

vec = chaospy.poly.basis(0, order-1, dim, sort)[:size]

coeffs = numpy.zeros((size, size))

if size == 1:
    out = chaospy.poly.basis(0, 0, dim, sort)*abscissas.item()
elif size == 2:
    coeffs = numpy.linalg.inv(matrix)
    out = chaospy.poly.sum(vec*(coeffs.T), 1)
else:
    for i in range(size):
        for j in range(size):
            coeffs[i, j] += numpy.linalg.det(matrix[1:, 1:])
            matrix = numpy.roll(matrix, -1, axis=0)
        matrix = numpy.roll(matrix, -1, axis=1)
    coeffs /= det
    out = chaospy.poly.sum(vec*(coeffs.T), 1)

return out
def lagrange_polynomial(abscissas, sort="GR")
Create Lagrange polynomials.

Args:
    abscissas (numpy.ndarray):
        Sample points where the Lagrange polynomials shall be defined.

Example:
    >>> print(chaospy.around(lagrange_polynomial([-10, 10]), 4))
    [-0.05q0+0.5, 0.05q0+0.5]
    >>> print(chaospy.around(lagrange_polynomial([-1, 0, 1]), 4))
    [0.5q0^2-0.5q0, -q0^2+1.0, 0.5q0^2+0.5q0]
    >>> poly = lagrange_polynomial([[1, 0, 1], [0, 1, 2]])
    >>> print(chaospy.around(poly, 4))
    [0.5q0-0.5q1+0.5, -q0+1.0, 0.5q0+0.5q1-0.5]
    >>> print(numpy.around(poly([1, 0, 1], [0, 1, 2]), 4))
    [[1. 0. 0.]
     [0. 1. 0.]
     [0. 0. 1.]]
2.844051
2.937759
0.968102
samples = numpy.asarray(samples)
if lo is None:
    lo = samples.min()
if up is None:
    up = samples.max()

try:
    # construct the kernel density estimator
    dist = sample_dist(samples, lo, up)

# raised by gaussian_kde if the dataset is a singular matrix
except numpy.linalg.LinAlgError:
    dist = Uniform(lower=-numpy.inf, upper=numpy.inf)

return dist
def SampleDist(samples, lo=None, up=None)
Distribution based on samples.

Estimates a distribution from the given samples by constructing a
kernel density estimator (KDE).

Args:
    samples:
        Sample values to use in the construction of the KDE.
    lo (float):
        Location of lower threshold.
    up (float):
        Location of upper threshold.

Example:
    >>> distribution = chaospy.SampleDist([0, 1, 1, 1, 2])
    >>> print(distribution)
    sample_dist(lo=0, up=2)
    >>> q = numpy.linspace(0, 1, 5)
    >>> print(numpy.around(distribution.inv(q), 4))
    [0. 0.6016 1. 1.3984 2. ]
    >>> print(numpy.around(distribution.fwd(distribution.inv(q)), 4))
    [0. 0.25 0.5 0.75 1. ]
    >>> print(numpy.around(distribution.pdf(distribution.inv(q)), 4))
    [0.2254 0.4272 0.5135 0.4272 0.2254]
    >>> print(numpy.around(distribution.sample(4), 4))  # doctest: +SKIP
    [-0.4123 1.1645 -0.0131 1.3302]
    >>> print(numpy.around(distribution.mom(1), 4))
    1.0
    >>> print(numpy.around(distribution.ttr([1, 2, 3]), 4))
    [[1.3835 0.7983 1.1872]
     [0.2429 0.2693 0.4102]]
4.553177
5.559444
0.818999
size_ = numpy.prod(size, dtype=int)
dim = len(self)
if dim > 1:
    if isinstance(size, (tuple, list, numpy.ndarray)):
        shape = (dim,) + tuple(size)
    else:
        shape = (dim, size)
else:
    shape = size

out = self.kernel.resample(size_)[0]
try:
    out = out.reshape(shape)
except ValueError:
    if len(self) == 1:
        out = out.flatten()
    else:
        out = out.reshape(dim, out.size//dim)

return out
def sample(self, size=(), rule="R", antithetic=None, verbose=False, **kws)
Overwrites the sample() method, because the constructed Dist is based
on a KDE and therefore only supports the random sampling provided by
the KDE itself.
3.318597
3.224782
1.029092
mat_ref = numpy.asfarray(mat)
mat = mat_ref.copy()
diag_max = numpy.diag(mat).max()
assert len(mat.shape) == 2
size = len(mat)

hitri = numpy.zeros((size, size))
piv = numpy.arange(size)

for idx in range(size):

    idx_max = numpy.argmax(numpy.diag(mat[idx:, idx:])) + idx

    if mat[idx_max, idx_max] <= numpy.abs(diag_max*eps):
        if not idx:
            raise ValueError("Purely negative definite")
        for j in range(idx, size):
            hitri[j, j] = hitri[j-1, j-1]/float(j)
        break

    tmp = mat[:, idx].copy()
    mat[:, idx] = mat[:, idx_max]
    mat[:, idx_max] = tmp

    tmp = hitri[:, idx].copy()
    hitri[:, idx] = hitri[:, idx_max]
    hitri[:, idx_max] = tmp

    tmp = mat[idx, :].copy()
    mat[idx, :] = mat[idx_max, :]
    mat[idx_max, :] = tmp

    piv[idx], piv[idx_max] = piv[idx_max], piv[idx]

    hitri[idx, idx] = numpy.sqrt(mat[idx, idx])
    rval = mat[idx, idx+1:]/hitri[idx, idx]
    hitri[idx, idx+1:] = rval
    mat[idx+1:, idx+1:] -= numpy.outer(rval, rval)

perm = numpy.zeros((size, size), dtype=int)
for idx in range(size):
    perm[idx, piv[idx]] = 1

return perm, hitri.T
def bastos_ohagen(mat, eps=1e-16)
Bastos-O'Hagen algorithm for modified Cholesky decomposition.

Args:
    mat (numpy.ndarray):
        Input matrix to decompose. Assumed to be close to positive
        definite.
    eps (float):
        Tolerance value for the eigenvalues. Values smaller than
        ``eps*numpy.diag(mat).max()`` are considered to be zero.

Returns:
    (:py:data:`typing.Tuple`[numpy.ndarray, numpy.ndarray]):
        perm: Permutation matrix.
        lowtri: Lower triangular factor.

Examples:
    >>> mat = [[4, 2, 1], [2, 6, 3], [1, 3, -.004]]
    >>> perm, lowtri = bastos_ohagen(mat)
    >>> print(perm)
    [[0 1 0]
     [1 0 0]
     [0 0 1]]
    >>> print(numpy.around(lowtri, 4))
    [[ 2.4495  0.      0.    ]
     [ 0.8165  1.8257  0.    ]
     [ 1.2247 -0.      0.9129]]
    >>> comp = numpy.dot(perm, lowtri)
    >>> print(numpy.around(numpy.dot(comp, comp.T), 4))
    [[4.     2.     1.    ]
     [2.     6.     3.    ]
     [1.     3.     2.3333]]
2.626387
2.568384
1.022583