Search is not available for this dataset
repo
stringlengths
2
152
file
stringlengths
15
239
code
stringlengths
0
58.4M
file_length
int64
0
58.4M
avg_line_length
float64
0
1.81M
max_line_length
int64
0
12.7M
extension_type
stringclasses
364 values
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/ndimage/tests/test_regression.py
from __future__ import division, print_function, absolute_import import numpy as np from numpy.testing import assert_array_almost_equal import scipy.ndimage as ndimage def test_byte_order_median(): """Regression test for #413: median_filter does not handle bytes orders.""" a = np.arange(9, dtype='<f4').reshape(3, 3) ref = ndimage.filters.median_filter(a,(3, 3)) b = np.arange(9, dtype='>f4').reshape(3, 3) t = ndimage.filters.median_filter(b, (3, 3)) assert_array_almost_equal(ref, t) def test_zoom_output_shape(): """Ticket #643""" x = np.arange(12).reshape((3,4)) ndimage.zoom(x, 2, output=np.zeros((6,8))) def test_ticket_742(): def SE(img, thresh=.7, size=4): mask = img > thresh rank = len(mask.shape) la, co = ndimage.label(mask, ndimage.generate_binary_structure(rank, rank)) slices = ndimage.find_objects(la) if np.dtype(np.intp) != np.dtype('i'): shape = (3,1240,1240) a = np.random.rand(np.product(shape)).reshape(shape) # shouldn't crash SE(a) def test_gh_issue_3025(): """Github issue #3025 - improper merging of labels""" d = np.zeros((60,320)) d[:,:257] = 1 d[:,260:] = 1 d[36,257] = 1 d[35,258] = 1 d[35,259] = 1 assert ndimage.label(d, np.ones((3,3)))[1] == 1
1,360
27.354167
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/ndimage/tests/test_measurements.py
from __future__ import division, print_function, absolute_import import os.path import numpy as np from numpy.testing import (assert_, assert_array_almost_equal, assert_equal, assert_almost_equal, assert_array_equal) from pytest import raises as assert_raises from scipy._lib._numpy_compat import suppress_warnings import scipy.ndimage as ndimage types = [np.int8, np.uint8, np.int16, np.uint16, np.int32, np.uint32, np.int64, np.uint64, np.float32, np.float64] np.mod(1., 1) # Silence fmod bug on win-amd64. See #1408 and #1238. class Test_measurements_stats(object): """ndimage.measurements._stats() is a utility function used by other functions.""" def test_a(self): x = [0,1,2,6] labels = [0,0,1,1] index = [0,1] for shp in [(4,), (2,2)]: x = np.array(x).reshape(shp) labels = np.array(labels).reshape(shp) counts, sums = ndimage.measurements._stats(x, labels=labels, index=index) assert_array_equal(counts, [2, 2]) assert_array_equal(sums, [1.0, 8.0]) def test_b(self): # Same data as test_a, but different labels. The label 9 exceeds the # length of 'labels', so this test will follow a different code path. 
x = [0,1,2,6] labels = [0,0,9,9] index = [0,9] for shp in [(4,), (2,2)]: x = np.array(x).reshape(shp) labels = np.array(labels).reshape(shp) counts, sums = ndimage.measurements._stats(x, labels=labels, index=index) assert_array_equal(counts, [2, 2]) assert_array_equal(sums, [1.0, 8.0]) def test_a_centered(self): x = [0,1,2,6] labels = [0,0,1,1] index = [0,1] for shp in [(4,), (2,2)]: x = np.array(x).reshape(shp) labels = np.array(labels).reshape(shp) counts, sums, centers = ndimage.measurements._stats(x, labels=labels, index=index, centered=True) assert_array_equal(counts, [2, 2]) assert_array_equal(sums, [1.0, 8.0]) assert_array_equal(centers, [0.5, 8.0]) def test_b_centered(self): x = [0,1,2,6] labels = [0,0,9,9] index = [0,9] for shp in [(4,), (2,2)]: x = np.array(x).reshape(shp) labels = np.array(labels).reshape(shp) counts, sums, centers = ndimage.measurements._stats(x, labels=labels, index=index, centered=True) assert_array_equal(counts, [2, 2]) assert_array_equal(sums, [1.0, 8.0]) assert_array_equal(centers, [0.5, 8.0]) def test_nonint_labels(self): x = [0,1,2,6] labels = [0.0, 0.0, 9.0, 9.0] index = [0.0, 9.0] for shp in [(4,), (2,2)]: x = np.array(x).reshape(shp) labels = np.array(labels).reshape(shp) counts, sums, centers = ndimage.measurements._stats(x, labels=labels, index=index, centered=True) assert_array_equal(counts, [2, 2]) assert_array_equal(sums, [1.0, 8.0]) assert_array_equal(centers, [0.5, 8.0]) class Test_measurements_select(object): """ndimage.measurements._select() is a utility function used by other functions.""" def test_basic(self): x = [0,1,6,2] cases = [ ([0,0,1,1], [0,1]), # "Small" integer labels ([0,0,9,9], [0,9]), # A label larger than len(labels) ([0.0,0.0,7.0,7.0], [0.0, 7.0]), # Non-integer labels ] for labels, index in cases: result = ndimage.measurements._select(x, labels=labels, index=index) assert_(len(result) == 0) result = ndimage.measurements._select(x, labels=labels, index=index, find_max=True) assert_(len(result) == 1) 
assert_array_equal(result[0], [1, 6]) result = ndimage.measurements._select(x, labels=labels, index=index, find_min=True) assert_(len(result) == 1) assert_array_equal(result[0], [0, 2]) result = ndimage.measurements._select(x, labels=labels, index=index, find_min=True, find_min_positions=True) assert_(len(result) == 2) assert_array_equal(result[0], [0, 2]) assert_array_equal(result[1], [0, 3]) assert_equal(result[1].dtype.kind, 'i') result = ndimage.measurements._select(x, labels=labels, index=index, find_max=True, find_max_positions=True) assert_(len(result) == 2) assert_array_equal(result[0], [1, 6]) assert_array_equal(result[1], [1, 2]) assert_equal(result[1].dtype.kind, 'i') def test_label01(): data = np.ones([]) out, n = ndimage.label(data) assert_array_almost_equal(out, 1) assert_equal(n, 1) def test_label02(): data = np.zeros([]) out, n = ndimage.label(data) assert_array_almost_equal(out, 0) assert_equal(n, 0) def test_label03(): data = np.ones([1]) out, n = ndimage.label(data) assert_array_almost_equal(out, [1]) assert_equal(n, 1) def test_label04(): data = np.zeros([1]) out, n = ndimage.label(data) assert_array_almost_equal(out, [0]) assert_equal(n, 0) def test_label05(): data = np.ones([5]) out, n = ndimage.label(data) assert_array_almost_equal(out, [1, 1, 1, 1, 1]) assert_equal(n, 1) def test_label06(): data = np.array([1, 0, 1, 1, 0, 1]) out, n = ndimage.label(data) assert_array_almost_equal(out, [1, 0, 2, 2, 0, 3]) assert_equal(n, 3) def test_label07(): data = np.array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]) out, n = ndimage.label(data) assert_array_almost_equal(out, [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]) assert_equal(n, 0) def test_label08(): data = np.array([[1, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0], [1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0], [0, 0, 0, 1, 1, 0]]) out, n = 
ndimage.label(data) assert_array_almost_equal(out, [[1, 0, 0, 0, 0, 0], [0, 0, 2, 2, 0, 0], [0, 0, 2, 2, 2, 0], [3, 3, 0, 0, 0, 0], [3, 3, 0, 0, 0, 0], [0, 0, 0, 4, 4, 0]]) assert_equal(n, 4) def test_label09(): data = np.array([[1, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0], [1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0], [0, 0, 0, 1, 1, 0]]) struct = ndimage.generate_binary_structure(2, 2) out, n = ndimage.label(data, struct) assert_array_almost_equal(out, [[1, 0, 0, 0, 0, 0], [0, 0, 2, 2, 0, 0], [0, 0, 2, 2, 2, 0], [2, 2, 0, 0, 0, 0], [2, 2, 0, 0, 0, 0], [0, 0, 0, 3, 3, 0]]) assert_equal(n, 3) def test_label10(): data = np.array([[0, 0, 0, 0, 0, 0], [0, 1, 1, 0, 1, 0], [0, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0]]) struct = ndimage.generate_binary_structure(2, 2) out, n = ndimage.label(data, struct) assert_array_almost_equal(out, [[0, 0, 0, 0, 0, 0], [0, 1, 1, 0, 1, 0], [0, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0]]) assert_equal(n, 1) def test_label11(): for type in types: data = np.array([[1, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0], [1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0], [0, 0, 0, 1, 1, 0]], type) out, n = ndimage.label(data) expected = [[1, 0, 0, 0, 0, 0], [0, 0, 2, 2, 0, 0], [0, 0, 2, 2, 2, 0], [3, 3, 0, 0, 0, 0], [3, 3, 0, 0, 0, 0], [0, 0, 0, 4, 4, 0]] assert_array_almost_equal(out, expected) assert_equal(n, 4) def test_label11_inplace(): for type in types: data = np.array([[1, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0], [1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0], [0, 0, 0, 1, 1, 0]], type) n = ndimage.label(data, output=data) expected = [[1, 0, 0, 0, 0, 0], [0, 0, 2, 2, 0, 0], [0, 0, 2, 2, 2, 0], [3, 3, 0, 0, 0, 0], [3, 3, 0, 0, 0, 0], [0, 0, 0, 4, 4, 0]] assert_array_almost_equal(data, expected) assert_equal(n, 4) def test_label12(): for type in types: data = np.array([[0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 0, 1], [0, 0, 1, 0, 1, 1], [0, 0, 1, 1, 1, 1], [0, 0, 0, 1, 1, 0]], type) out, n = ndimage.label(data) expected = [[0, 0, 0, 0, 1, 1], [0, 0, 
0, 0, 0, 1], [0, 0, 1, 0, 1, 1], [0, 0, 1, 1, 1, 1], [0, 0, 0, 1, 1, 0]] assert_array_almost_equal(out, expected) assert_equal(n, 1) def test_label13(): for type in types: data = np.array([[1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1], [1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], type) out, n = ndimage.label(data) expected = [[1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1], [1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] assert_array_almost_equal(out, expected) assert_equal(n, 1) def test_label_output_typed(): data = np.ones([5]) for t in types: output = np.zeros([5], dtype=t) n = ndimage.label(data, output=output) assert_array_almost_equal(output, 1) assert_equal(n, 1) def test_label_output_dtype(): data = np.ones([5]) for t in types: output, n = ndimage.label(data, output=t) assert_array_almost_equal(output, 1) assert output.dtype == t def test_label_output_wrong_size(): data = np.ones([5]) for t in types: output = np.zeros([10], t) assert_raises((RuntimeError, ValueError), ndimage.label, data, output=output) def test_label_structuring_elements(): data = np.loadtxt(os.path.join(os.path.dirname(__file__), "data", "label_inputs.txt")) strels = np.loadtxt(os.path.join(os.path.dirname(__file__), "data", "label_strels.txt")) results = np.loadtxt(os.path.join(os.path.dirname(__file__), "data", "label_results.txt")) data = data.reshape((-1, 7, 7)) strels = strels.reshape((-1, 3, 3)) results = results.reshape((-1, 7, 7)) r = 0 for i in range(data.shape[0]): d = data[i, :, :] for j in range(strels.shape[0]): s = strels[j, :, :] assert_equal(ndimage.label(d, s)[0], results[r, :, :]) r += 1 def test_label_default_dtype(): test_array = np.random.rand(10, 10) label, no_features = ndimage.label(test_array > 0.5) assert_(label.dtype in (np.int32, np.int64)) # Shouldn't raise an exception ndimage.find_objects(label) def test_find_objects01(): data = np.ones([], dtype=int) out = 
ndimage.find_objects(data) assert_(out == [()]) def test_find_objects02(): data = np.zeros([], dtype=int) out = ndimage.find_objects(data) assert_(out == []) def test_find_objects03(): data = np.ones([1], dtype=int) out = ndimage.find_objects(data) assert_equal(out, [(slice(0, 1, None),)]) def test_find_objects04(): data = np.zeros([1], dtype=int) out = ndimage.find_objects(data) assert_equal(out, []) def test_find_objects05(): data = np.ones([5], dtype=int) out = ndimage.find_objects(data) assert_equal(out, [(slice(0, 5, None),)]) def test_find_objects06(): data = np.array([1, 0, 2, 2, 0, 3]) out = ndimage.find_objects(data) assert_equal(out, [(slice(0, 1, None),), (slice(2, 4, None),), (slice(5, 6, None),)]) def test_find_objects07(): data = np.array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]) out = ndimage.find_objects(data) assert_equal(out, []) def test_find_objects08(): data = np.array([[1, 0, 0, 0, 0, 0], [0, 0, 2, 2, 0, 0], [0, 0, 2, 2, 2, 0], [3, 3, 0, 0, 0, 0], [3, 3, 0, 0, 0, 0], [0, 0, 0, 4, 4, 0]]) out = ndimage.find_objects(data) assert_equal(out, [(slice(0, 1, None), slice(0, 1, None)), (slice(1, 3, None), slice(2, 5, None)), (slice(3, 5, None), slice(0, 2, None)), (slice(5, 6, None), slice(3, 5, None))]) def test_find_objects09(): data = np.array([[1, 0, 0, 0, 0, 0], [0, 0, 2, 2, 0, 0], [0, 0, 2, 2, 2, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 4, 4, 0]]) out = ndimage.find_objects(data) assert_equal(out, [(slice(0, 1, None), slice(0, 1, None)), (slice(1, 3, None), slice(2, 5, None)), None, (slice(5, 6, None), slice(3, 5, None))]) def test_sum01(): for type in types: input = np.array([], type) output = ndimage.sum(input) assert_equal(output, 0.0) def test_sum02(): for type in types: input = np.zeros([0, 4], type) output = ndimage.sum(input) assert_equal(output, 0.0) def test_sum03(): for type in types: input = np.ones([], type) output = ndimage.sum(input) 
assert_almost_equal(output, 1.0) def test_sum04(): for type in types: input = np.array([1, 2], type) output = ndimage.sum(input) assert_almost_equal(output, 3.0) def test_sum05(): for type in types: input = np.array([[1, 2], [3, 4]], type) output = ndimage.sum(input) assert_almost_equal(output, 10.0) def test_sum06(): labels = np.array([], bool) for type in types: input = np.array([], type) output = ndimage.sum(input, labels=labels) assert_equal(output, 0.0) def test_sum07(): labels = np.ones([0, 4], bool) for type in types: input = np.zeros([0, 4], type) output = ndimage.sum(input, labels=labels) assert_equal(output, 0.0) def test_sum08(): labels = np.array([1, 0], bool) for type in types: input = np.array([1, 2], type) output = ndimage.sum(input, labels=labels) assert_equal(output, 1.0) def test_sum09(): labels = np.array([1, 0], bool) for type in types: input = np.array([[1, 2], [3, 4]], type) output = ndimage.sum(input, labels=labels) assert_almost_equal(output, 4.0) def test_sum10(): labels = np.array([1, 0], bool) input = np.array([[1, 2], [3, 4]], bool) output = ndimage.sum(input, labels=labels) assert_almost_equal(output, 2.0) def test_sum11(): labels = np.array([1, 2], np.int8) for type in types: input = np.array([[1, 2], [3, 4]], type) output = ndimage.sum(input, labels=labels, index=2) assert_almost_equal(output, 6.0) def test_sum12(): labels = np.array([[1, 2], [2, 4]], np.int8) for type in types: input = np.array([[1, 2], [3, 4]], type) output = ndimage.sum(input, labels=labels, index=[4, 8, 2]) assert_array_almost_equal(output, [4.0, 0.0, 5.0]) def test_mean01(): labels = np.array([1, 0], bool) for type in types: input = np.array([[1, 2], [3, 4]], type) output = ndimage.mean(input, labels=labels) assert_almost_equal(output, 2.0) def test_mean02(): labels = np.array([1, 0], bool) input = np.array([[1, 2], [3, 4]], bool) output = ndimage.mean(input, labels=labels) assert_almost_equal(output, 1.0) def test_mean03(): labels = np.array([1, 2]) for type in 
types: input = np.array([[1, 2], [3, 4]], type) output = ndimage.mean(input, labels=labels, index=2) assert_almost_equal(output, 3.0) def test_mean04(): labels = np.array([[1, 2], [2, 4]], np.int8) olderr = np.seterr(all='ignore') try: for type in types: input = np.array([[1, 2], [3, 4]], type) output = ndimage.mean(input, labels=labels, index=[4, 8, 2]) assert_array_almost_equal(output[[0,2]], [4.0, 2.5]) assert_(np.isnan(output[1])) finally: np.seterr(**olderr) def test_minimum01(): labels = np.array([1, 0], bool) for type in types: input = np.array([[1, 2], [3, 4]], type) output = ndimage.minimum(input, labels=labels) assert_almost_equal(output, 1.0) def test_minimum02(): labels = np.array([1, 0], bool) input = np.array([[2, 2], [2, 4]], bool) output = ndimage.minimum(input, labels=labels) assert_almost_equal(output, 1.0) def test_minimum03(): labels = np.array([1, 2]) for type in types: input = np.array([[1, 2], [3, 4]], type) output = ndimage.minimum(input, labels=labels, index=2) assert_almost_equal(output, 2.0) def test_minimum04(): labels = np.array([[1, 2], [2, 3]]) for type in types: input = np.array([[1, 2], [3, 4]], type) output = ndimage.minimum(input, labels=labels, index=[2, 3, 8]) assert_array_almost_equal(output, [2.0, 4.0, 0.0]) def test_maximum01(): labels = np.array([1, 0], bool) for type in types: input = np.array([[1, 2], [3, 4]], type) output = ndimage.maximum(input, labels=labels) assert_almost_equal(output, 3.0) def test_maximum02(): labels = np.array([1, 0], bool) input = np.array([[2, 2], [2, 4]], bool) output = ndimage.maximum(input, labels=labels) assert_almost_equal(output, 1.0) def test_maximum03(): labels = np.array([1, 2]) for type in types: input = np.array([[1, 2], [3, 4]], type) output = ndimage.maximum(input, labels=labels, index=2) assert_almost_equal(output, 4.0) def test_maximum04(): labels = np.array([[1, 2], [2, 3]]) for type in types: input = np.array([[1, 2], [3, 4]], type) output = ndimage.maximum(input, labels=labels, 
index=[2, 3, 8]) assert_array_almost_equal(output, [3.0, 4.0, 0.0]) def test_maximum05(): # Regression test for ticket #501 (Trac) x = np.array([-3,-2,-1]) assert_equal(ndimage.maximum(x),-1) def test_median01(): a = np.array([[1, 2, 0, 1], [5, 3, 0, 4], [0, 0, 0, 7], [9, 3, 0, 0]]) labels = np.array([[1, 1, 0, 2], [1, 1, 0, 2], [0, 0, 0, 2], [3, 3, 0, 0]]) output = ndimage.median(a, labels=labels, index=[1, 2, 3]) assert_array_almost_equal(output, [2.5, 4.0, 6.0]) def test_median02(): a = np.array([[1, 2, 0, 1], [5, 3, 0, 4], [0, 0, 0, 7], [9, 3, 0, 0]]) output = ndimage.median(a) assert_almost_equal(output, 1.0) def test_median03(): a = np.array([[1, 2, 0, 1], [5, 3, 0, 4], [0, 0, 0, 7], [9, 3, 0, 0]]) labels = np.array([[1, 1, 0, 2], [1, 1, 0, 2], [0, 0, 0, 2], [3, 3, 0, 0]]) output = ndimage.median(a, labels=labels) assert_almost_equal(output, 3.0) def test_variance01(): olderr = np.seterr(all='ignore') try: for type in types: input = np.array([], type) with suppress_warnings() as sup: sup.filter(RuntimeWarning, "Mean of empty slice") output = ndimage.variance(input) assert_(np.isnan(output)) finally: np.seterr(**olderr) def test_variance02(): for type in types: input = np.array([1], type) output = ndimage.variance(input) assert_almost_equal(output, 0.0) def test_variance03(): for type in types: input = np.array([1, 3], type) output = ndimage.variance(input) assert_almost_equal(output, 1.0) def test_variance04(): input = np.array([1, 0], bool) output = ndimage.variance(input) assert_almost_equal(output, 0.25) def test_variance05(): labels = [2, 2, 3] for type in types: input = np.array([1, 3, 8], type) output = ndimage.variance(input, labels, 2) assert_almost_equal(output, 1.0) def test_variance06(): labels = [2, 2, 3, 3, 4] olderr = np.seterr(all='ignore') try: for type in types: input = np.array([1, 3, 8, 10, 8], type) output = ndimage.variance(input, labels, [2, 3, 4]) assert_array_almost_equal(output, [1.0, 1.0, 0.0]) finally: np.seterr(**olderr) def 
test_standard_deviation01(): olderr = np.seterr(all='ignore') try: for type in types: input = np.array([], type) with suppress_warnings() as sup: sup.filter(RuntimeWarning, "Mean of empty slice") output = ndimage.standard_deviation(input) assert_(np.isnan(output)) finally: np.seterr(**olderr) def test_standard_deviation02(): for type in types: input = np.array([1], type) output = ndimage.standard_deviation(input) assert_almost_equal(output, 0.0) def test_standard_deviation03(): for type in types: input = np.array([1, 3], type) output = ndimage.standard_deviation(input) assert_almost_equal(output, np.sqrt(1.0)) def test_standard_deviation04(): input = np.array([1, 0], bool) output = ndimage.standard_deviation(input) assert_almost_equal(output, 0.5) def test_standard_deviation05(): labels = [2, 2, 3] for type in types: input = np.array([1, 3, 8], type) output = ndimage.standard_deviation(input, labels, 2) assert_almost_equal(output, 1.0) def test_standard_deviation06(): labels = [2, 2, 3, 3, 4] olderr = np.seterr(all='ignore') try: for type in types: input = np.array([1, 3, 8, 10, 8], type) output = ndimage.standard_deviation(input, labels, [2, 3, 4]) assert_array_almost_equal(output, [1.0, 1.0, 0.0]) finally: np.seterr(**olderr) def test_standard_deviation07(): labels = [1] olderr = np.seterr(all='ignore') try: for type in types: input = np.array([-0.00619519], type) output = ndimage.standard_deviation(input, labels, [1]) assert_array_almost_equal(output, [0]) finally: np.seterr(**olderr) def test_minimum_position01(): labels = np.array([1, 0], bool) for type in types: input = np.array([[1, 2], [3, 4]], type) output = ndimage.minimum_position(input, labels=labels) assert_equal(output, (0, 0)) def test_minimum_position02(): for type in types: input = np.array([[5, 4, 2, 5], [3, 7, 0, 2], [1, 5, 1, 1]], type) output = ndimage.minimum_position(input) assert_equal(output, (1, 2)) def test_minimum_position03(): input = np.array([[5, 4, 2, 5], [3, 7, 0, 2], [1, 5, 1, 1]], 
bool) output = ndimage.minimum_position(input) assert_equal(output, (1, 2)) def test_minimum_position04(): input = np.array([[5, 4, 2, 5], [3, 7, 1, 2], [1, 5, 1, 1]], bool) output = ndimage.minimum_position(input) assert_equal(output, (0, 0)) def test_minimum_position05(): labels = [1, 2, 0, 4] for type in types: input = np.array([[5, 4, 2, 5], [3, 7, 0, 2], [1, 5, 2, 3]], type) output = ndimage.minimum_position(input, labels) assert_equal(output, (2, 0)) def test_minimum_position06(): labels = [1, 2, 3, 4] for type in types: input = np.array([[5, 4, 2, 5], [3, 7, 0, 2], [1, 5, 1, 1]], type) output = ndimage.minimum_position(input, labels, 2) assert_equal(output, (0, 1)) def test_minimum_position07(): labels = [1, 2, 3, 4] for type in types: input = np.array([[5, 4, 2, 5], [3, 7, 0, 2], [1, 5, 1, 1]], type) output = ndimage.minimum_position(input, labels, [2, 3]) assert_equal(output[0], (0, 1)) assert_equal(output[1], (1, 2)) def test_maximum_position01(): labels = np.array([1, 0], bool) for type in types: input = np.array([[1, 2], [3, 4]], type) output = ndimage.maximum_position(input, labels=labels) assert_equal(output, (1, 0)) def test_maximum_position02(): for type in types: input = np.array([[5, 4, 2, 5], [3, 7, 8, 2], [1, 5, 1, 1]], type) output = ndimage.maximum_position(input) assert_equal(output, (1, 2)) def test_maximum_position03(): input = np.array([[5, 4, 2, 5], [3, 7, 8, 2], [1, 5, 1, 1]], bool) output = ndimage.maximum_position(input) assert_equal(output, (0, 0)) def test_maximum_position04(): labels = [1, 2, 0, 4] for type in types: input = np.array([[5, 4, 2, 5], [3, 7, 8, 2], [1, 5, 1, 1]], type) output = ndimage.maximum_position(input, labels) assert_equal(output, (1, 1)) def test_maximum_position05(): labels = [1, 2, 0, 4] for type in types: input = np.array([[5, 4, 2, 5], [3, 7, 8, 2], [1, 5, 1, 1]], type) output = ndimage.maximum_position(input, labels, 1) assert_equal(output, (0, 0)) def test_maximum_position06(): labels = [1, 2, 0, 4] for 
type in types: input = np.array([[5, 4, 2, 5], [3, 7, 8, 2], [1, 5, 1, 1]], type) output = ndimage.maximum_position(input, labels, [1, 2]) assert_equal(output[0], (0, 0)) assert_equal(output[1], (1, 1)) def test_maximum_position07(): # Test float labels labels = np.array([1.0, 2.5, 0.0, 4.5]) for type in types: input = np.array([[5, 4, 2, 5], [3, 7, 8, 2], [1, 5, 1, 1]], type) output = ndimage.maximum_position(input, labels, [1.0, 4.5]) assert_equal(output[0], (0, 0)) assert_equal(output[1], (0, 3)) def test_extrema01(): labels = np.array([1, 0], bool) for type in types: input = np.array([[1, 2], [3, 4]], type) output1 = ndimage.extrema(input, labels=labels) output2 = ndimage.minimum(input, labels=labels) output3 = ndimage.maximum(input, labels=labels) output4 = ndimage.minimum_position(input, labels=labels) output5 = ndimage.maximum_position(input, labels=labels) assert_equal(output1, (output2, output3, output4, output5)) def test_extrema02(): labels = np.array([1, 2]) for type in types: input = np.array([[1, 2], [3, 4]], type) output1 = ndimage.extrema(input, labels=labels, index=2) output2 = ndimage.minimum(input, labels=labels, index=2) output3 = ndimage.maximum(input, labels=labels, index=2) output4 = ndimage.minimum_position(input, labels=labels, index=2) output5 = ndimage.maximum_position(input, labels=labels, index=2) assert_equal(output1, (output2, output3, output4, output5)) def test_extrema03(): labels = np.array([[1, 2], [2, 3]]) for type in types: input = np.array([[1, 2], [3, 4]], type) output1 = ndimage.extrema(input, labels=labels, index=[2, 3, 8]) output2 = ndimage.minimum(input, labels=labels, index=[2, 3, 8]) output3 = ndimage.maximum(input, labels=labels, index=[2, 3, 8]) output4 = ndimage.minimum_position(input, labels=labels, index=[2, 3, 8]) output5 = ndimage.maximum_position(input, labels=labels, index=[2, 3, 8]) assert_array_almost_equal(output1[0], output2) assert_array_almost_equal(output1[1], output3) 
assert_array_almost_equal(output1[2], output4) assert_array_almost_equal(output1[3], output5) def test_extrema04(): labels = [1, 2, 0, 4] for type in types: input = np.array([[5, 4, 2, 5], [3, 7, 8, 2], [1, 5, 1, 1]], type) output1 = ndimage.extrema(input, labels, [1, 2]) output2 = ndimage.minimum(input, labels, [1, 2]) output3 = ndimage.maximum(input, labels, [1, 2]) output4 = ndimage.minimum_position(input, labels, [1, 2]) output5 = ndimage.maximum_position(input, labels, [1, 2]) assert_array_almost_equal(output1[0], output2) assert_array_almost_equal(output1[1], output3) assert_array_almost_equal(output1[2], output4) assert_array_almost_equal(output1[3], output5) def test_center_of_mass01(): expected = [0.0, 0.0] for type in types: input = np.array([[1, 0], [0, 0]], type) output = ndimage.center_of_mass(input) assert_array_almost_equal(output, expected) def test_center_of_mass02(): expected = [1, 0] for type in types: input = np.array([[0, 0], [1, 0]], type) output = ndimage.center_of_mass(input) assert_array_almost_equal(output, expected) def test_center_of_mass03(): expected = [0, 1] for type in types: input = np.array([[0, 1], [0, 0]], type) output = ndimage.center_of_mass(input) assert_array_almost_equal(output, expected) def test_center_of_mass04(): expected = [1, 1] for type in types: input = np.array([[0, 0], [0, 1]], type) output = ndimage.center_of_mass(input) assert_array_almost_equal(output, expected) def test_center_of_mass05(): expected = [0.5, 0.5] for type in types: input = np.array([[1, 1], [1, 1]], type) output = ndimage.center_of_mass(input) assert_array_almost_equal(output, expected) def test_center_of_mass06(): expected = [0.5, 0.5] input = np.array([[1, 2], [3, 1]], bool) output = ndimage.center_of_mass(input) assert_array_almost_equal(output, expected) def test_center_of_mass07(): labels = [1, 0] expected = [0.5, 0.0] input = np.array([[1, 2], [3, 1]], bool) output = ndimage.center_of_mass(input, labels) assert_array_almost_equal(output, 
expected) def test_center_of_mass08(): labels = [1, 2] expected = [0.5, 1.0] input = np.array([[5, 2], [3, 1]], bool) output = ndimage.center_of_mass(input, labels, 2) assert_array_almost_equal(output, expected) def test_center_of_mass09(): labels = [1, 2] expected = [(0.5, 0.0), (0.5, 1.0)] input = np.array([[1, 2], [1, 1]], bool) output = ndimage.center_of_mass(input, labels, [1, 2]) assert_array_almost_equal(output, expected) def test_histogram01(): expected = np.ones(10) input = np.arange(10) output = ndimage.histogram(input, 0, 10, 10) assert_array_almost_equal(output, expected) def test_histogram02(): labels = [1, 1, 1, 1, 2, 2, 2, 2] expected = [0, 2, 0, 1, 1] input = np.array([1, 1, 3, 4, 3, 3, 3, 3]) output = ndimage.histogram(input, 0, 4, 5, labels, 1) assert_array_almost_equal(output, expected) def test_histogram03(): labels = [1, 0, 1, 1, 2, 2, 2, 2] expected1 = [0, 1, 0, 1, 1] expected2 = [0, 0, 0, 3, 0] input = np.array([1, 1, 3, 4, 3, 5, 3, 3]) output = ndimage.histogram(input, 0, 4, 5, labels, (1,2)) assert_array_almost_equal(output[0], expected1) assert_array_almost_equal(output[1], expected2) def test_stat_funcs_2d(): a = np.array([[5,6,0,0,0], [8,9,0,0,0], [0,0,0,3,5]]) lbl = np.array([[1,1,0,0,0], [1,1,0,0,0], [0,0,0,2,2]]) mean = ndimage.mean(a, labels=lbl, index=[1, 2]) assert_array_equal(mean, [7.0, 4.0]) var = ndimage.variance(a, labels=lbl, index=[1, 2]) assert_array_equal(var, [2.5, 1.0]) std = ndimage.standard_deviation(a, labels=lbl, index=[1, 2]) assert_array_almost_equal(std, np.sqrt([2.5, 1.0])) med = ndimage.median(a, labels=lbl, index=[1, 2]) assert_array_equal(med, [7.0, 4.0]) min = ndimage.minimum(a, labels=lbl, index=[1, 2]) assert_array_equal(min, [5, 3]) max = ndimage.maximum(a, labels=lbl, index=[1, 2]) assert_array_equal(max, [9, 5])
36,055
31.570912
95
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/ndimage/tests/test_filters.py
''' Some tests for filters ''' from __future__ import division, print_function, absolute_import import sys import numpy as np from numpy.testing import (assert_equal, assert_allclose, assert_array_equal, assert_almost_equal) from pytest import raises as assert_raises import scipy.ndimage as sndi from scipy.ndimage.filters import _gaussian_kernel1d def test_ticket_701(): # Test generic filter sizes arr = np.arange(4).reshape((2,2)) func = lambda x: np.min(x) res = sndi.generic_filter(arr, func, size=(1,1)) # The following raises an error unless ticket 701 is fixed res2 = sndi.generic_filter(arr, func, size=1) assert_equal(res, res2) def test_gh_5430(): # At least one of these raises an error unless gh-5430 is # fixed. In py2k an int is implemented using a C long, so # which one fails depends on your system. In py3k there is only # one arbitrary precision integer type, so both should fail. sigma = np.int32(1) out = sndi._ni_support._normalize_sequence(sigma, 1) assert_equal(out, [sigma]) sigma = np.int64(1) out = sndi._ni_support._normalize_sequence(sigma, 1) assert_equal(out, [sigma]) # This worked before; make sure it still works sigma = 1 out = sndi._ni_support._normalize_sequence(sigma, 1) assert_equal(out, [sigma]) # This worked before; make sure it still works sigma = [1, 1] out = sndi._ni_support._normalize_sequence(sigma, 2) assert_equal(out, sigma) # Also include the OPs original example to make sure we fixed the issue x = np.random.normal(size=(256, 256)) perlin = np.zeros_like(x) for i in 2**np.arange(6): perlin += sndi.filters.gaussian_filter(x, i, mode="wrap") * i**2 # This also fixes gh-4106, show that the OPs example now runs. 
x = np.int64(21) sndi._ni_support._normalize_sequence(x, 0) def test_gaussian_kernel1d(): radius = 10 sigma = 2 sigma2 = sigma * sigma x = np.arange(-radius, radius + 1, dtype=np.double) phi_x = np.exp(-0.5 * x * x / sigma2) phi_x /= phi_x.sum() assert_allclose(phi_x, _gaussian_kernel1d(sigma, 0, radius)) assert_allclose(-phi_x * x / sigma2, _gaussian_kernel1d(sigma, 1, radius)) assert_allclose(phi_x * (x * x / sigma2 - 1) / sigma2, _gaussian_kernel1d(sigma, 2, radius)) assert_allclose(phi_x * (3 - x * x / sigma2) * x / (sigma2 * sigma2), _gaussian_kernel1d(sigma, 3, radius)) def test_orders_gauss(): # Check order inputs to Gaussians arr = np.zeros((1,)) assert_equal(0, sndi.gaussian_filter(arr, 1, order=0)) assert_equal(0, sndi.gaussian_filter(arr, 1, order=3)) assert_raises(ValueError, sndi.gaussian_filter, arr, 1, -1) assert_equal(0, sndi.gaussian_filter1d(arr, 1, axis=-1, order=0)) assert_equal(0, sndi.gaussian_filter1d(arr, 1, axis=-1, order=3)) assert_raises(ValueError, sndi.gaussian_filter1d, arr, 1, -1, -1) def test_valid_origins(): """Regression test for #1311.""" func = lambda x: np.mean(x) data = np.array([1,2,3,4,5], dtype=np.float64) assert_raises(ValueError, sndi.generic_filter, data, func, size=3, origin=2) func2 = lambda x, y: np.mean(x + y) assert_raises(ValueError, sndi.generic_filter1d, data, func, filter_size=3, origin=2) assert_raises(ValueError, sndi.percentile_filter, data, 0.2, size=3, origin=2) for filter in [sndi.uniform_filter, sndi.minimum_filter, sndi.maximum_filter, sndi.maximum_filter1d, sndi.median_filter, sndi.minimum_filter1d]: # This should work, since for size == 3, the valid range for origin is # -1 to 1. list(filter(data, 3, origin=-1)) list(filter(data, 3, origin=1)) # Just check this raises an error instead of silently accepting or # segfaulting. 
assert_raises(ValueError, filter, data, 3, origin=2) def test_multiple_modes(): # Test that the filters with multiple mode cababilities for different # dimensions give the same result as applying a single mode. arr = np.array([[1., 0., 0.], [1., 1., 0.], [0., 0., 0.]]) mode1 = 'reflect' mode2 = ['reflect', 'reflect'] assert_equal(sndi.gaussian_filter(arr, 1, mode=mode1), sndi.gaussian_filter(arr, 1, mode=mode2)) assert_equal(sndi.prewitt(arr, mode=mode1), sndi.prewitt(arr, mode=mode2)) assert_equal(sndi.sobel(arr, mode=mode1), sndi.sobel(arr, mode=mode2)) assert_equal(sndi.laplace(arr, mode=mode1), sndi.laplace(arr, mode=mode2)) assert_equal(sndi.gaussian_laplace(arr, 1, mode=mode1), sndi.gaussian_laplace(arr, 1, mode=mode2)) assert_equal(sndi.maximum_filter(arr, size=5, mode=mode1), sndi.maximum_filter(arr, size=5, mode=mode2)) assert_equal(sndi.minimum_filter(arr, size=5, mode=mode1), sndi.minimum_filter(arr, size=5, mode=mode2)) assert_equal(sndi.gaussian_gradient_magnitude(arr, 1, mode=mode1), sndi.gaussian_gradient_magnitude(arr, 1, mode=mode2)) assert_equal(sndi.uniform_filter(arr, 5, mode=mode1), sndi.uniform_filter(arr, 5, mode=mode2)) def test_multiple_modes_sequentially(): # Test that the filters with multiple mode cababilities for different # dimensions give the same result as applying the filters with # different modes sequentially arr = np.array([[1., 0., 0.], [1., 1., 0.], [0., 0., 0.]]) modes = ['reflect', 'wrap'] expected = sndi.gaussian_filter1d(arr, 1, axis=0, mode=modes[0]) expected = sndi.gaussian_filter1d(expected, 1, axis=1, mode=modes[1]) assert_equal(expected, sndi.gaussian_filter(arr, 1, mode=modes)) expected = sndi.uniform_filter1d(arr, 5, axis=0, mode=modes[0]) expected = sndi.uniform_filter1d(expected, 5, axis=1, mode=modes[1]) assert_equal(expected, sndi.uniform_filter(arr, 5, mode=modes)) expected = sndi.maximum_filter1d(arr, size=5, axis=0, mode=modes[0]) expected = sndi.maximum_filter1d(expected, size=5, axis=1, mode=modes[1]) 
assert_equal(expected, sndi.maximum_filter(arr, size=5, mode=modes)) expected = sndi.minimum_filter1d(arr, size=5, axis=0, mode=modes[0]) expected = sndi.minimum_filter1d(expected, size=5, axis=1, mode=modes[1]) assert_equal(expected, sndi.minimum_filter(arr, size=5, mode=modes)) def test_multiple_modes_prewitt(): # Test prewitt filter for multiple extrapolation modes arr = np.array([[1., 0., 0.], [1., 1., 0.], [0., 0., 0.]]) expected = np.array([[1., -3., 2.], [1., -2., 1.], [1., -1., 0.]]) modes = ['reflect', 'wrap'] assert_equal(expected, sndi.prewitt(arr, mode=modes)) def test_multiple_modes_sobel(): # Test sobel filter for multiple extrapolation modes arr = np.array([[1., 0., 0.], [1., 1., 0.], [0., 0., 0.]]) expected = np.array([[1., -4., 3.], [2., -3., 1.], [1., -1., 0.]]) modes = ['reflect', 'wrap'] assert_equal(expected, sndi.sobel(arr, mode=modes)) def test_multiple_modes_laplace(): # Test laplace filter for multiple extrapolation modes arr = np.array([[1., 0., 0.], [1., 1., 0.], [0., 0., 0.]]) expected = np.array([[-2., 2., 1.], [-2., -3., 2.], [1., 1., 0.]]) modes = ['reflect', 'wrap'] assert_equal(expected, sndi.laplace(arr, mode=modes)) def test_multiple_modes_gaussian_laplace(): # Test gaussian_laplace filter for multiple extrapolation modes arr = np.array([[1., 0., 0.], [1., 1., 0.], [0., 0., 0.]]) expected = np.array([[-0.28438687, 0.01559809, 0.19773499], [-0.36630503, -0.20069774, 0.07483620], [0.15849176, 0.18495566, 0.21934094]]) modes = ['reflect', 'wrap'] assert_almost_equal(expected, sndi.gaussian_laplace(arr, 1, mode=modes)) def test_multiple_modes_gaussian_gradient_magnitude(): # Test gaussian_gradient_magnitude filter for multiple # extrapolation modes arr = np.array([[1., 0., 0.], [1., 1., 0.], [0., 0., 0.]]) expected = np.array([[0.04928965, 0.09745625, 0.06405368], [0.23056905, 0.14025305, 0.04550846], [0.19894369, 0.14950060, 0.06796850]]) modes = ['reflect', 'wrap'] calculated = sndi.gaussian_gradient_magnitude(arr, 1, mode=modes) 
assert_almost_equal(expected, calculated) def test_multiple_modes_uniform(): # Test uniform filter for multiple extrapolation modes arr = np.array([[1., 0., 0.], [1., 1., 0.], [0., 0., 0.]]) expected = np.array([[0.32, 0.40, 0.48], [0.20, 0.28, 0.32], [0.28, 0.32, 0.40]]) modes = ['reflect', 'wrap'] assert_almost_equal(expected, sndi.uniform_filter(arr, 5, mode=modes)) def test_gaussian_truncate(): # Test that Gaussian filters can be truncated at different widths. # These tests only check that the result has the expected number # of nonzero elements. arr = np.zeros((100, 100), float) arr[50, 50] = 1 num_nonzeros_2 = (sndi.gaussian_filter(arr, 5, truncate=2) > 0).sum() assert_equal(num_nonzeros_2, 21**2) num_nonzeros_5 = (sndi.gaussian_filter(arr, 5, truncate=5) > 0).sum() assert_equal(num_nonzeros_5, 51**2) # Test truncate when sigma is a sequence. f = sndi.gaussian_filter(arr, [0.5, 2.5], truncate=3.5) fpos = f > 0 n0 = fpos.any(axis=0).sum() # n0 should be 2*int(2.5*3.5 + 0.5) + 1 assert_equal(n0, 19) n1 = fpos.any(axis=1).sum() # n1 should be 2*int(0.5*3.5 + 0.5) + 1 assert_equal(n1, 5) # Test gaussian_filter1d. 
x = np.zeros(51) x[25] = 1 f = sndi.gaussian_filter1d(x, sigma=2, truncate=3.5) n = (f > 0).sum() assert_equal(n, 15) # Test gaussian_laplace y = sndi.gaussian_laplace(x, sigma=2, truncate=3.5) nonzero_indices = np.where(y != 0)[0] n = nonzero_indices.ptp() + 1 assert_equal(n, 15) # Test gaussian_gradient_magnitude y = sndi.gaussian_gradient_magnitude(x, sigma=2, truncate=3.5) nonzero_indices = np.where(y != 0)[0] n = nonzero_indices.ptp() + 1 assert_equal(n, 15) class TestThreading(object): def check_func_thread(self, n, fun, args, out): from threading import Thread thrds = [Thread(target=fun, args=args, kwargs={'output': out[x]}) for x in range(n)] [t.start() for t in thrds] [t.join() for t in thrds] def check_func_serial(self, n, fun, args, out): for i in range(n): fun(*args, output=out[i]) def test_correlate1d(self): d = np.random.randn(5000) os = np.empty((4, d.size)) ot = np.empty_like(os) self.check_func_serial(4, sndi.correlate1d, (d, np.arange(5)), os) self.check_func_thread(4, sndi.correlate1d, (d, np.arange(5)), ot) assert_array_equal(os, ot) def test_correlate(self): d = np.random.randn(500, 500) k = np.random.randn(10, 10) os = np.empty([4] + list(d.shape)) ot = np.empty_like(os) self.check_func_serial(4, sndi.correlate, (d, k), os) self.check_func_thread(4, sndi.correlate, (d, k), ot) assert_array_equal(os, ot) def test_median_filter(self): d = np.random.randn(500, 500) os = np.empty([4] + list(d.shape)) ot = np.empty_like(os) self.check_func_serial(4, sndi.median_filter, (d, 3), os) self.check_func_thread(4, sndi.median_filter, (d, 3), ot) assert_array_equal(os, ot) def test_uniform_filter1d(self): d = np.random.randn(5000) os = np.empty((4, d.size)) ot = np.empty_like(os) self.check_func_serial(4, sndi.uniform_filter1d, (d, 5), os) self.check_func_thread(4, sndi.uniform_filter1d, (d, 5), ot) assert_array_equal(os, ot) def test_minmax_filter(self): d = np.random.randn(500, 500) os = np.empty([4] + list(d.shape)) ot = np.empty_like(os) 
self.check_func_serial(4, sndi.maximum_filter, (d, 3), os) self.check_func_thread(4, sndi.maximum_filter, (d, 3), ot) assert_array_equal(os, ot) self.check_func_serial(4, sndi.minimum_filter, (d, 3), os) self.check_func_thread(4, sndi.minimum_filter, (d, 3), ot) assert_array_equal(os, ot) def test_minmaximum_filter1d(): # Regression gh-3898 in_ = np.arange(10) out = sndi.minimum_filter1d(in_, 1) assert_equal(in_, out) out = sndi.maximum_filter1d(in_, 1) assert_equal(in_, out) # Test reflect out = sndi.minimum_filter1d(in_, 5, mode='reflect') assert_equal([0, 0, 0, 1, 2, 3, 4, 5, 6, 7], out) out = sndi.maximum_filter1d(in_, 5, mode='reflect') assert_equal([2, 3, 4, 5, 6, 7, 8, 9, 9, 9], out) #Test constant out = sndi.minimum_filter1d(in_, 5, mode='constant', cval=-1) assert_equal([-1, -1, 0, 1, 2, 3, 4, 5, -1, -1], out) out = sndi.maximum_filter1d(in_, 5, mode='constant', cval=10) assert_equal([10, 10, 4, 5, 6, 7, 8, 9, 10, 10], out) # Test nearest out = sndi.minimum_filter1d(in_, 5, mode='nearest') assert_equal([0, 0, 0, 1, 2, 3, 4, 5, 6, 7], out) out = sndi.maximum_filter1d(in_, 5, mode='nearest') assert_equal([2, 3, 4, 5, 6, 7, 8, 9, 9, 9], out) # Test wrap out = sndi.minimum_filter1d(in_, 5, mode='wrap') assert_equal([0, 0, 0, 1, 2, 3, 4, 5, 0, 0], out) out = sndi.maximum_filter1d(in_, 5, mode='wrap') assert_equal([9, 9, 4, 5, 6, 7, 8, 9, 9, 9], out) def test_uniform_filter1d_roundoff_errors(): # gh-6930 in_ = np.repeat([0, 1, 0], [9, 9, 9]) for filter_size in range(3, 10): out = sndi.uniform_filter1d(in_, filter_size) assert_equal(out.sum(), 10 - filter_size) def test_footprint_all_zeros(): # regression test for gh-6876: footprint of all zeros segfaults arr = np.random.randint(0, 100, (100, 100)) kernel = np.zeros((3, 3), bool) with assert_raises(ValueError): sndi.maximum_filter(arr, footprint=kernel) def test_gaussian_filter(): # Test gaussian filter with np.float16 # gh-8207 data = np.array([1],dtype = np.float16) sigma = 1.0 with assert_raises(RuntimeError): 
sndi.gaussian_filter(data,sigma)
15,021
35.549878
92
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/ndimage/tests/test_splines.py
"""Tests for spline filtering.""" from __future__ import division, print_function, absolute_import import numpy as np import pytest from numpy.testing import assert_almost_equal from scipy import ndimage def get_spline_knot_values(order): """Knot values to the right of a B-spline's center.""" knot_values = {0: [1], 1: [1], 2: [6, 1], 3: [4, 1], 4: [230, 76, 1], 5: [66, 26, 1]} return knot_values[order] def make_spline_knot_matrix(n, order, mode='mirror'): """Matrix to invert to find the spline coefficients.""" knot_values = get_spline_knot_values(order) matrix = np.zeros((n, n)) for diag, knot_value in enumerate(knot_values): indices = np.arange(diag, n) if diag == 0: matrix[indices, indices] = knot_value else: matrix[indices, indices - diag] = knot_value matrix[indices - diag, indices] = knot_value knot_values_sum = knot_values[0] + 2 * sum(knot_values[1:]) if mode == 'mirror': start, step = 1, 1 elif mode == 'reflect': start, step = 0, 1 elif mode == 'wrap': start, step = -1, -1 else: raise ValueError('unsupported mode {}'.format(mode)) for row in range(len(knot_values) - 1): for idx, knot_value in enumerate(knot_values[row + 1:]): matrix[row, start + step*idx] += knot_value matrix[-row - 1, -start - 1 - step*idx] += knot_value return matrix / knot_values_sum @pytest.mark.parametrize('order', [0, 1, 2, 3, 4, 5]) def test_spline_filter_vs_matrix_solution(order): n = 100 eye = np.eye(n, dtype=float) spline_filter_axis_0 = ndimage.spline_filter1d(eye, axis=0, order=order) spline_filter_axis_1 = ndimage.spline_filter1d(eye, axis=1, order=order) matrix = make_spline_knot_matrix(n, order) assert_almost_equal(eye, np.dot(spline_filter_axis_0, matrix)) assert_almost_equal(eye, np.dot(spline_filter_axis_1, matrix.T))
2,058
30.676923
76
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/ndimage/tests/test_datatypes.py
""" Testing data types for ndimage calls """ from __future__ import division, print_function, absolute_import import sys import numpy as np from numpy.testing import assert_array_almost_equal, assert_equal, assert_ import pytest from scipy import ndimage def test_map_coordinates_dts(): # check that ndimage accepts different data types for interpolation data = np.array([[4, 1, 3, 2], [7, 6, 8, 5], [3, 5, 3, 6]]) shifted_data = np.array([[0, 0, 0, 0], [0, 4, 1, 3], [0, 7, 6, 8]]) idx = np.indices(data.shape) dts = (np.uint8, np.uint16, np.uint32, np.uint64, np.int8, np.int16, np.int32, np.int64, np.intp, np.uintp, np.float32, np.float64) for order in range(0, 6): for data_dt in dts: these_data = data.astype(data_dt) for coord_dt in dts: # affine mapping mat = np.eye(2, dtype=coord_dt) off = np.zeros((2,), dtype=coord_dt) out = ndimage.affine_transform(these_data, mat, off) assert_array_almost_equal(these_data, out) # map coordinates coords_m1 = idx.astype(coord_dt) - 1 coords_p10 = idx.astype(coord_dt) + 10 out = ndimage.map_coordinates(these_data, coords_m1, order=order) assert_array_almost_equal(out, shifted_data) # check constant fill works out = ndimage.map_coordinates(these_data, coords_p10, order=order) assert_array_almost_equal(out, np.zeros((3,4))) # check shift and zoom out = ndimage.shift(these_data, 1) assert_array_almost_equal(out, shifted_data) out = ndimage.zoom(these_data, 1) assert_array_almost_equal(these_data, out) @pytest.mark.xfail(not sys.platform == 'darwin', reason="runs only on darwin") def test_uint64_max(): # Test interpolation respects uint64 max. Reported to fail at least on # win32 (due to the 32 bit visual C compiler using signed int64 when # converting between uint64 to double) and Debian on s390x. # Interpolation is always done in double precision floating point, so we # use the largest uint64 value for which int(float(big)) still fits in # a uint64. 
big = 2**64-1025 arr = np.array([big, big, big], dtype=np.uint64) # Tests geometric transform (map_coordinates, affine_transform) inds = np.indices(arr.shape) - 0.1 x = ndimage.map_coordinates(arr, inds) assert_equal(x[1], int(float(big))) assert_equal(x[2], int(float(big))) # Tests zoom / shift x = ndimage.shift(arr, 0.1) assert_equal(x[1], int(float(big))) assert_equal(x[2], int(float(big)))
2,832
40.057971
82
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/ndimage/tests/__init__.py
0
0
0
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/ndimage/tests/test_ndimage.py
# Copyright (C) 2003-2005 Peter J. Verveer # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # # 3. The name of the author may not be used to endorse or promote # products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS # OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE # GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
from __future__ import division, print_function, absolute_import import math import sys import numpy from numpy import fft from numpy.testing import (assert_, assert_equal, assert_array_equal, assert_array_almost_equal, assert_almost_equal) import pytest from pytest import raises as assert_raises from scipy._lib._numpy_compat import suppress_warnings import scipy.ndimage as ndimage eps = 1e-12 def sumsq(a, b): return math.sqrt(((a - b)**2).sum()) class TestNdimage: def setup_method(self): # list of numarray data types self.integer_types = [ numpy.int8, numpy.uint8, numpy.int16, numpy.uint16, numpy.int32, numpy.uint32, numpy.int64, numpy.uint64] self.float_types = [numpy.float32, numpy.float64] self.types = self.integer_types + self.float_types # list of boundary modes: self.modes = ['nearest', 'wrap', 'reflect', 'mirror', 'constant'] def test_correlate01(self): array = numpy.array([1, 2]) weights = numpy.array([2]) expected = [2, 4] output = ndimage.correlate(array, weights) assert_array_almost_equal(output, expected) output = ndimage.convolve(array, weights) assert_array_almost_equal(output, expected) output = ndimage.correlate1d(array, weights) assert_array_almost_equal(output, expected) output = ndimage.convolve1d(array, weights) assert_array_almost_equal(output, expected) def test_correlate02(self): array = numpy.array([1, 2, 3]) kernel = numpy.array([1]) output = ndimage.correlate(array, kernel) assert_array_almost_equal(array, output) output = ndimage.convolve(array, kernel) assert_array_almost_equal(array, output) output = ndimage.correlate1d(array, kernel) assert_array_almost_equal(array, output) output = ndimage.convolve1d(array, kernel) assert_array_almost_equal(array, output) def test_correlate03(self): array = numpy.array([1]) weights = numpy.array([1, 1]) expected = [2] output = ndimage.correlate(array, weights) assert_array_almost_equal(output, expected) output = ndimage.convolve(array, weights) assert_array_almost_equal(output, expected) output = 
ndimage.correlate1d(array, weights) assert_array_almost_equal(output, expected) output = ndimage.convolve1d(array, weights) assert_array_almost_equal(output, expected) def test_correlate04(self): array = numpy.array([1, 2]) tcor = [2, 3] tcov = [3, 4] weights = numpy.array([1, 1]) output = ndimage.correlate(array, weights) assert_array_almost_equal(output, tcor) output = ndimage.convolve(array, weights) assert_array_almost_equal(output, tcov) output = ndimage.correlate1d(array, weights) assert_array_almost_equal(output, tcor) output = ndimage.convolve1d(array, weights) assert_array_almost_equal(output, tcov) def test_correlate05(self): array = numpy.array([1, 2, 3]) tcor = [2, 3, 5] tcov = [3, 5, 6] kernel = numpy.array([1, 1]) output = ndimage.correlate(array, kernel) assert_array_almost_equal(tcor, output) output = ndimage.convolve(array, kernel) assert_array_almost_equal(tcov, output) output = ndimage.correlate1d(array, kernel) assert_array_almost_equal(tcor, output) output = ndimage.convolve1d(array, kernel) assert_array_almost_equal(tcov, output) def test_correlate06(self): array = numpy.array([1, 2, 3]) tcor = [9, 14, 17] tcov = [7, 10, 15] weights = numpy.array([1, 2, 3]) output = ndimage.correlate(array, weights) assert_array_almost_equal(output, tcor) output = ndimage.convolve(array, weights) assert_array_almost_equal(output, tcov) output = ndimage.correlate1d(array, weights) assert_array_almost_equal(output, tcor) output = ndimage.convolve1d(array, weights) assert_array_almost_equal(output, tcov) def test_correlate07(self): array = numpy.array([1, 2, 3]) expected = [5, 8, 11] weights = numpy.array([1, 2, 1]) output = ndimage.correlate(array, weights) assert_array_almost_equal(output, expected) output = ndimage.convolve(array, weights) assert_array_almost_equal(output, expected) output = ndimage.correlate1d(array, weights) assert_array_almost_equal(output, expected) output = ndimage.convolve1d(array, weights) assert_array_almost_equal(output, expected) def 
test_correlate08(self): array = numpy.array([1, 2, 3]) tcor = [1, 2, 5] tcov = [3, 6, 7] weights = numpy.array([1, 2, -1]) output = ndimage.correlate(array, weights) assert_array_almost_equal(output, tcor) output = ndimage.convolve(array, weights) assert_array_almost_equal(output, tcov) output = ndimage.correlate1d(array, weights) assert_array_almost_equal(output, tcor) output = ndimage.convolve1d(array, weights) assert_array_almost_equal(output, tcov) def test_correlate09(self): array = [] kernel = numpy.array([1, 1]) output = ndimage.correlate(array, kernel) assert_array_almost_equal(array, output) output = ndimage.convolve(array, kernel) assert_array_almost_equal(array, output) output = ndimage.correlate1d(array, kernel) assert_array_almost_equal(array, output) output = ndimage.convolve1d(array, kernel) assert_array_almost_equal(array, output) def test_correlate10(self): array = [[]] kernel = numpy.array([[1, 1]]) output = ndimage.correlate(array, kernel) assert_array_almost_equal(array, output) output = ndimage.convolve(array, kernel) assert_array_almost_equal(array, output) def test_correlate11(self): array = numpy.array([[1, 2, 3], [4, 5, 6]]) kernel = numpy.array([[1, 1], [1, 1]]) output = ndimage.correlate(array, kernel) assert_array_almost_equal([[4, 6, 10], [10, 12, 16]], output) output = ndimage.convolve(array, kernel) assert_array_almost_equal([[12, 16, 18], [18, 22, 24]], output) def test_correlate12(self): array = numpy.array([[1, 2, 3], [4, 5, 6]]) kernel = numpy.array([[1, 0], [0, 1]]) output = ndimage.correlate(array, kernel) assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output) output = ndimage.convolve(array, kernel) assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output) def test_correlate13(self): kernel = numpy.array([[1, 0], [0, 1]]) for type1 in self.types: array = numpy.array([[1, 2, 3], [4, 5, 6]], type1) for type2 in self.types: output = ndimage.correlate(array, kernel, output=type2) assert_array_almost_equal([[2, 3, 5], [5, 6, 
8]], output) assert_equal(output.dtype.type, type2) output = ndimage.convolve(array, kernel, output=type2) assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output) assert_equal(output.dtype.type, type2) def test_correlate14(self): kernel = numpy.array([[1, 0], [0, 1]]) for type1 in self.types: array = numpy.array([[1, 2, 3], [4, 5, 6]], type1) for type2 in self.types: output = numpy.zeros(array.shape, type2) ndimage.correlate(array, kernel, output=output) assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output) assert_equal(output.dtype.type, type2) ndimage.convolve(array, kernel, output=output) assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output) assert_equal(output.dtype.type, type2) def test_correlate15(self): kernel = numpy.array([[1, 0], [0, 1]]) for type1 in self.types: array = numpy.array([[1, 2, 3], [4, 5, 6]], type1) output = ndimage.correlate(array, kernel, output=numpy.float32) assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output) assert_equal(output.dtype.type, numpy.float32) output = ndimage.convolve(array, kernel, output=numpy.float32) assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output) assert_equal(output.dtype.type, numpy.float32) def test_correlate16(self): kernel = numpy.array([[0.5, 0], [0, 0.5]]) for type1 in self.types: array = numpy.array([[1, 2, 3], [4, 5, 6]], type1) output = ndimage.correlate(array, kernel, output=numpy.float32) assert_array_almost_equal([[1, 1.5, 2.5], [2.5, 3, 4]], output) assert_equal(output.dtype.type, numpy.float32) output = ndimage.convolve(array, kernel, output=numpy.float32) assert_array_almost_equal([[3, 4, 4.5], [4.5, 5.5, 6]], output) assert_equal(output.dtype.type, numpy.float32) def test_correlate17(self): array = numpy.array([1, 2, 3]) tcor = [3, 5, 6] tcov = [2, 3, 5] kernel = numpy.array([1, 1]) output = ndimage.correlate(array, kernel, origin=-1) assert_array_almost_equal(tcor, output) output = ndimage.convolve(array, kernel, origin=-1) assert_array_almost_equal(tcov, output) output = 
ndimage.correlate1d(array, kernel, origin=-1) assert_array_almost_equal(tcor, output) output = ndimage.convolve1d(array, kernel, origin=-1) assert_array_almost_equal(tcov, output) def test_correlate18(self): kernel = numpy.array([[1, 0], [0, 1]]) for type1 in self.types: array = numpy.array([[1, 2, 3], [4, 5, 6]], type1) output = ndimage.correlate(array, kernel, output=numpy.float32, mode='nearest', origin=-1) assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output) assert_equal(output.dtype.type, numpy.float32) output = ndimage.convolve(array, kernel, output=numpy.float32, mode='nearest', origin=-1) assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output) assert_equal(output.dtype.type, numpy.float32) def test_correlate19(self): kernel = numpy.array([[1, 0], [0, 1]]) for type1 in self.types: array = numpy.array([[1, 2, 3], [4, 5, 6]], type1) output = ndimage.correlate(array, kernel, output=numpy.float32, mode='nearest', origin=[-1, 0]) assert_array_almost_equal([[5, 6, 8], [8, 9, 11]], output) assert_equal(output.dtype.type, numpy.float32) output = ndimage.convolve(array, kernel, output=numpy.float32, mode='nearest', origin=[-1, 0]) assert_array_almost_equal([[3, 5, 6], [6, 8, 9]], output) assert_equal(output.dtype.type, numpy.float32) def test_correlate20(self): weights = numpy.array([1, 2, 1]) expected = [[5, 10, 15], [7, 14, 21]] for type1 in self.types: array = numpy.array([[1, 2, 3], [2, 4, 6]], type1) for type2 in self.types: output = numpy.zeros((2, 3), type2) ndimage.correlate1d(array, weights, axis=0, output=output) assert_array_almost_equal(output, expected) ndimage.convolve1d(array, weights, axis=0, output=output) assert_array_almost_equal(output, expected) def test_correlate21(self): array = numpy.array([[1, 2, 3], [2, 4, 6]]) expected = [[5, 10, 15], [7, 14, 21]] weights = numpy.array([1, 2, 1]) output = ndimage.correlate1d(array, weights, axis=0) assert_array_almost_equal(output, expected) output = ndimage.convolve1d(array, weights, axis=0) 
assert_array_almost_equal(output, expected) def test_correlate22(self): weights = numpy.array([1, 2, 1]) expected = [[6, 12, 18], [6, 12, 18]] for type1 in self.types: array = numpy.array([[1, 2, 3], [2, 4, 6]], type1) for type2 in self.types: output = numpy.zeros((2, 3), type2) ndimage.correlate1d(array, weights, axis=0, mode='wrap', output=output) assert_array_almost_equal(output, expected) ndimage.convolve1d(array, weights, axis=0, mode='wrap', output=output) assert_array_almost_equal(output, expected) def test_correlate23(self): weights = numpy.array([1, 2, 1]) expected = [[5, 10, 15], [7, 14, 21]] for type1 in self.types: array = numpy.array([[1, 2, 3], [2, 4, 6]], type1) for type2 in self.types: output = numpy.zeros((2, 3), type2) ndimage.correlate1d(array, weights, axis=0, mode='nearest', output=output) assert_array_almost_equal(output, expected) ndimage.convolve1d(array, weights, axis=0, mode='nearest', output=output) assert_array_almost_equal(output, expected) def test_correlate24(self): weights = numpy.array([1, 2, 1]) tcor = [[7, 14, 21], [8, 16, 24]] tcov = [[4, 8, 12], [5, 10, 15]] for type1 in self.types: array = numpy.array([[1, 2, 3], [2, 4, 6]], type1) for type2 in self.types: output = numpy.zeros((2, 3), type2) ndimage.correlate1d(array, weights, axis=0, mode='nearest', output=output, origin=-1) assert_array_almost_equal(output, tcor) ndimage.convolve1d(array, weights, axis=0, mode='nearest', output=output, origin=-1) assert_array_almost_equal(output, tcov) def test_correlate25(self): weights = numpy.array([1, 2, 1]) tcor = [[4, 8, 12], [5, 10, 15]] tcov = [[7, 14, 21], [8, 16, 24]] for type1 in self.types: array = numpy.array([[1, 2, 3], [2, 4, 6]], type1) for type2 in self.types: output = numpy.zeros((2, 3), type2) ndimage.correlate1d(array, weights, axis=0, mode='nearest', output=output, origin=1) assert_array_almost_equal(output, tcor) ndimage.convolve1d(array, weights, axis=0, mode='nearest', output=output, origin=1) 
assert_array_almost_equal(output, tcov) def test_gauss01(self): input = numpy.array([[1, 2, 3], [2, 4, 6]], numpy.float32) output = ndimage.gaussian_filter(input, 0) assert_array_almost_equal(output, input) def test_gauss02(self): input = numpy.array([[1, 2, 3], [2, 4, 6]], numpy.float32) output = ndimage.gaussian_filter(input, 1.0) assert_equal(input.dtype, output.dtype) assert_equal(input.shape, output.shape) def test_gauss03(self): # single precision data" input = numpy.arange(100 * 100).astype(numpy.float32) input.shape = (100, 100) output = ndimage.gaussian_filter(input, [1.0, 1.0]) assert_equal(input.dtype, output.dtype) assert_equal(input.shape, output.shape) # input.sum() is 49995000.0. With single precision floats, we can't # expect more than 8 digits of accuracy, so use decimal=0 in this test. assert_almost_equal(output.sum(dtype='d'), input.sum(dtype='d'), decimal=0) assert_(sumsq(input, output) > 1.0) def test_gauss04(self): input = numpy.arange(100 * 100).astype(numpy.float32) input.shape = (100, 100) otype = numpy.float64 output = ndimage.gaussian_filter(input, [1.0, 1.0], output=otype) assert_equal(output.dtype.type, numpy.float64) assert_equal(input.shape, output.shape) assert_(sumsq(input, output) > 1.0) def test_gauss05(self): input = numpy.arange(100 * 100).astype(numpy.float32) input.shape = (100, 100) otype = numpy.float64 output = ndimage.gaussian_filter(input, [1.0, 1.0], order=1, output=otype) assert_equal(output.dtype.type, numpy.float64) assert_equal(input.shape, output.shape) assert_(sumsq(input, output) > 1.0) def test_gauss06(self): input = numpy.arange(100 * 100).astype(numpy.float32) input.shape = (100, 100) otype = numpy.float64 output1 = ndimage.gaussian_filter(input, [1.0, 1.0], output=otype) output2 = ndimage.gaussian_filter(input, 1.0, output=otype) assert_array_almost_equal(output1, output2) def test_prewitt01(self): for type_ in self.types: array = numpy.array([[3, 2, 5, 1, 4], [5, 8, 3, 7, 1], [5, 6, 9, 3, 5]], type_) t = 
ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 0) t = ndimage.correlate1d(t, [1.0, 1.0, 1.0], 1) output = ndimage.prewitt(array, 0) assert_array_almost_equal(t, output) def test_prewitt02(self): for type_ in self.types: array = numpy.array([[3, 2, 5, 1, 4], [5, 8, 3, 7, 1], [5, 6, 9, 3, 5]], type_) t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 0) t = ndimage.correlate1d(t, [1.0, 1.0, 1.0], 1) output = numpy.zeros(array.shape, type_) ndimage.prewitt(array, 0, output) assert_array_almost_equal(t, output) def test_prewitt03(self): for type_ in self.types: array = numpy.array([[3, 2, 5, 1, 4], [5, 8, 3, 7, 1], [5, 6, 9, 3, 5]], type_) t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 1) t = ndimage.correlate1d(t, [1.0, 1.0, 1.0], 0) output = ndimage.prewitt(array, 1) assert_array_almost_equal(t, output) def test_prewitt04(self): for type_ in self.types: array = numpy.array([[3, 2, 5, 1, 4], [5, 8, 3, 7, 1], [5, 6, 9, 3, 5]], type_) t = ndimage.prewitt(array, -1) output = ndimage.prewitt(array, 1) assert_array_almost_equal(t, output) def test_sobel01(self): for type_ in self.types: array = numpy.array([[3, 2, 5, 1, 4], [5, 8, 3, 7, 1], [5, 6, 9, 3, 5]], type_) t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 0) t = ndimage.correlate1d(t, [1.0, 2.0, 1.0], 1) output = ndimage.sobel(array, 0) assert_array_almost_equal(t, output) def test_sobel02(self): for type_ in self.types: array = numpy.array([[3, 2, 5, 1, 4], [5, 8, 3, 7, 1], [5, 6, 9, 3, 5]], type_) t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 0) t = ndimage.correlate1d(t, [1.0, 2.0, 1.0], 1) output = numpy.zeros(array.shape, type_) ndimage.sobel(array, 0, output) assert_array_almost_equal(t, output) def test_sobel03(self): for type_ in self.types: array = numpy.array([[3, 2, 5, 1, 4], [5, 8, 3, 7, 1], [5, 6, 9, 3, 5]], type_) t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 1) t = ndimage.correlate1d(t, [1.0, 2.0, 1.0], 0) output = numpy.zeros(array.shape, type_) output = ndimage.sobel(array, 1) 
assert_array_almost_equal(t, output) def test_sobel04(self): for type_ in self.types: array = numpy.array([[3, 2, 5, 1, 4], [5, 8, 3, 7, 1], [5, 6, 9, 3, 5]], type_) t = ndimage.sobel(array, -1) output = ndimage.sobel(array, 1) assert_array_almost_equal(t, output) def test_laplace01(self): for type_ in [numpy.int32, numpy.float32, numpy.float64]: array = numpy.array([[3, 2, 5, 1, 4], [5, 8, 3, 7, 1], [5, 6, 9, 3, 5]], type_) * 100 tmp1 = ndimage.correlate1d(array, [1, -2, 1], 0) tmp2 = ndimage.correlate1d(array, [1, -2, 1], 1) output = ndimage.laplace(array) assert_array_almost_equal(tmp1 + tmp2, output) def test_laplace02(self): for type_ in [numpy.int32, numpy.float32, numpy.float64]: array = numpy.array([[3, 2, 5, 1, 4], [5, 8, 3, 7, 1], [5, 6, 9, 3, 5]], type_) * 100 tmp1 = ndimage.correlate1d(array, [1, -2, 1], 0) tmp2 = ndimage.correlate1d(array, [1, -2, 1], 1) output = numpy.zeros(array.shape, type_) ndimage.laplace(array, output=output) assert_array_almost_equal(tmp1 + tmp2, output) def test_gaussian_laplace01(self): for type_ in [numpy.int32, numpy.float32, numpy.float64]: array = numpy.array([[3, 2, 5, 1, 4], [5, 8, 3, 7, 1], [5, 6, 9, 3, 5]], type_) * 100 tmp1 = ndimage.gaussian_filter(array, 1.0, [2, 0]) tmp2 = ndimage.gaussian_filter(array, 1.0, [0, 2]) output = ndimage.gaussian_laplace(array, 1.0) assert_array_almost_equal(tmp1 + tmp2, output) def test_gaussian_laplace02(self): for type_ in [numpy.int32, numpy.float32, numpy.float64]: array = numpy.array([[3, 2, 5, 1, 4], [5, 8, 3, 7, 1], [5, 6, 9, 3, 5]], type_) * 100 tmp1 = ndimage.gaussian_filter(array, 1.0, [2, 0]) tmp2 = ndimage.gaussian_filter(array, 1.0, [0, 2]) output = numpy.zeros(array.shape, type_) ndimage.gaussian_laplace(array, 1.0, output) assert_array_almost_equal(tmp1 + tmp2, output) def test_generic_laplace01(self): def derivative2(input, axis, output, mode, cval, a, b): sigma = [a, b / 2.0] input = numpy.asarray(input) order = [0] * input.ndim order[axis] = 2 return 
ndimage.gaussian_filter(input, sigma, order, output, mode, cval) for type_ in self.types: array = numpy.array([[3, 2, 5, 1, 4], [5, 8, 3, 7, 1], [5, 6, 9, 3, 5]], type_) output = numpy.zeros(array.shape, type_) tmp = ndimage.generic_laplace(array, derivative2, extra_arguments=(1.0,), extra_keywords={'b': 2.0}) ndimage.gaussian_laplace(array, 1.0, output) assert_array_almost_equal(tmp, output) def test_gaussian_gradient_magnitude01(self): for type_ in [numpy.int32, numpy.float32, numpy.float64]: array = numpy.array([[3, 2, 5, 1, 4], [5, 8, 3, 7, 1], [5, 6, 9, 3, 5]], type_) * 100 tmp1 = ndimage.gaussian_filter(array, 1.0, [1, 0]) tmp2 = ndimage.gaussian_filter(array, 1.0, [0, 1]) output = ndimage.gaussian_gradient_magnitude(array, 1.0) expected = tmp1 * tmp1 + tmp2 * tmp2 expected = numpy.sqrt(expected).astype(type_) assert_array_almost_equal(expected, output) def test_gaussian_gradient_magnitude02(self): for type_ in [numpy.int32, numpy.float32, numpy.float64]: array = numpy.array([[3, 2, 5, 1, 4], [5, 8, 3, 7, 1], [5, 6, 9, 3, 5]], type_) * 100 tmp1 = ndimage.gaussian_filter(array, 1.0, [1, 0]) tmp2 = ndimage.gaussian_filter(array, 1.0, [0, 1]) output = numpy.zeros(array.shape, type_) ndimage.gaussian_gradient_magnitude(array, 1.0, output) expected = tmp1 * tmp1 + tmp2 * tmp2 expected = numpy.sqrt(expected).astype(type_) assert_array_almost_equal(expected, output) def test_generic_gradient_magnitude01(self): array = numpy.array([[3, 2, 5, 1, 4], [5, 8, 3, 7, 1], [5, 6, 9, 3, 5]], numpy.float64) def derivative(input, axis, output, mode, cval, a, b): sigma = [a, b / 2.0] input = numpy.asarray(input) order = [0] * input.ndim order[axis] = 1 return ndimage.gaussian_filter(input, sigma, order, output, mode, cval) tmp1 = ndimage.gaussian_gradient_magnitude(array, 1.0) tmp2 = ndimage.generic_gradient_magnitude( array, derivative, extra_arguments=(1.0,), extra_keywords={'b': 2.0}) assert_array_almost_equal(tmp1, tmp2) def test_uniform01(self): array = numpy.array([2, 4, 
6])
        size = 2
        # origin=-1 shifts the 2-point window left: each output element is
        # the mean of the element and its right-hand neighbour.
        output = ndimage.uniform_filter1d(array, size, origin=-1)
        assert_array_almost_equal([3, 5, 6], output)

    def test_uniform02(self):
        # A size-0 uniform filter must leave the input unchanged.
        array = numpy.array([1, 2, 3])
        filter_shape = [0]
        output = ndimage.uniform_filter(array, filter_shape)
        assert_array_almost_equal(array, output)

    def test_uniform03(self):
        # A size-1 uniform filter is the identity.
        array = numpy.array([1, 2, 3])
        filter_shape = [1]
        output = ndimage.uniform_filter(array, filter_shape)
        assert_array_almost_equal(array, output)

    def test_uniform04(self):
        # Size-2 window with the default origin.
        array = numpy.array([2, 4, 6])
        filter_shape = [2]
        output = ndimage.uniform_filter(array, filter_shape)
        assert_array_almost_equal([2, 3, 5], output)

    def test_uniform05(self):
        # Empty input must yield an empty output, not an error.
        array = []
        filter_shape = [1]
        output = ndimage.uniform_filter(array, filter_shape)
        assert_array_almost_equal([], output)

    def test_uniform06(self):
        # Exercise every (input dtype, output dtype) pair; the requested
        # output dtype must be honoured.
        filter_shape = [2, 2]
        for type1 in self.types:
            array = numpy.array([[4, 8, 12],
                                 [16, 20, 24]], type1)
            for type2 in self.types:
                output = ndimage.uniform_filter(
                    array, filter_shape, output=type2)
                assert_array_almost_equal([[4, 6, 10], [10, 12, 16]], output)
                assert_equal(output.dtype.type, type2)

    def test_minimum_filter01(self):
        array = numpy.array([1, 2, 3, 4, 5])
        filter_shape = numpy.array([2])
        output = ndimage.minimum_filter(array, filter_shape)
        assert_array_almost_equal([1, 1, 2, 3, 4], output)

    def test_minimum_filter02(self):
        array = numpy.array([1, 2, 3, 4, 5])
        filter_shape = numpy.array([3])
        output = ndimage.minimum_filter(array, filter_shape)
        assert_array_almost_equal([1, 1, 2, 3, 4], output)

    def test_minimum_filter03(self):
        # Non-monotone input, size-2 window.
        array = numpy.array([3, 2, 5, 1, 4])
        filter_shape = numpy.array([2])
        output = ndimage.minimum_filter(array, filter_shape)
        assert_array_almost_equal([3, 2, 2, 1, 1], output)

    def test_minimum_filter04(self):
        array = numpy.array([3, 2, 5, 1, 4])
        filter_shape = numpy.array([3])
        output = ndimage.minimum_filter(array, filter_shape)
        assert_array_almost_equal([2, 2, 1, 1, 1], output)

    def test_minimum_filter05(self):
        # 2-D rectangular (2x3) minimum filter.
        array = numpy.array([[3, 2, 5, 1, 4],
                             [7, 6, 9, 3, 5],
                             [5, 8, 3, 7, 1]])
        filter_shape = numpy.array([2, 3])
        output = ndimage.minimum_filter(array, filter_shape)
        assert_array_almost_equal([[2, 2, 1, 1, 1],
                                   [2, 2, 1, 1, 1],
                                   [5, 3, 3, 1, 1]], output)

    def test_minimum_filter06(self):
        # A full 2x3 footprint of ones must match the size=(2, 3) result
        # from test_minimum_filter05.
        array = numpy.array([[3, 2, 5, 1, 4],
                             [7, 6, 9, 3, 5],
                             [5, 8, 3, 7, 1]])
        footprint = [[1, 1, 1], [1, 1, 1]]
        output = ndimage.minimum_filter(array, footprint=footprint)
        assert_array_almost_equal([[2, 2, 1, 1, 1],
                                   [2, 2, 1, 1, 1],
                                   [5, 3, 3, 1, 1]], output)

    def test_minimum_filter07(self):
        # Non-rectangular footprint: zeros exclude those positions.
        array = numpy.array([[3, 2, 5, 1, 4],
                             [7, 6, 9, 3, 5],
                             [5, 8, 3, 7, 1]])
        footprint = [[1, 0, 1], [1, 1, 0]]
        output = ndimage.minimum_filter(array, footprint=footprint)
        assert_array_almost_equal([[2, 2, 1, 1, 1],
                                   [2, 3, 1, 3, 1],
                                   [5, 5, 3, 3, 1]], output)

    def test_minimum_filter08(self):
        # Scalar origin applies the same shift along both axes.
        array = numpy.array([[3, 2, 5, 1, 4],
                             [7, 6, 9, 3, 5],
                             [5, 8, 3, 7, 1]])
        footprint = [[1, 0, 1], [1, 1, 0]]
        output = ndimage.minimum_filter(array, footprint=footprint, origin=-1)
        assert_array_almost_equal([[3, 1, 3, 1, 1],
                                   [5, 3, 3, 1, 1],
                                   [3, 3, 1, 1, 1]], output)

    def test_minimum_filter09(self):
        # Per-axis origin: shift along the first axis only.
        array = numpy.array([[3, 2, 5, 1, 4],
                             [7, 6, 9, 3, 5],
                             [5, 8, 3, 7, 1]])
        footprint = [[1, 0, 1], [1, 1, 0]]
        output = ndimage.minimum_filter(array, footprint=footprint,
                                        origin=[-1, 0])
        assert_array_almost_equal([[2, 3, 1, 3, 1],
                                   [5, 5, 3, 3, 1],
                                   [5, 3, 3, 1, 1]], output)

    def test_maximum_filter01(self):
        array = numpy.array([1, 2, 3, 4, 5])
        filter_shape = numpy.array([2])
        output = ndimage.maximum_filter(array, filter_shape)
        assert_array_almost_equal([1, 2, 3, 4, 5], output)

    def test_maximum_filter02(self):
        array = numpy.array([1, 2, 3, 4, 5])
        filter_shape = numpy.array([3])
        output = ndimage.maximum_filter(array, filter_shape)
        assert_array_almost_equal([2, 3, 4, 5, 5], output)

    def test_maximum_filter03(self):
        array = numpy.array([3, 2, 5, 1, 4])
        filter_shape = numpy.array([2])
        output = ndimage.maximum_filter(array, filter_shape)
        assert_array_almost_equal([3, 3, 5, 5, 4], output)

    def test_maximum_filter04(self):
        array = numpy.array([3, 2, 5, 1, 4])
        filter_shape = numpy.array([3])
        output = ndimage.maximum_filter(array, filter_shape)
        assert_array_almost_equal([3, 5, 5, 5, 4], output)

    def test_maximum_filter05(self):
        # 2-D rectangular (2x3) maximum filter.
        array = numpy.array([[3, 2, 5, 1, 4],
                             [7, 6, 9, 3, 5],
                             [5, 8, 3, 7, 1]])
        filter_shape = numpy.array([2, 3])
        output = ndimage.maximum_filter(array, filter_shape)
        assert_array_almost_equal([[3, 5, 5, 5, 4],
                                   [7, 9, 9, 9, 5],
                                   [8, 9, 9, 9, 7]], output)

    def test_maximum_filter06(self):
        # A full 2x3 footprint of ones must match the size=(2, 3) result
        # from test_maximum_filter05.
        array = numpy.array([[3, 2, 5, 1, 4],
                             [7, 6, 9, 3, 5],
                             [5, 8, 3, 7, 1]])
        footprint = [[1, 1, 1], [1, 1, 1]]
        output = ndimage.maximum_filter(array, footprint=footprint)
        assert_array_almost_equal([[3, 5, 5, 5, 4],
                                   [7, 9, 9, 9, 5],
                                   [8, 9, 9, 9, 7]], output)

    def test_maximum_filter07(self):
        # Non-rectangular footprint.
        array = numpy.array([[3, 2, 5, 1, 4],
                             [7, 6, 9, 3, 5],
                             [5, 8, 3, 7, 1]])
        footprint = [[1, 0, 1], [1, 1, 0]]
        output = ndimage.maximum_filter(array, footprint=footprint)
        assert_array_almost_equal([[3, 5, 5, 5, 4],
                                   [7, 7, 9, 9, 5],
                                   [7, 9, 8, 9, 7]], output)

    def test_maximum_filter08(self):
        # Scalar origin applied along both axes.
        array = numpy.array([[3, 2, 5, 1, 4],
                             [7, 6, 9, 3, 5],
                             [5, 8, 3, 7, 1]])
        footprint = [[1, 0, 1], [1, 1, 0]]
        output = ndimage.maximum_filter(array, footprint=footprint, origin=-1)
        assert_array_almost_equal([[7, 9, 9, 5, 5],
                                   [9, 8, 9, 7, 5],
                                   [8, 8, 7, 7, 7]], output)

    def test_maximum_filter09(self):
        # Per-axis origin: shift along the first axis only.
        array = numpy.array([[3, 2, 5, 1, 4],
                             [7, 6, 9, 3, 5],
                             [5, 8, 3, 7, 1]])
        footprint = [[1, 0, 1], [1, 1, 0]]
        output = ndimage.maximum_filter(array, footprint=footprint,
                                        origin=[-1, 0])
        assert_array_almost_equal([[7, 7, 9, 9, 5],
                                   [7, 9, 8, 9, 7],
                                   [8, 8, 8, 7, 7]], output)

    def test_rank01(self):
        # On this input rank_filter, percentile_filter and median_filter
        # must all agree and reproduce the input.
        array = numpy.array([1, 2, 3, 4, 5])
        output = ndimage.rank_filter(array, 1, size=2)
        assert_array_almost_equal(array, output)
        output = ndimage.percentile_filter(array, 100, size=2)
        assert_array_almost_equal(array, output)
        output = ndimage.median_filter(array, 2)
        assert_array_almost_equal(array, output)

    def test_rank02(self):
        # Size given as list, int and tuple must be accepted equivalently.
        array = numpy.array([1, 2, 3, 4, 5])
        output = ndimage.rank_filter(array, 1, size=[3])
        assert_array_almost_equal(array, output)
        output = ndimage.percentile_filter(array, 50, size=3)
        assert_array_almost_equal(array, output)
        output = ndimage.median_filter(array, (3,))
        assert_array_almost_equal(array, output)

    def test_rank03(self):
        # rank 1 of a 2-element window equals the 100th percentile.
        array = numpy.array([3, 2, 5, 1, 4])
        output = ndimage.rank_filter(array, 1, size=[2])
        assert_array_almost_equal([3, 3, 5, 5, 4], output)
        output = ndimage.percentile_filter(array, 100, size=2)
        assert_array_almost_equal([3, 3, 5, 5, 4], output)

    def test_rank04(self):
        # rank 1 of a 3-element window is the median (50th percentile).
        array = numpy.array([3, 2, 5, 1, 4])
        expected = [3, 3, 2, 4, 4]
        output = ndimage.rank_filter(array, 1, size=3)
        assert_array_almost_equal(expected, output)
        output = ndimage.percentile_filter(array, 50, size=3)
        assert_array_almost_equal(expected, output)
        output = ndimage.median_filter(array, size=3)
        assert_array_almost_equal(expected, output)

    def test_rank05(self):
        # Negative rank counts from the top: -2 of 3 elements == rank 1.
        array = numpy.array([3, 2, 5, 1, 4])
        expected = [3, 3, 2, 4, 4]
        output = ndimage.rank_filter(array, -2, size=3)
        assert_array_almost_equal(expected, output)

    def test_rank06(self):
        # 2-D window; rank 1 of 6 elements matches the 17th percentile.
        array = numpy.array([[3, 2, 5, 1, 4],
                             [5, 8, 3, 7, 1],
                             [5, 6, 9, 3, 5]])
        expected = [[2, 2, 1, 1, 1],
                    [3, 3, 2, 1, 1],
                    [5, 5, 3, 3, 1]]
        output = ndimage.rank_filter(array, 1, size=[2, 3])
        assert_array_almost_equal(expected, output)
        output = ndimage.percentile_filter(array, 17, size=(2, 3))
        assert_array_almost_equal(expected, output)

    def test_rank07(self):
        # Negative rank on a 2-D window.
        array = numpy.array([[3, 2, 5, 1, 4],
                             [5, 8, 3, 7, 1],
                             [5, 6, 9, 3, 5]])
        expected = [[3, 5, 5, 5, 4],
                    [5, 5, 7, 5, 4],
                    [6, 8, 8, 7, 5]]
        output = ndimage.rank_filter(array, -2, size=[2, 3])
        assert_array_almost_equal(expected, output)

    def test_rank08(self):
        # Median of a 2x3 window via percentile, rank and median filters.
        array = numpy.array([[3, 2, 5, 1, 4],
                             [5, 8, 3, 7, 1],
                             [5, 6, 9, 3, 5]])
        expected = [[3, 3, 2, 4, 4],
                    [5, 5, 5, 4, 4],
                    [5, 6, 7, 5, 5]]
        output = ndimage.percentile_filter(array, 50.0, size=(2, 3))
footprint=footprint) assert_array_almost_equal(expected, output) def test_rank13(self): expected = [[5, 2, 5, 1, 1], [5, 8, 3, 5, 5], [6, 6, 5, 5, 5]] footprint = [[1, 0, 1], [0, 1, 0]] for type_ in self.types: array = numpy.array([[3, 2, 5, 1, 4], [5, 8, 3, 7, 1], [5, 6, 9, 3, 5]], type_) output = ndimage.rank_filter(array, 1, footprint=footprint, origin=-1) assert_array_almost_equal(expected, output) def test_rank14(self): expected = [[3, 5, 2, 5, 1], [5, 5, 8, 3, 5], [5, 6, 6, 5, 5]] footprint = [[1, 0, 1], [0, 1, 0]] for type_ in self.types: array = numpy.array([[3, 2, 5, 1, 4], [5, 8, 3, 7, 1], [5, 6, 9, 3, 5]], type_) output = ndimage.rank_filter(array, 1, footprint=footprint, origin=[-1, 0]) assert_array_almost_equal(expected, output) def test_rank15(self): "rank filter 15" expected = [[2, 3, 1, 4, 1], [5, 3, 7, 1, 1], [5, 5, 3, 3, 3]] footprint = [[1, 0, 1], [0, 1, 0]] for type_ in self.types: array = numpy.array([[3, 2, 5, 1, 4], [5, 8, 3, 7, 1], [5, 6, 9, 3, 5]], type_) output = ndimage.rank_filter(array, 0, footprint=footprint, origin=[-1, 0]) assert_array_almost_equal(expected, output) def test_generic_filter1d01(self): weights = numpy.array([1.1, 2.2, 3.3]) def _filter_func(input, output, fltr, total): fltr = fltr / total for ii in range(input.shape[0] - 2): output[ii] = input[ii] * fltr[0] output[ii] += input[ii + 1] * fltr[1] output[ii] += input[ii + 2] * fltr[2] for type_ in self.types: a = numpy.arange(12, dtype=type_) a.shape = (3, 4) r1 = ndimage.correlate1d(a, weights / weights.sum(), 0, origin=-1) r2 = ndimage.generic_filter1d( a, _filter_func, 3, axis=0, origin=-1, extra_arguments=(weights,), extra_keywords={'total': weights.sum()}) assert_array_almost_equal(r1, r2) def test_generic_filter01(self): filter_ = numpy.array([[1.0, 2.0], [3.0, 4.0]]) footprint = numpy.array([[1, 0], [0, 1]]) cf = numpy.array([1., 4.]) def _filter_func(buffer, weights, total=1.0): weights = cf / total return (buffer * weights).sum() for type_ in self.types: a = 
numpy.arange(12, dtype=type_) a.shape = (3, 4) r1 = ndimage.correlate(a, filter_ * footprint) if type_ in self.float_types: r1 /= 5 else: r1 //= 5 r2 = ndimage.generic_filter( a, _filter_func, footprint=footprint, extra_arguments=(cf,), extra_keywords={'total': cf.sum()}) assert_array_almost_equal(r1, r2) def test_extend01(self): array = numpy.array([1, 2, 3]) weights = numpy.array([1, 0]) expected_values = [[1, 1, 2], [3, 1, 2], [1, 1, 2], [2, 1, 2], [0, 1, 2]] for mode, expected_value in zip(self.modes, expected_values): output = ndimage.correlate1d(array, weights, 0, mode=mode, cval=0) assert_array_equal(output, expected_value) def test_extend02(self): array = numpy.array([1, 2, 3]) weights = numpy.array([1, 0, 0, 0, 0, 0, 0, 0]) expected_values = [[1, 1, 1], [3, 1, 2], [3, 3, 2], [1, 2, 3], [0, 0, 0]] for mode, expected_value in zip(self.modes, expected_values): output = ndimage.correlate1d(array, weights, 0, mode=mode, cval=0) assert_array_equal(output, expected_value) def test_extend03(self): array = numpy.array([1, 2, 3]) weights = numpy.array([0, 0, 1]) expected_values = [[2, 3, 3], [2, 3, 1], [2, 3, 3], [2, 3, 2], [2, 3, 0]] for mode, expected_value in zip(self.modes, expected_values): output = ndimage.correlate1d(array, weights, 0, mode=mode, cval=0) assert_array_equal(output, expected_value) def test_extend04(self): array = numpy.array([1, 2, 3]) weights = numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 1]) expected_values = [[3, 3, 3], [2, 3, 1], [2, 1, 1], [1, 2, 3], [0, 0, 0]] for mode, expected_value in zip(self.modes, expected_values): output = ndimage.correlate1d(array, weights, 0, mode=mode, cval=0) assert_array_equal(output, expected_value) def test_extend05(self): array = numpy.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) weights = numpy.array([[1, 0], [0, 0]]) expected_values = [[[1, 1, 2], [1, 1, 2], [4, 4, 5]], [[9, 7, 8], [3, 1, 2], [6, 4, 5]], [[1, 1, 2], [1, 1, 2], [4, 4, 5]], [[5, 4, 5], [2, 1, 2], [5, 4, 5]], [[0, 0, 0], [0, 1, 2], [0, 4, 5]]] for 
mode, expected_value in zip(self.modes, expected_values): output = ndimage.correlate(array, weights, mode=mode, cval=0) assert_array_equal(output, expected_value) def test_extend06(self): array = numpy.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) weights = numpy.array([[0, 0, 0], [0, 0, 0], [0, 0, 1]]) expected_values = [[[5, 6, 6], [8, 9, 9], [8, 9, 9]], [[5, 6, 4], [8, 9, 7], [2, 3, 1]], [[5, 6, 6], [8, 9, 9], [8, 9, 9]], [[5, 6, 5], [8, 9, 8], [5, 6, 5]], [[5, 6, 0], [8, 9, 0], [0, 0, 0]]] for mode, expected_value in zip(self.modes, expected_values): output = ndimage.correlate(array, weights, mode=mode, cval=0) assert_array_equal(output, expected_value) def test_extend07(self): array = numpy.array([1, 2, 3]) weights = numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 1]) expected_values = [[3, 3, 3], [2, 3, 1], [2, 1, 1], [1, 2, 3], [0, 0, 0]] for mode, expected_value in zip(self.modes, expected_values): output = ndimage.correlate(array, weights, mode=mode, cval=0) assert_array_equal(output, expected_value) def test_extend08(self): array = numpy.array([[1], [2], [3]]) weights = numpy.array([[0], [0], [0], [0], [0], [0], [0], [0], [1]]) expected_values = [[[3], [3], [3]], [[2], [3], [1]], [[2], [1], [1]], [[1], [2], [3]], [[0], [0], [0]]] for mode, expected_value in zip(self.modes, expected_values): output = ndimage.correlate(array, weights, mode=mode, cval=0) assert_array_equal(output, expected_value) def test_extend09(self): array = numpy.array([1, 2, 3]) weights = numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 1]) expected_values = [[3, 3, 3], [2, 3, 1], [2, 1, 1], [1, 2, 3], [0, 0, 0]] for mode, expected_value in zip(self.modes, expected_values): output = ndimage.correlate(array, weights, mode=mode, cval=0) assert_array_equal(output, expected_value) def test_extend10(self): array = numpy.array([[1], [2], [3]]) weights = numpy.array([[0], [0], [0], [0], [0], [0], [0], [0], [1]]) expected_values = [[[3], [3], [3]], [[2], [3], [1]], [[2], [1], [1]], [[1], [2], [3]], [[0], [0], [0]]] for 
mode, expected_value in zip(self.modes, expected_values): output = ndimage.correlate(array, weights, mode=mode, cval=0) assert_array_equal(output, expected_value) def test_boundaries(self): def shift(x): return (x[0] + 0.5,) data = numpy.array([1, 2, 3, 4.]) expected = {'constant': [1.5, 2.5, 3.5, -1, -1, -1, -1], 'wrap': [1.5, 2.5, 3.5, 1.5, 2.5, 3.5, 1.5], 'mirror': [1.5, 2.5, 3.5, 3.5, 2.5, 1.5, 1.5], 'nearest': [1.5, 2.5, 3.5, 4, 4, 4, 4]} for mode in expected: assert_array_equal( expected[mode], ndimage.geometric_transform(data, shift, cval=-1, mode=mode, output_shape=(7,), order=1)) def test_boundaries2(self): def shift(x): return (x[0] - 0.9,) data = numpy.array([1, 2, 3, 4]) expected = {'constant': [-1, 1, 2, 3], 'wrap': [3, 1, 2, 3], 'mirror': [2, 1, 2, 3], 'nearest': [1, 1, 2, 3]} for mode in expected: assert_array_equal( expected[mode], ndimage.geometric_transform(data, shift, cval=-1, mode=mode, output_shape=(4,))) def test_fourier_gaussian_real01(self): for shape in [(32, 16), (31, 15)]: for type_, dec in zip([numpy.float32, numpy.float64], [6, 14]): a = numpy.zeros(shape, type_) a[0, 0] = 1.0 a = fft.rfft(a, shape[0], 0) a = fft.fft(a, shape[1], 1) a = ndimage.fourier_gaussian(a, [5.0, 2.5], shape[0], 0) a = fft.ifft(a, shape[1], 1) a = fft.irfft(a, shape[0], 0) assert_almost_equal(ndimage.sum(a), 1, decimal=dec) def test_fourier_gaussian_complex01(self): for shape in [(32, 16), (31, 15)]: for type_, dec in zip([numpy.complex64, numpy.complex128], [6, 14]): a = numpy.zeros(shape, type_) a[0, 0] = 1.0 a = fft.fft(a, shape[0], 0) a = fft.fft(a, shape[1], 1) a = ndimage.fourier_gaussian(a, [5.0, 2.5], -1, 0) a = fft.ifft(a, shape[1], 1) a = fft.ifft(a, shape[0], 0) assert_almost_equal(ndimage.sum(a.real), 1.0, decimal=dec) def test_fourier_uniform_real01(self): for shape in [(32, 16), (31, 15)]: for type_, dec in zip([numpy.float32, numpy.float64], [6, 14]): a = numpy.zeros(shape, type_) a[0, 0] = 1.0 a = fft.rfft(a, shape[0], 0) a = fft.fft(a, shape[1], 
1)
                a = ndimage.fourier_uniform(a, [5.0, 2.5], shape[0], 0)
                a = fft.ifft(a, shape[1], 1)
                a = fft.irfft(a, shape[0], 0)
                # The impulse response of the filter must still sum to 1.
                assert_almost_equal(ndimage.sum(a), 1.0, decimal=dec)

    def test_fourier_uniform_complex01(self):
        # Same round-trip as the real case, but on a full complex FFT
        # (n=-1 tells fourier_uniform the input is a plain fft, not rfft).
        for shape in [(32, 16), (31, 15)]:
            for type_, dec in zip([numpy.complex64, numpy.complex128],
                                  [6, 14]):
                a = numpy.zeros(shape, type_)
                a[0, 0] = 1.0
                a = fft.fft(a, shape[0], 0)
                a = fft.fft(a, shape[1], 1)
                a = ndimage.fourier_uniform(a, [5.0, 2.5], -1, 0)
                a = fft.ifft(a, shape[1], 1)
                a = fft.ifft(a, shape[0], 0)
                assert_almost_equal(ndimage.sum(a.real), 1.0, decimal=dec)

    def test_fourier_shift_real01(self):
        # A [1, 1] Fourier-domain shift must move the image by one pixel
        # along each axis and leave only numerical noise in the imaginary
        # part; looser decimal for float32 than float64.
        for shape in [(32, 16), (31, 15)]:
            for type_, dec in zip([numpy.float32, numpy.float64], [4, 11]):
                expected = numpy.arange(shape[0] * shape[1], dtype=type_)
                expected.shape = shape
                a = fft.rfft(expected, shape[0], 0)
                a = fft.fft(a, shape[1], 1)
                a = ndimage.fourier_shift(a, [1, 1], shape[0], 0)
                a = fft.ifft(a, shape[1], 1)
                a = fft.irfft(a, shape[0], 0)
                assert_array_almost_equal(a[1:, 1:], expected[:-1, :-1],
                                          decimal=dec)
                assert_array_almost_equal(a.imag, numpy.zeros(shape),
                                          decimal=dec)

    def test_fourier_shift_complex01(self):
        # Complex-input variant of test_fourier_shift_real01.
        for shape in [(32, 16), (31, 15)]:
            for type_, dec in zip([numpy.complex64, numpy.complex128],
                                  [4, 11]):
                expected = numpy.arange(shape[0] * shape[1], dtype=type_)
                expected.shape = shape
                a = fft.fft(expected, shape[0], 0)
                a = fft.fft(a, shape[1], 1)
                a = ndimage.fourier_shift(a, [1, 1], -1, 0)
                a = fft.ifft(a, shape[1], 1)
                a = fft.ifft(a, shape[0], 0)
                assert_array_almost_equal(a.real[1:, 1:], expected[:-1, :-1],
                                          decimal=dec)
                assert_array_almost_equal(a.imag, numpy.zeros(shape),
                                          decimal=dec)

    def test_fourier_ellipsoid_real01(self):
        # Ellipsoid filter applied to a delta image through an rfft/irfft
        # round trip; the result must still sum to 1.
        for shape in [(32, 16), (31, 15)]:
            for type_, dec in zip([numpy.float32, numpy.float64], [5, 14]):
                a = numpy.zeros(shape, type_)
                a[0, 0] = 1.0
                a = fft.rfft(a, shape[0], 0)
                a = fft.fft(a, shape[1], 1)
                a = ndimage.fourier_ellipsoid(a, [5.0, 2.5], shape[0], 0)
                a = fft.ifft(a, shape[1], 1)
                a = fft.irfft(a, shape[0],
0) assert_almost_equal(ndimage.sum(a), 1.0, decimal=dec) def test_fourier_ellipsoid_complex01(self): for shape in [(32, 16), (31, 15)]: for type_, dec in zip([numpy.complex64, numpy.complex128], [5, 14]): a = numpy.zeros(shape, type_) a[0, 0] = 1.0 a = fft.fft(a, shape[0], 0) a = fft.fft(a, shape[1], 1) a = ndimage.fourier_ellipsoid(a, [5.0, 2.5], -1, 0) a = fft.ifft(a, shape[1], 1) a = fft.ifft(a, shape[0], 0) assert_almost_equal(ndimage.sum(a.real), 1.0, decimal=dec) def test_spline01(self): for type_ in self.types: data = numpy.ones([], type_) for order in range(2, 6): out = ndimage.spline_filter(data, order=order) assert_array_almost_equal(out, 1) def test_spline02(self): for type_ in self.types: data = numpy.array([1], type_) for order in range(2, 6): out = ndimage.spline_filter(data, order=order) assert_array_almost_equal(out, [1]) def test_spline03(self): for type_ in self.types: data = numpy.ones([], type_) for order in range(2, 6): out = ndimage.spline_filter(data, order, output=type_) assert_array_almost_equal(out, 1) def test_spline04(self): for type_ in self.types: data = numpy.ones([4], type_) for order in range(2, 6): out = ndimage.spline_filter(data, order) assert_array_almost_equal(out, [1, 1, 1, 1]) def test_spline05(self): for type_ in self.types: data = numpy.ones([4, 4], type_) for order in range(2, 6): out = ndimage.spline_filter(data, order=order) assert_array_almost_equal(out, [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]) def test_geometric_transform01(self): data = numpy.array([1]) def mapping(x): return x for order in range(0, 6): out = ndimage.geometric_transform(data, mapping, data.shape, order=order) assert_array_almost_equal(out, [1]) def test_geometric_transform02(self): data = numpy.ones([4]) def mapping(x): return x for order in range(0, 6): out = ndimage.geometric_transform(data, mapping, data.shape, order=order) assert_array_almost_equal(out, [1, 1, 1, 1]) def test_geometric_transform03(self): data = numpy.ones([4]) def 
mapping(x): return (x[0] - 1,) for order in range(0, 6): out = ndimage.geometric_transform(data, mapping, data.shape, order=order) assert_array_almost_equal(out, [0, 1, 1, 1]) def test_geometric_transform04(self): data = numpy.array([4, 1, 3, 2]) def mapping(x): return (x[0] - 1,) for order in range(0, 6): out = ndimage.geometric_transform(data, mapping, data.shape, order=order) assert_array_almost_equal(out, [0, 4, 1, 3]) def test_geometric_transform05(self): data = numpy.array([[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]) def mapping(x): return (x[0], x[1] - 1) for order in range(0, 6): out = ndimage.geometric_transform(data, mapping, data.shape, order=order) assert_array_almost_equal(out, [[0, 1, 1, 1], [0, 1, 1, 1], [0, 1, 1, 1]]) def test_geometric_transform06(self): data = numpy.array([[4, 1, 3, 2], [7, 6, 8, 5], [3, 5, 3, 6]]) def mapping(x): return (x[0], x[1] - 1) for order in range(0, 6): out = ndimage.geometric_transform(data, mapping, data.shape, order=order) assert_array_almost_equal(out, [[0, 4, 1, 3], [0, 7, 6, 8], [0, 3, 5, 3]]) def test_geometric_transform07(self): data = numpy.array([[4, 1, 3, 2], [7, 6, 8, 5], [3, 5, 3, 6]]) def mapping(x): return (x[0] - 1, x[1]) for order in range(0, 6): out = ndimage.geometric_transform(data, mapping, data.shape, order=order) assert_array_almost_equal(out, [[0, 0, 0, 0], [4, 1, 3, 2], [7, 6, 8, 5]]) def test_geometric_transform08(self): data = numpy.array([[4, 1, 3, 2], [7, 6, 8, 5], [3, 5, 3, 6]]) def mapping(x): return (x[0] - 1, x[1] - 1) for order in range(0, 6): out = ndimage.geometric_transform(data, mapping, data.shape, order=order) assert_array_almost_equal(out, [[0, 0, 0, 0], [0, 4, 1, 3], [0, 7, 6, 8]]) def test_geometric_transform10(self): data = numpy.array([[4, 1, 3, 2], [7, 6, 8, 5], [3, 5, 3, 6]]) def mapping(x): return (x[0] - 1, x[1] - 1) for order in range(0, 6): if (order > 1): filtered = ndimage.spline_filter(data, order=order) else: filtered = data out = ndimage.geometric_transform(filtered, 
mapping, data.shape, order=order, prefilter=False) assert_array_almost_equal(out, [[0, 0, 0, 0], [0, 4, 1, 3], [0, 7, 6, 8]]) def test_geometric_transform13(self): data = numpy.ones([2], numpy.float64) def mapping(x): return (x[0] // 2,) for order in range(0, 6): out = ndimage.geometric_transform(data, mapping, [4], order=order) assert_array_almost_equal(out, [1, 1, 1, 1]) def test_geometric_transform14(self): data = [1, 5, 2, 6, 3, 7, 4, 4] def mapping(x): return (2 * x[0],) for order in range(0, 6): out = ndimage.geometric_transform(data, mapping, [4], order=order) assert_array_almost_equal(out, [1, 2, 3, 4]) def test_geometric_transform15(self): data = [1, 2, 3, 4] def mapping(x): return (x[0] / 2,) for order in range(0, 6): out = ndimage.geometric_transform(data, mapping, [8], order=order) assert_array_almost_equal(out[::2], [1, 2, 3, 4]) def test_geometric_transform16(self): data = [[1, 2, 3, 4], [5, 6, 7, 8], [9.0, 10, 11, 12]] def mapping(x): return (x[0], x[1] * 2) for order in range(0, 6): out = ndimage.geometric_transform(data, mapping, (3, 2), order=order) assert_array_almost_equal(out, [[1, 3], [5, 7], [9, 11]]) def test_geometric_transform17(self): data = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]] def mapping(x): return (x[0] * 2, x[1]) for order in range(0, 6): out = ndimage.geometric_transform(data, mapping, (1, 4), order=order) assert_array_almost_equal(out, [[1, 2, 3, 4]]) def test_geometric_transform18(self): data = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]] def mapping(x): return (x[0] * 2, x[1] * 2) for order in range(0, 6): out = ndimage.geometric_transform(data, mapping, (1, 2), order=order) assert_array_almost_equal(out, [[1, 3]]) def test_geometric_transform19(self): data = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]] def mapping(x): return (x[0], x[1] / 2) for order in range(0, 6): out = ndimage.geometric_transform(data, mapping, (3, 8), order=order) assert_array_almost_equal(out[..., ::2], data) def test_geometric_transform20(self): 
data = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]] def mapping(x): return (x[0] / 2, x[1]) for order in range(0, 6): out = ndimage.geometric_transform(data, mapping, (6, 4), order=order) assert_array_almost_equal(out[::2, ...], data) def test_geometric_transform21(self): data = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]] def mapping(x): return (x[0] / 2, x[1] / 2) for order in range(0, 6): out = ndimage.geometric_transform(data, mapping, (6, 8), order=order) assert_array_almost_equal(out[::2, ::2], data) def test_geometric_transform22(self): data = numpy.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]], numpy.float64) def mapping1(x): return (x[0] / 2, x[1] / 2) def mapping2(x): return (x[0] * 2, x[1] * 2) for order in range(0, 6): out = ndimage.geometric_transform(data, mapping1, (6, 8), order=order) out = ndimage.geometric_transform(out, mapping2, (3, 4), order=order) assert_array_almost_equal(out, data) def test_geometric_transform23(self): data = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]] def mapping(x): return (1, x[0] * 2) for order in range(0, 6): out = ndimage.geometric_transform(data, mapping, (2,), order=order) out = out.astype(numpy.int32) assert_array_almost_equal(out, [5, 7]) def test_geometric_transform24(self): data = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]] def mapping(x, a, b): return (a, x[0] * b) for order in range(0, 6): out = ndimage.geometric_transform( data, mapping, (2,), order=order, extra_arguments=(1,), extra_keywords={'b': 2}) assert_array_almost_equal(out, [5, 7]) def test_geometric_transform_endianness_with_output_parameter(self): # geometric transform given output ndarray or dtype with # non-native endianness. 
see issue #4127 data = numpy.array([1]) def mapping(x): return x for out in [data.dtype, data.dtype.newbyteorder(), numpy.empty_like(data), numpy.empty_like(data).astype(data.dtype.newbyteorder())]: returned = ndimage.geometric_transform(data, mapping, data.shape, output=out) result = out if returned is None else returned assert_array_almost_equal(result, [1]) def test_map_coordinates01(self): data = numpy.array([[4, 1, 3, 2], [7, 6, 8, 5], [3, 5, 3, 6]]) idx = numpy.indices(data.shape) idx -= 1 for order in range(0, 6): out = ndimage.map_coordinates(data, idx, order=order) assert_array_almost_equal(out, [[0, 0, 0, 0], [0, 4, 1, 3], [0, 7, 6, 8]]) def test_map_coordinates02(self): data = numpy.array([[4, 1, 3, 2], [7, 6, 8, 5], [3, 5, 3, 6]]) idx = numpy.indices(data.shape, numpy.float64) idx -= 0.5 for order in range(0, 6): out1 = ndimage.shift(data, 0.5, order=order) out2 = ndimage.map_coordinates(data, idx, order=order) assert_array_almost_equal(out1, out2) def test_map_coordinates03(self): data = numpy.array([[4, 1, 3, 2], [7, 6, 8, 5], [3, 5, 3, 6]], order='F') idx = numpy.indices(data.shape) - 1 out = ndimage.map_coordinates(data, idx) assert_array_almost_equal(out, [[0, 0, 0, 0], [0, 4, 1, 3], [0, 7, 6, 8]]) assert_array_almost_equal(out, ndimage.shift(data, (1, 1))) idx = numpy.indices(data[::2].shape) - 1 out = ndimage.map_coordinates(data[::2], idx) assert_array_almost_equal(out, [[0, 0, 0, 0], [0, 4, 1, 3]]) assert_array_almost_equal(out, ndimage.shift(data[::2], (1, 1))) idx = numpy.indices(data[:, ::2].shape) - 1 out = ndimage.map_coordinates(data[:, ::2], idx) assert_array_almost_equal(out, [[0, 0], [0, 4], [0, 7]]) assert_array_almost_equal(out, ndimage.shift(data[:, ::2], (1, 1))) def test_map_coordinates_endianness_with_output_parameter(self): # output parameter given as array or dtype with either endianness # see issue #4127 data = numpy.array([[1, 2], [7, 6]]) expected = numpy.array([[0, 0], [0, 1]]) idx = numpy.indices(data.shape) idx -= 1 for 
out in [data.dtype, data.dtype.newbyteorder(), numpy.empty_like(expected), numpy.empty_like(expected).astype(expected.dtype.newbyteorder())]: returned = ndimage.map_coordinates(data, idx, output=out) result = out if returned is None else returned assert_array_almost_equal(result, expected) @pytest.mark.skipif('win32' in sys.platform or numpy.intp(0).itemsize < 8, reason="do not run on 32 bit or windows (no sparse memory)") def test_map_coordinates_large_data(self): # check crash on large data try: n = 30000 a = numpy.empty(n**2, dtype=numpy.float32).reshape(n, n) # fill the part we might read a[n-3:, n-3:] = 0 ndimage.map_coordinates(a, [[n - 1.5], [n - 1.5]], order=1) except MemoryError: raise pytest.skip("Not enough memory available") def test_affine_transform01(self): data = numpy.array([1]) for order in range(0, 6): out = ndimage.affine_transform(data, [[1]], order=order) assert_array_almost_equal(out, [1]) def test_affine_transform02(self): data = numpy.ones([4]) for order in range(0, 6): out = ndimage.affine_transform(data, [[1]], order=order) assert_array_almost_equal(out, [1, 1, 1, 1]) def test_affine_transform03(self): data = numpy.ones([4]) for order in range(0, 6): out = ndimage.affine_transform(data, [[1]], -1, order=order) assert_array_almost_equal(out, [0, 1, 1, 1]) def test_affine_transform04(self): data = numpy.array([4, 1, 3, 2]) for order in range(0, 6): out = ndimage.affine_transform(data, [[1]], -1, order=order) assert_array_almost_equal(out, [0, 4, 1, 3]) def test_affine_transform05(self): data = numpy.array([[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]) for order in range(0, 6): out = ndimage.affine_transform(data, [[1, 0], [0, 1]], [0, -1], order=order) assert_array_almost_equal(out, [[0, 1, 1, 1], [0, 1, 1, 1], [0, 1, 1, 1]]) def test_affine_transform06(self): data = numpy.array([[4, 1, 3, 2], [7, 6, 8, 5], [3, 5, 3, 6]]) for order in range(0, 6): out = ndimage.affine_transform(data, [[1, 0], [0, 1]], [0, -1], order=order) 
assert_array_almost_equal(out, [[0, 4, 1, 3], [0, 7, 6, 8], [0, 3, 5, 3]]) def test_affine_transform07(self): data = numpy.array([[4, 1, 3, 2], [7, 6, 8, 5], [3, 5, 3, 6]]) for order in range(0, 6): out = ndimage.affine_transform(data, [[1, 0], [0, 1]], [-1, 0], order=order) assert_array_almost_equal(out, [[0, 0, 0, 0], [4, 1, 3, 2], [7, 6, 8, 5]]) def test_affine_transform08(self): data = numpy.array([[4, 1, 3, 2], [7, 6, 8, 5], [3, 5, 3, 6]]) for order in range(0, 6): out = ndimage.affine_transform(data, [[1, 0], [0, 1]], [-1, -1], order=order) assert_array_almost_equal(out, [[0, 0, 0, 0], [0, 4, 1, 3], [0, 7, 6, 8]]) def test_affine_transform09(self): data = numpy.array([[4, 1, 3, 2], [7, 6, 8, 5], [3, 5, 3, 6]]) for order in range(0, 6): if (order > 1): filtered = ndimage.spline_filter(data, order=order) else: filtered = data out = ndimage.affine_transform(filtered, [[1, 0], [0, 1]], [-1, -1], order=order, prefilter=False) assert_array_almost_equal(out, [[0, 0, 0, 0], [0, 4, 1, 3], [0, 7, 6, 8]]) def test_affine_transform10(self): data = numpy.ones([2], numpy.float64) for order in range(0, 6): out = ndimage.affine_transform(data, [[0.5]], output_shape=(4,), order=order) assert_array_almost_equal(out, [1, 1, 1, 0]) def test_affine_transform11(self): data = [1, 5, 2, 6, 3, 7, 4, 4] for order in range(0, 6): out = ndimage.affine_transform(data, [[2]], 0, (4,), order=order) assert_array_almost_equal(out, [1, 2, 3, 4]) def test_affine_transform12(self): data = [1, 2, 3, 4] for order in range(0, 6): out = ndimage.affine_transform(data, [[0.5]], 0, (8,), order=order) assert_array_almost_equal(out[::2], [1, 2, 3, 4]) def test_affine_transform13(self): data = [[1, 2, 3, 4], [5, 6, 7, 8], [9.0, 10, 11, 12]] for order in range(0, 6): out = ndimage.affine_transform(data, [[1, 0], [0, 2]], 0, (3, 2), order=order) assert_array_almost_equal(out, [[1, 3], [5, 7], [9, 11]]) def test_affine_transform14(self): data = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]] for order in 
range(0, 6): out = ndimage.affine_transform(data, [[2, 0], [0, 1]], 0, (1, 4), order=order) assert_array_almost_equal(out, [[1, 2, 3, 4]]) def test_affine_transform15(self): data = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]] for order in range(0, 6): out = ndimage.affine_transform(data, [[2, 0], [0, 2]], 0, (1, 2), order=order) assert_array_almost_equal(out, [[1, 3]]) def test_affine_transform16(self): data = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]] for order in range(0, 6): out = ndimage.affine_transform(data, [[1, 0.0], [0, 0.5]], 0, (3, 8), order=order) assert_array_almost_equal(out[..., ::2], data) def test_affine_transform17(self): data = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]] for order in range(0, 6): out = ndimage.affine_transform(data, [[0.5, 0], [0, 1]], 0, (6, 4), order=order) assert_array_almost_equal(out[::2, ...], data) def test_affine_transform18(self): data = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]] for order in range(0, 6): out = ndimage.affine_transform(data, [[0.5, 0], [0, 0.5]], 0, (6, 8), order=order) assert_array_almost_equal(out[::2, ::2], data) def test_affine_transform19(self): data = numpy.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]], numpy.float64) for order in range(0, 6): out = ndimage.affine_transform(data, [[0.5, 0], [0, 0.5]], 0, (6, 8), order=order) out = ndimage.affine_transform(out, [[2.0, 0], [0, 2.0]], 0, (3, 4), order=order) assert_array_almost_equal(out, data) def test_affine_transform20(self): data = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]] for order in range(0, 6): out = ndimage.affine_transform(data, [[0], [2]], 0, (2,), order=order) assert_array_almost_equal(out, [1, 3]) def test_affine_transform21(self): data = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]] for order in range(0, 6): out = ndimage.affine_transform(data, [[2], [0]], 0, (2,), order=order) assert_array_almost_equal(out, [1, 9]) def test_affine_transform22(self): # shift and offset interaction; see issue #1547 data = 
numpy.array([4, 1, 3, 2]) for order in range(0, 6): out = ndimage.affine_transform(data, [[2]], [-1], (3,), order=order) assert_array_almost_equal(out, [0, 1, 2]) def test_affine_transform23(self): # shift and offset interaction; see issue #1547 data = numpy.array([4, 1, 3, 2]) for order in range(0, 6): out = ndimage.affine_transform(data, [[0.5]], [-1], (8,), order=order) assert_array_almost_equal(out[::2], [0, 4, 1, 3]) def test_affine_transform24(self): # consistency between diagonal and non-diagonal case; see issue #1547 data = numpy.array([4, 1, 3, 2]) for order in range(0, 6): with suppress_warnings() as sup: sup.filter(UserWarning, "The behaviour of affine_transform with a one-dimensional array .* has changed") out1 = ndimage.affine_transform(data, [2], -1, order=order) out2 = ndimage.affine_transform(data, [[2]], -1, order=order) assert_array_almost_equal(out1, out2) def test_affine_transform25(self): # consistency between diagonal and non-diagonal case; see issue #1547 data = numpy.array([4, 1, 3, 2]) for order in range(0, 6): with suppress_warnings() as sup: sup.filter(UserWarning, "The behaviour of affine_transform with a one-dimensional array .* has changed") out1 = ndimage.affine_transform(data, [0.5], -1, order=order) out2 = ndimage.affine_transform(data, [[0.5]], -1, order=order) assert_array_almost_equal(out1, out2) def test_affine_transform26(self): # test homogeneous coordinates data = numpy.array([[4, 1, 3, 2], [7, 6, 8, 5], [3, 5, 3, 6]]) for order in range(0, 6): if (order > 1): filtered = ndimage.spline_filter(data, order=order) else: filtered = data tform_original = numpy.eye(2) offset_original = -numpy.ones((2, 1)) tform_h1 = numpy.hstack((tform_original, offset_original)) tform_h2 = numpy.vstack((tform_h1, [[0, 0, 1]])) out1 = ndimage.affine_transform(filtered, tform_original, offset_original.ravel(), order=order, prefilter=False) out2 = ndimage.affine_transform(filtered, tform_h1, order=order, prefilter=False) out3 = 
ndimage.affine_transform(filtered, tform_h2, order=order, prefilter=False) for out in [out1, out2, out3]: assert_array_almost_equal(out, [[0, 0, 0, 0], [0, 4, 1, 3], [0, 7, 6, 8]]) def test_affine_transform27(self): # test valid homogeneous transformation matrix data = numpy.array([[4, 1, 3, 2], [7, 6, 8, 5], [3, 5, 3, 6]]) tform_h1 = numpy.hstack((numpy.eye(2), -numpy.ones((2, 1)))) tform_h2 = numpy.vstack((tform_h1, [[5, 2, 1]])) assert_raises(ValueError, ndimage.affine_transform, data, tform_h2) def test_affine_transform_1d_endianness_with_output_parameter(self): # 1d affine transform given output ndarray or dtype with # either endianness. see issue #7388 data = numpy.ones((2, 2)) for out in [numpy.empty_like(data), numpy.empty_like(data).astype(data.dtype.newbyteorder()), data.dtype, data.dtype.newbyteorder()]: with suppress_warnings() as sup: sup.filter(UserWarning, "The behaviour of affine_transform with a one-dimensional array .* has changed") returned = ndimage.affine_transform(data, [1, 1], output=out) result = out if returned is None else returned assert_array_almost_equal(result, [[1, 1], [1, 1]]) def test_affine_transform_multi_d_endianness_with_output_parameter(self): # affine transform given output ndarray or dtype with either endianness # see issue #4127 data = numpy.array([1]) for out in [data.dtype, data.dtype.newbyteorder(), numpy.empty_like(data), numpy.empty_like(data).astype(data.dtype.newbyteorder())]: returned = ndimage.affine_transform(data, [[1]], output=out) result = out if returned is None else returned assert_array_almost_equal(result, [1]) def test_shift01(self): data = numpy.array([1]) for order in range(0, 6): out = ndimage.shift(data, [1], order=order) assert_array_almost_equal(out, [0]) def test_shift02(self): data = numpy.ones([4]) for order in range(0, 6): out = ndimage.shift(data, [1], order=order) assert_array_almost_equal(out, [0, 1, 1, 1]) def test_shift03(self): data = numpy.ones([4]) for order in range(0, 6): out = 
ndimage.shift(data, -1, order=order) assert_array_almost_equal(out, [1, 1, 1, 0]) def test_shift04(self): data = numpy.array([4, 1, 3, 2]) for order in range(0, 6): out = ndimage.shift(data, 1, order=order) assert_array_almost_equal(out, [0, 4, 1, 3]) def test_shift05(self): data = numpy.array([[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]) for order in range(0, 6): out = ndimage.shift(data, [0, 1], order=order) assert_array_almost_equal(out, [[0, 1, 1, 1], [0, 1, 1, 1], [0, 1, 1, 1]]) def test_shift06(self): data = numpy.array([[4, 1, 3, 2], [7, 6, 8, 5], [3, 5, 3, 6]]) for order in range(0, 6): out = ndimage.shift(data, [0, 1], order=order) assert_array_almost_equal(out, [[0, 4, 1, 3], [0, 7, 6, 8], [0, 3, 5, 3]]) def test_shift07(self): data = numpy.array([[4, 1, 3, 2], [7, 6, 8, 5], [3, 5, 3, 6]]) for order in range(0, 6): out = ndimage.shift(data, [1, 0], order=order) assert_array_almost_equal(out, [[0, 0, 0, 0], [4, 1, 3, 2], [7, 6, 8, 5]]) def test_shift08(self): data = numpy.array([[4, 1, 3, 2], [7, 6, 8, 5], [3, 5, 3, 6]]) for order in range(0, 6): out = ndimage.shift(data, [1, 1], order=order) assert_array_almost_equal(out, [[0, 0, 0, 0], [0, 4, 1, 3], [0, 7, 6, 8]]) def test_shift09(self): data = numpy.array([[4, 1, 3, 2], [7, 6, 8, 5], [3, 5, 3, 6]]) for order in range(0, 6): if (order > 1): filtered = ndimage.spline_filter(data, order=order) else: filtered = data out = ndimage.shift(filtered, [1, 1], order=order, prefilter=False) assert_array_almost_equal(out, [[0, 0, 0, 0], [0, 4, 1, 3], [0, 7, 6, 8]]) def test_zoom1(self): for order in range(0, 6): for z in [2, [2, 2]]: arr = numpy.array(list(range(25))).reshape((5, 5)).astype(float) arr = ndimage.zoom(arr, z, order=order) assert_equal(arr.shape, (10, 10)) assert_(numpy.all(arr[-1, :] != 0)) assert_(numpy.all(arr[-1, :] >= (20 - eps))) assert_(numpy.all(arr[0, :] <= (5 + eps))) assert_(numpy.all(arr >= (0 - eps))) assert_(numpy.all(arr <= (24 + eps))) def test_zoom2(self): arr = 
numpy.arange(12).reshape((3, 4)) out = ndimage.zoom(ndimage.zoom(arr, 2), 0.5) assert_array_equal(out, arr) def test_zoom3(self): arr = numpy.array([[1, 2]]) out1 = ndimage.zoom(arr, (2, 1)) out2 = ndimage.zoom(arr, (1, 2)) assert_array_almost_equal(out1, numpy.array([[1, 2], [1, 2]])) assert_array_almost_equal(out2, numpy.array([[1, 1, 2, 2]])) def test_zoom_affine01(self): data = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]] for order in range(0, 6): with suppress_warnings() as sup: sup.filter(UserWarning, "The behaviour of affine_transform with a one-dimensional array .* has changed") out = ndimage.affine_transform(data, [0.5, 0.5], 0, (6, 8), order=order) assert_array_almost_equal(out[::2, ::2], data) def test_zoom_infinity(self): # Ticket #1419 regression test dim = 8 ndimage.zoom(numpy.zeros((dim, dim)), 1./dim, mode='nearest') def test_zoom_zoomfactor_one(self): # Ticket #1122 regression test arr = numpy.zeros((1, 5, 5)) zoom = (1.0, 2.0, 2.0) out = ndimage.zoom(arr, zoom, cval=7) ref = numpy.zeros((1, 10, 10)) assert_array_almost_equal(out, ref) def test_zoom_output_shape_roundoff(self): arr = numpy.zeros((3, 11, 25)) zoom = (4.0 / 3, 15.0 / 11, 29.0 / 25) with suppress_warnings() as sup: sup.filter(UserWarning, "From scipy 0.13.0, the output shape of zoom.. is calculated with round.. 
instead of int") out = ndimage.zoom(arr, zoom) assert_array_equal(out.shape, (4, 15, 29)) def test_rotate01(self): data = numpy.array([[0, 0, 0, 0], [0, 1, 1, 0], [0, 0, 0, 0]], dtype=numpy.float64) for order in range(0, 6): out = ndimage.rotate(data, 0) assert_array_almost_equal(out, data) def test_rotate02(self): data = numpy.array([[0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0]], dtype=numpy.float64) expected = numpy.array([[0, 0, 0], [0, 0, 0], [0, 1, 0], [0, 0, 0]], dtype=numpy.float64) for order in range(0, 6): out = ndimage.rotate(data, 90) assert_array_almost_equal(out, expected) def test_rotate03(self): data = numpy.array([[0, 0, 0, 0, 0], [0, 1, 1, 0, 0], [0, 0, 0, 0, 0]], dtype=numpy.float64) expected = numpy.array([[0, 0, 0], [0, 0, 0], [0, 1, 0], [0, 1, 0], [0, 0, 0]], dtype=numpy.float64) for order in range(0, 6): out = ndimage.rotate(data, 90) assert_array_almost_equal(out, expected) def test_rotate04(self): data = numpy.array([[0, 0, 0, 0, 0], [0, 1, 1, 0, 0], [0, 0, 0, 0, 0]], dtype=numpy.float64) expected = numpy.array([[0, 0, 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 1, 0, 0]], dtype=numpy.float64) for order in range(0, 6): out = ndimage.rotate(data, 90, reshape=False) assert_array_almost_equal(out, expected) def test_rotate05(self): data = numpy.empty((4, 3, 3)) for i in range(3): data[:, :, i] = numpy.array([[0, 0, 0], [0, 1, 0], [0, 1, 0], [0, 0, 0]], dtype=numpy.float64) expected = numpy.array([[0, 0, 0, 0], [0, 1, 1, 0], [0, 0, 0, 0]], dtype=numpy.float64) for order in range(0, 6): out = ndimage.rotate(data, 90) for i in range(3): assert_array_almost_equal(out[:, :, i], expected) def test_rotate06(self): data = numpy.empty((3, 4, 3)) for i in range(3): data[:, :, i] = numpy.array([[0, 0, 0, 0], [0, 1, 1, 0], [0, 0, 0, 0]], dtype=numpy.float64) expected = numpy.array([[0, 0, 0], [0, 1, 0], [0, 1, 0], [0, 0, 0]], dtype=numpy.float64) for order in range(0, 6): out = ndimage.rotate(data, 90) for i in range(3): assert_array_almost_equal(out[:, :, i], expected) 
def test_rotate07(self): data = numpy.array([[[0, 0, 0, 0, 0], [0, 1, 1, 0, 0], [0, 0, 0, 0, 0]]] * 2, dtype=numpy.float64) data = data.transpose() expected = numpy.array([[[0, 0, 0], [0, 1, 0], [0, 1, 0], [0, 0, 0], [0, 0, 0]]] * 2, dtype=numpy.float64) expected = expected.transpose([2, 1, 0]) for order in range(0, 6): out = ndimage.rotate(data, 90, axes=(0, 1)) assert_array_almost_equal(out, expected) def test_rotate08(self): data = numpy.array([[[0, 0, 0, 0, 0], [0, 1, 1, 0, 0], [0, 0, 0, 0, 0]]] * 2, dtype=numpy.float64) data = data.transpose() expected = numpy.array([[[0, 0, 1, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 0, 0]]] * 2, dtype=numpy.float64) expected = expected.transpose() for order in range(0, 6): out = ndimage.rotate(data, 90, axes=(0, 1), reshape=False) assert_array_almost_equal(out, expected) def test_watershed_ift01(self): data = numpy.array([[0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1, 0], [0, 1, 0, 0, 0, 1, 0], [0, 1, 0, 0, 0, 1, 0], [0, 1, 0, 0, 0, 1, 0], [0, 1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]], numpy.uint8) markers = numpy.array([[-1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]], numpy.int8) out = ndimage.watershed_ift(data, markers, structure=[[1, 1, 1], [1, 1, 1], [1, 1, 1]]) expected = [[-1, -1, -1, -1, -1, -1, -1], [-1, 1, 1, 1, 1, 1, -1], [-1, 1, 1, 1, 1, 1, -1], [-1, 1, 1, 1, 1, 1, -1], [-1, 1, 1, 1, 1, 1, -1], [-1, 1, 1, 1, 1, 1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]] assert_array_almost_equal(out, expected) def test_watershed_ift02(self): data = numpy.array([[0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1, 0], [0, 1, 0, 0, 0, 1, 0], [0, 1, 0, 0, 0, 1, 0], [0, 1, 0, 0, 0, 1, 0], [0, 1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]], numpy.uint8) markers = numpy.array([[-1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 
0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]], numpy.int8) out = ndimage.watershed_ift(data, markers) expected = [[-1, -1, -1, -1, -1, -1, -1], [-1, -1, 1, 1, 1, -1, -1], [-1, 1, 1, 1, 1, 1, -1], [-1, 1, 1, 1, 1, 1, -1], [-1, 1, 1, 1, 1, 1, -1], [-1, -1, 1, 1, 1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]] assert_array_almost_equal(out, expected) def test_watershed_ift03(self): data = numpy.array([[0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1, 0], [0, 1, 0, 1, 0, 1, 0], [0, 1, 0, 1, 0, 1, 0], [0, 1, 0, 1, 0, 1, 0], [0, 1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0]], numpy.uint8) markers = numpy.array([[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 2, 0, 3, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, -1]], numpy.int8) out = ndimage.watershed_ift(data, markers) expected = [[-1, -1, -1, -1, -1, -1, -1], [-1, -1, 2, -1, 3, -1, -1], [-1, 2, 2, 3, 3, 3, -1], [-1, 2, 2, 3, 3, 3, -1], [-1, 2, 2, 3, 3, 3, -1], [-1, -1, 2, -1, 3, -1, -1], [-1, -1, -1, -1, -1, -1, -1]] assert_array_almost_equal(out, expected) def test_watershed_ift04(self): data = numpy.array([[0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1, 0], [0, 1, 0, 1, 0, 1, 0], [0, 1, 0, 1, 0, 1, 0], [0, 1, 0, 1, 0, 1, 0], [0, 1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0]], numpy.uint8) markers = numpy.array([[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 2, 0, 3, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, -1]], numpy.int8) out = ndimage.watershed_ift(data, markers, structure=[[1, 1, 1], [1, 1, 1], [1, 1, 1]]) expected = [[-1, -1, -1, -1, -1, -1, -1], [-1, 2, 2, 3, 3, 3, -1], [-1, 2, 2, 3, 3, 3, -1], [-1, 2, 2, 3, 3, 3, -1], [-1, 2, 2, 3, 3, 3, -1], [-1, 2, 2, 3, 3, 3, -1], [-1, -1, -1, -1, -1, -1, -1]] assert_array_almost_equal(out, expected) def test_watershed_ift05(self): data = numpy.array([[0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1, 0], [0, 1, 
0, 1, 0, 1, 0], [0, 1, 0, 1, 0, 1, 0], [0, 1, 0, 1, 0, 1, 0], [0, 1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0]], numpy.uint8) markers = numpy.array([[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 3, 0, 2, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, -1]], numpy.int8) out = ndimage.watershed_ift(data, markers, structure=[[1, 1, 1], [1, 1, 1], [1, 1, 1]]) expected = [[-1, -1, -1, -1, -1, -1, -1], [-1, 3, 3, 2, 2, 2, -1], [-1, 3, 3, 2, 2, 2, -1], [-1, 3, 3, 2, 2, 2, -1], [-1, 3, 3, 2, 2, 2, -1], [-1, 3, 3, 2, 2, 2, -1], [-1, -1, -1, -1, -1, -1, -1]] assert_array_almost_equal(out, expected) def test_watershed_ift06(self): data = numpy.array([[0, 1, 0, 0, 0, 1, 0], [0, 1, 0, 0, 0, 1, 0], [0, 1, 0, 0, 0, 1, 0], [0, 1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]], numpy.uint8) markers = numpy.array([[-1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]], numpy.int8) out = ndimage.watershed_ift(data, markers, structure=[[1, 1, 1], [1, 1, 1], [1, 1, 1]]) expected = [[-1, 1, 1, 1, 1, 1, -1], [-1, 1, 1, 1, 1, 1, -1], [-1, 1, 1, 1, 1, 1, -1], [-1, 1, 1, 1, 1, 1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]] assert_array_almost_equal(out, expected) def test_watershed_ift07(self): shape = (7, 6) data = numpy.zeros(shape, dtype=numpy.uint8) data = data.transpose() data[...] 
= numpy.array([[0, 1, 0, 0, 0, 1, 0], [0, 1, 0, 0, 0, 1, 0], [0, 1, 0, 0, 0, 1, 0], [0, 1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]], numpy.uint8) markers = numpy.array([[-1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]], numpy.int8) out = numpy.zeros(shape, dtype=numpy.int16) out = out.transpose() ndimage.watershed_ift(data, markers, structure=[[1, 1, 1], [1, 1, 1], [1, 1, 1]], output=out) expected = [[-1, 1, 1, 1, 1, 1, -1], [-1, 1, 1, 1, 1, 1, -1], [-1, 1, 1, 1, 1, 1, -1], [-1, 1, 1, 1, 1, 1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]] assert_array_almost_equal(out, expected) def test_distance_transform_bf01(self): # brute force (bf) distance transform for type_ in self.types: data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0]], type_) out, ft = ndimage.distance_transform_bf(data, 'euclidean', return_indices=True) expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 0, 1, 2, 4, 2, 1, 0, 0], [0, 0, 1, 4, 8, 4, 1, 0, 0], [0, 0, 1, 2, 4, 2, 1, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0]] assert_array_almost_equal(out * out, expected) expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 1, 2, 2, 2, 2], [3, 3, 3, 2, 1, 2, 3, 3, 3], [4, 4, 4, 4, 6, 4, 4, 4, 4], [5, 5, 6, 6, 7, 6, 6, 5, 5], [6, 6, 6, 7, 7, 7, 6, 6, 6], [7, 7, 7, 7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8, 8, 8, 8]], [[0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2, 2, 4, 6, 6, 7, 8], [0, 1, 1, 2, 4, 6, 7, 7, 8], [0, 1, 1, 1, 6, 7, 7, 7, 8], [0, 1, 2, 2, 4, 6, 6, 7, 8], [0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2, 3, 
4, 5, 6, 7, 8], [0, 1, 2, 3, 4, 5, 6, 7, 8]]] assert_array_almost_equal(ft, expected) def test_distance_transform_bf02(self): for type_ in self.types: data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0]], type_) out, ft = ndimage.distance_transform_bf(data, 'cityblock', return_indices=True) expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 0, 1, 2, 2, 2, 1, 0, 0], [0, 0, 1, 2, 3, 2, 1, 0, 0], [0, 0, 1, 2, 2, 2, 1, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0]] assert_array_almost_equal(out, expected) expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 1, 2, 2, 2, 2], [3, 3, 3, 3, 1, 3, 3, 3, 3], [4, 4, 4, 4, 7, 4, 4, 4, 4], [5, 5, 6, 7, 7, 7, 6, 5, 5], [6, 6, 6, 7, 7, 7, 6, 6, 6], [7, 7, 7, 7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8, 8, 8, 8]], [[0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2, 2, 4, 6, 6, 7, 8], [0, 1, 1, 1, 4, 7, 7, 7, 8], [0, 1, 1, 1, 4, 7, 7, 7, 8], [0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2, 3, 4, 5, 6, 7, 8]]] assert_array_almost_equal(expected, ft) def test_distance_transform_bf03(self): for type_ in self.types: data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0]], type_) out, ft = ndimage.distance_transform_bf(data, 'chessboard', return_indices=True) expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 0, 1, 1, 2, 1, 1, 0, 0], [0, 0, 1, 2, 2, 2, 1, 0, 
0], [0, 0, 1, 1, 2, 1, 1, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0]] assert_array_almost_equal(out, expected) expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 1, 2, 2, 2, 2], [3, 3, 4, 2, 2, 2, 4, 3, 3], [4, 4, 5, 6, 6, 6, 5, 4, 4], [5, 5, 6, 6, 7, 6, 6, 5, 5], [6, 6, 6, 7, 7, 7, 6, 6, 6], [7, 7, 7, 7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8, 8, 8, 8]], [[0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2, 2, 5, 6, 6, 7, 8], [0, 1, 1, 2, 6, 6, 7, 7, 8], [0, 1, 1, 2, 6, 7, 7, 7, 8], [0, 1, 2, 2, 6, 6, 7, 7, 8], [0, 1, 2, 4, 5, 6, 6, 7, 8], [0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2, 3, 4, 5, 6, 7, 8]]] assert_array_almost_equal(ft, expected) def test_distance_transform_bf04(self): for type_ in self.types: data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0]], type_) tdt, tft = ndimage.distance_transform_bf(data, return_indices=1) dts = [] fts = [] dt = numpy.zeros(data.shape, dtype=numpy.float64) ndimage.distance_transform_bf(data, distances=dt) dts.append(dt) ft = ndimage.distance_transform_bf( data, return_distances=False, return_indices=1) fts.append(ft) ft = numpy.indices(data.shape, dtype=numpy.int32) ndimage.distance_transform_bf( data, return_distances=False, return_indices=True, indices=ft) fts.append(ft) dt, ft = ndimage.distance_transform_bf( data, return_indices=1) dts.append(dt) fts.append(ft) dt = numpy.zeros(data.shape, dtype=numpy.float64) ft = ndimage.distance_transform_bf( data, distances=dt, return_indices=True) dts.append(dt) fts.append(ft) ft = numpy.indices(data.shape, dtype=numpy.int32) dt = ndimage.distance_transform_bf( data, return_indices=True, indices=ft) dts.append(dt) fts.append(ft) dt = numpy.zeros(data.shape, 
dtype=numpy.float64) ft = numpy.indices(data.shape, dtype=numpy.int32) ndimage.distance_transform_bf( data, distances=dt, return_indices=True, indices=ft) dts.append(dt) fts.append(ft) for dt in dts: assert_array_almost_equal(tdt, dt) for ft in fts: assert_array_almost_equal(tft, ft) def test_distance_transform_bf05(self): for type_ in self.types: data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0]], type_) out, ft = ndimage.distance_transform_bf( data, 'euclidean', return_indices=True, sampling=[2, 2]) expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 4, 4, 4, 0, 0, 0], [0, 0, 4, 8, 16, 8, 4, 0, 0], [0, 0, 4, 16, 32, 16, 4, 0, 0], [0, 0, 4, 8, 16, 8, 4, 0, 0], [0, 0, 0, 4, 4, 4, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0]] assert_array_almost_equal(out * out, expected) expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 1, 2, 2, 2, 2], [3, 3, 3, 2, 1, 2, 3, 3, 3], [4, 4, 4, 4, 6, 4, 4, 4, 4], [5, 5, 6, 6, 7, 6, 6, 5, 5], [6, 6, 6, 7, 7, 7, 6, 6, 6], [7, 7, 7, 7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8, 8, 8, 8]], [[0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2, 2, 4, 6, 6, 7, 8], [0, 1, 1, 2, 4, 6, 7, 7, 8], [0, 1, 1, 1, 6, 7, 7, 7, 8], [0, 1, 2, 2, 4, 6, 6, 7, 8], [0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2, 3, 4, 5, 6, 7, 8]]] assert_array_almost_equal(ft, expected) def test_distance_transform_bf06(self): for type_ in self.types: data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0]], type_) out, 
ft = ndimage.distance_transform_bf( data, 'euclidean', return_indices=True, sampling=[2, 1]) expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 4, 1, 0, 0, 0], [0, 0, 1, 4, 8, 4, 1, 0, 0], [0, 0, 1, 4, 9, 4, 1, 0, 0], [0, 0, 1, 4, 8, 4, 1, 0, 0], [0, 0, 0, 1, 4, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0]] assert_array_almost_equal(out * out, expected) expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 2, 3, 3, 3, 3], [4, 4, 4, 4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 6, 5, 5, 5, 5], [6, 6, 6, 6, 7, 6, 6, 6, 6], [7, 7, 7, 7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8, 8, 8, 8]], [[0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2, 2, 6, 6, 6, 7, 8], [0, 1, 1, 1, 6, 7, 7, 7, 8], [0, 1, 1, 1, 7, 7, 7, 7, 8], [0, 1, 1, 1, 6, 7, 7, 7, 8], [0, 1, 2, 2, 4, 6, 6, 7, 8], [0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2, 3, 4, 5, 6, 7, 8]]] assert_array_almost_equal(ft, expected) def test_distance_transform_cdt01(self): # chamfer type distance (cdt) transform for type_ in self.types: data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0]], type_) out, ft = ndimage.distance_transform_cdt( data, 'cityblock', return_indices=True) bf = ndimage.distance_transform_bf(data, 'cityblock') assert_array_almost_equal(bf, out) expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1], [2, 2, 2, 1, 1, 1, 2, 2, 2], [3, 3, 2, 1, 1, 1, 2, 3, 3], [4, 4, 4, 4, 1, 4, 4, 4, 4], [5, 5, 5, 5, 7, 7, 6, 5, 5], [6, 6, 6, 6, 7, 7, 6, 6, 6], [7, 7, 7, 7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8, 8, 8, 8]], [[0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 1, 1, 4, 7, 7, 7, 8], [0, 1, 1, 1, 4, 5, 6, 
7, 8], [0, 1, 2, 2, 4, 5, 6, 7, 8], [0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2, 3, 4, 5, 6, 7, 8]]] assert_array_almost_equal(ft, expected) def test_distance_transform_cdt02(self): for type_ in self.types: data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0]], type_) out, ft = ndimage.distance_transform_cdt(data, 'chessboard', return_indices=True) bf = ndimage.distance_transform_bf(data, 'chessboard') assert_array_almost_equal(bf, out) expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1], [2, 2, 2, 1, 1, 1, 2, 2, 2], [3, 3, 2, 2, 1, 2, 2, 3, 3], [4, 4, 3, 2, 2, 2, 3, 4, 4], [5, 5, 4, 6, 7, 6, 4, 5, 5], [6, 6, 6, 6, 7, 7, 6, 6, 6], [7, 7, 7, 7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8, 8, 8, 8]], [[0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2, 2, 3, 4, 6, 7, 8], [0, 1, 1, 2, 2, 6, 6, 7, 8], [0, 1, 1, 1, 2, 6, 7, 7, 8], [0, 1, 1, 2, 6, 6, 7, 7, 8], [0, 1, 2, 2, 5, 6, 6, 7, 8], [0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2, 3, 4, 5, 6, 7, 8]]] assert_array_almost_equal(ft, expected) def test_distance_transform_cdt03(self): for type_ in self.types: data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0]], type_) tdt, tft = ndimage.distance_transform_cdt(data, return_indices=True) dts = [] fts = [] dt = numpy.zeros(data.shape, dtype=numpy.int32) ndimage.distance_transform_cdt(data, distances=dt) dts.append(dt) ft = ndimage.distance_transform_cdt( data, return_distances=False, return_indices=True) fts.append(ft) ft = numpy.indices(data.shape, dtype=numpy.int32) ndimage.distance_transform_cdt( data, 
return_distances=False, return_indices=True, indices=ft) fts.append(ft) dt, ft = ndimage.distance_transform_cdt( data, return_indices=True) dts.append(dt) fts.append(ft) dt = numpy.zeros(data.shape, dtype=numpy.int32) ft = ndimage.distance_transform_cdt( data, distances=dt, return_indices=True) dts.append(dt) fts.append(ft) ft = numpy.indices(data.shape, dtype=numpy.int32) dt = ndimage.distance_transform_cdt( data, return_indices=True, indices=ft) dts.append(dt) fts.append(ft) dt = numpy.zeros(data.shape, dtype=numpy.int32) ft = numpy.indices(data.shape, dtype=numpy.int32) ndimage.distance_transform_cdt(data, distances=dt, return_indices=True, indices=ft) dts.append(dt) fts.append(ft) for dt in dts: assert_array_almost_equal(tdt, dt) for ft in fts: assert_array_almost_equal(tft, ft) def test_distance_transform_edt01(self): # euclidean distance transform (edt) for type_ in self.types: data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0]], type_) out, ft = ndimage.distance_transform_edt(data, return_indices=True) bf = ndimage.distance_transform_bf(data, 'euclidean') assert_array_almost_equal(bf, out) dt = ft - numpy.indices(ft.shape[1:], dtype=ft.dtype) dt = dt.astype(numpy.float64) numpy.multiply(dt, dt, dt) dt = numpy.add.reduce(dt, axis=0) numpy.sqrt(dt, dt) assert_array_almost_equal(bf, dt) def test_distance_transform_edt02(self): for type_ in self.types: data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0]], type_) tdt, tft = ndimage.distance_transform_edt(data, return_indices=True) dts = [] fts = [] dt = 
numpy.zeros(data.shape, dtype=numpy.float64) ndimage.distance_transform_edt(data, distances=dt) dts.append(dt) ft = ndimage.distance_transform_edt( data, return_distances=0, return_indices=True) fts.append(ft) ft = numpy.indices(data.shape, dtype=numpy.int32) ndimage.distance_transform_edt( data, return_distances=False, return_indices=True, indices=ft) fts.append(ft) dt, ft = ndimage.distance_transform_edt( data, return_indices=True) dts.append(dt) fts.append(ft) dt = numpy.zeros(data.shape, dtype=numpy.float64) ft = ndimage.distance_transform_edt( data, distances=dt, return_indices=True) dts.append(dt) fts.append(ft) ft = numpy.indices(data.shape, dtype=numpy.int32) dt = ndimage.distance_transform_edt( data, return_indices=True, indices=ft) dts.append(dt) fts.append(ft) dt = numpy.zeros(data.shape, dtype=numpy.float64) ft = numpy.indices(data.shape, dtype=numpy.int32) ndimage.distance_transform_edt( data, distances=dt, return_indices=True, indices=ft) dts.append(dt) fts.append(ft) for dt in dts: assert_array_almost_equal(tdt, dt) for ft in fts: assert_array_almost_equal(tft, ft) def test_distance_transform_edt03(self): for type_ in self.types: data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0]], type_) ref = ndimage.distance_transform_bf(data, 'euclidean', sampling=[2, 2]) out = ndimage.distance_transform_edt(data, sampling=[2, 2]) assert_array_almost_equal(ref, out) def test_distance_transform_edt4(self): for type_ in self.types: data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0]], type_) ref = 
ndimage.distance_transform_bf(data, 'euclidean', sampling=[2, 1]) out = ndimage.distance_transform_edt(data, sampling=[2, 1]) assert_array_almost_equal(ref, out) def test_distance_transform_edt5(self): # Ticket #954 regression test out = ndimage.distance_transform_edt(False) assert_array_almost_equal(out, [0.]) def test_generate_structure01(self): struct = ndimage.generate_binary_structure(0, 1) assert_array_almost_equal(struct, 1) def test_generate_structure02(self): struct = ndimage.generate_binary_structure(1, 1) assert_array_almost_equal(struct, [1, 1, 1]) def test_generate_structure03(self): struct = ndimage.generate_binary_structure(2, 1) assert_array_almost_equal(struct, [[0, 1, 0], [1, 1, 1], [0, 1, 0]]) def test_generate_structure04(self): struct = ndimage.generate_binary_structure(2, 2) assert_array_almost_equal(struct, [[1, 1, 1], [1, 1, 1], [1, 1, 1]]) def test_iterate_structure01(self): struct = [[0, 1, 0], [1, 1, 1], [0, 1, 0]] out = ndimage.iterate_structure(struct, 2) assert_array_almost_equal(out, [[0, 0, 1, 0, 0], [0, 1, 1, 1, 0], [1, 1, 1, 1, 1], [0, 1, 1, 1, 0], [0, 0, 1, 0, 0]]) def test_iterate_structure02(self): struct = [[0, 1], [1, 1], [0, 1]] out = ndimage.iterate_structure(struct, 2) assert_array_almost_equal(out, [[0, 0, 1], [0, 1, 1], [1, 1, 1], [0, 1, 1], [0, 0, 1]]) def test_iterate_structure03(self): struct = [[0, 1, 0], [1, 1, 1], [0, 1, 0]] out = ndimage.iterate_structure(struct, 2, 1) expected = [[0, 0, 1, 0, 0], [0, 1, 1, 1, 0], [1, 1, 1, 1, 1], [0, 1, 1, 1, 0], [0, 0, 1, 0, 0]] assert_array_almost_equal(out[0], expected) assert_equal(out[1], [2, 2]) def test_binary_erosion01(self): for type_ in self.types: data = numpy.ones([], type_) out = ndimage.binary_erosion(data) assert_array_almost_equal(out, 1) def test_binary_erosion02(self): for type_ in self.types: data = numpy.ones([], type_) out = ndimage.binary_erosion(data, border_value=1) assert_array_almost_equal(out, 1) def test_binary_erosion03(self): for type_ in self.types: 
data = numpy.ones([1], type_) out = ndimage.binary_erosion(data) assert_array_almost_equal(out, [0]) def test_binary_erosion04(self): for type_ in self.types: data = numpy.ones([1], type_) out = ndimage.binary_erosion(data, border_value=1) assert_array_almost_equal(out, [1]) def test_binary_erosion05(self): for type_ in self.types: data = numpy.ones([3], type_) out = ndimage.binary_erosion(data) assert_array_almost_equal(out, [0, 1, 0]) def test_binary_erosion06(self): for type_ in self.types: data = numpy.ones([3], type_) out = ndimage.binary_erosion(data, border_value=1) assert_array_almost_equal(out, [1, 1, 1]) def test_binary_erosion07(self): for type_ in self.types: data = numpy.ones([5], type_) out = ndimage.binary_erosion(data) assert_array_almost_equal(out, [0, 1, 1, 1, 0]) def test_binary_erosion08(self): for type_ in self.types: data = numpy.ones([5], type_) out = ndimage.binary_erosion(data, border_value=1) assert_array_almost_equal(out, [1, 1, 1, 1, 1]) def test_binary_erosion09(self): for type_ in self.types: data = numpy.ones([5], type_) data[2] = 0 out = ndimage.binary_erosion(data) assert_array_almost_equal(out, [0, 0, 0, 0, 0]) def test_binary_erosion10(self): for type_ in self.types: data = numpy.ones([5], type_) data[2] = 0 out = ndimage.binary_erosion(data, border_value=1) assert_array_almost_equal(out, [1, 0, 0, 0, 1]) def test_binary_erosion11(self): for type_ in self.types: data = numpy.ones([5], type_) data[2] = 0 struct = [1, 0, 1] out = ndimage.binary_erosion(data, struct, border_value=1) assert_array_almost_equal(out, [1, 0, 1, 0, 1]) def test_binary_erosion12(self): for type_ in self.types: data = numpy.ones([5], type_) data[2] = 0 struct = [1, 0, 1] out = ndimage.binary_erosion(data, struct, border_value=1, origin=-1) assert_array_almost_equal(out, [0, 1, 0, 1, 1]) def test_binary_erosion13(self): for type_ in self.types: data = numpy.ones([5], type_) data[2] = 0 struct = [1, 0, 1] out = ndimage.binary_erosion(data, struct, 
border_value=1, origin=1) assert_array_almost_equal(out, [1, 1, 0, 1, 0]) def test_binary_erosion14(self): for type_ in self.types: data = numpy.ones([5], type_) data[2] = 0 struct = [1, 1] out = ndimage.binary_erosion(data, struct, border_value=1) assert_array_almost_equal(out, [1, 1, 0, 0, 1]) def test_binary_erosion15(self): for type_ in self.types: data = numpy.ones([5], type_) data[2] = 0 struct = [1, 1] out = ndimage.binary_erosion(data, struct, border_value=1, origin=-1) assert_array_almost_equal(out, [1, 0, 0, 1, 1]) def test_binary_erosion16(self): for type_ in self.types: data = numpy.ones([1, 1], type_) out = ndimage.binary_erosion(data, border_value=1) assert_array_almost_equal(out, [[1]]) def test_binary_erosion17(self): for type_ in self.types: data = numpy.ones([1, 1], type_) out = ndimage.binary_erosion(data) assert_array_almost_equal(out, [[0]]) def test_binary_erosion18(self): for type_ in self.types: data = numpy.ones([1, 3], type_) out = ndimage.binary_erosion(data) assert_array_almost_equal(out, [[0, 0, 0]]) def test_binary_erosion19(self): for type_ in self.types: data = numpy.ones([1, 3], type_) out = ndimage.binary_erosion(data, border_value=1) assert_array_almost_equal(out, [[1, 1, 1]]) def test_binary_erosion20(self): for type_ in self.types: data = numpy.ones([3, 3], type_) out = ndimage.binary_erosion(data) assert_array_almost_equal(out, [[0, 0, 0], [0, 1, 0], [0, 0, 0]]) def test_binary_erosion21(self): for type_ in self.types: data = numpy.ones([3, 3], type_) out = ndimage.binary_erosion(data, border_value=1) assert_array_almost_equal(out, [[1, 1, 1], [1, 1, 1], [1, 1, 1]]) def test_binary_erosion22(self): expected = [[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 1, 0, 0, 0], [0, 0, 1, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]] for type_ in self.types: data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 
1, 1], [0, 0, 1, 1, 1, 1, 1, 1], [0, 0, 1, 1, 1, 1, 0, 0], [0, 1, 1, 1, 1, 1, 1, 0], [0, 1, 1, 0, 0, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0]], type_) out = ndimage.binary_erosion(data, border_value=1) assert_array_almost_equal(out, expected) def test_binary_erosion23(self): struct = ndimage.generate_binary_structure(2, 2) expected = [[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]] for type_ in self.types: data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 1], [0, 0, 1, 1, 1, 1, 1, 1], [0, 0, 1, 1, 1, 1, 0, 0], [0, 1, 1, 1, 1, 1, 1, 0], [0, 1, 1, 0, 0, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0]], type_) out = ndimage.binary_erosion(data, struct, border_value=1) assert_array_almost_equal(out, expected) def test_binary_erosion24(self): struct = [[0, 1], [1, 1]] expected = [[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 1], [0, 0, 0, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 0, 0], [0, 0, 1, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0]] for type_ in self.types: data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 1], [0, 0, 1, 1, 1, 1, 1, 1], [0, 0, 1, 1, 1, 1, 0, 0], [0, 1, 1, 1, 1, 1, 1, 0], [0, 1, 1, 0, 0, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0]], type_) out = ndimage.binary_erosion(data, struct, border_value=1) assert_array_almost_equal(out, expected) def test_binary_erosion25(self): struct = [[0, 1, 0], [1, 0, 1], [0, 1, 0]] expected = [[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]] for type_ in self.types: data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 1], [0, 0, 1, 1, 1, 0, 1, 1], [0, 0, 1, 0, 
1, 1, 0, 0], [0, 1, 0, 1, 1, 1, 1, 0], [0, 1, 1, 0, 0, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0]], type_) out = ndimage.binary_erosion(data, struct, border_value=1) assert_array_almost_equal(out, expected) def test_binary_erosion26(self): struct = [[0, 1, 0], [1, 0, 1], [0, 1, 0]] expected = [[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 1, 0, 0, 1], [0, 0, 1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1]] for type_ in self.types: data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 1], [0, 0, 1, 1, 1, 0, 1, 1], [0, 0, 1, 0, 1, 1, 0, 0], [0, 1, 0, 1, 1, 1, 1, 0], [0, 1, 1, 0, 0, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0]], type_) out = ndimage.binary_erosion(data, struct, border_value=1, origin=(-1, -1)) assert_array_almost_equal(out, expected) def test_binary_erosion27(self): struct = [[0, 1, 0], [1, 1, 1], [0, 1, 0]] expected = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]] data = numpy.array([[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 1, 1, 1, 1, 1, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]], bool) out = ndimage.binary_erosion(data, struct, border_value=1, iterations=2) assert_array_almost_equal(out, expected) def test_binary_erosion28(self): struct = [[0, 1, 0], [1, 1, 1], [0, 1, 0]] expected = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]] data = numpy.array([[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 1, 1, 1, 1, 1, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]], bool) out = numpy.zeros(data.shape, bool) ndimage.binary_erosion(data, struct, border_value=1, iterations=2, output=out) 
assert_array_almost_equal(out, expected) def test_binary_erosion29(self): struct = [[0, 1, 0], [1, 1, 1], [0, 1, 0]] expected = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]] data = numpy.array([[0, 0, 0, 1, 0, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1, 1], [0, 1, 1, 1, 1, 1, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 0, 1, 0, 0, 0]], bool) out = ndimage.binary_erosion(data, struct, border_value=1, iterations=3) assert_array_almost_equal(out, expected) def test_binary_erosion30(self): struct = [[0, 1, 0], [1, 1, 1], [0, 1, 0]] expected = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]] data = numpy.array([[0, 0, 0, 1, 0, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1, 1], [0, 1, 1, 1, 1, 1, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 0, 1, 0, 0, 0]], bool) out = numpy.zeros(data.shape, bool) ndimage.binary_erosion(data, struct, border_value=1, iterations=3, output=out) assert_array_almost_equal(out, expected) def test_binary_erosion31(self): struct = [[0, 1, 0], [1, 1, 1], [0, 1, 0]] expected = [[0, 0, 1, 0, 0, 0, 0], [0, 1, 1, 1, 0, 0, 0], [1, 1, 1, 1, 1, 0, 1], [0, 1, 1, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 1]] data = numpy.array([[0, 0, 0, 1, 0, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1, 1], [0, 1, 1, 1, 1, 1, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 0, 1, 0, 0, 0]], bool) out = numpy.zeros(data.shape, bool) ndimage.binary_erosion(data, struct, border_value=1, iterations=1, output=out, origin=(-1, -1)) assert_array_almost_equal(out, expected) def test_binary_erosion32(self): struct = [[0, 1, 0], [1, 1, 1], [0, 1, 0]] expected = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], 
[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]] data = numpy.array([[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 1, 1, 1, 1, 1, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]], bool) out = ndimage.binary_erosion(data, struct, border_value=1, iterations=2) assert_array_almost_equal(out, expected) def test_binary_erosion33(self): struct = [[0, 1, 0], [1, 1, 1], [0, 1, 0]] expected = [[0, 0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]] mask = [[1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1]] data = numpy.array([[0, 0, 0, 0, 0, 1, 1], [0, 0, 0, 1, 0, 0, 1], [0, 0, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]], bool) out = ndimage.binary_erosion(data, struct, border_value=1, mask=mask, iterations=-1) assert_array_almost_equal(out, expected) def test_binary_erosion34(self): struct = [[0, 1, 0], [1, 1, 1], [0, 1, 0]] expected = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 1, 1, 1, 1, 1, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]] mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]] data = numpy.array([[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 1, 1, 1, 1, 1, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]], bool) out = ndimage.binary_erosion(data, struct, border_value=1, mask=mask) assert_array_almost_equal(out, expected) def test_binary_erosion35(self): struct = [[0, 1, 0], [1, 1, 1], [0, 1, 0]] mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 
0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]] data = numpy.array([[0, 0, 0, 1, 0, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1, 1], [0, 1, 1, 1, 1, 1, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 0, 1, 0, 0, 0]], bool) tmp = [[0, 0, 1, 0, 0, 0, 0], [0, 1, 1, 1, 0, 0, 0], [1, 1, 1, 1, 1, 0, 1], [0, 1, 1, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 1]] expected = numpy.logical_and(tmp, mask) tmp = numpy.logical_and(data, numpy.logical_not(mask)) expected = numpy.logical_or(expected, tmp) out = numpy.zeros(data.shape, bool) ndimage.binary_erosion(data, struct, border_value=1, iterations=1, output=out, origin=(-1, -1), mask=mask) assert_array_almost_equal(out, expected) def test_binary_erosion36(self): struct = [[0, 1, 0], [1, 0, 1], [0, 1, 0]] mask = [[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 0, 0, 0], [0, 0, 1, 0, 1, 0, 0, 0], [0, 0, 1, 1, 1, 0, 0, 0], [0, 0, 1, 1, 1, 0, 0, 0], [0, 0, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]] tmp = [[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 1, 0, 0, 1], [0, 0, 1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1]] data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 1], [0, 0, 1, 1, 1, 0, 1, 1], [0, 0, 1, 0, 1, 1, 0, 0], [0, 1, 0, 1, 1, 1, 1, 0], [0, 1, 1, 0, 0, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0]]) expected = numpy.logical_and(tmp, mask) tmp = numpy.logical_and(data, numpy.logical_not(mask)) expected = numpy.logical_or(expected, tmp) out = ndimage.binary_erosion(data, struct, mask=mask, border_value=1, origin=(-1, -1)) assert_array_almost_equal(out, expected) def test_binary_erosion37(self): a = numpy.array([[1, 0, 1], [0, 1, 0], [1, 0, 1]], dtype=bool) b = numpy.zeros_like(a) out = ndimage.binary_erosion(a, structure=a, output=b, iterations=0, border_value=True, brute_force=True) assert_(out is b) assert_array_equal( 
ndimage.binary_erosion(a, structure=a, iterations=0, border_value=True), b) def test_binary_dilation01(self): for type_ in self.types: data = numpy.ones([], type_) out = ndimage.binary_dilation(data) assert_array_almost_equal(out, 1) def test_binary_dilation02(self): for type_ in self.types: data = numpy.zeros([], type_) out = ndimage.binary_dilation(data) assert_array_almost_equal(out, 0) def test_binary_dilation03(self): for type_ in self.types: data = numpy.ones([1], type_) out = ndimage.binary_dilation(data) assert_array_almost_equal(out, [1]) def test_binary_dilation04(self): for type_ in self.types: data = numpy.zeros([1], type_) out = ndimage.binary_dilation(data) assert_array_almost_equal(out, [0]) def test_binary_dilation05(self): for type_ in self.types: data = numpy.ones([3], type_) out = ndimage.binary_dilation(data) assert_array_almost_equal(out, [1, 1, 1]) def test_binary_dilation06(self): for type_ in self.types: data = numpy.zeros([3], type_) out = ndimage.binary_dilation(data) assert_array_almost_equal(out, [0, 0, 0]) def test_binary_dilation07(self): for type_ in self.types: data = numpy.zeros([3], type_) data[1] = 1 out = ndimage.binary_dilation(data) assert_array_almost_equal(out, [1, 1, 1]) def test_binary_dilation08(self): for type_ in self.types: data = numpy.zeros([5], type_) data[1] = 1 data[3] = 1 out = ndimage.binary_dilation(data) assert_array_almost_equal(out, [1, 1, 1, 1, 1]) def test_binary_dilation09(self): for type_ in self.types: data = numpy.zeros([5], type_) data[1] = 1 out = ndimage.binary_dilation(data) assert_array_almost_equal(out, [1, 1, 1, 0, 0]) def test_binary_dilation10(self): for type_ in self.types: data = numpy.zeros([5], type_) data[1] = 1 out = ndimage.binary_dilation(data, origin=-1) assert_array_almost_equal(out, [0, 1, 1, 1, 0]) def test_binary_dilation11(self): for type_ in self.types: data = numpy.zeros([5], type_) data[1] = 1 out = ndimage.binary_dilation(data, origin=1) assert_array_almost_equal(out, [1, 1, 
0, 0, 0]) def test_binary_dilation12(self): for type_ in self.types: data = numpy.zeros([5], type_) data[1] = 1 struct = [1, 0, 1] out = ndimage.binary_dilation(data, struct) assert_array_almost_equal(out, [1, 0, 1, 0, 0]) def test_binary_dilation13(self): for type_ in self.types: data = numpy.zeros([5], type_) data[1] = 1 struct = [1, 0, 1] out = ndimage.binary_dilation(data, struct, border_value=1) assert_array_almost_equal(out, [1, 0, 1, 0, 1]) def test_binary_dilation14(self): for type_ in self.types: data = numpy.zeros([5], type_) data[1] = 1 struct = [1, 0, 1] out = ndimage.binary_dilation(data, struct, origin=-1) assert_array_almost_equal(out, [0, 1, 0, 1, 0]) def test_binary_dilation15(self): for type_ in self.types: data = numpy.zeros([5], type_) data[1] = 1 struct = [1, 0, 1] out = ndimage.binary_dilation(data, struct, origin=-1, border_value=1) assert_array_almost_equal(out, [1, 1, 0, 1, 0]) def test_binary_dilation16(self): for type_ in self.types: data = numpy.ones([1, 1], type_) out = ndimage.binary_dilation(data) assert_array_almost_equal(out, [[1]]) def test_binary_dilation17(self): for type_ in self.types: data = numpy.zeros([1, 1], type_) out = ndimage.binary_dilation(data) assert_array_almost_equal(out, [[0]]) def test_binary_dilation18(self): for type_ in self.types: data = numpy.ones([1, 3], type_) out = ndimage.binary_dilation(data) assert_array_almost_equal(out, [[1, 1, 1]]) def test_binary_dilation19(self): for type_ in self.types: data = numpy.ones([3, 3], type_) out = ndimage.binary_dilation(data) assert_array_almost_equal(out, [[1, 1, 1], [1, 1, 1], [1, 1, 1]]) def test_binary_dilation20(self): for type_ in self.types: data = numpy.zeros([3, 3], type_) data[1, 1] = 1 out = ndimage.binary_dilation(data) assert_array_almost_equal(out, [[0, 1, 0], [1, 1, 1], [0, 1, 0]]) def test_binary_dilation21(self): struct = ndimage.generate_binary_structure(2, 2) for type_ in self.types: data = numpy.zeros([3, 3], type_) data[1, 1] = 1 out = 
ndimage.binary_dilation(data, struct) assert_array_almost_equal(out, [[1, 1, 1], [1, 1, 1], [1, 1, 1]]) def test_binary_dilation22(self): expected = [[0, 1, 0, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 1, 1, 1, 0], [0, 0, 1, 1, 1, 1, 0, 0], [0, 1, 1, 1, 1, 1, 1, 0], [0, 0, 1, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]] for type_ in self.types: data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 1, 0, 0, 0], [0, 0, 1, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]], type_) out = ndimage.binary_dilation(data) assert_array_almost_equal(out, expected) def test_binary_dilation23(self): expected = [[1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0, 0, 1], [1, 1, 0, 0, 0, 1, 0, 1], [1, 0, 0, 1, 1, 1, 1, 1], [1, 0, 1, 1, 1, 1, 0, 1], [1, 1, 1, 1, 1, 1, 1, 1], [1, 0, 1, 0, 0, 1, 0, 1], [1, 1, 1, 1, 1, 1, 1, 1]] for type_ in self.types: data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 1, 0, 0, 0], [0, 0, 1, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]], type_) out = ndimage.binary_dilation(data, border_value=1) assert_array_almost_equal(out, expected) def test_binary_dilation24(self): expected = [[1, 1, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 1, 0, 0, 0], [0, 0, 1, 1, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0, 0, 0], [1, 1, 1, 1, 1, 1, 0, 0], [0, 1, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]] for type_ in self.types: data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 1, 0, 0, 0], [0, 0, 1, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]], type_) out = ndimage.binary_dilation(data, origin=(1, 1)) assert_array_almost_equal(out, expected) def test_binary_dilation25(self): expected = [[1, 1, 0, 0, 0, 0, 1, 1], [1, 0, 0, 0, 1, 
0, 1, 1], [0, 0, 1, 1, 1, 1, 1, 1], [0, 1, 1, 1, 1, 0, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1], [0, 1, 0, 0, 1, 0, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1]] for type_ in self.types: data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 1, 0, 0, 0], [0, 0, 1, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]], type_) out = ndimage.binary_dilation(data, origin=(1, 1), border_value=1) assert_array_almost_equal(out, expected) def test_binary_dilation26(self): struct = ndimage.generate_binary_structure(2, 2) expected = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 1, 1, 1, 0], [0, 0, 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0]] for type_ in self.types: data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 1, 0, 0, 0], [0, 0, 1, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]], type_) out = ndimage.binary_dilation(data, struct) assert_array_almost_equal(out, expected) def test_binary_dilation27(self): struct = [[0, 1], [1, 1]] expected = [[0, 1, 0, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 0, 0], [0, 1, 1, 0, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]] for type_ in self.types: data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 1, 0, 0, 0], [0, 0, 1, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]], type_) out = ndimage.binary_dilation(data, struct) assert_array_almost_equal(out, expected) def test_binary_dilation28(self): expected = [[1, 1, 1, 1], [1, 0, 0, 1], [1, 0, 0, 1], [1, 1, 1, 1]] for type_ in self.types: data = numpy.array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], 
[0, 0, 0, 0]], type_) out = ndimage.binary_dilation(data, border_value=1) assert_array_almost_equal(out, expected) def test_binary_dilation29(self): struct = [[0, 1], [1, 1]] expected = [[0, 0, 0, 0, 0], [0, 0, 0, 1, 0], [0, 0, 1, 1, 0], [0, 1, 1, 1, 0], [0, 0, 0, 0, 0]] data = numpy.array([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 0]], bool) out = ndimage.binary_dilation(data, struct, iterations=2) assert_array_almost_equal(out, expected) def test_binary_dilation30(self): struct = [[0, 1], [1, 1]] expected = [[0, 0, 0, 0, 0], [0, 0, 0, 1, 0], [0, 0, 1, 1, 0], [0, 1, 1, 1, 0], [0, 0, 0, 0, 0]] data = numpy.array([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 0]], bool) out = numpy.zeros(data.shape, bool) ndimage.binary_dilation(data, struct, iterations=2, output=out) assert_array_almost_equal(out, expected) def test_binary_dilation31(self): struct = [[0, 1], [1, 1]] expected = [[0, 0, 0, 1, 0], [0, 0, 1, 1, 0], [0, 1, 1, 1, 0], [1, 1, 1, 1, 0], [0, 0, 0, 0, 0]] data = numpy.array([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 0]], bool) out = ndimage.binary_dilation(data, struct, iterations=3) assert_array_almost_equal(out, expected) def test_binary_dilation32(self): struct = [[0, 1], [1, 1]] expected = [[0, 0, 0, 1, 0], [0, 0, 1, 1, 0], [0, 1, 1, 1, 0], [1, 1, 1, 1, 0], [0, 0, 0, 0, 0]] data = numpy.array([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 0]], bool) out = numpy.zeros(data.shape, bool) ndimage.binary_dilation(data, struct, iterations=3, output=out) assert_array_almost_equal(out, expected) def test_binary_dilation33(self): struct = [[0, 1, 0], [1, 1, 1], [0, 1, 0]] expected = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0, 0], [0, 1, 1, 0, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]], bool) mask = 
numpy.array([[0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0, 0], [0, 1, 1, 0, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]], bool) data = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]], bool) out = ndimage.binary_dilation(data, struct, iterations=-1, mask=mask, border_value=0) assert_array_almost_equal(out, expected) def test_binary_dilation34(self): struct = [[0, 1, 0], [1, 1, 1], [0, 1, 0]] expected = [[0, 1, 0, 0, 0, 0, 0, 0], [0, 1, 1, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]] mask = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0], [0, 1, 1, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 1, 0, 0, 0], [0, 0, 1, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]], bool) data = numpy.zeros(mask.shape, bool) out = ndimage.binary_dilation(data, struct, iterations=-1, mask=mask, border_value=1) assert_array_almost_equal(out, expected) def test_binary_dilation35(self): tmp = [[1, 1, 0, 0, 0, 0, 1, 1], [1, 0, 0, 0, 1, 0, 1, 1], [0, 0, 1, 1, 1, 1, 1, 1], [0, 1, 1, 1, 1, 0, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1], [0, 1, 0, 0, 1, 0, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1]] data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 1, 0, 0, 0], [0, 0, 1, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]]) mask = [[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]] expected = 
numpy.logical_and(tmp, mask) tmp = numpy.logical_and(data, numpy.logical_not(mask)) expected = numpy.logical_or(expected, tmp) for type_ in self.types: data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 1, 0, 0, 0], [0, 0, 1, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]], type_) out = ndimage.binary_dilation(data, mask=mask, origin=(1, 1), border_value=1) assert_array_almost_equal(out, expected) def test_binary_propagation01(self): struct = [[0, 1, 0], [1, 1, 1], [0, 1, 0]] expected = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0, 0], [0, 1, 1, 0, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]], bool) mask = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0, 0], [0, 1, 1, 0, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]], bool) data = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]], bool) out = ndimage.binary_propagation(data, struct, mask=mask, border_value=0) assert_array_almost_equal(out, expected) def test_binary_propagation02(self): struct = [[0, 1, 0], [1, 1, 1], [0, 1, 0]] expected = [[0, 1, 0, 0, 0, 0, 0, 0], [0, 1, 1, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]] mask = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0], [0, 1, 1, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 1, 0, 0, 0], [0, 0, 1, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]], bool) data = numpy.zeros(mask.shape, bool) out = 
ndimage.binary_propagation(data, struct, mask=mask, border_value=1) assert_array_almost_equal(out, expected) def test_binary_opening01(self): expected = [[0, 1, 0, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1, 1, 1, 0], [0, 0, 1, 0, 0, 1, 0, 0], [0, 1, 1, 1, 1, 1, 1, 0], [0, 0, 1, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]] for type_ in self.types: data = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 1, 1, 1, 0], [0, 0, 1, 1, 0, 1, 0, 0], [0, 1, 1, 1, 1, 1, 1, 0], [0, 0, 1, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]], type_) out = ndimage.binary_opening(data) assert_array_almost_equal(out, expected) def test_binary_opening02(self): struct = ndimage.generate_binary_structure(2, 2) expected = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 0, 0, 0, 0], [0, 1, 1, 1, 0, 0, 0, 0], [0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]] for type_ in self.types: data = numpy.array([[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 0], [0, 0, 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 0, 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0]], type_) out = ndimage.binary_opening(data, struct) assert_array_almost_equal(out, expected) def test_binary_closing01(self): expected = [[0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 0, 0, 0, 0, 0], [0, 1, 1, 1, 0, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0], [0, 0, 1, 1, 1, 1, 0, 0], [0, 1, 1, 1, 1, 1, 1, 0], [0, 0, 1, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]] for type_ in self.types: data = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 1, 1, 1, 0], [0, 0, 1, 1, 0, 1, 0, 0], [0, 1, 1, 1, 1, 1, 1, 0], [0, 0, 1, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]], type_) out = ndimage.binary_closing(data) assert_array_almost_equal(out, expected) def test_binary_closing02(self): struct = 
ndimage.generate_binary_structure(2, 2) expected = [[0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0]] for type_ in self.types: data = numpy.array([[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 0], [0, 0, 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 0, 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0]], type_) out = ndimage.binary_closing(data, struct) assert_array_almost_equal(out, expected) def test_binary_fill_holes01(self): expected = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]], bool) data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 0, 0], [0, 0, 1, 0, 0, 1, 0, 0], [0, 0, 1, 0, 0, 1, 0, 0], [0, 0, 1, 0, 0, 1, 0, 0], [0, 0, 1, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]], bool) out = ndimage.binary_fill_holes(data) assert_array_almost_equal(out, expected) def test_binary_fill_holes02(self): expected = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 0, 0, 0], [0, 0, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 0, 0], [0, 0, 0, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]], bool) data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 0, 0, 0], [0, 0, 1, 0, 0, 1, 0, 0], [0, 0, 1, 0, 0, 1, 0, 0], [0, 0, 1, 0, 0, 1, 0, 0], [0, 0, 0, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]], bool) out = ndimage.binary_fill_holes(data) assert_array_almost_equal(out, expected) def test_binary_fill_holes03(self): expected = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [0, 1, 1, 1, 0, 1, 1, 1], [0, 1, 1, 1, 0, 1, 1, 1], [0, 1, 1, 1, 0, 1, 1, 1], [0, 0, 1, 0, 0, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0]], bool) data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], 
[0, 1, 0, 1, 0, 1, 1, 1], [0, 1, 0, 1, 0, 1, 0, 1], [0, 1, 0, 1, 0, 1, 0, 1], [0, 0, 1, 0, 0, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0]], bool) out = ndimage.binary_fill_holes(data) assert_array_almost_equal(out, expected) def test_grey_erosion01(self): array = numpy.array([[3, 2, 5, 1, 4], [7, 6, 9, 3, 5], [5, 8, 3, 7, 1]]) footprint = [[1, 0, 1], [1, 1, 0]] output = ndimage.grey_erosion(array, footprint=footprint) assert_array_almost_equal([[2, 2, 1, 1, 1], [2, 3, 1, 3, 1], [5, 5, 3, 3, 1]], output) def test_grey_erosion02(self): array = numpy.array([[3, 2, 5, 1, 4], [7, 6, 9, 3, 5], [5, 8, 3, 7, 1]]) footprint = [[1, 0, 1], [1, 1, 0]] structure = [[0, 0, 0], [0, 0, 0]] output = ndimage.grey_erosion(array, footprint=footprint, structure=structure) assert_array_almost_equal([[2, 2, 1, 1, 1], [2, 3, 1, 3, 1], [5, 5, 3, 3, 1]], output) def test_grey_erosion03(self): array = numpy.array([[3, 2, 5, 1, 4], [7, 6, 9, 3, 5], [5, 8, 3, 7, 1]]) footprint = [[1, 0, 1], [1, 1, 0]] structure = [[1, 1, 1], [1, 1, 1]] output = ndimage.grey_erosion(array, footprint=footprint, structure=structure) assert_array_almost_equal([[1, 1, 0, 0, 0], [1, 2, 0, 2, 0], [4, 4, 2, 2, 0]], output) def test_grey_dilation01(self): array = numpy.array([[3, 2, 5, 1, 4], [7, 6, 9, 3, 5], [5, 8, 3, 7, 1]]) footprint = [[0, 1, 1], [1, 0, 1]] output = ndimage.grey_dilation(array, footprint=footprint) assert_array_almost_equal([[7, 7, 9, 9, 5], [7, 9, 8, 9, 7], [8, 8, 8, 7, 7]], output) def test_grey_dilation02(self): array = numpy.array([[3, 2, 5, 1, 4], [7, 6, 9, 3, 5], [5, 8, 3, 7, 1]]) footprint = [[0, 1, 1], [1, 0, 1]] structure = [[0, 0, 0], [0, 0, 0]] output = ndimage.grey_dilation(array, footprint=footprint, structure=structure) assert_array_almost_equal([[7, 7, 9, 9, 5], [7, 9, 8, 9, 7], [8, 8, 8, 7, 7]], output) def test_grey_dilation03(self): array = numpy.array([[3, 2, 5, 1, 4], [7, 6, 9, 3, 5], [5, 8, 3, 7, 1]]) footprint = [[0, 1, 1], [1, 0, 1]] structure = [[1, 1, 1], [1, 1, 1]] output = 
ndimage.grey_dilation(array, footprint=footprint, structure=structure) assert_array_almost_equal([[8, 8, 10, 10, 6], [8, 10, 9, 10, 8], [9, 9, 9, 8, 8]], output) def test_grey_opening01(self): array = numpy.array([[3, 2, 5, 1, 4], [7, 6, 9, 3, 5], [5, 8, 3, 7, 1]]) footprint = [[1, 0, 1], [1, 1, 0]] tmp = ndimage.grey_erosion(array, footprint=footprint) expected = ndimage.grey_dilation(tmp, footprint=footprint) output = ndimage.grey_opening(array, footprint=footprint) assert_array_almost_equal(expected, output) def test_grey_opening02(self): array = numpy.array([[3, 2, 5, 1, 4], [7, 6, 9, 3, 5], [5, 8, 3, 7, 1]]) footprint = [[1, 0, 1], [1, 1, 0]] structure = [[0, 0, 0], [0, 0, 0]] tmp = ndimage.grey_erosion(array, footprint=footprint, structure=structure) expected = ndimage.grey_dilation(tmp, footprint=footprint, structure=structure) output = ndimage.grey_opening(array, footprint=footprint, structure=structure) assert_array_almost_equal(expected, output) def test_grey_closing01(self): array = numpy.array([[3, 2, 5, 1, 4], [7, 6, 9, 3, 5], [5, 8, 3, 7, 1]]) footprint = [[1, 0, 1], [1, 1, 0]] tmp = ndimage.grey_dilation(array, footprint=footprint) expected = ndimage.grey_erosion(tmp, footprint=footprint) output = ndimage.grey_closing(array, footprint=footprint) assert_array_almost_equal(expected, output) def test_grey_closing02(self): array = numpy.array([[3, 2, 5, 1, 4], [7, 6, 9, 3, 5], [5, 8, 3, 7, 1]]) footprint = [[1, 0, 1], [1, 1, 0]] structure = [[0, 0, 0], [0, 0, 0]] tmp = ndimage.grey_dilation(array, footprint=footprint, structure=structure) expected = ndimage.grey_erosion(tmp, footprint=footprint, structure=structure) output = ndimage.grey_closing(array, footprint=footprint, structure=structure) assert_array_almost_equal(expected, output) def test_morphological_gradient01(self): array = numpy.array([[3, 2, 5, 1, 4], [7, 6, 9, 3, 5], [5, 8, 3, 7, 1]]) footprint = [[1, 0, 1], [1, 1, 0]] structure = [[0, 0, 0], [0, 0, 0]] tmp1 = ndimage.grey_dilation(array, 
footprint=footprint, structure=structure) tmp2 = ndimage.grey_erosion(array, footprint=footprint, structure=structure) expected = tmp1 - tmp2 output = numpy.zeros(array.shape, array.dtype) ndimage.morphological_gradient(array, footprint=footprint, structure=structure, output=output) assert_array_almost_equal(expected, output) def test_morphological_gradient02(self): array = numpy.array([[3, 2, 5, 1, 4], [7, 6, 9, 3, 5], [5, 8, 3, 7, 1]]) footprint = [[1, 0, 1], [1, 1, 0]] structure = [[0, 0, 0], [0, 0, 0]] tmp1 = ndimage.grey_dilation(array, footprint=footprint, structure=structure) tmp2 = ndimage.grey_erosion(array, footprint=footprint, structure=structure) expected = tmp1 - tmp2 output = ndimage.morphological_gradient(array, footprint=footprint, structure=structure) assert_array_almost_equal(expected, output) def test_morphological_laplace01(self): array = numpy.array([[3, 2, 5, 1, 4], [7, 6, 9, 3, 5], [5, 8, 3, 7, 1]]) footprint = [[1, 0, 1], [1, 1, 0]] structure = [[0, 0, 0], [0, 0, 0]] tmp1 = ndimage.grey_dilation(array, footprint=footprint, structure=structure) tmp2 = ndimage.grey_erosion(array, footprint=footprint, structure=structure) expected = tmp1 + tmp2 - 2 * array output = numpy.zeros(array.shape, array.dtype) ndimage.morphological_laplace(array, footprint=footprint, structure=structure, output=output) assert_array_almost_equal(expected, output) def test_morphological_laplace02(self): array = numpy.array([[3, 2, 5, 1, 4], [7, 6, 9, 3, 5], [5, 8, 3, 7, 1]]) footprint = [[1, 0, 1], [1, 1, 0]] structure = [[0, 0, 0], [0, 0, 0]] tmp1 = ndimage.grey_dilation(array, footprint=footprint, structure=structure) tmp2 = ndimage.grey_erosion(array, footprint=footprint, structure=structure) expected = tmp1 + tmp2 - 2 * array output = ndimage.morphological_laplace(array, footprint=footprint, structure=structure) assert_array_almost_equal(expected, output) def test_white_tophat01(self): array = numpy.array([[3, 2, 5, 1, 4], [7, 6, 9, 3, 5], [5, 8, 3, 7, 1]]) footprint 
= [[1, 0, 1], [1, 1, 0]] structure = [[0, 0, 0], [0, 0, 0]] tmp = ndimage.grey_opening(array, footprint=footprint, structure=structure) expected = array - tmp output = numpy.zeros(array.shape, array.dtype) ndimage.white_tophat(array, footprint=footprint, structure=structure, output=output) assert_array_almost_equal(expected, output) def test_white_tophat02(self): array = numpy.array([[3, 2, 5, 1, 4], [7, 6, 9, 3, 5], [5, 8, 3, 7, 1]]) footprint = [[1, 0, 1], [1, 1, 0]] structure = [[0, 0, 0], [0, 0, 0]] tmp = ndimage.grey_opening(array, footprint=footprint, structure=structure) expected = array - tmp output = ndimage.white_tophat(array, footprint=footprint, structure=structure) assert_array_almost_equal(expected, output) def test_white_tophat03(self): array = numpy.array([[1, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 0, 1, 0], [0, 1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0, 1]], dtype=numpy.bool_) structure = numpy.ones((3, 3), dtype=numpy.bool_) expected = numpy.array([[0, 1, 1, 0, 0, 0, 0], [1, 0, 0, 1, 1, 1, 0], [1, 0, 0, 1, 1, 1, 0], [0, 1, 1, 0, 0, 0, 1], [0, 1, 1, 0, 1, 0, 1], [0, 1, 1, 0, 0, 0, 1], [0, 0, 0, 1, 1, 1, 1]], dtype=numpy.bool_) output = ndimage.white_tophat(array, structure=structure) assert_array_equal(expected, output) def test_white_tophat04(self): array = numpy.eye(5, dtype=numpy.bool_) structure = numpy.ones((3, 3), dtype=numpy.bool_) # Check that type missmatch is properly handled output = numpy.empty_like(array, dtype=numpy.float) ndimage.white_tophat(array, structure=structure, output=output) def test_black_tophat01(self): array = numpy.array([[3, 2, 5, 1, 4], [7, 6, 9, 3, 5], [5, 8, 3, 7, 1]]) footprint = [[1, 0, 1], [1, 1, 0]] structure = [[0, 0, 0], [0, 0, 0]] tmp = ndimage.grey_closing(array, footprint=footprint, structure=structure) expected = tmp - array output = numpy.zeros(array.shape, array.dtype) ndimage.black_tophat(array, footprint=footprint, structure=structure, 
output=output) assert_array_almost_equal(expected, output) def test_black_tophat02(self): array = numpy.array([[3, 2, 5, 1, 4], [7, 6, 9, 3, 5], [5, 8, 3, 7, 1]]) footprint = [[1, 0, 1], [1, 1, 0]] structure = [[0, 0, 0], [0, 0, 0]] tmp = ndimage.grey_closing(array, footprint=footprint, structure=structure) expected = tmp - array output = ndimage.black_tophat(array, footprint=footprint, structure=structure) assert_array_almost_equal(expected, output) def test_black_tophat03(self): array = numpy.array([[1, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 0, 1, 0], [0, 1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0, 1]], dtype=numpy.bool_) structure = numpy.ones((3, 3), dtype=numpy.bool_) expected = numpy.array([[0, 1, 1, 1, 1, 1, 1], [1, 0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 1, 0, 1], [1, 0, 0, 0, 0, 0, 1], [1, 1, 1, 1, 1, 1, 0]], dtype=numpy.bool_) output = ndimage.black_tophat(array, structure=structure) assert_array_equal(expected, output) def test_black_tophat04(self): array = numpy.eye(5, dtype=numpy.bool_) structure = numpy.ones((3, 3), dtype=numpy.bool_) # Check that type missmatch is properly handled output = numpy.empty_like(array, dtype=numpy.float) ndimage.black_tophat(array, structure=structure, output=output) def test_hit_or_miss01(self): struct = [[0, 1, 0], [1, 1, 1], [0, 1, 0]] expected = [[0, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]] for type_ in self.types: data = numpy.array([[0, 1, 0, 0, 0], [1, 1, 1, 0, 0], [0, 1, 0, 1, 1], [0, 0, 1, 1, 1], [0, 1, 1, 1, 0], [0, 1, 1, 1, 1], [0, 1, 1, 1, 1], [0, 0, 0, 0, 0]], type_) out = numpy.zeros(data.shape, bool) ndimage.binary_hit_or_miss(data, struct, output=out) assert_array_almost_equal(expected, out) def test_hit_or_miss02(self): struct = [[0, 1, 0], [1, 1, 1], [0, 1, 0]] expected = [[0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], 
[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]] for type_ in self.types: data = numpy.array([[0, 1, 0, 0, 1, 1, 1, 0], [1, 1, 1, 0, 0, 1, 0, 0], [0, 1, 0, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0]], type_) out = ndimage.binary_hit_or_miss(data, struct) assert_array_almost_equal(expected, out) def test_hit_or_miss03(self): struct1 = [[0, 0, 0], [1, 1, 1], [0, 0, 0]] struct2 = [[1, 1, 1], [0, 0, 0], [1, 1, 1]] expected = [[0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]] for type_ in self.types: data = numpy.array([[0, 1, 0, 0, 1, 1, 1, 0], [1, 1, 1, 0, 0, 0, 0, 0], [0, 1, 0, 1, 1, 1, 1, 0], [0, 0, 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 0, 1, 1, 0], [0, 0, 0, 0, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0]], type_) out = ndimage.binary_hit_or_miss(data, struct1, struct2) assert_array_almost_equal(expected, out) class TestDilateFix: def setup_method(self): # dilation related setup self.array = numpy.array([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 1, 0], [0, 0, 1, 1, 0], [0, 0, 0, 0, 0]], dtype=numpy.uint8) self.sq3x3 = numpy.ones((3, 3)) dilated3x3 = ndimage.binary_dilation(self.array, structure=self.sq3x3) self.dilated3x3 = dilated3x3.view(numpy.uint8) def test_dilation_square_structure(self): result = ndimage.grey_dilation(self.array, structure=self.sq3x3) # +1 accounts for difference between grey and binary dilation assert_array_almost_equal(result, self.dilated3x3 + 1) def test_dilation_scalar_size(self): result = ndimage.grey_dilation(self.array, size=3) assert_array_almost_equal(result, self.dilated3x3) class TestBinaryOpeningClosing: def setup_method(self): a = numpy.zeros((5,5), dtype=bool) a[1:4, 1:4] = True a[4,4] = True self.array = a self.sq3x3 = numpy.ones((3,3)) self.opened_old = ndimage.binary_opening(self.array, self.sq3x3, 1, None, 0) self.closed_old = 
ndimage.binary_closing(self.array, self.sq3x3, 1, None, 0) def test_opening_new_arguments(self): opened_new = ndimage.binary_opening(self.array, self.sq3x3, 1, None, 0, None, 0, False) assert_array_equal(opened_new, self.opened_old) def test_closing_new_arguments(self): closed_new = ndimage.binary_closing(self.array, self.sq3x3, 1, None, 0, None, 0, False) assert_array_equal(closed_new, self.closed_old)
202,704
43.037584
113
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/ndimage/tests/test_c_api.py
from __future__ import division, print_function, absolute_import import numpy as np from numpy.testing import assert_allclose from scipy import ndimage from scipy.ndimage import _ctest from scipy.ndimage import _ctest_oldapi from scipy.ndimage import _cytest from scipy._lib._ccallback import LowLevelCallable FILTER1D_FUNCTIONS = [ lambda filter_size: _ctest.filter1d(filter_size), lambda filter_size: _ctest_oldapi.filter1d(filter_size), lambda filter_size: _cytest.filter1d(filter_size, with_signature=False), lambda filter_size: LowLevelCallable(_cytest.filter1d(filter_size, with_signature=True)), lambda filter_size: LowLevelCallable.from_cython(_cytest, "_filter1d", _cytest.filter1d_capsule(filter_size)), ] FILTER2D_FUNCTIONS = [ lambda weights: _ctest.filter2d(weights), lambda weights: _ctest_oldapi.filter2d(weights), lambda weights: _cytest.filter2d(weights, with_signature=False), lambda weights: LowLevelCallable(_cytest.filter2d(weights, with_signature=True)), lambda weights: LowLevelCallable.from_cython(_cytest, "_filter2d", _cytest.filter2d_capsule(weights)), ] TRANSFORM_FUNCTIONS = [ lambda shift: _ctest.transform(shift), lambda shift: _ctest_oldapi.transform(shift), lambda shift: _cytest.transform(shift, with_signature=False), lambda shift: LowLevelCallable(_cytest.transform(shift, with_signature=True)), lambda shift: LowLevelCallable.from_cython(_cytest, "_transform", _cytest.transform_capsule(shift)), ] def test_generic_filter(): def filter2d(footprint_elements, weights): return (weights*footprint_elements).sum() def check(j): func = FILTER2D_FUNCTIONS[j] im = np.ones((20, 20)) im[:10,:10] = 0 footprint = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) footprint_size = np.count_nonzero(footprint) weights = np.ones(footprint_size)/footprint_size res = ndimage.generic_filter(im, func(weights), footprint=footprint) std = ndimage.generic_filter(im, filter2d, footprint=footprint, extra_arguments=(weights,)) assert_allclose(res, std, err_msg="#{} failed".format(j)) 
for j, func in enumerate(FILTER2D_FUNCTIONS): check(j) def test_generic_filter1d(): def filter1d(input_line, output_line, filter_size): for i in range(output_line.size): output_line[i] = 0 for j in range(filter_size): output_line[i] += input_line[i+j] output_line /= filter_size def check(j): func = FILTER1D_FUNCTIONS[j] im = np.tile(np.hstack((np.zeros(10), np.ones(10))), (10, 1)) filter_size = 3 res = ndimage.generic_filter1d(im, func(filter_size), filter_size) std = ndimage.generic_filter1d(im, filter1d, filter_size, extra_arguments=(filter_size,)) assert_allclose(res, std, err_msg="#{} failed".format(j)) for j, func in enumerate(FILTER1D_FUNCTIONS): check(j) def test_geometric_transform(): def transform(output_coordinates, shift): return output_coordinates[0] - shift, output_coordinates[1] - shift def check(j): func = TRANSFORM_FUNCTIONS[j] im = np.arange(12).reshape(4, 3).astype(np.float64) shift = 0.5 res = ndimage.geometric_transform(im, func(shift)) std = ndimage.geometric_transform(im, transform, extra_arguments=(shift,)) assert_allclose(res, std, err_msg="#{} failed".format(j)) for j, func in enumerate(TRANSFORM_FUNCTIONS): check(j)
3,746
36.09901
106
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/_testutils.py
from __future__ import division, print_function, absolute_import import numpy as np class _FakeMatrix(object): def __init__(self, data): self._data = data self.__array_interface__ = data.__array_interface__ class _FakeMatrix2(object): def __init__(self, data): self._data = data def __array__(self): return self._data def _get_array(shape, dtype): """ Get a test array of given shape and data type. Returned NxN matrices are posdef, and 2xN are banded-posdef. """ if len(shape) == 2 and shape[0] == 2: # yield a banded positive definite one x = np.zeros(shape, dtype=dtype) x[0, 1:] = -1 x[1] = 2 return x elif len(shape) == 2 and shape[0] == shape[1]: # always yield a positive definite matrix x = np.zeros(shape, dtype=dtype) j = np.arange(shape[0]) x[j, j] = 2 x[j[:-1], j[:-1]+1] = -1 x[j[:-1]+1, j[:-1]] = -1 return x else: np.random.seed(1234) return np.random.randn(*shape).astype(dtype) def _id(x): return x def assert_no_overwrite(call, shapes, dtypes=None): """ Test that a call does not overwrite its input arguments """ if dtypes is None: dtypes = [np.float32, np.float64, np.complex64, np.complex128] for dtype in dtypes: for order in ["C", "F"]: for faker in [_id, _FakeMatrix, _FakeMatrix2]: orig_inputs = [_get_array(s, dtype) for s in shapes] inputs = [faker(x.copy(order)) for x in orig_inputs] call(*inputs) msg = "call modified inputs [%r, %r]" % (dtype, faker) for a, b in zip(inputs, orig_inputs): np.testing.assert_equal(a, b, err_msg=msg)
1,814
26.5
70
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/_generate_pyx.py
""" Code generator script to make the Cython BLAS and LAPACK wrappers from the files "cython_blas_signatures.txt" and "cython_lapack_signatures.txt" which contain the signatures for all the BLAS/LAPACK routines that should be included in the wrappers. """ import os from operator import itemgetter BASE_DIR = os.path.abspath(os.path.dirname(__file__)) fortran_types = {'int': 'integer', 'c': 'complex', 'd': 'double precision', 's': 'real', 'z': 'complex*16', 'char': 'character', 'bint': 'logical'} c_types = {'int': 'int', 'c': 'npy_complex64', 'd': 'double', 's': 'float', 'z': 'npy_complex128', 'char': 'char', 'bint': 'int', 'cselect1': '_cselect1', 'cselect2': '_cselect2', 'dselect2': '_dselect2', 'dselect3': '_dselect3', 'sselect2': '_sselect2', 'sselect3': '_sselect3', 'zselect1': '_zselect1', 'zselect2': '_zselect2'} def arg_names_and_types(args): return zip(*[arg.split(' *') for arg in args.split(', ')]) pyx_func_template = """ cdef extern from "{header_name}": void _fortran_{name} "F_FUNC({name}wrp, {upname}WRP)"({ret_type} *out, {fort_args}) nogil cdef {ret_type} {name}({args}) nogil: cdef {ret_type} out _fortran_{name}(&out, {argnames}) return out """ npy_types = {'c': 'npy_complex64', 'z': 'npy_complex128', 'cselect1': '_cselect1', 'cselect2': '_cselect2', 'dselect2': '_dselect2', 'dselect3': '_dselect3', 'sselect2': '_sselect2', 'sselect3': '_sselect3', 'zselect1': '_zselect1', 'zselect2': '_zselect2'} def arg_casts(arg): if arg in ['npy_complex64', 'npy_complex128', '_cselect1', '_cselect2', '_dselect2', '_dselect3', '_sselect2', '_sselect3', '_zselect1', '_zselect2']: return '<{0}*>'.format(arg) return '' def pyx_decl_func(name, ret_type, args, header_name): argtypes, argnames = arg_names_and_types(args) # Fix the case where one of the arguments has the same name as the # abbreviation for the argument type. # Otherwise the variable passed as an argument is considered overwrites # the previous typedef and Cython compilation fails. 
if ret_type in argnames: argnames = [n if n != ret_type else ret_type + '_' for n in argnames] argnames = [n if n not in ['lambda', 'in'] else n + '_' for n in argnames] args = ', '.join([' *'.join([n, t]) for n, t in zip(argtypes, argnames)]) argtypes = [npy_types.get(t, t) for t in argtypes] fort_args = ', '.join([' *'.join([n, t]) for n, t in zip(argtypes, argnames)]) argnames = [arg_casts(t) + n for n, t in zip(argnames, argtypes)] argnames = ', '.join(argnames) c_ret_type = c_types[ret_type] args = args.replace('lambda', 'lambda_') return pyx_func_template.format(name=name, upname=name.upper(), args=args, fort_args=fort_args, ret_type=ret_type, c_ret_type=c_ret_type, argnames=argnames, header_name=header_name) pyx_sub_template = """cdef extern from "{header_name}": void _fortran_{name} "F_FUNC({name},{upname})"({fort_args}) nogil cdef void {name}({args}) nogil: _fortran_{name}({argnames}) """ def pyx_decl_sub(name, args, header_name): argtypes, argnames = arg_names_and_types(args) argtypes = [npy_types.get(t, t) for t in argtypes] argnames = [n if n not in ['lambda', 'in'] else n + '_' for n in argnames] fort_args = ', '.join([' *'.join([n, t]) for n, t in zip(argtypes, argnames)]) argnames = [arg_casts(t) + n for n, t in zip(argnames, argtypes)] argnames = ', '.join(argnames) args = args.replace('*lambda,', '*lambda_,').replace('*in,', '*in_,') return pyx_sub_template.format(name=name, upname=name.upper(), args=args, fort_args=fort_args, argnames=argnames, header_name=header_name) blas_pyx_preamble = '''# cython: boundscheck = False # cython: wraparound = False # cython: cdivision = True """ BLAS Functions for Cython ========================= Usable from Cython via:: cimport scipy.linalg.cython_blas These wrappers do not check for alignment of arrays. Alignment should be checked before these wrappers are used. Raw function pointers (Fortran-style pointer arguments): - {} """ # Within scipy, these wrappers can be used via relative or absolute cimport. 
# Examples: # from ..linalg cimport cython_blas # from scipy.linalg cimport cython_blas # cimport scipy.linalg.cython_blas as cython_blas # cimport ..linalg.cython_blas as cython_blas # Within scipy, if BLAS functions are needed in C/C++/Fortran, # these wrappers should not be used. # The original libraries should be linked directly. from __future__ import absolute_import cdef extern from "fortran_defs.h": pass from numpy cimport npy_complex64, npy_complex128 ''' def make_blas_pyx_preamble(all_sigs): names = [sig[0] for sig in all_sigs] return blas_pyx_preamble.format("\n- ".join(names)) lapack_pyx_preamble = '''""" LAPACK functions for Cython =========================== Usable from Cython via:: cimport scipy.linalg.cython_lapack This module provides Cython-level wrappers for all primary routines included in LAPACK 3.1.0 except for ``zcgesv`` since its interface is not consistent from LAPACK 3.1.0 to 3.6.0. It also provides some of the fixed-api auxiliary routines. These wrappers do not check for alignment of arrays. Alignment should be checked before these wrappers are used. Raw function pointers (Fortran-style pointer arguments): - {} """ # Within scipy, these wrappers can be used via relative or absolute cimport. # Examples: # from ..linalg cimport cython_lapack # from scipy.linalg cimport cython_lapack # cimport scipy.linalg.cython_lapack as cython_lapack # cimport ..linalg.cython_lapack as cython_lapack # Within scipy, if LAPACK functions are needed in C/C++/Fortran, # these wrappers should not be used. # The original libraries should be linked directly. from __future__ import absolute_import cdef extern from "fortran_defs.h": pass from numpy cimport npy_complex64, npy_complex128 cdef extern from "_lapack_subroutines.h": # Function pointer type declarations for # gees and gges families of functions. 
ctypedef bint _cselect1(npy_complex64*) ctypedef bint _cselect2(npy_complex64*, npy_complex64*) ctypedef bint _dselect2(d*, d*) ctypedef bint _dselect3(d*, d*, d*) ctypedef bint _sselect2(s*, s*) ctypedef bint _sselect3(s*, s*, s*) ctypedef bint _zselect1(npy_complex128*) ctypedef bint _zselect2(npy_complex128*, npy_complex128*) ''' def make_lapack_pyx_preamble(all_sigs): names = [sig[0] for sig in all_sigs] return lapack_pyx_preamble.format("\n- ".join(names)) blas_py_wrappers = """ # Python-accessible wrappers for testing: cdef inline bint _is_contiguous(double[:,:] a, int axis) nogil: return (a.strides[axis] == sizeof(a[0,0]) or a.shape[axis] == 1) cpdef float complex _test_cdotc(float complex[:] cx, float complex[:] cy) nogil: cdef: int n = cx.shape[0] int incx = cx.strides[0] // sizeof(cx[0]) int incy = cy.strides[0] // sizeof(cy[0]) return cdotc(&n, &cx[0], &incx, &cy[0], &incy) cpdef float complex _test_cdotu(float complex[:] cx, float complex[:] cy) nogil: cdef: int n = cx.shape[0] int incx = cx.strides[0] // sizeof(cx[0]) int incy = cy.strides[0] // sizeof(cy[0]) return cdotu(&n, &cx[0], &incx, &cy[0], &incy) cpdef double _test_dasum(double[:] dx) nogil: cdef: int n = dx.shape[0] int incx = dx.strides[0] // sizeof(dx[0]) return dasum(&n, &dx[0], &incx) cpdef double _test_ddot(double[:] dx, double[:] dy) nogil: cdef: int n = dx.shape[0] int incx = dx.strides[0] // sizeof(dx[0]) int incy = dy.strides[0] // sizeof(dy[0]) return ddot(&n, &dx[0], &incx, &dy[0], &incy) cpdef int _test_dgemm(double alpha, double[:,:] a, double[:,:] b, double beta, double[:,:] c) nogil except -1: cdef: char *transa char *transb int m, n, k, lda, ldb, ldc double *a0=&a[0,0] double *b0=&b[0,0] double *c0=&c[0,0] # In the case that c is C contiguous, swap a and b and # swap whether or not each of them is transposed. # This can be done because a.dot(b) = b.T.dot(a.T).T. 
if _is_contiguous(c, 1): if _is_contiguous(a, 1): transb = 'n' ldb = (&a[1,0]) - a0 if a.shape[0] > 1 else 1 elif _is_contiguous(a, 0): transb = 't' ldb = (&a[0,1]) - a0 if a.shape[1] > 1 else 1 else: with gil: raise ValueError("Input 'a' is neither C nor Fortran contiguous.") if _is_contiguous(b, 1): transa = 'n' lda = (&b[1,0]) - b0 if b.shape[0] > 1 else 1 elif _is_contiguous(b, 0): transa = 't' lda = (&b[0,1]) - b0 if b.shape[1] > 1 else 1 else: with gil: raise ValueError("Input 'b' is neither C nor Fortran contiguous.") k = b.shape[0] if k != a.shape[1]: with gil: raise ValueError("Shape mismatch in input arrays.") m = b.shape[1] n = a.shape[0] if n != c.shape[0] or m != c.shape[1]: with gil: raise ValueError("Output array does not have the correct shape.") ldc = (&c[1,0]) - c0 if c.shape[0] > 1 else 1 dgemm(transa, transb, &m, &n, &k, &alpha, b0, &lda, a0, &ldb, &beta, c0, &ldc) elif _is_contiguous(c, 0): if _is_contiguous(a, 1): transa = 't' lda = (&a[1,0]) - a0 if a.shape[0] > 1 else 1 elif _is_contiguous(a, 0): transa = 'n' lda = (&a[0,1]) - a0 if a.shape[1] > 1 else 1 else: with gil: raise ValueError("Input 'a' is neither C nor Fortran contiguous.") if _is_contiguous(b, 1): transb = 't' ldb = (&b[1,0]) - b0 if b.shape[0] > 1 else 1 elif _is_contiguous(b, 0): transb = 'n' ldb = (&b[0,1]) - b0 if b.shape[1] > 1 else 1 else: with gil: raise ValueError("Input 'b' is neither C nor Fortran contiguous.") m = a.shape[0] k = a.shape[1] if k != b.shape[0]: with gil: raise ValueError("Shape mismatch in input arrays.") n = b.shape[1] if m != c.shape[0] or n != c.shape[1]: with gil: raise ValueError("Output array does not have the correct shape.") ldc = (&c[0,1]) - c0 if c.shape[1] > 1 else 1 dgemm(transa, transb, &m, &n, &k, &alpha, a0, &lda, b0, &ldb, &beta, c0, &ldc) else: with gil: raise ValueError("Input 'c' is neither C nor Fortran contiguous.") return 0 cpdef double _test_dnrm2(double[:] x) nogil: cdef: int n = x.shape[0] int incx = x.strides[0] // sizeof(x[0]) 
return dnrm2(&n, &x[0], &incx) cpdef double _test_dzasum(double complex[:] zx) nogil: cdef: int n = zx.shape[0] int incx = zx.strides[0] // sizeof(zx[0]) return dzasum(&n, &zx[0], &incx) cpdef double _test_dznrm2(double complex[:] x) nogil: cdef: int n = x.shape[0] int incx = x.strides[0] // sizeof(x[0]) return dznrm2(&n, &x[0], &incx) cpdef int _test_icamax(float complex[:] cx) nogil: cdef: int n = cx.shape[0] int incx = cx.strides[0] // sizeof(cx[0]) return icamax(&n, &cx[0], &incx) cpdef int _test_idamax(double[:] dx) nogil: cdef: int n = dx.shape[0] int incx = dx.strides[0] // sizeof(dx[0]) return idamax(&n, &dx[0], &incx) cpdef int _test_isamax(float[:] sx) nogil: cdef: int n = sx.shape[0] int incx = sx.strides[0] // sizeof(sx[0]) return isamax(&n, &sx[0], &incx) cpdef int _test_izamax(double complex[:] zx) nogil: cdef: int n = zx.shape[0] int incx = zx.strides[0] // sizeof(zx[0]) return izamax(&n, &zx[0], &incx) cpdef float _test_sasum(float[:] sx) nogil: cdef: int n = sx.shape[0] int incx = sx.shape[0] // sizeof(sx[0]) return sasum(&n, &sx[0], &incx) cpdef float _test_scasum(float complex[:] cx) nogil: cdef: int n = cx.shape[0] int incx = cx.strides[0] // sizeof(cx[0]) return scasum(&n, &cx[0], &incx) cpdef float _test_scnrm2(float complex[:] x) nogil: cdef: int n = x.shape[0] int incx = x.strides[0] // sizeof(x[0]) return scnrm2(&n, &x[0], &incx) cpdef float _test_sdot(float[:] sx, float[:] sy) nogil: cdef: int n = sx.shape[0] int incx = sx.strides[0] // sizeof(sx[0]) int incy = sy.strides[0] // sizeof(sy[0]) return sdot(&n, &sx[0], &incx, &sy[0], &incy) cpdef float _test_snrm2(float[:] x) nogil: cdef: int n = x.shape[0] int incx = x.shape[0] // sizeof(x[0]) return snrm2(&n, &x[0], &incx) cpdef double complex _test_zdotc(double complex[:] zx, double complex[:] zy) nogil: cdef: int n = zx.shape[0] int incx = zx.strides[0] // sizeof(zx[0]) int incy = zy.strides[0] // sizeof(zy[0]) return zdotc(&n, &zx[0], &incx, &zy[0], &incy) cpdef double complex 
_test_zdotu(double complex[:] zx, double complex[:] zy) nogil: cdef: int n = zx.shape[0] int incx = zx.strides[0] // sizeof(zx[0]) int incy = zy.strides[0] // sizeof(zy[0]) return zdotu(&n, &zx[0], &incx, &zy[0], &incy) """ def generate_blas_pyx(func_sigs, sub_sigs, all_sigs, header_name): funcs = "\n".join(pyx_decl_func(*(s+(header_name,))) for s in func_sigs) subs = "\n" + "\n".join(pyx_decl_sub(*(s[::2]+(header_name,))) for s in sub_sigs) return make_blas_pyx_preamble(all_sigs) + funcs + subs + blas_py_wrappers lapack_py_wrappers = """ # Python accessible wrappers for testing: def _test_dlamch(cmach): # This conversion is necessary to handle Python 3 strings. cmach_bytes = bytes(cmach) # Now that it is a bytes representation, a non-temporary variable # must be passed as a part of the function call. cdef char* cmach_char = cmach_bytes return dlamch(cmach_char) def _test_slamch(cmach): # This conversion is necessary to handle Python 3 strings. cmach_bytes = bytes(cmach) # Now that it is a bytes representation, a non-temporary variable # must be passed as a part of the function call. cdef char* cmach_char = cmach_bytes return slamch(cmach_char) """ def generate_lapack_pyx(func_sigs, sub_sigs, all_sigs, header_name): funcs = "\n".join(pyx_decl_func(*(s+(header_name,))) for s in func_sigs) subs = "\n" + "\n".join(pyx_decl_sub(*(s[::2]+(header_name,))) for s in sub_sigs) preamble = make_lapack_pyx_preamble(all_sigs) return preamble + funcs + subs + lapack_py_wrappers pxd_template = """ctypedef {ret_type} {name}_t({args}) nogil cdef {name}_t *{name}_f """ pxd_template = """cdef {ret_type} {name}({args}) nogil """ def pxd_decl(name, ret_type, args): args = args.replace('lambda', 'lambda_').replace('*in,', '*in_,') return pxd_template.format(name=name, ret_type=ret_type, args=args) blas_pxd_preamble = """# Within scipy, these wrappers can be used via relative or absolute cimport. 
# Examples: # from ..linalg cimport cython_blas # from scipy.linalg cimport cython_blas # cimport scipy.linalg.cython_blas as cython_blas # cimport ..linalg.cython_blas as cython_blas # Within scipy, if BLAS functions are needed in C/C++/Fortran, # these wrappers should not be used. # The original libraries should be linked directly. ctypedef float s ctypedef double d ctypedef float complex c ctypedef double complex z """ def generate_blas_pxd(all_sigs): body = '\n'.join(pxd_decl(*sig) for sig in all_sigs) return blas_pxd_preamble + body lapack_pxd_preamble = """# Within scipy, these wrappers can be used via relative or absolute cimport. # Examples: # from ..linalg cimport cython_lapack # from scipy.linalg cimport cython_lapack # cimport scipy.linalg.cython_lapack as cython_lapack # cimport ..linalg.cython_lapack as cython_lapack # Within scipy, if LAPACK functions are needed in C/C++/Fortran, # these wrappers should not be used. # The original libraries should be linked directly. ctypedef float s ctypedef double d ctypedef float complex c ctypedef double complex z # Function pointer type declarations for # gees and gges families of functions. 
ctypedef bint cselect1(c*) ctypedef bint cselect2(c*, c*) ctypedef bint dselect2(d*, d*) ctypedef bint dselect3(d*, d*, d*) ctypedef bint sselect2(s*, s*) ctypedef bint sselect3(s*, s*, s*) ctypedef bint zselect1(z*) ctypedef bint zselect2(z*, z*) """ def generate_lapack_pxd(all_sigs): return lapack_pxd_preamble + '\n'.join(pxd_decl(*sig) for sig in all_sigs) fortran_template = """ subroutine {name}wrp(ret, {argnames}) external {wrapper} {ret_type} {wrapper} {ret_type} ret {argdecls} ret = {wrapper}({argnames}) end """ dims = {'work': '(*)', 'ab': '(ldab,*)', 'a': '(lda,*)', 'dl': '(*)', 'd': '(*)', 'du': '(*)', 'ap': '(*)', 'e': '(*)', 'lld': '(*)'} def process_fortran_name(name, funcname): if 'inc' in name: return name xy_exclusions = ['ladiv', 'lapy2', 'lapy3'] if ('x' in name or 'y' in name) and funcname[1:] not in xy_exclusions: return name + '(n)' if name in dims: return name + dims[name] return name def fort_subroutine_wrapper(name, ret_type, args): if name[0] in ['c', 's'] or name in ['zladiv', 'zdotu', 'zdotc']: wrapper = 'w' + name else: wrapper = name types, names = arg_names_and_types(args) argnames = ', '.join(names) names = [process_fortran_name(n, name) for n in names] argdecls = '\n '.join('{0} {1}'.format(fortran_types[t], n) for n, t in zip(names, types)) return fortran_template.format(name=name, wrapper=wrapper, argnames=argnames, argdecls=argdecls, ret_type=fortran_types[ret_type]) def generate_fortran(func_sigs): return "\n".join(fort_subroutine_wrapper(*sig) for sig in func_sigs) def make_c_args(args): types, names = arg_names_and_types(args) types = [c_types[arg] for arg in types] return ', '.join('{0} *{1}'.format(t, n) for t, n in zip(types, names)) c_func_template = "void F_FUNC({name}wrp, {upname}WRP)({return_type} *ret, {args});\n" def c_func_decl(name, return_type, args): args = make_c_args(args) return_type = c_types[return_type] return c_func_template.format(name=name, upname=name.upper(), return_type=return_type, args=args) 
c_sub_template = "void F_FUNC({name},{upname})({args});\n"


def c_sub_decl(name, return_type, args):
    """Return the C prototype line for a Fortran subroutine wrapper.

    ``return_type`` is accepted for signature parity with ``c_func_decl``
    but is unused: subroutines are always emitted as ``void``.
    """
    return c_sub_template.format(name=name, upname=name.upper(),
                                 args=make_c_args(args))


c_preamble = """#ifndef SCIPY_LINALG_{lib}_FORTRAN_WRAPPERS_H
#define SCIPY_LINALG_{lib}_FORTRAN_WRAPPERS_H
#include "fortran_defs.h"
#include "numpy/arrayobject.h"
"""

lapack_decls = """
typedef int (*_cselect1)(npy_complex64*);
typedef int (*_cselect2)(npy_complex64*, npy_complex64*);
typedef int (*_dselect2)(double*, double*);
typedef int (*_dselect3)(double*, double*, double*);
typedef int (*_sselect2)(float*, float*);
typedef int (*_sselect3)(float*, float*, float*);
typedef int (*_zselect1)(npy_complex128*);
typedef int (*_zselect2)(npy_complex128*, npy_complex128*);
"""

cpp_guard = """
#ifdef __cplusplus
extern "C" {
#endif
"""

c_end = """
#ifdef __cplusplus
}
#endif
#endif
"""


def generate_c_header(func_sigs, sub_sigs, all_sigs, lib_name):
    """Build the C header text declaring every wrapped routine.

    ``all_sigs`` is unused here but kept so the three ``generate_*``
    entry points share one calling convention.
    """
    preamble = c_preamble.format(lib=lib_name)
    if lib_name == 'LAPACK':
        # Only LAPACK needs the gees/gges select-callback typedefs.
        preamble += lapack_decls
    funcs = "".join(c_func_decl(*sig) for sig in func_sigs)
    subs = "\n" + "".join(c_sub_decl(*sig) for sig in sub_sigs)
    return "".join([preamble, cpp_guard, funcs, subs, c_end])


def split_signature(sig):
    """Split ``"ret name(args)"`` into ``(name, ret, args)``.

    The final character (the closing parenthesis) is dropped before
    splitting at the opening one.
    """
    name_and_type, _, args = sig[:-1].partition('(')
    ret_type, name = name_and_type.split(' ')
    return name, ret_type, args


def filter_lines(lines):
    """Parse signature-file lines into ``(func_sigs, sub_sigs, all_sigs)``.

    Robustness fix: blank lines and comments are now recognised on the
    *stripped* text, so whitespace-only lines and ``#`` comments with
    leading blanks are ignored instead of crashing (the original indexed
    ``line[0]`` and fed unstripped text to the splitter).
    """
    stripped = [line.strip() for line in lines]
    sigs = [line for line in stripped if line and not line.startswith('#')]
    func_sigs = [split_signature(line) for line in sigs
                 if line.split(' ')[0] != 'void']
    sub_sigs = [split_signature(line) for line in sigs
                if line.split(' ')[0] == 'void']
    all_sigs = sorted(func_sigs + sub_sigs, key=itemgetter(0))
    return func_sigs, sub_sigs, all_sigs


def all_newer(src_files, dst_files):
    """Return True iff every dst file exists and is newer than every src.

    Reimplemented on top of ``os.path.getmtime`` because the original
    used ``distutils.dep_util.newer`` and distutils was removed in
    Python 3.12.  Semantics match ``newer(dst, src)``: a missing src
    counts as "dst is newer"; a missing dst makes the whole check False.
    """
    def _newer(dst, src):
        return (not os.path.exists(src)
                or os.path.getmtime(dst) > os.path.getmtime(src))
    return all(os.path.exists(dst) and _newer(dst, src)
               for dst in dst_files for src in src_files)


def make_all(blas_signature_file="cython_blas_signatures.txt",
             lapack_signature_file="cython_lapack_signatures.txt",
             blas_name="cython_blas",
             lapack_name="cython_lapack",
             blas_fortran_name="_blas_subroutine_wrappers.f",
             lapack_fortran_name="_lapack_subroutine_wrappers.f",
             blas_header_name="_blas_subroutines.h",
             lapack_header_name="_lapack_subroutines.h"):
    """Regenerate every BLAS/LAPACK wrapper source, if any input changed.

    Reads the two signature files and emits, for each library, the
    ``.pyx`` wrappers, the ``.pxd`` declarations, the Fortran shims and
    the C header.  All work is skipped when every output is already
    newer than every input.
    """
    src_files = (os.path.abspath(__file__),
                 blas_signature_file,
                 lapack_signature_file)
    dst_files = (blas_name + '.pyx', blas_name + '.pxd',
                 blas_fortran_name, blas_header_name,
                 lapack_name + '.pyx', lapack_name + '.pxd',
                 lapack_fortran_name, lapack_header_name)

    os.chdir(BASE_DIR)
    if all_newer(src_files, dst_files):
        print("scipy/linalg/_generate_pyx.py: all files up-to-date")
        return

    comments = ["This file was generated by _generate_pyx.py.\n",
                "Do not edit this file directly.\n"]
    # Per-language "do not edit" banners (C, Cython/Python, Fortran).
    ccomment = ''.join(['/* ' + line.rstrip() + ' */\n'
                        for line in comments]) + '\n'
    pyxcomment = ''.join(['# ' + line for line in comments]) + '\n'
    fcomment = ''.join(['c ' + line for line in comments]) + '\n'

    def _write(path, banner, text):
        # Every generated file is its provenance banner followed by content.
        with open(path, 'w') as f:
            f.write(banner)
            f.write(text)

    with open(blas_signature_file, 'r') as f:
        blas_sigs = filter_lines(f.readlines())
    _write(blas_name + '.pyx', pyxcomment,
           generate_blas_pyx(*(blas_sigs + (blas_header_name,))))
    _write(blas_name + '.pxd', pyxcomment,
           generate_blas_pxd(blas_sigs[2]))
    _write(blas_fortran_name, fcomment,
           generate_fortran(blas_sigs[0]))
    _write(blas_header_name, ccomment,
           generate_c_header(*(blas_sigs + ('BLAS',))))

    with open(lapack_signature_file, 'r') as f:
        lapack_sigs = filter_lines(f.readlines())
    _write(lapack_name + '.pyx', pyxcomment,
           generate_lapack_pyx(*(lapack_sigs + (lapack_header_name,))))
    _write(lapack_name + '.pxd', pyxcomment,
           generate_lapack_pxd(lapack_sigs[2]))
    _write(lapack_fortran_name, fcomment,
           generate_fortran(lapack_sigs[0]))
    _write(lapack_header_name, ccomment,
           generate_c_header(*(lapack_sigs + ('LAPACK',))))


if __name__ == '__main__':
    make_all()
24,552
32.31479
101
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/interpolative.py
#****************************************************************************** # Copyright (C) 2013 Kenneth L. Ho # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. Redistributions in binary # form must reproduce the above copyright notice, this list of conditions and # the following disclaimer in the documentation and/or other materials # provided with the distribution. # # None of the names of the copyright holders may be used to endorse or # promote products derived from this software without specific prior written # permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. #****************************************************************************** # Python module for interfacing with `id_dist`. r""" ====================================================================== Interpolative matrix decomposition (:mod:`scipy.linalg.interpolative`) ====================================================================== .. moduleauthor:: Kenneth L. Ho <[email protected]> .. versionadded:: 0.13 .. 
currentmodule:: scipy.linalg.interpolative An interpolative decomposition (ID) of a matrix :math:`A \in \mathbb{C}^{m \times n}` of rank :math:`k \leq \min \{ m, n \}` is a factorization .. math:: A \Pi = \begin{bmatrix} A \Pi_{1} & A \Pi_{2} \end{bmatrix} = A \Pi_{1} \begin{bmatrix} I & T \end{bmatrix}, where :math:`\Pi = [\Pi_{1}, \Pi_{2}]` is a permutation matrix with :math:`\Pi_{1} \in \{ 0, 1 \}^{n \times k}`, i.e., :math:`A \Pi_{2} = A \Pi_{1} T`. This can equivalently be written as :math:`A = BP`, where :math:`B = A \Pi_{1}` and :math:`P = [I, T] \Pi^{\mathsf{T}}` are the *skeleton* and *interpolation matrices*, respectively. If :math:`A` does not have exact rank :math:`k`, then there exists an approximation in the form of an ID such that :math:`A = BP + E`, where :math:`\| E \| \sim \sigma_{k + 1}` is on the order of the :math:`(k + 1)`-th largest singular value of :math:`A`. Note that :math:`\sigma_{k + 1}` is the best possible error for a rank-:math:`k` approximation and, in fact, is achieved by the singular value decomposition (SVD) :math:`A \approx U S V^{*}`, where :math:`U \in \mathbb{C}^{m \times k}` and :math:`V \in \mathbb{C}^{n \times k}` have orthonormal columns and :math:`S = \mathop{\mathrm{diag}} (\sigma_{i}) \in \mathbb{C}^{k \times k}` is diagonal with nonnegative entries. The principal advantages of using an ID over an SVD are that: - it is cheaper to construct; - it preserves the structure of :math:`A`; and - it is more efficient to compute with in light of the identity submatrix of :math:`P`. Routines ======== Main functionality: .. autosummary:: :toctree: generated/ interp_decomp reconstruct_matrix_from_id reconstruct_interp_matrix reconstruct_skel_matrix id_to_svd svd estimate_spectral_norm estimate_spectral_norm_diff estimate_rank Support functions: .. 
autosummary:: :toctree: generated/ seed rand References ========== This module uses the ID software package [1]_ by Martinsson, Rokhlin, Shkolnisky, and Tygert, which is a Fortran library for computing IDs using various algorithms, including the rank-revealing QR approach of [2]_ and the more recent randomized methods described in [3]_, [4]_, and [5]_. This module exposes its functionality in a way convenient for Python users. Note that this module does not add any functionality beyond that of organizing a simpler and more consistent interface. We advise the user to consult also the `documentation for the ID package <http://tygert.com/id_doc.4.pdf>`_. .. [1] P.G. Martinsson, V. Rokhlin, Y. Shkolnisky, M. Tygert. "ID: a software package for low-rank approximation of matrices via interpolative decompositions, version 0.2." http://tygert.com/id_doc.4.pdf. .. [2] H. Cheng, Z. Gimbutas, P.G. Martinsson, V. Rokhlin. "On the compression of low rank matrices." *SIAM J. Sci. Comput.* 26 (4): 1389--1404, 2005. `doi:10.1137/030602678 <http://dx.doi.org/10.1137/030602678>`_. .. [3] E. Liberty, F. Woolfe, P.G. Martinsson, V. Rokhlin, M. Tygert. "Randomized algorithms for the low-rank approximation of matrices." *Proc. Natl. Acad. Sci. U.S.A.* 104 (51): 20167--20172, 2007. `doi:10.1073/pnas.0709640104 <http://dx.doi.org/10.1073/pnas.0709640104>`_. .. [4] P.G. Martinsson, V. Rokhlin, M. Tygert. "A randomized algorithm for the decomposition of matrices." *Appl. Comput. Harmon. Anal.* 30 (1): 47--68, 2011. `doi:10.1016/j.acha.2010.02.003 <http://dx.doi.org/10.1016/j.acha.2010.02.003>`_. .. [5] F. Woolfe, E. Liberty, V. Rokhlin, M. Tygert. "A fast randomized algorithm for the approximation of matrices." *Appl. Comput. Harmon. Anal.* 25 (3): 335--366, 2008. `doi:10.1016/j.acha.2007.12.002 <http://dx.doi.org/10.1016/j.acha.2007.12.002>`_. 
Tutorial ======== Initializing ------------ The first step is to import :mod:`scipy.linalg.interpolative` by issuing the command: >>> import scipy.linalg.interpolative as sli Now let's build a matrix. For this, we consider a Hilbert matrix, which is well known to have low rank: >>> from scipy.linalg import hilbert >>> n = 1000 >>> A = hilbert(n) We can also do this explicitly via: >>> import numpy as np >>> n = 1000 >>> A = np.empty((n, n), order='F') >>> for j in range(n): >>> for i in range(n): >>> A[i,j] = 1. / (i + j + 1) Note the use of the flag ``order='F'`` in :func:`numpy.empty`. This instantiates the matrix in Fortran-contiguous order and is important for avoiding data copying when passing to the backend. We then define multiplication routines for the matrix by regarding it as a :class:`scipy.sparse.linalg.LinearOperator`: >>> from scipy.sparse.linalg import aslinearoperator >>> L = aslinearoperator(A) This automatically sets up methods describing the action of the matrix and its adjoint on a vector. Computing an ID --------------- We have several choices of algorithm to compute an ID. These fall largely according to two dichotomies: 1. how the matrix is represented, i.e., via its entries or via its action on a vector; and 2. whether to approximate it to a fixed relative precision or to a fixed rank. We step through each choice in turn below. In all cases, the ID is represented by three parameters: 1. a rank ``k``; 2. an index array ``idx``; and 3. interpolation coefficients ``proj``. The ID is specified by the relation ``np.dot(A[:,idx[:k]], proj) == A[:,idx[k:]]``. From matrix entries ................... We first consider a matrix given in terms of its entries. To compute an ID to a fixed precision, type: >>> k, idx, proj = sli.interp_decomp(A, eps) where ``eps < 1`` is the desired precision. To compute an ID to a fixed rank, use: >>> idx, proj = sli.interp_decomp(A, k) where ``k >= 1`` is the desired rank. 
Both algorithms use random sampling and are usually faster than the corresponding older, deterministic algorithms, which can be accessed via the commands: >>> k, idx, proj = sli.interp_decomp(A, eps, rand=False) and: >>> idx, proj = sli.interp_decomp(A, k, rand=False) respectively. From matrix action .................. Now consider a matrix given in terms of its action on a vector as a :class:`scipy.sparse.linalg.LinearOperator`. To compute an ID to a fixed precision, type: >>> k, idx, proj = sli.interp_decomp(L, eps) To compute an ID to a fixed rank, use: >>> idx, proj = sli.interp_decomp(L, k) These algorithms are randomized. Reconstructing an ID -------------------- The ID routines above do not output the skeleton and interpolation matrices explicitly but instead return the relevant information in a more compact (and sometimes more useful) form. To build these matrices, write: >>> B = sli.reconstruct_skel_matrix(A, k, idx) for the skeleton matrix and: >>> P = sli.reconstruct_interp_matrix(idx, proj) for the interpolation matrix. The ID approximation can then be computed as: >>> C = np.dot(B, P) This can also be constructed directly using: >>> C = sli.reconstruct_matrix_from_id(B, idx, proj) without having to first compute ``P``. Alternatively, this can be done explicitly as well using: >>> B = A[:,idx[:k]] >>> P = np.hstack([np.eye(k), proj])[:,np.argsort(idx)] >>> C = np.dot(B, P) Computing an SVD ---------------- An ID can be converted to an SVD via the command: >>> U, S, V = sli.id_to_svd(B, idx, proj) The SVD approximation is then: >>> C = np.dot(U, np.dot(np.diag(S), np.dot(V.conj().T))) The SVD can also be computed "fresh" by combining both the ID and conversion steps into one command. Following the various ID algorithms above, there are correspondingly various SVD algorithms that one can employ. From matrix entries ................... We consider first SVD algorithms for a matrix given in terms of its entries. 
To compute an SVD to a fixed precision, type: >>> U, S, V = sli.svd(A, eps) To compute an SVD to a fixed rank, use: >>> U, S, V = sli.svd(A, k) Both algorithms use random sampling; for the deterministic versions, issue the keyword ``rand=False`` as above. From matrix action .................. Now consider a matrix given in terms of its action on a vector. To compute an SVD to a fixed precision, type: >>> U, S, V = sli.svd(L, eps) To compute an SVD to a fixed rank, use: >>> U, S, V = sli.svd(L, k) Utility routines ---------------- Several utility routines are also available. To estimate the spectral norm of a matrix, use: >>> snorm = sli.estimate_spectral_norm(A) This algorithm is based on the randomized power method and thus requires only matrix-vector products. The number of iterations to take can be set using the keyword ``its`` (default: ``its=20``). The matrix is interpreted as a :class:`scipy.sparse.linalg.LinearOperator`, but it is also valid to supply it as a :class:`numpy.ndarray`, in which case it is trivially converted using :func:`scipy.sparse.linalg.aslinearoperator`. The same algorithm can also estimate the spectral norm of the difference of two matrices ``A1`` and ``A2`` as follows: >>> diff = sli.estimate_spectral_norm_diff(A1, A2) This is often useful for checking the accuracy of a matrix approximation. Some routines in :mod:`scipy.linalg.interpolative` require estimating the rank of a matrix as well. This can be done with either: >>> k = sli.estimate_rank(A, eps) or: >>> k = sli.estimate_rank(L, eps) depending on the representation. The parameter ``eps`` controls the definition of the numerical rank. Finally, the random number generation required for all randomized routines can be controlled via :func:`scipy.linalg.interpolative.seed`. To reset the seed values to their original values, use: >>> sli.seed('default') To specify the seed values, use: >>> sli.seed(s) where ``s`` must be an integer or array of 55 floats. 
If an integer, the array of floats is obtained by using `np.random.rand` with the given integer seed. To simply generate some random numbers, type: >>> sli.rand(n) where ``n`` is the number of random numbers to generate. Remarks ------- The above functions all automatically detect the appropriate interface and work with both real and complex data types, passing input arguments to the proper backend routine. """ import scipy.linalg._interpolative_backend as backend import numpy as np _DTYPE_ERROR = ValueError("invalid input dtype (input must be float64 or complex128)") _TYPE_ERROR = TypeError("invalid input type (must be array or LinearOperator)") def _is_real(A): try: if A.dtype == np.complex128: return False elif A.dtype == np.float64: return True else: raise _DTYPE_ERROR except AttributeError: raise _TYPE_ERROR def seed(seed=None): """ Seed the internal random number generator used in this ID package. The generator is a lagged Fibonacci method with 55-element internal state. Parameters ---------- seed : int, sequence, 'default', optional If 'default', the random seed is reset to a default value. If `seed` is a sequence containing 55 floating-point numbers in range [0,1], these are used to set the internal state of the generator. If the value is an integer, the internal state is obtained from `numpy.random.RandomState` (MT19937) with the integer used as the initial seed. If `seed` is omitted (None), `numpy.random` is used to initialize the generator. """ # For details, see :func:`backend.id_srand`, :func:`backend.id_srandi`, # and :func:`backend.id_srando`. 
if isinstance(seed, str) and seed == 'default': backend.id_srando() elif hasattr(seed, '__len__'): state = np.asfortranarray(seed, dtype=float) if state.shape != (55,): raise ValueError("invalid input size") elif state.min() < 0 or state.max() > 1: raise ValueError("values not in range [0,1]") backend.id_srandi(state) elif seed is None: backend.id_srandi(np.random.rand(55)) else: rnd = np.random.RandomState(seed) backend.id_srandi(rnd.rand(55)) def rand(*shape): """ Generate standard uniform pseudorandom numbers via a very efficient lagged Fibonacci method. This routine is used for all random number generation in this package and can affect ID and SVD results. Parameters ---------- shape Shape of output array """ # For details, see :func:`backend.id_srand`, and :func:`backend.id_srando`. return backend.id_srand(np.prod(shape)).reshape(shape) def interp_decomp(A, eps_or_k, rand=True): """ Compute ID of a matrix. An ID of a matrix `A` is a factorization defined by a rank `k`, a column index array `idx`, and interpolation coefficients `proj` such that:: numpy.dot(A[:,idx[:k]], proj) = A[:,idx[k:]] The original matrix can then be reconstructed as:: numpy.hstack([A[:,idx[:k]], numpy.dot(A[:,idx[:k]], proj)] )[:,numpy.argsort(idx)] or via the routine :func:`reconstruct_matrix_from_id`. This can equivalently be written as:: numpy.dot(A[:,idx[:k]], numpy.hstack([numpy.eye(k), proj]) )[:,np.argsort(idx)] in terms of the skeleton and interpolation matrices:: B = A[:,idx[:k]] and:: P = numpy.hstack([numpy.eye(k), proj])[:,np.argsort(idx)] respectively. See also :func:`reconstruct_interp_matrix` and :func:`reconstruct_skel_matrix`. The ID can be computed to any relative precision or rank (depending on the value of `eps_or_k`). 
If a precision is specified (`eps_or_k < 1`), then this function has the output signature:: k, idx, proj = interp_decomp(A, eps_or_k) Otherwise, if a rank is specified (`eps_or_k >= 1`), then the output signature is:: idx, proj = interp_decomp(A, eps_or_k) .. This function automatically detects the form of the input parameters and passes them to the appropriate backend. For details, see :func:`backend.iddp_id`, :func:`backend.iddp_aid`, :func:`backend.iddp_rid`, :func:`backend.iddr_id`, :func:`backend.iddr_aid`, :func:`backend.iddr_rid`, :func:`backend.idzp_id`, :func:`backend.idzp_aid`, :func:`backend.idzp_rid`, :func:`backend.idzr_id`, :func:`backend.idzr_aid`, and :func:`backend.idzr_rid`. Parameters ---------- A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator` with `rmatvec` Matrix to be factored eps_or_k : float or int Relative error (if `eps_or_k < 1`) or rank (if `eps_or_k >= 1`) of approximation. rand : bool, optional Whether to use random sampling if `A` is of type :class:`numpy.ndarray` (randomized algorithms are always used if `A` is of type :class:`scipy.sparse.linalg.LinearOperator`). Returns ------- k : int Rank required to achieve specified relative precision if `eps_or_k < 1`. idx : :class:`numpy.ndarray` Column index array. proj : :class:`numpy.ndarray` Interpolation coefficients. 
""" from scipy.sparse.linalg import LinearOperator real = _is_real(A) if isinstance(A, np.ndarray): if eps_or_k < 1: eps = eps_or_k if rand: if real: k, idx, proj = backend.iddp_aid(eps, A) else: k, idx, proj = backend.idzp_aid(eps, A) else: if real: k, idx, proj = backend.iddp_id(eps, A) else: k, idx, proj = backend.idzp_id(eps, A) return k, idx - 1, proj else: k = int(eps_or_k) if rand: if real: idx, proj = backend.iddr_aid(A, k) else: idx, proj = backend.idzr_aid(A, k) else: if real: idx, proj = backend.iddr_id(A, k) else: idx, proj = backend.idzr_id(A, k) return idx - 1, proj elif isinstance(A, LinearOperator): m, n = A.shape matveca = A.rmatvec if eps_or_k < 1: eps = eps_or_k if real: k, idx, proj = backend.iddp_rid(eps, m, n, matveca) else: k, idx, proj = backend.idzp_rid(eps, m, n, matveca) return k, idx - 1, proj else: k = int(eps_or_k) if real: idx, proj = backend.iddr_rid(m, n, matveca, k) else: idx, proj = backend.idzr_rid(m, n, matveca, k) return idx - 1, proj else: raise _TYPE_ERROR def reconstruct_matrix_from_id(B, idx, proj): """ Reconstruct matrix from its ID. A matrix `A` with skeleton matrix `B` and ID indices and coefficients `idx` and `proj`, respectively, can be reconstructed as:: numpy.hstack([B, numpy.dot(B, proj)])[:,numpy.argsort(idx)] See also :func:`reconstruct_interp_matrix` and :func:`reconstruct_skel_matrix`. .. This function automatically detects the matrix data type and calls the appropriate backend. For details, see :func:`backend.idd_reconid` and :func:`backend.idz_reconid`. Parameters ---------- B : :class:`numpy.ndarray` Skeleton matrix. idx : :class:`numpy.ndarray` Column index array. proj : :class:`numpy.ndarray` Interpolation coefficients. Returns ------- :class:`numpy.ndarray` Reconstructed matrix. """ if _is_real(B): return backend.idd_reconid(B, idx + 1, proj) else: return backend.idz_reconid(B, idx + 1, proj) def reconstruct_interp_matrix(idx, proj): """ Reconstruct interpolation matrix from ID. 
The interpolation matrix can be reconstructed from the ID indices and coefficients `idx` and `proj`, respectively, as:: P = numpy.hstack([numpy.eye(proj.shape[0]), proj])[:,numpy.argsort(idx)] The original matrix can then be reconstructed from its skeleton matrix `B` via:: numpy.dot(B, P) See also :func:`reconstruct_matrix_from_id` and :func:`reconstruct_skel_matrix`. .. This function automatically detects the matrix data type and calls the appropriate backend. For details, see :func:`backend.idd_reconint` and :func:`backend.idz_reconint`. Parameters ---------- idx : :class:`numpy.ndarray` Column index array. proj : :class:`numpy.ndarray` Interpolation coefficients. Returns ------- :class:`numpy.ndarray` Interpolation matrix. """ if _is_real(proj): return backend.idd_reconint(idx + 1, proj) else: return backend.idz_reconint(idx + 1, proj) def reconstruct_skel_matrix(A, k, idx): """ Reconstruct skeleton matrix from ID. The skeleton matrix can be reconstructed from the original matrix `A` and its ID rank and indices `k` and `idx`, respectively, as:: B = A[:,idx[:k]] The original matrix can then be reconstructed via:: numpy.hstack([B, numpy.dot(B, proj)])[:,numpy.argsort(idx)] See also :func:`reconstruct_matrix_from_id` and :func:`reconstruct_interp_matrix`. .. This function automatically detects the matrix data type and calls the appropriate backend. For details, see :func:`backend.idd_copycols` and :func:`backend.idz_copycols`. Parameters ---------- A : :class:`numpy.ndarray` Original matrix. k : int Rank of ID. idx : :class:`numpy.ndarray` Column index array. Returns ------- :class:`numpy.ndarray` Skeleton matrix. """ if _is_real(A): return backend.idd_copycols(A, k, idx + 1) else: return backend.idz_copycols(A, k, idx + 1) def id_to_svd(B, idx, proj): """ Convert ID to SVD. 
The SVD reconstruction of a matrix with skeleton matrix `B` and ID indices and coefficients `idx` and `proj`, respectively, is:: U, S, V = id_to_svd(B, idx, proj) A = numpy.dot(U, numpy.dot(numpy.diag(S), V.conj().T)) See also :func:`svd`. .. This function automatically detects the matrix data type and calls the appropriate backend. For details, see :func:`backend.idd_id2svd` and :func:`backend.idz_id2svd`. Parameters ---------- B : :class:`numpy.ndarray` Skeleton matrix. idx : :class:`numpy.ndarray` Column index array. proj : :class:`numpy.ndarray` Interpolation coefficients. Returns ------- U : :class:`numpy.ndarray` Left singular vectors. S : :class:`numpy.ndarray` Singular values. V : :class:`numpy.ndarray` Right singular vectors. """ if _is_real(B): U, V, S = backend.idd_id2svd(B, idx + 1, proj) else: U, V, S = backend.idz_id2svd(B, idx + 1, proj) return U, S, V def estimate_spectral_norm(A, its=20): """ Estimate spectral norm of a matrix by the randomized power method. .. This function automatically detects the matrix data type and calls the appropriate backend. For details, see :func:`backend.idd_snorm` and :func:`backend.idz_snorm`. Parameters ---------- A : :class:`scipy.sparse.linalg.LinearOperator` Matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with the `matvec` and `rmatvec` methods (to apply the matrix and its adjoint). its : int, optional Number of power method iterations. Returns ------- float Spectral norm estimate. """ from scipy.sparse.linalg import aslinearoperator A = aslinearoperator(A) m, n = A.shape matvec = lambda x: A. matvec(x) matveca = lambda x: A.rmatvec(x) if _is_real(A): return backend.idd_snorm(m, n, matveca, matvec, its=its) else: return backend.idz_snorm(m, n, matveca, matvec, its=its) def estimate_spectral_norm_diff(A, B, its=20): """ Estimate spectral norm of the difference of two matrices by the randomized power method. .. This function automatically detects the matrix data type and calls the appropriate backend. 
    For details, see :func:`backend.idd_diffsnorm` and
    :func:`backend.idz_diffsnorm`.

    Parameters
    ----------
    A : :class:`scipy.sparse.linalg.LinearOperator`
        First matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with the
        `matvec` and `rmatvec` methods (to apply the matrix and its adjoint).
    B : :class:`scipy.sparse.linalg.LinearOperator`
        Second matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with
        the `matvec` and `rmatvec` methods (to apply the matrix and its adjoint).
    its : int, optional
        Number of power method iterations.

    Returns
    -------
    float
        Spectral norm estimate of matrix difference.
    """
    from scipy.sparse.linalg import aslinearoperator
    A = aslinearoperator(A)
    B = aslinearoperator(B)
    m, n = A.shape
    # Wrap the operator methods as plain callables for the Fortran backend.
    matvec1 = lambda x: A. matvec(x)
    matveca1 = lambda x: A.rmatvec(x)
    matvec2 = lambda x: B. matvec(x)
    matveca2 = lambda x: B.rmatvec(x)
    # Dispatch on dtype: real inputs go to the idd_* backend, otherwise idz_*.
    if _is_real(A):
        return backend.idd_diffsnorm(
            m, n, matveca1, matveca2, matvec1, matvec2, its=its)
    else:
        return backend.idz_diffsnorm(
            m, n, matveca1, matveca2, matvec1, matvec2, its=its)


def svd(A, eps_or_k, rand=True):
    """
    Compute SVD of a matrix via an ID.

    An SVD of a matrix `A` is a factorization::

        A = numpy.dot(U, numpy.dot(numpy.diag(S), V.conj().T))

    where `U` and `V` have orthonormal columns and `S` is nonnegative.

    The SVD can be computed to any relative precision or rank (depending on the
    value of `eps_or_k`).

    See also :func:`interp_decomp` and :func:`id_to_svd`.

    ..  This function automatically detects the form of the input parameters and
        passes them to the appropriate backend. For details, see
        :func:`backend.iddp_svd`, :func:`backend.iddp_asvd`,
        :func:`backend.iddp_rsvd`, :func:`backend.iddr_svd`,
        :func:`backend.iddr_asvd`, :func:`backend.iddr_rsvd`,
        :func:`backend.idzp_svd`, :func:`backend.idzp_asvd`,
        :func:`backend.idzp_rsvd`, :func:`backend.idzr_svd`,
        :func:`backend.idzr_asvd`, and :func:`backend.idzr_rsvd`.

    Parameters
    ----------
    A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator`
        Matrix to be factored, given as either a :class:`numpy.ndarray` or a
        :class:`scipy.sparse.linalg.LinearOperator` with the `matvec` and
        `rmatvec` methods (to apply the matrix and its adjoint).
    eps_or_k : float or int
        Relative error (if `eps_or_k < 1`) or rank (if `eps_or_k >= 1`) of
        approximation.
    rand : bool, optional
        Whether to use random sampling if `A` is of type :class:`numpy.ndarray`
        (randomized algorithms are always used if `A` is of type
        :class:`scipy.sparse.linalg.LinearOperator`).

    Returns
    -------
    U : :class:`numpy.ndarray`
        Left singular vectors.
    S : :class:`numpy.ndarray`
        Singular values.
    V : :class:`numpy.ndarray`
        Right singular vectors.
    """
    from scipy.sparse.linalg import LinearOperator

    real = _is_real(A)

    if isinstance(A, np.ndarray):
        # Dense input: pick precision-based (eps) vs rank-based (k) backend,
        # then randomized (*_asvd) vs deterministic (*_svd) per `rand`.
        if eps_or_k < 1:
            eps = eps_or_k
            if rand:
                if real:
                    U, V, S = backend.iddp_asvd(eps, A)
                else:
                    U, V, S = backend.idzp_asvd(eps, A)
            else:
                if real:
                    U, V, S = backend.iddp_svd(eps, A)
                else:
                    U, V, S = backend.idzp_svd(eps, A)
        else:
            k = int(eps_or_k)
            if k > min(A.shape):
                raise ValueError("Approximation rank %s exceeds min(A.shape) = "
                                 " %s " % (k, min(A.shape)))
            if rand:
                if real:
                    U, V, S = backend.iddr_asvd(A, k)
                else:
                    U, V, S = backend.idzr_asvd(A, k)
            else:
                if real:
                    U, V, S = backend.iddr_svd(A, k)
                else:
                    U, V, S = backend.idzr_svd(A, k)
    elif isinstance(A, LinearOperator):
        # Matrix-free input: only randomized (*_rsvd) algorithms are available.
        m, n = A.shape
        matvec = lambda x: A.matvec(x)
        matveca = lambda x: A.rmatvec(x)
        if eps_or_k < 1:
            eps = eps_or_k
            if real:
                U, V, S = backend.iddp_rsvd(eps, m, n, matveca, matvec)
            else:
                U, V, S = backend.idzp_rsvd(eps, m, n, matveca, matvec)
        else:
            k = int(eps_or_k)
            if real:
                U, V, S = backend.iddr_rsvd(m, n, matveca, matvec, k)
            else:
                U, V, S = backend.idzr_rsvd(m, n, matveca, matvec, k)
    else:
        raise _TYPE_ERROR
    # Note the backend returns (U, V, S); callers receive (U, S, V).
    return U, S, V


def estimate_rank(A, eps):
    """
    Estimate matrix rank to a specified relative precision using randomized
    methods.

    The matrix `A` can be given as either a :class:`numpy.ndarray` or a
    :class:`scipy.sparse.linalg.LinearOperator`, with different algorithms used
    for each case. If `A` is of type :class:`numpy.ndarray`, then the output
    rank is typically about 8 higher than the actual numerical rank.

    ..  This function automatically detects the form of the input parameters and
        passes them to the appropriate backend. For details,
        see :func:`backend.idd_estrank`, :func:`backend.idd_findrank`,
        :func:`backend.idz_estrank`, and :func:`backend.idz_findrank`.

    Parameters
    ----------
    A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator`
        Matrix whose rank is to be estimated, given as either a
        :class:`numpy.ndarray` or a :class:`scipy.sparse.linalg.LinearOperator`
        with the `rmatvec` method (to apply the matrix adjoint).
    eps : float
        Relative error for numerical rank definition.

    Returns
    -------
    int
        Estimated matrix rank.
    """
    from scipy.sparse.linalg import LinearOperator

    real = _is_real(A)

    if isinstance(A, np.ndarray):
        if real:
            rank = backend.idd_estrank(eps, A)
        else:
            rank = backend.idz_estrank(eps, A)
        if rank == 0:
            # special return value for nearly full rank
            rank = min(A.shape)
        return rank
    elif isinstance(A, LinearOperator):
        m, n = A.shape
        matveca = A.rmatvec
        if real:
            return backend.idd_findrank(eps, m, n, matveca)
        else:
            return backend.idz_findrank(eps, m, n, matveca)
    else:
        raise _TYPE_ERROR
31,129
31.026749
92
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/_decomp_qz.py
from __future__ import division, print_function, absolute_import

import warnings

import numpy as np
from numpy import asarray_chkfinite

from .misc import LinAlgError, _datacopied, LinAlgWarning
from .lapack import get_lapack_funcs

from scipy._lib.six import callable

__all__ = ['qz', 'ordqz']

# dtype chars that promote to double-precision complex ('D') rather than 'F'
_double_precision = ['i', 'l', 'd']


def _select_function(sort):
    """Map a `sort` specification to the eigenvalue-selection predicate."""
    if callable(sort):
        # assume the user knows what they're doing
        sfunction = sort
    elif sort == 'lhp':
        sfunction = _lhp
    elif sort == 'rhp':
        sfunction = _rhp
    elif sort == 'iuc':
        sfunction = _iuc
    elif sort == 'ouc':
        sfunction = _ouc
    else:
        raise ValueError("sort parameter must be None, a callable, or "
                         "one of ('lhp','rhp','iuc','ouc')")

    return sfunction


def _lhp(x, y):
    # True where the generalized eigenvalue x/y lies in the left half-plane.
    out = np.empty_like(x, dtype=bool)
    nonzero = (y != 0)
    # handles (x, y) = (0, 0) too
    out[~nonzero] = False
    out[nonzero] = (np.real(x[nonzero]/y[nonzero]) < 0.0)
    return out


def _rhp(x, y):
    # True where the generalized eigenvalue x/y lies in the right half-plane.
    out = np.empty_like(x, dtype=bool)
    nonzero = (y != 0)
    # handles (x, y) = (0, 0) too
    out[~nonzero] = False
    out[nonzero] = (np.real(x[nonzero]/y[nonzero]) > 0.0)
    return out


def _iuc(x, y):
    # True where the generalized eigenvalue x/y lies inside the unit circle.
    out = np.empty_like(x, dtype=bool)
    nonzero = (y != 0)
    # handles (x, y) = (0, 0) too
    out[~nonzero] = False
    out[nonzero] = (abs(x[nonzero]/y[nonzero]) < 1.0)
    return out


def _ouc(x, y):
    # True where the generalized eigenvalue x/y lies outside the unit circle;
    # an infinite eigenvalue (x != 0, y == 0) counts as outside.
    out = np.empty_like(x, dtype=bool)
    xzero = (x == 0)
    yzero = (y == 0)
    out[xzero & yzero] = False
    out[~xzero & yzero] = True
    out[~yzero] = (abs(x[~yzero]/y[~yzero]) > 1.0)
    return out


def _qz(A, B, output='real', lwork=None, sort=None, overwrite_a=False,
        overwrite_b=False, check_finite=True):
    """Shared driver for qz/ordqz: validate inputs and call LAPACK ?gges."""
    if sort is not None:
        # Disabled due to segfaults on win32, see ticket 1717.
        raise ValueError("The 'sort' input of qz() has to be None and will be "
                         "removed in a future release. Use ordqz instead.")

    if output not in ['real', 'complex', 'r', 'c']:
        raise ValueError("argument must be 'real', or 'complex'")

    if check_finite:
        a1 = asarray_chkfinite(A)
        b1 = asarray_chkfinite(B)
    else:
        a1 = np.asarray(A)
        b1 = np.asarray(B)

    a_m, a_n = a1.shape
    b_m, b_n = b1.shape
    if not (a_m == a_n == b_m == b_n):
        raise ValueError("Array dimensions must be square and agree")

    # Upcast real inputs to complex when a complex decomposition is requested,
    # preserving precision ('d'/'l'/'i' -> 'D', otherwise -> 'F').
    typa = a1.dtype.char
    if output in ['complex', 'c'] and typa not in ['F', 'D']:
        if typa in _double_precision:
            a1 = a1.astype('D')
            typa = 'D'
        else:
            a1 = a1.astype('F')
            typa = 'F'
    typb = b1.dtype.char
    if output in ['complex', 'c'] and typb not in ['F', 'D']:
        if typb in _double_precision:
            b1 = b1.astype('D')
            typb = 'D'
        else:
            b1 = b1.astype('F')
            typb = 'F'

    overwrite_a = overwrite_a or (_datacopied(a1, A))
    overwrite_b = overwrite_b or (_datacopied(b1, B))

    gges, = get_lapack_funcs(('gges',), (a1, b1))

    if lwork is None or lwork == -1:
        # get optimal work array size
        result = gges(lambda x: None, a1, b1, lwork=-1)
        # BUGFIX: np.int (builtin-int alias) was deprecated in NumPy 1.20 and
        # removed in 1.24; use the np.int_ scalar type instead.
        lwork = result[-2][0].real.astype(np.int_)

    sfunction = lambda x: None
    result = gges(sfunction, a1, b1, lwork=lwork, overwrite_a=overwrite_a,
                  overwrite_b=overwrite_b, sort_t=0)

    info = result[-1]
    if info < 0:
        raise ValueError("Illegal value in argument {} of gges".format(-info))
    elif info > 0 and info <= a_n:
        warnings.warn("The QZ iteration failed. (a,b) are not in Schur "
                      "form, but ALPHAR(j), ALPHAI(j), and BETA(j) should be "
                      "correct for J={},...,N".format(info-1), LinAlgWarning,
                      stacklevel=3)
    elif info == a_n+1:
        raise LinAlgError("Something other than QZ iteration failed")
    elif info == a_n+2:
        raise LinAlgError("After reordering, roundoff changed values of some "
                          "complex eigenvalues so that leading eigenvalues "
                          "in the Generalized Schur form no longer satisfy "
                          "sort=True. This could also be due to scaling.")
    elif info == a_n+3:
        raise LinAlgError("Reordering failed in <s,d,c,z>tgsen")

    return result, gges.typecode


def qz(A, B, output='real', lwork=None, sort=None, overwrite_a=False,
       overwrite_b=False, check_finite=True):
    """
    QZ decomposition for generalized eigenvalues of a pair of matrices.

    The QZ, or generalized Schur, decomposition for a pair of N x N
    nonsymmetric matrices (A,B) is::

        (A,B) = (Q*AA*Z', Q*BB*Z')

    where AA, BB is in generalized Schur form if BB is upper-triangular
    with non-negative diagonal and AA is upper-triangular, or for real QZ
    decomposition (``output='real'``) block upper triangular with 1x1 and 2x2
    blocks. In this case, the 1x1 blocks correspond to real generalized
    eigenvalues and 2x2 blocks are 'standardized' by making the corresponding
    elements of BB have the form::

        [ a 0 ]
        [ 0 b ]

    and the pair of corresponding 2x2 blocks in AA and BB will have a complex
    conjugate pair of generalized eigenvalues. If (``output='complex'``) or
    A and B are complex matrices, Z' denotes the conjugate-transpose of Z.
    Q and Z are unitary matrices.

    Parameters
    ----------
    A : (N, N) array_like
        2d array to decompose
    B : (N, N) array_like
        2d array to decompose
    output : {'real', 'complex'}, optional
        Construct the real or complex QZ decomposition for real matrices.
        Default is 'real'.
    lwork : int, optional
        Work array size. If None or -1, it is automatically computed.
    sort : {None, callable, 'lhp', 'rhp', 'iuc', 'ouc'}, optional
        NOTE: THIS INPUT IS DISABLED FOR NOW. Use ordqz instead.

        Specifies whether the upper eigenvalues should be sorted. A callable
        may be passed that, given a eigenvalue, returns a boolean denoting
        whether the eigenvalue should be sorted to the top-left (True). For
        real matrix pairs, the sort function takes three real arguments
        (alphar, alphai, beta). The eigenvalue
        ``x = (alphar + alphai*1j)/beta``. For complex matrix pairs or
        output='complex', the sort function takes two complex arguments
        (alpha, beta). The eigenvalue ``x = (alpha/beta)``. Alternatively,
        string parameters may be used:

            - 'lhp'   Left-hand plane (x.real < 0.0)
            - 'rhp'   Right-hand plane (x.real > 0.0)
            - 'iuc'   Inside the unit circle (x*x.conjugate() < 1.0)
            - 'ouc'   Outside the unit circle (x*x.conjugate() > 1.0)

        Defaults to None (no sorting).
    overwrite_a : bool, optional
        Whether to overwrite data in a (may improve performance)
    overwrite_b : bool, optional
        Whether to overwrite data in b (may improve performance)
    check_finite : bool, optional
        If true checks the elements of `A` and `B` are finite numbers. If
        false does no checking and passes matrix through to
        underlying algorithm.

    Returns
    -------
    AA : (N, N) ndarray
        Generalized Schur form of A.
    BB : (N, N) ndarray
        Generalized Schur form of B.
    Q : (N, N) ndarray
        The left Schur vectors.
    Z : (N, N) ndarray
        The right Schur vectors.

    Notes
    -----
    Q is transposed versus the equivalent function in Matlab.

    .. versionadded:: 0.11.0

    Examples
    --------
    >>> from scipy import linalg
    >>> np.random.seed(1234)
    >>> A = np.arange(9).reshape((3, 3))
    >>> B = np.random.randn(3, 3)

    >>> AA, BB, Q, Z = linalg.qz(A, B)
    >>> AA
    array([[-13.40928183,  -4.62471562,   1.09215523],
           [  0.        ,   0.        ,   1.22805978],
           [  0.        ,   0.        ,   0.31973817]])
    >>> BB
    array([[ 0.33362547, -1.37393632,  0.02179805],
           [ 0.        ,  1.68144922,  0.74683866],
           [ 0.        ,  0.        ,  0.9258294 ]])
    >>> Q
    array([[ 0.14134727, -0.97562773,  0.16784365],
           [ 0.49835904, -0.07636948, -0.86360059],
           [ 0.85537081,  0.20571399,  0.47541828]])
    >>> Z
    array([[-0.24900855, -0.51772687,  0.81850696],
           [-0.79813178,  0.58842606,  0.12938478],
           [-0.54861681, -0.6210585 , -0.55973739]])

    See also
    --------
    ordqz
    """
    # output for real
    # AA, BB, sdim, alphar, alphai, beta, vsl, vsr, work, info
    # output for complex
    # AA, BB, sdim, alpha, beta, vsl, vsr, work, info
    result, _ = _qz(A, B, output=output, lwork=lwork, sort=sort,
                    overwrite_a=overwrite_a, overwrite_b=overwrite_b,
                    check_finite=check_finite)
    # AA, BB and the trailing vsl (Q), vsr (Z) slots of the gges result.
    return result[0], result[1], result[-4], result[-3]


def ordqz(A, B, sort='lhp', output='real', overwrite_a=False,
          overwrite_b=False, check_finite=True):
    """QZ decomposition for a pair of matrices with reordering.

    .. versionadded:: 0.17.0

    Parameters
    ----------
    A : (N, N) array_like
        2d array to decompose
    B : (N, N) array_like
        2d array to decompose
    sort : {callable, 'lhp', 'rhp', 'iuc', 'ouc'}, optional
        Specifies whether the upper eigenvalues should be sorted. A
        callable may be passed that, given an ordered pair ``(alpha,
        beta)`` representing the eigenvalue ``x = (alpha/beta)``,
        returns a boolean denoting whether the eigenvalue should be
        sorted to the top-left (True). For the real matrix pairs
        ``beta`` is real while ``alpha`` can be complex, and for
        complex matrix pairs both ``alpha`` and ``beta`` can be
        complex. The callable must be able to accept a numpy array.
        Alternatively, string parameters may be used:

            - 'lhp'   Left-hand plane (x.real < 0.0)
            - 'rhp'   Right-hand plane (x.real > 0.0)
            - 'iuc'   Inside the unit circle (x*x.conjugate() < 1.0)
            - 'ouc'   Outside the unit circle (x*x.conjugate() > 1.0)

        With the predefined sorting functions, an infinite eigenvalue
        (i.e. ``alpha != 0`` and ``beta = 0``) is considered to lie in
        neither the left-hand nor the right-hand plane, but it is
        considered to lie outside the unit circle. For the eigenvalue
        ``(alpha, beta) = (0, 0)`` the predefined sorting functions
        all return `False`.
    output : str {'real','complex'}, optional
        Construct the real or complex QZ decomposition for real matrices.
        Default is 'real'.
    overwrite_a : bool, optional
        If True, the contents of A are overwritten.
    overwrite_b : bool, optional
        If True, the contents of B are overwritten.
    check_finite : bool, optional
        If true checks the elements of `A` and `B` are finite numbers. If
        false does no checking and passes matrix through to
        underlying algorithm.

    Returns
    -------
    AA : (N, N) ndarray
        Generalized Schur form of A.
    BB : (N, N) ndarray
        Generalized Schur form of B.
    alpha : (N,) ndarray
        alpha = alphar + alphai * 1j. See notes.
    beta : (N,) ndarray
        See notes.
    Q : (N, N) ndarray
        The left Schur vectors.
    Z : (N, N) ndarray
        The right Schur vectors.

    Notes
    -----
    On exit, ``(ALPHAR(j) + ALPHAI(j)*i)/BETA(j), j=1,...,N``, will be the
    generalized eigenvalues. ``ALPHAR(j) + ALPHAI(j)*i`` and
    ``BETA(j),j=1,...,N`` are the diagonals of the complex Schur form (S,T)
    that would result if the 2-by-2 diagonal blocks of the real generalized
    Schur form of (A,B) were further reduced to triangular form using complex
    unitary transformations. If ALPHAI(j) is zero, then the j-th eigenvalue is
    real; if positive, then the ``j``-th and ``(j+1)``-st eigenvalues are a
    complex conjugate pair, with ``ALPHAI(j+1)`` negative.

    See also
    --------
    qz

    Examples
    --------
    >>> from scipy.linalg import ordqz
    >>> A = np.array([[2, 5, 8, 7], [5, 2, 2, 8], [7, 5, 6, 6], [5, 4, 4, 8]])
    >>> B = np.array([[0, 6, 0, 0], [5, 0, 2, 1], [5, 2, 6, 6], [4, 7, 7, 7]])
    >>> AA, BB, alpha, beta, Q, Z = ordqz(A, B, sort='lhp')

    Since we have sorted for left half plane eigenvalues, negatives come first

    >>> (alpha/beta).real < 0
    array([ True,  True, False, False], dtype=bool)

    """
    # NOTE: should users be able to set these?
    lwork = None
    result, typ = _qz(A, B, output=output, lwork=lwork, sort=None,
                      overwrite_a=overwrite_a, overwrite_b=overwrite_b,
                      check_finite=check_finite)
    AA, BB, Q, Z = result[0], result[1], result[-4], result[-3]
    # For real typecodes gges returns alphar/alphai/beta separately; combine
    # them into a single complex alpha so the sort predicates see one form.
    if typ not in 'cz':
        alpha, beta = result[3] + result[4]*1.j, result[5]
    else:
        alpha, beta = result[3], result[4]

    sfunction = _select_function(sort)
    select = sfunction(alpha, beta)

    tgsen, = get_lapack_funcs(('tgsen',), (AA, BB))

    if lwork is None or lwork == -1:
        result = tgsen(select, AA, BB, Q, Z, lwork=-1)
        # BUGFIX: np.int (builtin-int alias) was deprecated in NumPy 1.20 and
        # removed in 1.24; use the np.int_ scalar type instead.
        lwork = result[-3][0].real.astype(np.int_)
        # looks like wrong value passed to ZTGSYL if not
        lwork += 1

    liwork = None
    if liwork is None or liwork == -1:
        result = tgsen(select, AA, BB, Q, Z, liwork=-1)
        liwork = result[-2][0]

    result = tgsen(select, AA, BB, Q, Z, lwork=lwork, liwork=liwork)

    info = result[-1]
    if info < 0:
        raise ValueError("Illegal value in argument %d of tgsen" % -info)
    elif info == 1:
        raise ValueError("Reordering of (A, B) failed because the transformed"
                         " matrix pair (A, B) would be too far from "
                         "generalized Schur form; the problem is very "
                         "ill-conditioned. (A, B) may have been partially "
                         "reorded. If requested, 0 is returned in DIF(*), "
                         "PL, and PR.")

    # for real results has a, b, alphar, alphai, beta, q, z, m, pl, pr, dif,
    # work, iwork, info
    if typ in ['f', 'd']:
        alpha = result[2] + result[3] * 1.j
        return (result[0], result[1], alpha, result[4], result[5], result[6])
    # for complex results has a, b, alpha, beta, q, z, m, pl, pr, dif, work,
    # iwork, info
    else:
        return result[0], result[1], result[2], result[3], result[4], result[5]
14,611
34.990148
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/decomp_lu.py
"""LU decomposition functions.""" from __future__ import division, print_function, absolute_import from warnings import warn from numpy import asarray, asarray_chkfinite # Local imports from .misc import _datacopied, LinAlgWarning from .lapack import get_lapack_funcs from .flinalg import get_flinalg_funcs __all__ = ['lu', 'lu_solve', 'lu_factor'] def lu_factor(a, overwrite_a=False, check_finite=True): """ Compute pivoted LU decomposition of a matrix. The decomposition is:: A = P L U where P is a permutation matrix, L lower triangular with unit diagonal elements, and U upper triangular. Parameters ---------- a : (M, M) array_like Matrix to decompose overwrite_a : bool, optional Whether to overwrite data in A (may increase performance) check_finite : bool, optional Whether to check that the input matrix contains only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- lu : (N, N) ndarray Matrix containing U in its upper triangle, and L in its lower triangle. The unit diagonal elements of L are not stored. piv : (N,) ndarray Pivot indices representing the permutation matrix P: row i of matrix was interchanged with row piv[i]. See also -------- lu_solve : solve an equation system using the LU factorization of a matrix Notes ----- This is a wrapper to the ``*GETRF`` routines from LAPACK. 
Examples -------- >>> from scipy.linalg import lu_factor >>> from numpy import tril, triu, allclose, zeros, eye >>> A = np.array([[2, 5, 8, 7], [5, 2, 2, 8], [7, 5, 6, 6], [5, 4, 4, 8]]) >>> lu, piv = lu_factor(A) >>> piv array([2, 2, 3, 3], dtype=int32) Convert LAPACK's ``piv`` array to NumPy index and test the permutation >>> piv_py = [2, 0, 3, 1] >>> L, U = np.tril(lu, k=-1) + np.eye(4), np.triu(lu) >>> np.allclose(A[piv_py] - L @ U, np.zeros((4, 4))) True """ if check_finite: a1 = asarray_chkfinite(a) else: a1 = asarray(a) if len(a1.shape) != 2 or (a1.shape[0] != a1.shape[1]): raise ValueError('expected square matrix') overwrite_a = overwrite_a or (_datacopied(a1, a)) getrf, = get_lapack_funcs(('getrf',), (a1,)) lu, piv, info = getrf(a1, overwrite_a=overwrite_a) if info < 0: raise ValueError('illegal value in %d-th argument of ' 'internal getrf (lu_factor)' % -info) if info > 0: warn("Diagonal number %d is exactly zero. Singular matrix." % info, LinAlgWarning, stacklevel=2) return lu, piv def lu_solve(lu_and_piv, b, trans=0, overwrite_b=False, check_finite=True): """Solve an equation system, a x = b, given the LU factorization of a Parameters ---------- (lu, piv) Factorization of the coefficient matrix a, as given by lu_factor b : array Right-hand side trans : {0, 1, 2}, optional Type of system to solve: ===== ========= trans system ===== ========= 0 a x = b 1 a^T x = b 2 a^H x = b ===== ========= overwrite_b : bool, optional Whether to overwrite data in b (may increase performance) check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. 
Returns ------- x : array Solution to the system See also -------- lu_factor : LU factorize a matrix Examples -------- >>> from scipy.linalg import lu_factor, lu_solve >>> A = np.array([[2, 5, 8, 7], [5, 2, 2, 8], [7, 5, 6, 6], [5, 4, 4, 8]]) >>> b = np.array([1, 1, 1, 1]) >>> lu, piv = lu_factor(A) >>> x = lu_solve((lu, piv), b) >>> np.allclose(A @ x - b, np.zeros((4,))) True """ (lu, piv) = lu_and_piv if check_finite: b1 = asarray_chkfinite(b) else: b1 = asarray(b) overwrite_b = overwrite_b or _datacopied(b1, b) if lu.shape[0] != b1.shape[0]: raise ValueError("incompatible dimensions.") getrs, = get_lapack_funcs(('getrs',), (lu, b1)) x, info = getrs(lu, piv, b1, trans=trans, overwrite_b=overwrite_b) if info == 0: return x raise ValueError('illegal value in %d-th argument of internal gesv|posv' % -info) def lu(a, permute_l=False, overwrite_a=False, check_finite=True): """ Compute pivoted LU decomposition of a matrix. The decomposition is:: A = P L U where P is a permutation matrix, L lower triangular with unit diagonal elements, and U upper triangular. Parameters ---------- a : (M, N) array_like Array to decompose permute_l : bool, optional Perform the multiplication P*L (Default: do not permute) overwrite_a : bool, optional Whether to overwrite data in a (may improve performance) check_finite : bool, optional Whether to check that the input matrix contains only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- **(If permute_l == False)** p : (M, M) ndarray Permutation matrix l : (M, K) ndarray Lower triangular or trapezoidal matrix with unit diagonal. K = min(M, N) u : (K, N) ndarray Upper triangular or trapezoidal matrix **(If permute_l == True)** pl : (M, K) ndarray Permuted L matrix. K = min(M, N) u : (K, N) ndarray Upper triangular or trapezoidal matrix Notes ----- This is a LU factorization routine written for Scipy. 
Examples -------- >>> from scipy.linalg import lu >>> A = np.array([[2, 5, 8, 7], [5, 2, 2, 8], [7, 5, 6, 6], [5, 4, 4, 8]]) >>> p, l, u = lu(A) >>> np.allclose(A - p @ l @ u, np.zeros((4, 4))) True """ if check_finite: a1 = asarray_chkfinite(a) else: a1 = asarray(a) if len(a1.shape) != 2: raise ValueError('expected matrix') overwrite_a = overwrite_a or (_datacopied(a1, a)) flu, = get_flinalg_funcs(('lu',), (a1,)) p, l, u, info = flu(a1, permute_l=permute_l, overwrite_a=overwrite_a) if info < 0: raise ValueError('illegal value in %d-th argument of ' 'internal lu.getrf' % -info) if permute_l: return l, u return p, l, u
6,816
29.297778
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/_interpolative_backend.py
#****************************************************************************** # Copyright (C) 2013 Kenneth L. Ho # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. Redistributions in binary # form must reproduce the above copyright notice, this list of conditions and # the following disclaimer in the documentation and/or other materials # provided with the distribution. # # None of the names of the copyright holders may be used to endorse or # promote products derived from this software without specific prior written # permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. #****************************************************************************** """ Direct wrappers for Fortran `id_dist` backend. 
""" import scipy.linalg._interpolative as _id import numpy as np _RETCODE_ERROR = RuntimeError("nonzero return code") #------------------------------------------------------------------------------ # id_rand.f #------------------------------------------------------------------------------ def id_srand(n): """ Generate standard uniform pseudorandom numbers via a very efficient lagged Fibonacci method. :param n: Number of pseudorandom numbers to generate. :type n: int :return: Pseudorandom numbers. :rtype: :class:`numpy.ndarray` """ return _id.id_srand(n) def id_srandi(t): """ Initialize seed values for :func:`id_srand` (any appropriately random numbers will do). :param t: Array of 55 seed values. :type t: :class:`numpy.ndarray` """ t = np.asfortranarray(t) _id.id_srandi(t) def id_srando(): """ Reset seed values to their original values. """ _id.id_srando() #------------------------------------------------------------------------------ # idd_frm.f #------------------------------------------------------------------------------ def idd_frm(n, w, x): """ Transform real vector via a composition of Rokhlin's random transform, random subselection, and an FFT. In contrast to :func:`idd_sfrm`, this routine works best when the length of the transformed vector is the power-of-two integer output by :func:`idd_frmi`, or when the length is not specified but instead determined a posteriori from the output. The returned transformed vector is randomly permuted. :param n: Greatest power-of-two integer satisfying `n <= x.size` as obtained from :func:`idd_frmi`; `n` is also the length of the output vector. :type n: int :param w: Initialization array constructed by :func:`idd_frmi`. :type w: :class:`numpy.ndarray` :param x: Vector to be transformed. :type x: :class:`numpy.ndarray` :return: Transformed vector. 
:rtype: :class:`numpy.ndarray` """ return _id.idd_frm(n, w, x) def idd_sfrm(l, n, w, x): """ Transform real vector via a composition of Rokhlin's random transform, random subselection, and an FFT. In contrast to :func:`idd_frm`, this routine works best when the length of the transformed vector is known a priori. :param l: Length of transformed vector, satisfying `l <= n`. :type l: int :param n: Greatest power-of-two integer satisfying `n <= x.size` as obtained from :func:`idd_sfrmi`. :type n: int :param w: Initialization array constructed by :func:`idd_sfrmi`. :type w: :class:`numpy.ndarray` :param x: Vector to be transformed. :type x: :class:`numpy.ndarray` :return: Transformed vector. :rtype: :class:`numpy.ndarray` """ return _id.idd_sfrm(l, n, w, x) def idd_frmi(m): """ Initialize data for :func:`idd_frm`. :param m: Length of vector to be transformed. :type m: int :return: Greatest power-of-two integer `n` satisfying `n <= m`. :rtype: int :return: Initialization array to be used by :func:`idd_frm`. :rtype: :class:`numpy.ndarray` """ return _id.idd_frmi(m) def idd_sfrmi(l, m): """ Initialize data for :func:`idd_sfrm`. :param l: Length of output transformed vector. :type l: int :param m: Length of the vector to be transformed. :type m: int :return: Greatest power-of-two integer `n` satisfying `n <= m`. :rtype: int :return: Initialization array to be used by :func:`idd_sfrm`. :rtype: :class:`numpy.ndarray` """ return _id.idd_sfrmi(l, m) #------------------------------------------------------------------------------ # idd_id.f #------------------------------------------------------------------------------ def iddp_id(eps, A): """ Compute ID of a real matrix to a specified relative precision. :param eps: Relative precision. :type eps: float :param A: Matrix. :type A: :class:`numpy.ndarray` :return: Rank of ID. :rtype: int :return: Column index array. :rtype: :class:`numpy.ndarray` :return: Interpolation coefficients. 
:rtype: :class:`numpy.ndarray` """ A = np.asfortranarray(A) k, idx, rnorms = _id.iddp_id(eps, A) n = A.shape[1] proj = A.T.ravel()[:k*(n-k)].reshape((k, n-k), order='F') return k, idx, proj def iddr_id(A, k): """ Compute ID of a real matrix to a specified rank. :param A: Matrix. :type A: :class:`numpy.ndarray` :param k: Rank of ID. :type k: int :return: Column index array. :rtype: :class:`numpy.ndarray` :return: Interpolation coefficients. :rtype: :class:`numpy.ndarray` """ A = np.asfortranarray(A) idx, rnorms = _id.iddr_id(A, k) n = A.shape[1] proj = A.T.ravel()[:k*(n-k)].reshape((k, n-k), order='F') return idx, proj def idd_reconid(B, idx, proj): """ Reconstruct matrix from real ID. :param B: Skeleton matrix. :type B: :class:`numpy.ndarray` :param idx: Column index array. :type idx: :class:`numpy.ndarray` :param proj: Interpolation coefficients. :type proj: :class:`numpy.ndarray` :return: Reconstructed matrix. :rtype: :class:`numpy.ndarray` """ B = np.asfortranarray(B) if proj.size > 0: return _id.idd_reconid(B, idx, proj) else: return B[:, np.argsort(idx)] def idd_reconint(idx, proj): """ Reconstruct interpolation matrix from real ID. :param idx: Column index array. :type idx: :class:`numpy.ndarray` :param proj: Interpolation coefficients. :type proj: :class:`numpy.ndarray` :return: Interpolation matrix. :rtype: :class:`numpy.ndarray` """ return _id.idd_reconint(idx, proj) def idd_copycols(A, k, idx): """ Reconstruct skeleton matrix from real ID. :param A: Original matrix. :type A: :class:`numpy.ndarray` :param k: Rank of ID. :type k: int :param idx: Column index array. :type idx: :class:`numpy.ndarray` :return: Skeleton matrix. :rtype: :class:`numpy.ndarray` """ A = np.asfortranarray(A) return _id.idd_copycols(A, k, idx) #------------------------------------------------------------------------------ # idd_id2svd.f #------------------------------------------------------------------------------ def idd_id2svd(B, idx, proj): """ Convert real ID to SVD. 
:param B: Skeleton matrix. :type B: :class:`numpy.ndarray` :param idx: Column index array. :type idx: :class:`numpy.ndarray` :param proj: Interpolation coefficients. :type proj: :class:`numpy.ndarray` :return: Left singular vectors. :rtype: :class:`numpy.ndarray` :return: Right singular vectors. :rtype: :class:`numpy.ndarray` :return: Singular values. :rtype: :class:`numpy.ndarray` """ B = np.asfortranarray(B) U, V, S, ier = _id.idd_id2svd(B, idx, proj) if ier: raise _RETCODE_ERROR return U, V, S #------------------------------------------------------------------------------ # idd_snorm.f #------------------------------------------------------------------------------ def idd_snorm(m, n, matvect, matvec, its=20): """ Estimate spectral norm of a real matrix by the randomized power method. :param m: Matrix row dimension. :type m: int :param n: Matrix column dimension. :type n: int :param matvect: Function to apply the matrix transpose to a vector, with call signature `y = matvect(x)`, where `x` and `y` are the input and output vectors, respectively. :type matvect: function :param matvec: Function to apply the matrix to a vector, with call signature `y = matvec(x)`, where `x` and `y` are the input and output vectors, respectively. :type matvec: function :param its: Number of power method iterations. :type its: int :return: Spectral norm estimate. :rtype: float """ snorm, v = _id.idd_snorm(m, n, matvect, matvec, its) return snorm def idd_diffsnorm(m, n, matvect, matvect2, matvec, matvec2, its=20): """ Estimate spectral norm of the difference of two real matrices by the randomized power method. :param m: Matrix row dimension. :type m: int :param n: Matrix column dimension. :type n: int :param matvect: Function to apply the transpose of the first matrix to a vector, with call signature `y = matvect(x)`, where `x` and `y` are the input and output vectors, respectively. 
:type matvect: function :param matvect2: Function to apply the transpose of the second matrix to a vector, with call signature `y = matvect2(x)`, where `x` and `y` are the input and output vectors, respectively. :type matvect2: function :param matvec: Function to apply the first matrix to a vector, with call signature `y = matvec(x)`, where `x` and `y` are the input and output vectors, respectively. :type matvec: function :param matvec2: Function to apply the second matrix to a vector, with call signature `y = matvec2(x)`, where `x` and `y` are the input and output vectors, respectively. :type matvec2: function :param its: Number of power method iterations. :type its: int :return: Spectral norm estimate of matrix difference. :rtype: float """ return _id.idd_diffsnorm(m, n, matvect, matvect2, matvec, matvec2, its) #------------------------------------------------------------------------------ # idd_svd.f #------------------------------------------------------------------------------ def iddr_svd(A, k): """ Compute SVD of a real matrix to a specified rank. :param A: Matrix. :type A: :class:`numpy.ndarray` :param k: Rank of SVD. :type k: int :return: Left singular vectors. :rtype: :class:`numpy.ndarray` :return: Right singular vectors. :rtype: :class:`numpy.ndarray` :return: Singular values. :rtype: :class:`numpy.ndarray` """ A = np.asfortranarray(A) U, V, S, ier = _id.iddr_svd(A, k) if ier: raise _RETCODE_ERROR return U, V, S def iddp_svd(eps, A): """ Compute SVD of a real matrix to a specified relative precision. :param eps: Relative precision. :type eps: float :param A: Matrix. :type A: :class:`numpy.ndarray` :return: Left singular vectors. :rtype: :class:`numpy.ndarray` :return: Right singular vectors. :rtype: :class:`numpy.ndarray` :return: Singular values. 
:rtype: :class:`numpy.ndarray` """ A = np.asfortranarray(A) m, n = A.shape k, iU, iV, iS, w, ier = _id.iddp_svd(eps, A) if ier: raise _RETCODE_ERROR U = w[iU-1:iU+m*k-1].reshape((m, k), order='F') V = w[iV-1:iV+n*k-1].reshape((n, k), order='F') S = w[iS-1:iS+k-1] return U, V, S #------------------------------------------------------------------------------ # iddp_aid.f #------------------------------------------------------------------------------ def iddp_aid(eps, A): """ Compute ID of a real matrix to a specified relative precision using random sampling. :param eps: Relative precision. :type eps: float :param A: Matrix. :type A: :class:`numpy.ndarray` :return: Rank of ID. :rtype: int :return: Column index array. :rtype: :class:`numpy.ndarray` :return: Interpolation coefficients. :rtype: :class:`numpy.ndarray` """ A = np.asfortranarray(A) m, n = A.shape n2, w = idd_frmi(m) proj = np.empty(n*(2*n2 + 1) + n2 + 1, order='F') k, idx, proj = _id.iddp_aid(eps, A, w, proj) proj = proj[:k*(n-k)].reshape((k, n-k), order='F') return k, idx, proj def idd_estrank(eps, A): """ Estimate rank of a real matrix to a specified relative precision using random sampling. The output rank is typically about 8 higher than the actual rank. :param eps: Relative precision. :type eps: float :param A: Matrix. :type A: :class:`numpy.ndarray` :return: Rank estimate. :rtype: int """ A = np.asfortranarray(A) m, n = A.shape n2, w = idd_frmi(m) ra = np.empty(n*n2 + (n + 1)*(n2 + 1), order='F') k, ra = _id.idd_estrank(eps, A, w, ra) return k #------------------------------------------------------------------------------ # iddp_asvd.f #------------------------------------------------------------------------------ def iddp_asvd(eps, A): """ Compute SVD of a real matrix to a specified relative precision using random sampling. :param eps: Relative precision. :type eps: float :param A: Matrix. :type A: :class:`numpy.ndarray` :return: Left singular vectors. 
:rtype: :class:`numpy.ndarray` :return: Right singular vectors. :rtype: :class:`numpy.ndarray` :return: Singular values. :rtype: :class:`numpy.ndarray` """ A = np.asfortranarray(A) m, n = A.shape n2, winit = _id.idd_frmi(m) w = np.empty( max((min(m, n) + 1)*(3*m + 5*n + 1) + 25*min(m, n)**2, (2*n + 1)*(n2 + 1)), order='F') k, iU, iV, iS, w, ier = _id.iddp_asvd(eps, A, winit, w) if ier: raise _RETCODE_ERROR U = w[iU-1:iU+m*k-1].reshape((m, k), order='F') V = w[iV-1:iV+n*k-1].reshape((n, k), order='F') S = w[iS-1:iS+k-1] return U, V, S #------------------------------------------------------------------------------ # iddp_rid.f #------------------------------------------------------------------------------ def iddp_rid(eps, m, n, matvect): """ Compute ID of a real matrix to a specified relative precision using random matrix-vector multiplication. :param eps: Relative precision. :type eps: float :param m: Matrix row dimension. :type m: int :param n: Matrix column dimension. :type n: int :param matvect: Function to apply the matrix transpose to a vector, with call signature `y = matvect(x)`, where `x` and `y` are the input and output vectors, respectively. :type matvect: function :return: Rank of ID. :rtype: int :return: Column index array. :rtype: :class:`numpy.ndarray` :return: Interpolation coefficients. :rtype: :class:`numpy.ndarray` """ proj = np.empty(m + 1 + 2*n*(min(m, n) + 1), order='F') k, idx, proj, ier = _id.iddp_rid(eps, m, n, matvect, proj) if ier != 0: raise _RETCODE_ERROR proj = proj[:k*(n-k)].reshape((k, n-k), order='F') return k, idx, proj def idd_findrank(eps, m, n, matvect): """ Estimate rank of a real matrix to a specified relative precision using random matrix-vector multiplication. :param eps: Relative precision. :type eps: float :param m: Matrix row dimension. :type m: int :param n: Matrix column dimension. 
:type n: int :param matvect: Function to apply the matrix transpose to a vector, with call signature `y = matvect(x)`, where `x` and `y` are the input and output vectors, respectively. :type matvect: function :return: Rank estimate. :rtype: int """ k, ra, ier = _id.idd_findrank(eps, m, n, matvect) if ier: raise _RETCODE_ERROR return k #------------------------------------------------------------------------------ # iddp_rsvd.f #------------------------------------------------------------------------------ def iddp_rsvd(eps, m, n, matvect, matvec): """ Compute SVD of a real matrix to a specified relative precision using random matrix-vector multiplication. :param eps: Relative precision. :type eps: float :param m: Matrix row dimension. :type m: int :param n: Matrix column dimension. :type n: int :param matvect: Function to apply the matrix transpose to a vector, with call signature `y = matvect(x)`, where `x` and `y` are the input and output vectors, respectively. :type matvect: function :param matvec: Function to apply the matrix to a vector, with call signature `y = matvec(x)`, where `x` and `y` are the input and output vectors, respectively. :type matvec: function :return: Left singular vectors. :rtype: :class:`numpy.ndarray` :return: Right singular vectors. :rtype: :class:`numpy.ndarray` :return: Singular values. :rtype: :class:`numpy.ndarray` """ k, iU, iV, iS, w, ier = _id.iddp_rsvd(eps, m, n, matvect, matvec) if ier: raise _RETCODE_ERROR U = w[iU-1:iU+m*k-1].reshape((m, k), order='F') V = w[iV-1:iV+n*k-1].reshape((n, k), order='F') S = w[iS-1:iS+k-1] return U, V, S #------------------------------------------------------------------------------ # iddr_aid.f #------------------------------------------------------------------------------ def iddr_aid(A, k): """ Compute ID of a real matrix to a specified rank using random sampling. :param A: Matrix. :type A: :class:`numpy.ndarray` :param k: Rank of ID. :type k: int :return: Column index array. 
:rtype: :class:`numpy.ndarray` :return: Interpolation coefficients. :rtype: :class:`numpy.ndarray` """ A = np.asfortranarray(A) m, n = A.shape w = iddr_aidi(m, n, k) idx, proj = _id.iddr_aid(A, k, w) if k == n: proj = np.array([], dtype='float64', order='F') else: proj = proj.reshape((k, n-k), order='F') return idx, proj def iddr_aidi(m, n, k): """ Initialize array for :func:`iddr_aid`. :param m: Matrix row dimension. :type m: int :param n: Matrix column dimension. :type n: int :param k: Rank of ID. :type k: int :return: Initialization array to be used by :func:`iddr_aid`. :rtype: :class:`numpy.ndarray` """ return _id.iddr_aidi(m, n, k) #------------------------------------------------------------------------------ # iddr_asvd.f #------------------------------------------------------------------------------ def iddr_asvd(A, k): """ Compute SVD of a real matrix to a specified rank using random sampling. :param A: Matrix. :type A: :class:`numpy.ndarray` :param k: Rank of SVD. :type k: int :return: Left singular vectors. :rtype: :class:`numpy.ndarray` :return: Right singular vectors. :rtype: :class:`numpy.ndarray` :return: Singular values. :rtype: :class:`numpy.ndarray` """ A = np.asfortranarray(A) m, n = A.shape w = np.empty((2*k + 28)*m + (6*k + 21)*n + 25*k**2 + 100, order='F') w_ = iddr_aidi(m, n, k) w[:w_.size] = w_ U, V, S, ier = _id.iddr_asvd(A, k, w) if ier != 0: raise _RETCODE_ERROR return U, V, S #------------------------------------------------------------------------------ # iddr_rid.f #------------------------------------------------------------------------------ def iddr_rid(m, n, matvect, k): """ Compute ID of a real matrix to a specified rank using random matrix-vector multiplication. :param m: Matrix row dimension. :type m: int :param n: Matrix column dimension. :type n: int :param matvect: Function to apply the matrix transpose to a vector, with call signature `y = matvect(x)`, where `x` and `y` are the input and output vectors, respectively. 
:type matvect: function :param k: Rank of ID. :type k: int :return: Column index array. :rtype: :class:`numpy.ndarray` :return: Interpolation coefficients. :rtype: :class:`numpy.ndarray` """ idx, proj = _id.iddr_rid(m, n, matvect, k) proj = proj[:k*(n-k)].reshape((k, n-k), order='F') return idx, proj #------------------------------------------------------------------------------ # iddr_rsvd.f #------------------------------------------------------------------------------ def iddr_rsvd(m, n, matvect, matvec, k): """ Compute SVD of a real matrix to a specified rank using random matrix-vector multiplication. :param m: Matrix row dimension. :type m: int :param n: Matrix column dimension. :type n: int :param matvect: Function to apply the matrix transpose to a vector, with call signature `y = matvect(x)`, where `x` and `y` are the input and output vectors, respectively. :type matvect: function :param matvec: Function to apply the matrix to a vector, with call signature `y = matvec(x)`, where `x` and `y` are the input and output vectors, respectively. :type matvec: function :param k: Rank of SVD. :type k: int :return: Left singular vectors. :rtype: :class:`numpy.ndarray` :return: Right singular vectors. :rtype: :class:`numpy.ndarray` :return: Singular values. :rtype: :class:`numpy.ndarray` """ U, V, S, ier = _id.iddr_rsvd(m, n, matvect, matvec, k) if ier != 0: raise _RETCODE_ERROR return U, V, S #------------------------------------------------------------------------------ # idz_frm.f #------------------------------------------------------------------------------ def idz_frm(n, w, x): """ Transform complex vector via a composition of Rokhlin's random transform, random subselection, and an FFT. In contrast to :func:`idz_sfrm`, this routine works best when the length of the transformed vector is the power-of-two integer output by :func:`idz_frmi`, or when the length is not specified but instead determined a posteriori from the output. 
The returned transformed vector is randomly permuted. :param n: Greatest power-of-two integer satisfying `n <= x.size` as obtained from :func:`idz_frmi`; `n` is also the length of the output vector. :type n: int :param w: Initialization array constructed by :func:`idz_frmi`. :type w: :class:`numpy.ndarray` :param x: Vector to be transformed. :type x: :class:`numpy.ndarray` :return: Transformed vector. :rtype: :class:`numpy.ndarray` """ return _id.idz_frm(n, w, x) def idz_sfrm(l, n, w, x): """ Transform complex vector via a composition of Rokhlin's random transform, random subselection, and an FFT. In contrast to :func:`idz_frm`, this routine works best when the length of the transformed vector is known a priori. :param l: Length of transformed vector, satisfying `l <= n`. :type l: int :param n: Greatest power-of-two integer satisfying `n <= x.size` as obtained from :func:`idz_sfrmi`. :type n: int :param w: Initialization array constructed by :func:`idd_sfrmi`. :type w: :class:`numpy.ndarray` :param x: Vector to be transformed. :type x: :class:`numpy.ndarray` :return: Transformed vector. :rtype: :class:`numpy.ndarray` """ return _id.idz_sfrm(l, n, w, x) def idz_frmi(m): """ Initialize data for :func:`idz_frm`. :param m: Length of vector to be transformed. :type m: int :return: Greatest power-of-two integer `n` satisfying `n <= m`. :rtype: int :return: Initialization array to be used by :func:`idz_frm`. :rtype: :class:`numpy.ndarray` """ return _id.idz_frmi(m) def idz_sfrmi(l, m): """ Initialize data for :func:`idz_sfrm`. :param l: Length of output transformed vector. :type l: int :param m: Length of the vector to be transformed. :type m: int :return: Greatest power-of-two integer `n` satisfying `n <= m`. :rtype: int :return: Initialization array to be used by :func:`idz_sfrm`. 
:rtype: :class:`numpy.ndarray` """ return _id.idz_sfrmi(l, m) #------------------------------------------------------------------------------ # idz_id.f #------------------------------------------------------------------------------ def idzp_id(eps, A): """ Compute ID of a complex matrix to a specified relative precision. :param eps: Relative precision. :type eps: float :param A: Matrix. :type A: :class:`numpy.ndarray` :return: Rank of ID. :rtype: int :return: Column index array. :rtype: :class:`numpy.ndarray` :return: Interpolation coefficients. :rtype: :class:`numpy.ndarray` """ A = np.asfortranarray(A) k, idx, rnorms = _id.idzp_id(eps, A) n = A.shape[1] proj = A.T.ravel()[:k*(n-k)].reshape((k, n-k), order='F') return k, idx, proj def idzr_id(A, k): """ Compute ID of a complex matrix to a specified rank. :param A: Matrix. :type A: :class:`numpy.ndarray` :param k: Rank of ID. :type k: int :return: Column index array. :rtype: :class:`numpy.ndarray` :return: Interpolation coefficients. :rtype: :class:`numpy.ndarray` """ A = np.asfortranarray(A) idx, rnorms = _id.idzr_id(A, k) n = A.shape[1] proj = A.T.ravel()[:k*(n-k)].reshape((k, n-k), order='F') return idx, proj def idz_reconid(B, idx, proj): """ Reconstruct matrix from complex ID. :param B: Skeleton matrix. :type B: :class:`numpy.ndarray` :param idx: Column index array. :type idx: :class:`numpy.ndarray` :param proj: Interpolation coefficients. :type proj: :class:`numpy.ndarray` :return: Reconstructed matrix. :rtype: :class:`numpy.ndarray` """ B = np.asfortranarray(B) if proj.size > 0: return _id.idz_reconid(B, idx, proj) else: return B[:, np.argsort(idx)] def idz_reconint(idx, proj): """ Reconstruct interpolation matrix from complex ID. :param idx: Column index array. :type idx: :class:`numpy.ndarray` :param proj: Interpolation coefficients. :type proj: :class:`numpy.ndarray` :return: Interpolation matrix. 
:rtype: :class:`numpy.ndarray` """ return _id.idz_reconint(idx, proj) def idz_copycols(A, k, idx): """ Reconstruct skeleton matrix from complex ID. :param A: Original matrix. :type A: :class:`numpy.ndarray` :param k: Rank of ID. :type k: int :param idx: Column index array. :type idx: :class:`numpy.ndarray` :return: Skeleton matrix. :rtype: :class:`numpy.ndarray` """ A = np.asfortranarray(A) return _id.idz_copycols(A, k, idx) #------------------------------------------------------------------------------ # idz_id2svd.f #------------------------------------------------------------------------------ def idz_id2svd(B, idx, proj): """ Convert complex ID to SVD. :param B: Skeleton matrix. :type B: :class:`numpy.ndarray` :param idx: Column index array. :type idx: :class:`numpy.ndarray` :param proj: Interpolation coefficients. :type proj: :class:`numpy.ndarray` :return: Left singular vectors. :rtype: :class:`numpy.ndarray` :return: Right singular vectors. :rtype: :class:`numpy.ndarray` :return: Singular values. :rtype: :class:`numpy.ndarray` """ B = np.asfortranarray(B) U, V, S, ier = _id.idz_id2svd(B, idx, proj) if ier: raise _RETCODE_ERROR return U, V, S #------------------------------------------------------------------------------ # idz_snorm.f #------------------------------------------------------------------------------ def idz_snorm(m, n, matveca, matvec, its=20): """ Estimate spectral norm of a complex matrix by the randomized power method. :param m: Matrix row dimension. :type m: int :param n: Matrix column dimension. :type n: int :param matveca: Function to apply the matrix adjoint to a vector, with call signature `y = matveca(x)`, where `x` and `y` are the input and output vectors, respectively. :type matveca: function :param matvec: Function to apply the matrix to a vector, with call signature `y = matvec(x)`, where `x` and `y` are the input and output vectors, respectively. :type matvec: function :param its: Number of power method iterations. 
:type its: int :return: Spectral norm estimate. :rtype: float """ snorm, v = _id.idz_snorm(m, n, matveca, matvec, its) return snorm def idz_diffsnorm(m, n, matveca, matveca2, matvec, matvec2, its=20): """ Estimate spectral norm of the difference of two complex matrices by the randomized power method. :param m: Matrix row dimension. :type m: int :param n: Matrix column dimension. :type n: int :param matveca: Function to apply the adjoint of the first matrix to a vector, with call signature `y = matveca(x)`, where `x` and `y` are the input and output vectors, respectively. :type matveca: function :param matveca2: Function to apply the adjoint of the second matrix to a vector, with call signature `y = matveca2(x)`, where `x` and `y` are the input and output vectors, respectively. :type matveca2: function :param matvec: Function to apply the first matrix to a vector, with call signature `y = matvec(x)`, where `x` and `y` are the input and output vectors, respectively. :type matvec: function :param matvec2: Function to apply the second matrix to a vector, with call signature `y = matvec2(x)`, where `x` and `y` are the input and output vectors, respectively. :type matvec2: function :param its: Number of power method iterations. :type its: int :return: Spectral norm estimate of matrix difference. :rtype: float """ return _id.idz_diffsnorm(m, n, matveca, matveca2, matvec, matvec2, its) #------------------------------------------------------------------------------ # idz_svd.f #------------------------------------------------------------------------------ def idzr_svd(A, k): """ Compute SVD of a complex matrix to a specified rank. :param A: Matrix. :type A: :class:`numpy.ndarray` :param k: Rank of SVD. :type k: int :return: Left singular vectors. :rtype: :class:`numpy.ndarray` :return: Right singular vectors. :rtype: :class:`numpy.ndarray` :return: Singular values. 
:rtype: :class:`numpy.ndarray` """ A = np.asfortranarray(A) U, V, S, ier = _id.idzr_svd(A, k) if ier: raise _RETCODE_ERROR return U, V, S def idzp_svd(eps, A): """ Compute SVD of a complex matrix to a specified relative precision. :param eps: Relative precision. :type eps: float :param A: Matrix. :type A: :class:`numpy.ndarray` :return: Left singular vectors. :rtype: :class:`numpy.ndarray` :return: Right singular vectors. :rtype: :class:`numpy.ndarray` :return: Singular values. :rtype: :class:`numpy.ndarray` """ A = np.asfortranarray(A) m, n = A.shape k, iU, iV, iS, w, ier = _id.idzp_svd(eps, A) if ier: raise _RETCODE_ERROR U = w[iU-1:iU+m*k-1].reshape((m, k), order='F') V = w[iV-1:iV+n*k-1].reshape((n, k), order='F') S = w[iS-1:iS+k-1] return U, V, S #------------------------------------------------------------------------------ # idzp_aid.f #------------------------------------------------------------------------------ def idzp_aid(eps, A): """ Compute ID of a complex matrix to a specified relative precision using random sampling. :param eps: Relative precision. :type eps: float :param A: Matrix. :type A: :class:`numpy.ndarray` :return: Rank of ID. :rtype: int :return: Column index array. :rtype: :class:`numpy.ndarray` :return: Interpolation coefficients. :rtype: :class:`numpy.ndarray` """ A = np.asfortranarray(A) m, n = A.shape n2, w = idz_frmi(m) proj = np.empty(n*(2*n2 + 1) + n2 + 1, dtype='complex128', order='F') k, idx, proj = _id.idzp_aid(eps, A, w, proj) proj = proj[:k*(n-k)].reshape((k, n-k), order='F') return k, idx, proj def idz_estrank(eps, A): """ Estimate rank of a complex matrix to a specified relative precision using random sampling. The output rank is typically about 8 higher than the actual rank. :param eps: Relative precision. :type eps: float :param A: Matrix. :type A: :class:`numpy.ndarray` :return: Rank estimate. 
:rtype: int """ A = np.asfortranarray(A) m, n = A.shape n2, w = idz_frmi(m) ra = np.empty(n*n2 + (n + 1)*(n2 + 1), dtype='complex128', order='F') k, ra = _id.idz_estrank(eps, A, w, ra) return k #------------------------------------------------------------------------------ # idzp_asvd.f #------------------------------------------------------------------------------ def idzp_asvd(eps, A): """ Compute SVD of a complex matrix to a specified relative precision using random sampling. :param eps: Relative precision. :type eps: float :param A: Matrix. :type A: :class:`numpy.ndarray` :return: Left singular vectors. :rtype: :class:`numpy.ndarray` :return: Right singular vectors. :rtype: :class:`numpy.ndarray` :return: Singular values. :rtype: :class:`numpy.ndarray` """ A = np.asfortranarray(A) m, n = A.shape n2, winit = _id.idz_frmi(m) w = np.empty( max((min(m, n) + 1)*(3*m + 5*n + 11) + 8*min(m, n)**2, (2*n + 1)*(n2 + 1)), dtype=np.complex128, order='F') k, iU, iV, iS, w, ier = _id.idzp_asvd(eps, A, winit, w) if ier: raise _RETCODE_ERROR U = w[iU-1:iU+m*k-1].reshape((m, k), order='F') V = w[iV-1:iV+n*k-1].reshape((n, k), order='F') S = w[iS-1:iS+k-1] return U, V, S #------------------------------------------------------------------------------ # idzp_rid.f #------------------------------------------------------------------------------ def idzp_rid(eps, m, n, matveca): """ Compute ID of a complex matrix to a specified relative precision using random matrix-vector multiplication. :param eps: Relative precision. :type eps: float :param m: Matrix row dimension. :type m: int :param n: Matrix column dimension. :type n: int :param matveca: Function to apply the matrix adjoint to a vector, with call signature `y = matveca(x)`, where `x` and `y` are the input and output vectors, respectively. :type matveca: function :return: Rank of ID. :rtype: int :return: Column index array. :rtype: :class:`numpy.ndarray` :return: Interpolation coefficients. 
:rtype: :class:`numpy.ndarray` """ proj = np.empty( m + 1 + 2*n*(min(m, n) + 1), dtype=np.complex128, order='F') k, idx, proj, ier = _id.idzp_rid(eps, m, n, matveca, proj) if ier: raise _RETCODE_ERROR proj = proj[:k*(n-k)].reshape((k, n-k), order='F') return k, idx, proj def idz_findrank(eps, m, n, matveca): """ Estimate rank of a complex matrix to a specified relative precision using random matrix-vector multiplication. :param eps: Relative precision. :type eps: float :param m: Matrix row dimension. :type m: int :param n: Matrix column dimension. :type n: int :param matveca: Function to apply the matrix adjoint to a vector, with call signature `y = matveca(x)`, where `x` and `y` are the input and output vectors, respectively. :type matveca: function :return: Rank estimate. :rtype: int """ k, ra, ier = _id.idz_findrank(eps, m, n, matveca) if ier: raise _RETCODE_ERROR return k #------------------------------------------------------------------------------ # idzp_rsvd.f #------------------------------------------------------------------------------ def idzp_rsvd(eps, m, n, matveca, matvec): """ Compute SVD of a complex matrix to a specified relative precision using random matrix-vector multiplication. :param eps: Relative precision. :type eps: float :param m: Matrix row dimension. :type m: int :param n: Matrix column dimension. :type n: int :param matveca: Function to apply the matrix adjoint to a vector, with call signature `y = matveca(x)`, where `x` and `y` are the input and output vectors, respectively. :type matveca: function :param matvec: Function to apply the matrix to a vector, with call signature `y = matvec(x)`, where `x` and `y` are the input and output vectors, respectively. :type matvec: function :return: Left singular vectors. :rtype: :class:`numpy.ndarray` :return: Right singular vectors. :rtype: :class:`numpy.ndarray` :return: Singular values. 
:rtype: :class:`numpy.ndarray` """ k, iU, iV, iS, w, ier = _id.idzp_rsvd(eps, m, n, matveca, matvec) if ier: raise _RETCODE_ERROR U = w[iU-1:iU+m*k-1].reshape((m, k), order='F') V = w[iV-1:iV+n*k-1].reshape((n, k), order='F') S = w[iS-1:iS+k-1] return U, V, S #------------------------------------------------------------------------------ # idzr_aid.f #------------------------------------------------------------------------------ def idzr_aid(A, k): """ Compute ID of a complex matrix to a specified rank using random sampling. :param A: Matrix. :type A: :class:`numpy.ndarray` :param k: Rank of ID. :type k: int :return: Column index array. :rtype: :class:`numpy.ndarray` :return: Interpolation coefficients. :rtype: :class:`numpy.ndarray` """ A = np.asfortranarray(A) m, n = A.shape w = idzr_aidi(m, n, k) idx, proj = _id.idzr_aid(A, k, w) if k == n: proj = np.array([], dtype='complex128', order='F') else: proj = proj.reshape((k, n-k), order='F') return idx, proj def idzr_aidi(m, n, k): """ Initialize array for :func:`idzr_aid`. :param m: Matrix row dimension. :type m: int :param n: Matrix column dimension. :type n: int :param k: Rank of ID. :type k: int :return: Initialization array to be used by :func:`idzr_aid`. :rtype: :class:`numpy.ndarray` """ return _id.idzr_aidi(m, n, k) #------------------------------------------------------------------------------ # idzr_asvd.f #------------------------------------------------------------------------------ def idzr_asvd(A, k): """ Compute SVD of a complex matrix to a specified rank using random sampling. :param A: Matrix. :type A: :class:`numpy.ndarray` :param k: Rank of SVD. :type k: int :return: Left singular vectors. :rtype: :class:`numpy.ndarray` :return: Right singular vectors. :rtype: :class:`numpy.ndarray` :return: Singular values. 
:rtype: :class:`numpy.ndarray` """ A = np.asfortranarray(A) m, n = A.shape w = np.empty( (2*k + 22)*m + (6*k + 21)*n + 8*k**2 + 10*k + 90, dtype='complex128', order='F') w_ = idzr_aidi(m, n, k) w[:w_.size] = w_ U, V, S, ier = _id.idzr_asvd(A, k, w) if ier: raise _RETCODE_ERROR return U, V, S #------------------------------------------------------------------------------ # idzr_rid.f #------------------------------------------------------------------------------ def idzr_rid(m, n, matveca, k): """ Compute ID of a complex matrix to a specified rank using random matrix-vector multiplication. :param m: Matrix row dimension. :type m: int :param n: Matrix column dimension. :type n: int :param matveca: Function to apply the matrix adjoint to a vector, with call signature `y = matveca(x)`, where `x` and `y` are the input and output vectors, respectively. :type matveca: function :param k: Rank of ID. :type k: int :return: Column index array. :rtype: :class:`numpy.ndarray` :return: Interpolation coefficients. :rtype: :class:`numpy.ndarray` """ idx, proj = _id.idzr_rid(m, n, matveca, k) proj = proj[:k*(n-k)].reshape((k, n-k), order='F') return idx, proj #------------------------------------------------------------------------------ # idzr_rsvd.f #------------------------------------------------------------------------------ def idzr_rsvd(m, n, matveca, matvec, k): """ Compute SVD of a complex matrix to a specified rank using random matrix-vector multiplication. :param m: Matrix row dimension. :type m: int :param n: Matrix column dimension. :type n: int :param matveca: Function to apply the matrix adjoint to a vector, with call signature `y = matveca(x)`, where `x` and `y` are the input and output vectors, respectively. :type matveca: function :param matvec: Function to apply the matrix to a vector, with call signature `y = matvec(x)`, where `x` and `y` are the input and output vectors, respectively. :type matvec: function :param k: Rank of SVD. 
:type k: int :return: Left singular vectors. :rtype: :class:`numpy.ndarray` :return: Right singular vectors. :rtype: :class:`numpy.ndarray` :return: Singular values. :rtype: :class:`numpy.ndarray` """ U, V, S, ier = _id.idzr_rsvd(m, n, matveca, matvec, k) if ier: raise _RETCODE_ERROR return U, V, S
44,935
25.907784
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/decomp.py
# # Author: Pearu Peterson, March 2002 # # additions by Travis Oliphant, March 2002 # additions by Eric Jones, June 2002 # additions by Johannes Loehnert, June 2006 # additions by Bart Vandereycken, June 2006 # additions by Andrew D Straw, May 2007 # additions by Tiziano Zito, November 2008 # # April 2010: Functions for LU, QR, SVD, Schur and Cholesky decompositions were # moved to their own files. Still in this file are functions for eigenstuff # and for the Hessenberg form. from __future__ import division, print_function, absolute_import __all__ = ['eig', 'eigvals', 'eigh', 'eigvalsh', 'eig_banded', 'eigvals_banded', 'eigh_tridiagonal', 'eigvalsh_tridiagonal', 'hessenberg', 'cdf2rdf'] import numpy from numpy import (array, isfinite, inexact, nonzero, iscomplexobj, cast, flatnonzero, conj, asarray, argsort, empty, newaxis, argwhere, iscomplex, eye, zeros, einsum) # Local imports from scipy._lib.six import xrange from scipy._lib._util import _asarray_validated from scipy._lib.six import string_types from .misc import LinAlgError, _datacopied, norm from .lapack import get_lapack_funcs, _compute_lwork _I = cast['F'](1j) def _make_complex_eigvecs(w, vin, dtype): """ Produce complex-valued eigenvectors from LAPACK DGGEV real-valued output """ # - see LAPACK man page DGGEV at ALPHAI v = numpy.array(vin, dtype=dtype) m = (w.imag > 0) m[:-1] |= (w.imag[1:] < 0) # workaround for LAPACK bug, cf. ticket #709 for i in flatnonzero(m): v.imag[:, i] = vin[:, i+1] conj(v[:, i], v[:, i+1]) return v def _make_eigvals(alpha, beta, homogeneous_eigvals): if homogeneous_eigvals: if beta is None: return numpy.vstack((alpha, numpy.ones_like(alpha))) else: return numpy.vstack((alpha, beta)) else: if beta is None: return alpha else: w = numpy.empty_like(alpha) alpha_zero = (alpha == 0) beta_zero = (beta == 0) beta_nonzero = ~beta_zero w[beta_nonzero] = alpha[beta_nonzero]/beta[beta_nonzero] # Use numpy.inf for complex values too since # 1/numpy.inf = 0, i.e. 
it correctly behaves as projective # infinity. w[~alpha_zero & beta_zero] = numpy.inf if numpy.all(alpha.imag == 0): w[alpha_zero & beta_zero] = numpy.nan else: w[alpha_zero & beta_zero] = complex(numpy.nan, numpy.nan) return w def _geneig(a1, b1, left, right, overwrite_a, overwrite_b, homogeneous_eigvals): ggev, = get_lapack_funcs(('ggev',), (a1, b1)) cvl, cvr = left, right res = ggev(a1, b1, lwork=-1) lwork = res[-2][0].real.astype(numpy.int) if ggev.typecode in 'cz': alpha, beta, vl, vr, work, info = ggev(a1, b1, cvl, cvr, lwork, overwrite_a, overwrite_b) w = _make_eigvals(alpha, beta, homogeneous_eigvals) else: alphar, alphai, beta, vl, vr, work, info = ggev(a1, b1, cvl, cvr, lwork, overwrite_a, overwrite_b) alpha = alphar + _I * alphai w = _make_eigvals(alpha, beta, homogeneous_eigvals) _check_info(info, 'generalized eig algorithm (ggev)') only_real = numpy.all(w.imag == 0.0) if not (ggev.typecode in 'cz' or only_real): t = w.dtype.char if left: vl = _make_complex_eigvecs(w, vl, t) if right: vr = _make_complex_eigvecs(w, vr, t) # the eigenvectors returned by the lapack function are NOT normalized for i in xrange(vr.shape[0]): if right: vr[:, i] /= norm(vr[:, i]) if left: vl[:, i] /= norm(vl[:, i]) if not (left or right): return w if left: if right: return w, vl, vr return w, vl return w, vr def eig(a, b=None, left=False, right=True, overwrite_a=False, overwrite_b=False, check_finite=True, homogeneous_eigvals=False): """ Solve an ordinary or generalized eigenvalue problem of a square matrix. Find eigenvalues w and right or left eigenvectors of a general matrix:: a vr[:,i] = w[i] b vr[:,i] a.H vl[:,i] = w[i].conj() b.H vl[:,i] where ``.H`` is the Hermitian conjugation. Parameters ---------- a : (M, M) array_like A complex or real matrix whose eigenvalues and eigenvectors will be computed. b : (M, M) array_like, optional Right-hand side matrix in a generalized eigenvalue problem. Default is None, identity matrix is assumed. 
left : bool, optional Whether to calculate and return left eigenvectors. Default is False. right : bool, optional Whether to calculate and return right eigenvectors. Default is True. overwrite_a : bool, optional Whether to overwrite `a`; may improve performance. Default is False. overwrite_b : bool, optional Whether to overwrite `b`; may improve performance. Default is False. check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. homogeneous_eigvals : bool, optional If True, return the eigenvalues in homogeneous coordinates. In this case ``w`` is a (2, M) array so that:: w[1,i] a vr[:,i] = w[0,i] b vr[:,i] Default is False. Returns ------- w : (M,) or (2, M) double or complex ndarray The eigenvalues, each repeated according to its multiplicity. The shape is (M,) unless ``homogeneous_eigvals=True``. vl : (M, M) double or complex ndarray The normalized left eigenvector corresponding to the eigenvalue ``w[i]`` is the column vl[:,i]. Only returned if ``left=True``. vr : (M, M) double or complex ndarray The normalized right eigenvector corresponding to the eigenvalue ``w[i]`` is the column ``vr[:,i]``. Only returned if ``right=True``. Raises ------ LinAlgError If eigenvalue computation does not converge. See Also -------- eigvals : eigenvalues of general arrays eigh : Eigenvalues and right eigenvectors for symmetric/Hermitian arrays. 
eig_banded : eigenvalues and right eigenvectors for symmetric/Hermitian band matrices eigh_tridiagonal : eigenvalues and right eiegenvectors for symmetric/Hermitian tridiagonal matrices Examples -------- >>> from scipy import linalg >>> a = np.array([[0., -1.], [1., 0.]]) >>> linalg.eigvals(a) array([0.+1.j, 0.-1.j]) >>> b = np.array([[0., 1.], [1., 1.]]) >>> linalg.eigvals(a, b) array([ 1.+0.j, -1.+0.j]) >>> a = np.array([[3., 0., 0.], [0., 8., 0.], [0., 0., 7.]]) >>> linalg.eigvals(a, homogeneous_eigvals=True) array([[3.+0.j, 8.+0.j, 7.+0.j], [1.+0.j, 1.+0.j, 1.+0.j]]) >>> a = np.array([[0., -1.], [1., 0.]]) >>> linalg.eigvals(a) == linalg.eig(a)[0] array([ True, True]) >>> linalg.eig(a, left=True, right=False)[1] # normalized left eigenvector array([[-0.70710678+0.j , -0.70710678-0.j ], [-0. +0.70710678j, -0. -0.70710678j]]) >>> linalg.eig(a, left=False, right=True)[1] # normalized right eigenvector array([[0.70710678+0.j , 0.70710678-0.j ], [0. -0.70710678j, 0. +0.70710678j]]) """ a1 = _asarray_validated(a, check_finite=check_finite) if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]: raise ValueError('expected square matrix') overwrite_a = overwrite_a or (_datacopied(a1, a)) if b is not None: b1 = _asarray_validated(b, check_finite=check_finite) overwrite_b = overwrite_b or _datacopied(b1, b) if len(b1.shape) != 2 or b1.shape[0] != b1.shape[1]: raise ValueError('expected square matrix') if b1.shape != a1.shape: raise ValueError('a and b must have the same shape') return _geneig(a1, b1, left, right, overwrite_a, overwrite_b, homogeneous_eigvals) geev, geev_lwork = get_lapack_funcs(('geev', 'geev_lwork'), (a1,)) compute_vl, compute_vr = left, right lwork = _compute_lwork(geev_lwork, a1.shape[0], compute_vl=compute_vl, compute_vr=compute_vr) if geev.typecode in 'cz': w, vl, vr, info = geev(a1, lwork=lwork, compute_vl=compute_vl, compute_vr=compute_vr, overwrite_a=overwrite_a) w = _make_eigvals(w, None, homogeneous_eigvals) else: wr, wi, vl, vr, info = geev(a1, 
def eigh(a, b=None, lower=True, eigvals_only=False, overwrite_a=False,
         overwrite_b=False, turbo=True, eigvals=None, type=1,
         check_finite=True):
    """
    Solve an ordinary or generalized eigenvalue problem for a complex
    Hermitian or real symmetric matrix.

    Find eigenvalues w and optionally eigenvectors v of matrix `a`, where
    `b` is positive definite::

                      a v[:,i] = w[i] b v[:,i]
        v[i,:].conj() a v[:,i] = w[i]
        v[i,:].conj() b v[:,i] = 1

    Parameters
    ----------
    a : (M, M) array_like
        A complex Hermitian or real symmetric matrix whose eigenvalues and
        eigenvectors will be computed.
    b : (M, M) array_like, optional
        A complex Hermitian or real symmetric positive definite matrix.
        If omitted, identity matrix is assumed.
    lower : bool, optional
        Whether the pertinent array data is taken from the lower or upper
        triangle of `a`. (Default: lower)
    eigvals_only : bool, optional
        Whether to calculate only eigenvalues and no eigenvectors.
        (Default: both are calculated)
    turbo : bool, optional
        Use divide and conquer algorithm (faster but expensive in memory,
        only for generalized eigenvalue problem and if eigvals=None)
    eigvals : tuple (lo, hi), optional
        Indexes of the smallest and largest (in ascending order) eigenvalues
        and corresponding eigenvectors to be returned: 0 <= lo <= hi <= M-1.
        If omitted, all eigenvalues and eigenvectors are returned.
    type : int, optional
        Specifies the problem type to be solved:

           type = 1: a   v[:,i] = w[i] b v[:,i]

           type = 2: a b v[:,i] = w[i]   v[:,i]

           type = 3: b a v[:,i] = w[i]   v[:,i]
    overwrite_a : bool, optional
        Whether to overwrite data in `a` (may improve performance)
    overwrite_b : bool, optional
        Whether to overwrite data in `b` (may improve performance)
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    w : (N,) float ndarray
        The N (1<=N<=M) selected eigenvalues, in ascending order, each
        repeated according to its multiplicity.
    v : (M, N) complex ndarray
        (if eigvals_only == False)

        The normalized selected eigenvector corresponding to the
        eigenvalue w[i] is the column v[:,i].

        Normalization:

            type 1 and 3: v.conj() a      v  = w

            type 2: inv(v).conj() a  inv(v) = w

            type = 1 or 2: v.conj() b      v  = I

            type = 3: v.conj() inv(b) v  = I

    Raises
    ------
    LinAlgError
        If eigenvalue computation does not converge,
        an error occurred, or b matrix is not definite positive. Note that
        if input matrices are not symmetric or hermitian, no error is reported
        but results will be wrong.

    See Also
    --------
    eigvalsh : eigenvalues of symmetric or Hermitian arrays
    eig : eigenvalues and right eigenvectors for non-symmetric arrays
    eigh_tridiagonal : eigenvalues and right eigenvectors for
        symmetric/Hermitian tridiagonal matrices

    Notes
    -----
    This function does not check the input array for being hermitian/symmetric
    in order to allow for representing arrays with only their upper/lower
    triangular parts.

    Examples
    --------
    >>> from scipy.linalg import eigh
    >>> A = np.array([[6, 3, 1, 5], [3, 0, 5, 1], [1, 5, 6, 2], [5, 1, 2, 2]])
    >>> w, v = eigh(A)
    >>> np.allclose(A @ v - v @ np.diag(w), np.zeros((4, 4)))
    True

    """
    a1 = _asarray_validated(a, check_finite=check_finite)
    if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
        raise ValueError('expected square matrix')
    overwrite_a = overwrite_a or (_datacopied(a1, a))
    # complex input selects the Hermitian ('he') LAPACK family below
    cplx = iscomplexobj(a1)
    if b is not None:
        b1 = _asarray_validated(b, check_finite=check_finite)
        overwrite_b = overwrite_b or _datacopied(b1, b)
        if len(b1.shape) != 2 or b1.shape[0] != b1.shape[1]:
            raise ValueError('expected square matrix')
        if b1.shape != a1.shape:
            raise ValueError("wrong b dimensions %s, should "
                             "be %s" % (str(b1.shape), str(a1.shape)))
        cplx = cplx or iscomplexobj(b1)
    else:
        b1 = None

    # Set job for fortran routines: 'V' also computes eigenvectors
    _job = 'N' if eigvals_only else 'V'

    # port eigenvalue range from python (0-based) to fortran (1-based)
    # convention
    if eigvals is not None:
        lo, hi = eigvals
        if lo < 0 or hi >= a1.shape[0]:
            raise ValueError('The eigenvalue range specified is not valid.\n'
                             'Valid range is [%s,%s]' % (0, a1.shape[0]-1))
        lo += 1
        hi += 1
        eigvals = (lo, hi)

    # set lower
    uplo = 'L' if lower else 'U'
    # fix prefix for lapack routines: symmetric ('sy') vs. Hermitian ('he')
    pfx = 'he' if cplx else 'sy'

    #  Standard Eigenvalue Problem
    #  Use '*evr' routines
    # FIXME: implement calculation of optimal lwork
    #        for all lapack routines
    if b1 is None:
        driver = pfx+'evr'
        (evr,) = get_lapack_funcs((driver,), (a1,))
        if eigvals is None:
            w, v, info = evr(a1, uplo=uplo, jobz=_job, range="A", il=1,
                             iu=a1.shape[0], overwrite_a=overwrite_a)
        else:
            (lo, hi) = eigvals
            w_tot, v, info = evr(a1, uplo=uplo, jobz=_job, range="I",
                                 il=lo, iu=hi, overwrite_a=overwrite_a)
            w = w_tot[0:hi-lo+1]

    # Generalized Eigenvalue Problem
    else:
        # Use '*gvx' routines if range is specified
        if eigvals is not None:
            driver = pfx+'gvx'
            (gvx,) = get_lapack_funcs((driver,), (a1, b1))
            (lo, hi) = eigvals
            w_tot, v, ifail, info = gvx(a1, b1, uplo=uplo, iu=hi,
                                        itype=type, jobz=_job, il=lo,
                                        overwrite_a=overwrite_a,
                                        overwrite_b=overwrite_b)
            w = w_tot[0:hi-lo+1]
        # Use '*gvd' routine if turbo is on and no eigvals are specified
        elif turbo:
            driver = pfx+'gvd'
            (gvd,) = get_lapack_funcs((driver,), (a1, b1))
            v, w, info = gvd(a1, b1, uplo=uplo, itype=type, jobz=_job,
                             overwrite_a=overwrite_a,
                             overwrite_b=overwrite_b)
        # Use '*gv' routine if turbo is off and no eigvals are specified
        else:
            driver = pfx+'gv'
            (gv,) = get_lapack_funcs((driver,), (a1, b1))
            v, w, info = gv(a1, b1, uplo=uplo, itype=type, jobz=_job,
                            overwrite_a=overwrite_a,
                            overwrite_b=overwrite_b)

    # Check if we had a successful exit
    if info == 0:
        if eigvals_only:
            return w
        else:
            return w, v
    # info < 0 (illegal argument) raises ValueError here; positive info is
    # triaged more specifically below.
    _check_info(info, driver, positive=False)  # triage more specifically
    if info > 0 and b1 is None:
        raise LinAlgError("unrecoverable internal error.")

    # The algorithm failed to converge.
    elif 0 < info <= b1.shape[0]:
        if eigvals is not None:
            # BUG FIX: the previous code formatted with
            # ``% nonzero(ifail)-1``; because ``%`` binds tighter than
            # ``-`` that subtracted 1 from the formatted *string* and
            # raised TypeError instead of LinAlgError.  LAPACK ?sygvx /
            # ?hegvx stores the 1-based indices of the eigenvectors that
            # failed to converge as the nonzero entries of ``ifail``;
            # report them 0-based to match the Python-side convention.
            raise LinAlgError("the eigenvectors %s failed to"
                              " converge." % (ifail[ifail > 0] - 1,))
        else:
            raise LinAlgError("internal fortran routine failed to converge: "
                              "%i off-diagonal elements of an "
                              "intermediate tridiagonal form did not converge"
                              " to zero." % info)

    # This occurs when b is not positive definite
    else:
        raise LinAlgError("the leading minor of order %i"
                          " of 'b' is not positive definite. The"
                          " factorization of 'b' could not be completed"
                          " and no eigenvalues or eigenvectors were"
                          " computed." % (info-b1.shape[0]))
il = iu = 1 if select != 0: # (non-all) sr = asarray(select_range) if sr.ndim != 1 or sr.size != 2 or sr[1] < sr[0]: raise ValueError('select_range must be a 2-element array-like ' 'in nondecreasing order') if select == 1: # (value) vl, vu = sr if max_ev == 0: max_ev = max_len else: # 2 (index) if sr.dtype.char.lower() not in 'hilqp': raise ValueError('when using select="i", select_range must ' 'contain integers, got dtype %s (%s)' % (sr.dtype, sr.dtype.char)) # translate Python (0 ... N-1) into Fortran (1 ... N) with + 1 il, iu = sr + 1 if min(il, iu) < 1 or max(il, iu) > max_len: raise ValueError('select_range out of bounds') max_ev = iu - il + 1 return select, vl, vu, il, iu, max_ev def eig_banded(a_band, lower=False, eigvals_only=False, overwrite_a_band=False, select='a', select_range=None, max_ev=0, check_finite=True): """ Solve real symmetric or complex hermitian band matrix eigenvalue problem. Find eigenvalues w and optionally right eigenvectors v of a:: a v[:,i] = w[i] v[:,i] v.H v = identity The matrix a is stored in a_band either in lower diagonal or upper diagonal ordered form: a_band[u + i - j, j] == a[i,j] (if upper form; i <= j) a_band[ i - j, j] == a[i,j] (if lower form; i >= j) where u is the number of bands above the diagonal. Example of a_band (shape of a is (6,6), u=2):: upper form: * * a02 a13 a24 a35 * a01 a12 a23 a34 a45 a00 a11 a22 a33 a44 a55 lower form: a00 a11 a22 a33 a44 a55 a10 a21 a32 a43 a54 * a20 a31 a42 a53 * * Cells marked with * are not used. Parameters ---------- a_band : (u+1, M) array_like The bands of the M by M matrix a. lower : bool, optional Is the matrix in the lower form. (Default is upper form) eigvals_only : bool, optional Compute only the eigenvalues and no eigenvectors. 
def eig_banded(a_band, lower=False, eigvals_only=False, overwrite_a_band=False,
               select='a', select_range=None, max_ev=0, check_finite=True):
    """
    Solve real symmetric or complex hermitian band matrix eigenvalue problem.

    Find eigenvalues w and optionally right eigenvectors v of a::

        a v[:,i] = w[i] v[:,i]
        v.H v    = identity

    The matrix a is stored in a_band either in lower diagonal or upper
    diagonal ordered form:

        a_band[u + i - j, j] == a[i,j]  (if upper form; i <= j)
        a_band[    i - j, j] == a[i,j]  (if lower form; i >= j)

    where u is the number of bands above the diagonal.

    Example of a_band (shape of a is (6,6), u=2)::

        upper form:
        *   *   a02 a13 a24 a35
        *   a01 a12 a23 a34 a45
        a00 a11 a22 a33 a44 a55

        lower form:
        a00 a11 a22 a33 a44 a55
        a10 a21 a32 a43 a54 *
        a20 a31 a42 a53 *   *

    Cells marked with * are not used.

    Parameters
    ----------
    a_band : (u+1, M) array_like
        The bands of the M by M matrix a.
    lower : bool, optional
        Is the matrix in the lower form. (Default is upper form)
    eigvals_only : bool, optional
        Compute only the eigenvalues and no eigenvectors.
        (Default: calculate also eigenvectors)
    overwrite_a_band : bool, optional
        Discard data in a_band (may enhance performance)
    select : {'a', 'v', 'i'}, optional
        Which eigenvalues to calculate

        ======  ========================================
        select  calculated
        ======  ========================================
        'a'     All eigenvalues
        'v'     Eigenvalues in the interval (min, max]
        'i'     Eigenvalues with indices min <= i <= max
        ======  ========================================
    select_range : (min, max), optional
        Range of selected eigenvalues
    max_ev : int, optional
        For select=='v', maximum number of eigenvalues expected.
        For other values of select, has no meaning.

        In doubt, leave this parameter untouched.
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    w : (M,) ndarray
        The eigenvalues, in ascending order, each repeated according to its
        multiplicity.
    v : (M, M) float or complex ndarray
        The normalized eigenvector corresponding to the eigenvalue w[i] is
        the column v[:,i].

    Raises
    ------
    LinAlgError
        If eigenvalue computation does not converge.

    See Also
    --------
    eigvals_banded : eigenvalues for symmetric/Hermitian band matrices
    eig : eigenvalues and right eigenvectors of general arrays.
    eigh : eigenvalues and right eigenvectors for symmetric/Hermitian arrays
    eigh_tridiagonal : eigenvalues and right eigenvectors for
        symmetric/Hermitian tridiagonal matrices

    Examples
    --------
    >>> from scipy.linalg import eig_banded
    >>> A = np.array([[1, 5, 2, 0], [5, 2, 5, 2], [2, 5, 3, 5], [0, 2, 5, 4]])
    >>> Ab = np.array([[1, 2, 3, 4], [5, 5, 5, 0], [2, 2, 0, 0]])
    >>> w, v = eig_banded(Ab, lower=True)
    >>> np.allclose(A @ v - v @ np.diag(w), np.zeros((4, 4)))
    True
    >>> w = eig_banded(Ab, lower=True, eigvals_only=True)
    >>> w
    array([-4.26200532, -2.22987175,  3.95222349, 12.53965359])

    Request only the eigenvalues between ``[-3, 4]``

    >>> w, v = eig_banded(Ab, lower=True, select='v', select_range=[-3, 4])
    >>> w
    array([-2.22987175,  3.95222349])

    """
    # When the input may be reused by LAPACK anyway (eigvals_only or an
    # explicit overwrite request), a plain validated view is enough;
    # otherwise take a private copy so the caller's data is untouched.
    if eigvals_only or overwrite_a_band:
        a1 = _asarray_validated(a_band, check_finite=check_finite)
        overwrite_a_band = overwrite_a_band or (_datacopied(a1, a_band))
    else:
        a1 = array(a_band)
        # array() always copied, so finiteness must be checked by hand here
        if issubclass(a1.dtype.type, inexact) and not isfinite(a1).all():
            raise ValueError("array must not contain infs or NaNs")
        overwrite_a_band = 1
    if len(a1.shape) != 2:
        raise ValueError('expected two-dimensional array')
    # Translate select/select_range into the Fortran-convention quantities
    select, vl, vu, il, iu, max_ev = _check_select(
        select, select_range, max_ev, a1.shape[1])
    del select_range
    if select == 0:
        # 'all' eigenvalues: divide-and-conquer *bevd drivers
        if a1.dtype.char in 'GFD':
            # FIXME: implement this somewhen, for now go with builtin values
            # FIXME: calc optimal lwork by calling ?hbevd(lwork=-1)
            #        or by using calc_lwork.f ???
            # lwork = calc_lwork.hbevd(bevd.typecode, a1.shape[0], lower)
            internal_name = 'hbevd'
        else:  # a1.dtype.char in 'fd':
            # FIXME: implement this somewhen, for now go with builtin values
            #         see above
            # lwork = calc_lwork.sbevd(bevd.typecode, a1.shape[0], lower)
            internal_name = 'sbevd'
        bevd, = get_lapack_funcs((internal_name,), (a1,))
        w, v, info = bevd(a1, compute_v=not eigvals_only,
                          lower=lower, overwrite_ab=overwrite_a_band)
    else:  # select in [1, 2]
        # Subset of eigenvalues requested: *bevx expert drivers
        if eigvals_only:
            max_ev = 1
        # calculate optimal abstol for dsbevx (see manpage)
        if a1.dtype.char in 'fF':  # single precision
            lamch, = get_lapack_funcs(('lamch',), (array(0, dtype='f'),))
        else:
            lamch, = get_lapack_funcs(('lamch',), (array(0, dtype='d'),))
        abstol = 2 * lamch('s')
        if a1.dtype.char in 'GFD':
            internal_name = 'hbevx'
        else:  # a1.dtype.char in 'gfd'
            internal_name = 'sbevx'
        bevx, = get_lapack_funcs((internal_name,), (a1,))
        w, v, m, ifail, info = bevx(
            a1, vl, vu, il, iu, compute_v=not eigvals_only, mmax=max_ev,
            range=select, lower=lower, overwrite_ab=overwrite_a_band,
            abstol=abstol)
        # crop off w and v: only the first m entries are meaningful
        w = w[:m]
        if not eigvals_only:
            v = v[:, :m]
    _check_info(info, internal_name)

    if eigvals_only:
        return w
    return w, v
homogeneous_eigvals : bool, optional If True, return the eigenvalues in homogeneous coordinates. In this case ``w`` is a (2, M) array so that:: w[1,i] a vr[:,i] = w[0,i] b vr[:,i] Default is False. Returns ------- w : (M,) or (2, M) double or complex ndarray The eigenvalues, each repeated according to its multiplicity but not in any specific order. The shape is (M,) unless ``homogeneous_eigvals=True``. Raises ------ LinAlgError If eigenvalue computation does not converge See Also -------- eig : eigenvalues and right eigenvectors of general arrays. eigvalsh : eigenvalues of symmetric or Hermitian arrays eigvals_banded : eigenvalues for symmetric/Hermitian band matrices eigvalsh_tridiagonal : eigenvalues of symmetric/Hermitian tridiagonal matrices Examples -------- >>> from scipy import linalg >>> a = np.array([[0., -1.], [1., 0.]]) >>> linalg.eigvals(a) array([0.+1.j, 0.-1.j]) >>> b = np.array([[0., 1.], [1., 1.]]) >>> linalg.eigvals(a, b) array([ 1.+0.j, -1.+0.j]) >>> a = np.array([[3., 0., 0.], [0., 8., 0.], [0., 0., 7.]]) >>> linalg.eigvals(a, homogeneous_eigvals=True) array([[3.+0.j, 8.+0.j, 7.+0.j], [1.+0.j, 1.+0.j, 1.+0.j]]) """ return eig(a, b=b, left=0, right=0, overwrite_a=overwrite_a, check_finite=check_finite, homogeneous_eigvals=homogeneous_eigvals) def eigvalsh(a, b=None, lower=True, overwrite_a=False, overwrite_b=False, turbo=True, eigvals=None, type=1, check_finite=True): """ Solve an ordinary or generalized eigenvalue problem for a complex Hermitian or real symmetric matrix. Find eigenvalues w of matrix a, where b is positive definite:: a v[:,i] = w[i] b v[:,i] v[i,:].conj() a v[:,i] = w[i] v[i,:].conj() b v[:,i] = 1 Parameters ---------- a : (M, M) array_like A complex Hermitian or real symmetric matrix whose eigenvalues and eigenvectors will be computed. b : (M, M) array_like, optional A complex Hermitian or real symmetric definite positive matrix in. If omitted, identity matrix is assumed. 
def eigvalsh(a, b=None, lower=True, overwrite_a=False,
             overwrite_b=False, turbo=True, eigvals=None, type=1,
             check_finite=True):
    """
    Solve an ordinary or generalized eigenvalue problem for a complex
    Hermitian or real symmetric matrix.

    Find eigenvalues w of matrix a, where b is positive definite::

                      a v[:,i] = w[i] b v[:,i]
        v[i,:].conj() a v[:,i] = w[i]
        v[i,:].conj() b v[:,i] = 1

    Parameters
    ----------
    a : (M, M) array_like
        A complex Hermitian or real symmetric matrix whose eigenvalues and
        eigenvectors will be computed.
    b : (M, M) array_like, optional
        A complex Hermitian or real symmetric definite positive matrix.
        If omitted, identity matrix is assumed.
    lower : bool, optional
        Whether the pertinent array data is taken from the lower or upper
        triangle of `a`. (Default: lower)
    turbo : bool, optional
        Use divide and conquer algorithm (faster but expensive in memory,
        only for generalized eigenvalue problem and if eigvals=None)
    eigvals : tuple (lo, hi), optional
        Indexes of the smallest and largest (in ascending order) eigenvalues
        and corresponding eigenvectors to be returned: 0 <= lo < hi <= M-1.
        If omitted, all eigenvalues and eigenvectors are returned.
    type : int, optional
        Specifies the problem type to be solved:

           type = 1: a   v[:,i] = w[i] b v[:,i]

           type = 2: a b v[:,i] = w[i]   v[:,i]

           type = 3: b a v[:,i] = w[i]   v[:,i]
    overwrite_a : bool, optional
        Whether to overwrite data in `a` (may improve performance)
    overwrite_b : bool, optional
        Whether to overwrite data in `b` (may improve performance)
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities
        or NaNs.

    Returns
    -------
    w : (N,) float ndarray
        The N (1<=N<=M) selected eigenvalues, in ascending order, each
        repeated according to its multiplicity.

    Raises
    ------
    LinAlgError
        If eigenvalue computation does not converge, an error occurred, or
        b matrix is not definite positive. Note that if input matrices are
        not symmetric or hermitian, no error is reported but results will
        be wrong.

    See Also
    --------
    eigh : eigenvalues and right eigenvectors for symmetric/Hermitian arrays
    eigvals : eigenvalues of general arrays
    eigvals_banded : eigenvalues for symmetric/Hermitian band matrices
    eigvalsh_tridiagonal : eigenvalues of symmetric/Hermitian tridiagonal
        matrices

    Notes
    -----
    This function does not check the input array for being
    hermitian/symmetric in order to allow for representing arrays with only
    their upper/lower triangular parts.

    Examples
    --------
    >>> from scipy.linalg import eigvalsh
    >>> A = np.array([[6, 3, 1, 5], [3, 0, 5, 1], [1, 5, 6, 2], [5, 1, 2, 2]])
    >>> w = eigvalsh(A)
    >>> w
    array([-3.74637491, -0.76263923,  6.08502336, 12.42399079])

    """
    # Delegate to eigh with eigenvector computation disabled.
    return eigh(a, b=b, lower=lower, eigvals_only=True,
                overwrite_a=overwrite_a, overwrite_b=overwrite_b,
                turbo=turbo, eigvals=eigvals, type=type,
                check_finite=check_finite)
Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- w : (M,) ndarray The eigenvalues, in ascending order, each repeated according to its multiplicity. Raises ------ LinAlgError If eigenvalue computation does not converge. See Also -------- eig_banded : eigenvalues and right eigenvectors for symmetric/Hermitian band matrices eigvalsh_tridiagonal : eigenvalues of symmetric/Hermitian tridiagonal matrices eigvals : eigenvalues of general arrays eigh : eigenvalues and right eigenvectors for symmetric/Hermitian arrays eig : eigenvalues and right eigenvectors for non-symmetric arrays Examples -------- >>> from scipy.linalg import eigvals_banded >>> A = np.array([[1, 5, 2, 0], [5, 2, 5, 2], [2, 5, 3, 5], [0, 2, 5, 4]]) >>> Ab = np.array([[1, 2, 3, 4], [5, 5, 5, 0], [2, 2, 0, 0]]) >>> w = eigvals_banded(Ab, lower=True) >>> w array([-4.26200532, -2.22987175, 3.95222349, 12.53965359]) """ return eig_banded(a_band, lower=lower, eigvals_only=1, overwrite_a_band=overwrite_a_band, select=select, select_range=select_range, check_finite=check_finite) def eigvalsh_tridiagonal(d, e, select='a', select_range=None, check_finite=True, tol=0., lapack_driver='auto'): """ Solve eigenvalue problem for a real symmetric tridiagonal matrix. Find eigenvalues `w` of ``a``:: a v[:,i] = w[i] v[:,i] v.H v = identity For a real symmetric matrix ``a`` with diagonal elements `d` and off-diagonal elements `e`. Parameters ---------- d : ndarray, shape (ndim,) The diagonal elements of the array. e : ndarray, shape (ndim-1,) The off-diagonal elements of the array. 
select : {'a', 'v', 'i'}, optional Which eigenvalues to calculate ====== ======================================== select calculated ====== ======================================== 'a' All eigenvalues 'v' Eigenvalues in the interval (min, max] 'i' Eigenvalues with indices min <= i <= max ====== ======================================== select_range : (min, max), optional Range of selected eigenvalues check_finite : bool, optional Whether to check that the input matrix contains only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. tol : float The absolute tolerance to which each eigenvalue is required (only used when ``lapack_driver='stebz'``). An eigenvalue (or cluster) is considered to have converged if it lies in an interval of this width. If <= 0. (default), the value ``eps*|a|`` is used where eps is the machine precision, and ``|a|`` is the 1-norm of the matrix ``a``. lapack_driver : str LAPACK function to use, can be 'auto', 'stemr', 'stebz', 'sterf', or 'stev'. When 'auto' (default), it will use 'stemr' if ``select='a'`` and 'stebz' otherwise. 'sterf' and 'stev' can only be used when ``select='a'``. Returns ------- w : (M,) ndarray The eigenvalues, in ascending order, each repeated according to its multiplicity. Raises ------ LinAlgError If eigenvalue computation does not converge. 
def eigh_tridiagonal(d, e, eigvals_only=False, select='a', select_range=None,
                     check_finite=True, tol=0., lapack_driver='auto'):
    """
    Solve eigenvalue problem for a real symmetric tridiagonal matrix.

    Find eigenvalues `w` and optionally right eigenvectors `v` of ``a``::

        a v[:,i] = w[i] v[:,i]
        v.H v    = identity

    For a real symmetric matrix ``a`` with diagonal elements `d` and
    off-diagonal elements `e`.

    Parameters
    ----------
    d : ndarray, shape (ndim,)
        The diagonal elements of the array.
    e : ndarray, shape (ndim-1,)
        The off-diagonal elements of the array.
    eigvals_only : bool, optional
        Compute only the eigenvalues and no eigenvectors.
        (Default: calculate also eigenvectors)
    select : {'a', 'v', 'i'}, optional
        Which eigenvalues to calculate

        ======  ========================================
        select  calculated
        ======  ========================================
        'a'     All eigenvalues
        'v'     Eigenvalues in the interval (min, max]
        'i'     Eigenvalues with indices min <= i <= max
        ======  ========================================
    select_range : (min, max), optional
        Range of selected eigenvalues
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
    tol : float
        The absolute tolerance to which each eigenvalue is required
        (only used when 'stebz' is the `lapack_driver`).
        An eigenvalue (or cluster) is considered to have converged if it
        lies in an interval of this width. If <= 0. (default), the value
        ``eps*|a|`` is used where eps is the machine precision, and ``|a|``
        is the 1-norm of the matrix ``a``.
    lapack_driver : str
        LAPACK function to use, can be 'auto', 'stemr', 'stebz', 'sterf',
        or 'stev'. When 'auto' (default), it will use 'stemr' if ``select='a'``
        and 'stebz' otherwise. When 'stebz' is used to find the eigenvalues and
        ``eigvals_only=False``, then a second LAPACK call (to ``?STEIN``) is
        used to find the corresponding eigenvectors. 'sterf' can only be
        used when ``eigvals_only=True`` and ``select='a'``. 'stev' can only
        be used when ``select='a'``.

    Returns
    -------
    w : (M,) ndarray
        The eigenvalues, in ascending order, each repeated according to its
        multiplicity.
    v : (M, M) ndarray
        The normalized eigenvector corresponding to the eigenvalue ``w[i]`` is
        the column ``v[:,i]``.

    Raises
    ------
    LinAlgError
        If eigenvalue computation does not converge.

    See Also
    --------
    eigvalsh_tridiagonal : eigenvalues of symmetric/Hermitian tridiagonal
        matrices
    eig : eigenvalues and right eigenvectors for non-symmetric arrays
    eigh : eigenvalues and right eigenvectors for symmetric/Hermitian arrays
    eig_banded : eigenvalues and right eigenvectors for symmetric/Hermitian
        band matrices

    Notes
    -----
    This function makes use of LAPACK ``S/DSTEMR`` routines.

    Examples
    --------
    >>> from scipy.linalg import eigh_tridiagonal
    >>> d = 3*np.ones(4)
    >>> e = -1*np.ones(3)
    >>> w, v = eigh_tridiagonal(d, e)
    >>> A = np.diag(d) + np.diag(e, k=1) + np.diag(e, k=-1)
    >>> np.allclose(A @ v - v @ np.diag(w), np.zeros((4, 4)))
    True
    """
    d = _asarray_validated(d, check_finite=check_finite)
    e = _asarray_validated(e, check_finite=check_finite)
    # Both inputs must be real 1-d arrays with len(d) == len(e) + 1
    for check in (d, e):
        if check.ndim != 1:
            raise ValueError('expected one-dimensional array')
        if check.dtype.char in 'GFD':  # complex
            raise TypeError('Only real arrays currently supported')
    if d.size != e.size + 1:
        raise ValueError('d (%s) must have one more element than e (%s)'
                         % (d.size, e.size))
    # Translate select/select_range into Fortran-convention quantities
    select, vl, vu, il, iu, _ = _check_select(
        select, select_range, 0, d.size)
    if not isinstance(lapack_driver, string_types):
        raise TypeError('lapack_driver must be str')
    drivers = ('auto', 'stemr', 'sterf', 'stebz', 'stev')
    if lapack_driver not in drivers:
        raise ValueError('lapack_driver must be one of %s, got %s'
                         % (drivers, lapack_driver))
    if lapack_driver == 'auto':
        # stemr handles the all-eigenvalues case; stebz supports subsets
        lapack_driver = 'stemr' if select == 0 else 'stebz'
    func, = get_lapack_funcs((lapack_driver,), (d, e))
    compute_v = not eigvals_only
    if lapack_driver == 'sterf':
        # sterf: eigenvalues only, all of them
        if select != 0:
            raise ValueError('sterf can only be used when select == "a"')
        if not eigvals_only:
            raise ValueError('sterf can only be used when eigvals_only is '
                             'True')
        w, info = func(d, e)
        m = len(w)
    elif lapack_driver == 'stev':
        # stev: all eigenvalues, optionally with eigenvectors
        if select != 0:
            raise ValueError('stev can only be used when select == "a"')
        w, v, info = func(d, e, compute_v=compute_v)
        m = len(w)
    elif lapack_driver == 'stebz':
        tol = float(tol)
        internal_name = 'stebz'
        stebz, = get_lapack_funcs((internal_name,), (d, e))
        # If getting eigenvectors, needs to be block-ordered (B) instead of
        # matrix-ordered (E), and we will reorder later
        order = 'E' if eigvals_only else 'B'
        m, w, iblock, isplit, info = stebz(d, e, select, vl, vu, il, iu, tol,
                                           order)
    else:   # 'stemr'
        # ?STEMR annoyingly requires size N instead of N-1
        e_ = empty(e.size+1, e.dtype)
        e_[:-1] = e
        # workspace query first, then the actual call
        stemr_lwork, = get_lapack_funcs(('stemr_lwork',), (d, e))
        lwork, liwork, info = stemr_lwork(d, e_, select, vl, vu, il, iu,
                                          compute_v=compute_v)
        _check_info(info, 'stemr_lwork')
        m, w, v, info = func(d, e_, select, vl, vu, il, iu,
                             compute_v=compute_v, lwork=lwork, liwork=liwork)
    _check_info(info, lapack_driver + ' (eigh_tridiagonal)')
    w = w[:m]
    if eigvals_only:
        return w
    else:
        # Do we still need to compute the eigenvectors?
        if lapack_driver == 'stebz':
            # stebz only produced eigenvalues; get eigenvectors with ?STEIN
            func, = get_lapack_funcs(('stein',), (d, e))
            v, info = func(d, e, w, iblock, isplit)
            _check_info(info, 'stein (eigh_tridiagonal)',
                        positive='%d eigenvectors failed to converge')
            # Convert block-order to matrix-order
            order = argsort(w)
            w, v = w[order], v[:, order]
        else:
            v = v[:, :m]
        return w, v
empty(e.size+1, e.dtype) e_[:-1] = e stemr_lwork, = get_lapack_funcs(('stemr_lwork',), (d, e)) lwork, liwork, info = stemr_lwork(d, e_, select, vl, vu, il, iu, compute_v=compute_v) _check_info(info, 'stemr_lwork') m, w, v, info = func(d, e_, select, vl, vu, il, iu, compute_v=compute_v, lwork=lwork, liwork=liwork) _check_info(info, lapack_driver + ' (eigh_tridiagonal)') w = w[:m] if eigvals_only: return w else: # Do we still need to compute the eigenvalues? if lapack_driver == 'stebz': func, = get_lapack_funcs(('stein',), (d, e)) v, info = func(d, e, w, iblock, isplit) _check_info(info, 'stein (eigh_tridiagonal)', positive='%d eigenvectors failed to converge') # Convert block-order to matrix-order order = argsort(w) w, v = w[order], v[:, order] else: v = v[:, :m] return w, v def _check_info(info, driver, positive='did not converge (LAPACK info=%d)'): """Check info return value.""" if info < 0: raise ValueError('illegal value in argument %d of internal %s' % (-info, driver)) if info > 0 and positive: raise LinAlgError(("%s " + positive) % (driver, info,)) def hessenberg(a, calc_q=False, overwrite_a=False, check_finite=True): """ Compute Hessenberg form of a matrix. The Hessenberg decomposition is:: A = Q H Q^H where `Q` is unitary/orthogonal and `H` has only zero elements below the first sub-diagonal. Parameters ---------- a : (M, M) array_like Matrix to bring into Hessenberg form. calc_q : bool, optional Whether to compute the transformation matrix. Default is False. overwrite_a : bool, optional Whether to overwrite `a`; may improve performance. Default is False. check_finite : bool, optional Whether to check that the input matrix contains only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- H : (M, M) ndarray Hessenberg form of `a`. Q : (M, M) ndarray Unitary/orthogonal similarity transformation matrix ``A = Q H Q^H``. 
Only returned if ``calc_q=True``. Examples -------- >>> from scipy.linalg import hessenberg >>> A = np.array([[2, 5, 8, 7], [5, 2, 2, 8], [7, 5, 6, 6], [5, 4, 4, 8]]) >>> H, Q = hessenberg(A, calc_q=True) >>> H array([[ 2. , -11.65843866, 1.42005301, 0.25349066], [ -9.94987437, 14.53535354, -5.31022304, 2.43081618], [ 0. , -1.83299243, 0.38969961, -0.51527034], [ 0. , 0. , -3.83189513, 1.07494686]]) >>> np.allclose(Q @ H @ Q.conj().T - A, np.zeros((4, 4))) True """ a1 = _asarray_validated(a, check_finite=check_finite) if len(a1.shape) != 2 or (a1.shape[0] != a1.shape[1]): raise ValueError('expected square matrix') overwrite_a = overwrite_a or (_datacopied(a1, a)) # if 2x2 or smaller: already in Hessenberg if a1.shape[0] <= 2: if calc_q: return a1, numpy.eye(a1.shape[0]) return a1 gehrd, gebal, gehrd_lwork = get_lapack_funcs(('gehrd', 'gebal', 'gehrd_lwork'), (a1,)) ba, lo, hi, pivscale, info = gebal(a1, permute=0, overwrite_a=overwrite_a) _check_info(info, 'gebal (hessenberg)', positive=False) n = len(a1) lwork = _compute_lwork(gehrd_lwork, ba.shape[0], lo=lo, hi=hi) hq, tau, info = gehrd(ba, lo=lo, hi=hi, lwork=lwork, overwrite_a=1) _check_info(info, 'gehrd (hessenberg)', positive=False) h = numpy.triu(hq, -1) if not calc_q: return h # use orghr/unghr to compute q orghr, orghr_lwork = get_lapack_funcs(('orghr', 'orghr_lwork'), (a1,)) lwork = _compute_lwork(orghr_lwork, n, lo=lo, hi=hi) q, info = orghr(a=hq, tau=tau, lo=lo, hi=hi, lwork=lwork, overwrite_a=1) _check_info(info, 'orghr (hessenberg)', positive=False) return h, q def cdf2rdf(w, v): """ Converts complex eigenvalues ``w`` and eigenvectors ``v`` to real eigenvalues in a block diagonal form ``wr`` and the associated real eigenvectors ``vr``, such that:: vr @ wr = X @ vr continues to hold, where ``X`` is the original array for which ``w`` and ``v`` are the eigenvalues and eigenvectors. .. 
def cdf2rdf(w, v):
    """
    Converts complex eigenvalues ``w`` and eigenvectors ``v`` to real
    eigenvalues in a block diagonal form ``wr`` and the associated real
    eigenvectors ``vr``, such that::

        vr @ wr = X @ vr

    continues to hold, where ``X`` is the original array for which ``w`` and
    ``v`` are the eigenvalues and eigenvectors.

    .. versionadded:: 1.1.0

    Parameters
    ----------
    w : (..., M) array_like
        Complex or real eigenvalues, an array or stack of arrays

        Conjugate pairs must not be interleaved, else the wrong result
        will be produced. So ``[1+1j, 1, 1-1j]`` will give a correct result,
        but ``[1+1j, 2+1j, 1-1j, 2-1j]`` will not.

    v : (..., M, M) array_like
        Complex or real eigenvectors, a square array or stack of square
        arrays.

    Returns
    -------
    wr : (..., M, M) ndarray
        Real diagonal block form of eigenvalues
    vr : (..., M, M) ndarray
        Real eigenvectors associated with ``wr``

    See Also
    --------
    eig : Eigenvalues and right eigenvectors for non-symmetric arrays
    rsf2csf : Convert real Schur form to complex Schur form

    Notes
    -----
    ``w``, ``v`` must be the eigenstructure for some *real* matrix ``X``.
    For example, obtained by ``w, v = scipy.linalg.eig(X)`` or
    ``w, v = numpy.linalg.eig(X)`` in which case ``X`` can also represent
    stacked arrays.

    .. versionadded:: 1.1.0

    Examples
    --------
    >>> X = np.array([[1, 2, 3], [0, 4, 5], [0, -5, 4]])
    >>> X
    array([[ 1,  2,  3],
           [ 0,  4,  5],
           [ 0, -5,  4]])

    >>> from scipy import linalg
    >>> w, v = linalg.eig(X)
    >>> w
    array([ 1.+0.j,  4.+5.j,  4.-5.j])
    >>> v
    array([[ 1.00000+0.j     , -0.01906-0.40016j, -0.01906+0.40016j],
           [ 0.00000+0.j     ,  0.00000-0.64788j,  0.00000+0.64788j],
           [ 0.00000+0.j     ,  0.64788+0.j     ,  0.64788-0.j     ]])

    >>> wr, vr = linalg.cdf2rdf(w, v)
    >>> wr
    array([[ 1.,  0.,  0.],
           [ 0.,  4.,  5.],
           [ 0., -5.,  4.]])
    >>> vr
    array([[ 1.     ,  0.40016, -0.01906],
           [ 0.     ,  0.64788,  0.     ],
           [ 0.     ,  0.     ,  0.64788]])

    >>> vr @ wr
    array([[ 1.     ,  1.69593,  1.9246 ],
           [ 0.     ,  2.59153,  3.23942],
           [ 0.     , -3.23942,  2.59153]])
    >>> X @ vr
    array([[ 1.     ,  1.69593,  1.9246 ],
           [ 0.     ,  2.59153,  3.23942],
           [ 0.     , -3.23942,  2.59153]])
    """
    w, v = _asarray_validated(w), _asarray_validated(v)

    # check dimensions
    if w.ndim < 1:
        raise ValueError('expected w to be at least one-dimensional')
    if v.ndim < 2:
        raise ValueError('expected v to be at least two-dimensional')
    if v.ndim != w.ndim + 1:
        raise ValueError('expected eigenvectors array to have exactly one '
                         'dimension more than eigenvalues array')

    # check shapes: n eigenvalues per matrix, M is the stack shape
    n = w.shape[-1]
    M = w.shape[:-1]
    if v.shape[-2] != v.shape[-1]:
        raise ValueError('expected v to be a square matrix or stacked square '
                         'matrices: v.shape[-2] = v.shape[-1]')
    if v.shape[-1] != n:
        raise ValueError('expected the same number of eigenvalues as '
                         'eigenvectors')

    # get indices for each first pair of complex eigenvalues
    complex_mask = iscomplex(w)
    n_complex = complex_mask.sum(axis=-1)

    # check if all complex eigenvalues have conjugate pairs
    if not (n_complex % 2 == 0).all():
        raise ValueError('expected complex-conjugate pairs of eigenvalues')

    # find complex indices; the last axis indexes within one eigenvalue
    # array, the leading axes index into the stack
    idx = nonzero(complex_mask)
    idx_stack = idx[:-1]
    idx_elem = idx[-1]

    # filter them to conjugate indices, assuming pairs are not interleaved
    j = idx_elem[0::2]
    k = idx_elem[1::2]
    stack_ind = ()
    for i in idx_stack:
        # should never happen, assuming nonzero orders by the last axis
        assert (i[0::2] == i[1::2]).all(), "Conjugate pair spanned different arrays!"
        stack_ind += (i[0::2],)

    # all eigenvalues to diagonal form
    wr = zeros(M + (n, n), dtype=w.real.dtype)
    di = range(n)
    wr[..., di, di] = w.real

    # complex eigenvalues to real block diagonal form (2x2 rotation blocks)
    wr[stack_ind + (j, k)] = w[stack_ind + (j,)].imag
    wr[stack_ind + (k, j)] = w[stack_ind + (k,)].imag

    # compute real eigenvectors associated with real block diagonal eigenvalues
    u = zeros(M + (n, n), dtype=numpy.cdouble)
    u[..., di, di] = 1.0
    u[stack_ind + (j, j)] = 0.5j
    u[stack_ind + (j, k)] = 0.5
    u[stack_ind + (k, j)] = -0.5j
    u[stack_ind + (k, k)] = 0.5

    # multiply matrices v and u (equivalent to v @ u)
    vr = einsum('...ij,...jk->...ik', v, u).real

    return wr, vr
52,862
35.915503
85
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/setup.py
from __future__ import division, print_function, absolute_import import os from os.path import join def configuration(parent_package='', top_path=None): from distutils.sysconfig import get_python_inc from numpy.distutils.system_info import get_info, NotFoundError, numpy_info from numpy.distutils.misc_util import Configuration, get_numpy_include_dirs from scipy._build_utils import (get_sgemv_fix, get_g77_abi_wrappers, split_fortran_files) config = Configuration('linalg', parent_package, top_path) lapack_opt = get_info('lapack_opt') if not lapack_opt: raise NotFoundError('no lapack/blas resources found') atlas_version = ([v[3:-3] for k, v in lapack_opt.get('define_macros', []) if k == 'ATLAS_INFO']+[None])[0] if atlas_version: print(('ATLAS version: %s' % atlas_version)) # fblas: sources = ['fblas.pyf.src'] sources += get_g77_abi_wrappers(lapack_opt) sources += get_sgemv_fix(lapack_opt) config.add_extension('_fblas', sources=sources, depends=['fblas_l?.pyf.src'], extra_info=lapack_opt ) # flapack: sources = ['flapack.pyf.src'] sources += get_g77_abi_wrappers(lapack_opt) dep_pfx = join('src', 'lapack_deprecations') deprecated_lapack_routines = [join(dep_pfx, c + 'gegv.f') for c in 'cdsz'] sources += deprecated_lapack_routines config.add_extension('_flapack', sources=sources, depends=['flapack_user.pyf.src'], extra_info=lapack_opt ) if atlas_version is not None: # cblas: config.add_extension('_cblas', sources=['cblas.pyf.src'], depends=['cblas.pyf.src', 'cblas_l1.pyf.src'], extra_info=lapack_opt ) # clapack: config.add_extension('_clapack', sources=['clapack.pyf.src'], depends=['clapack.pyf.src'], extra_info=lapack_opt ) # _flinalg: config.add_extension('_flinalg', sources=[join('src', 'det.f'), join('src', 'lu.f')], extra_info=lapack_opt ) # _interpolative: routines_to_split = [ 'dfftb1', 'dfftf1', 'dffti1', 'dsint1', 'dzfft1', 'id_srand', 'idd_copyints', 'idd_id2svd0', 'idd_pairsamps', 'idd_permute', 'idd_permuter', 'idd_random_transf0', 'idd_random_transf0_inv', 
'idd_random_transf_init0', 'idd_subselect', 'iddp_asvd0', 'iddp_rsvd0', 'iddr_asvd0', 'iddr_rsvd0', 'idz_estrank0', 'idz_id2svd0', 'idz_permute', 'idz_permuter', 'idz_random_transf0_inv', 'idz_random_transf_init0', 'idz_random_transf_init00', 'idz_realcomp', 'idz_realcomplex', 'idz_reco', 'idz_subselect', 'idzp_aid0', 'idzp_aid1', 'idzp_asvd0', 'idzp_rsvd0', 'idzr_asvd0', 'idzr_reco', 'idzr_rsvd0', 'zfftb1', 'zfftf1', 'zffti1', ] print('Splitting linalg.interpolative Fortran source files') dirname = os.path.split(os.path.abspath(__file__))[0] fnames = split_fortran_files(join(dirname, 'src', 'id_dist', 'src'), routines_to_split) fnames = [join('src', 'id_dist', 'src', f) for f in fnames] config.add_extension('_interpolative', fnames + ["interpolative.pyf"], extra_info=lapack_opt ) # _solve_toeplitz: config.add_extension('_solve_toeplitz', sources=[('_solve_toeplitz.c')], include_dirs=[get_numpy_include_dirs()]) config.add_data_dir('tests') # Cython BLAS/LAPACK config.add_data_files('cython_blas.pxd') config.add_data_files('cython_lapack.pxd') sources = ['_blas_subroutine_wrappers.f', '_lapack_subroutine_wrappers.f'] sources += get_g77_abi_wrappers(lapack_opt) sources += get_sgemv_fix(lapack_opt) includes = numpy_info().get_include_dirs() + [get_python_inc()] config.add_library('fwrappers', sources=sources, include_dirs=includes) config.add_extension('cython_blas', sources=['cython_blas.c'], depends=['cython_blas.pyx', 'cython_blas.pxd', 'fortran_defs.h', '_blas_subroutines.h'], include_dirs=['.'], libraries=['fwrappers'], extra_info=lapack_opt) config.add_extension('cython_lapack', sources=['cython_lapack.c'], depends=['cython_lapack.pyx', 'cython_lapack.pxd', 'fortran_defs.h', '_lapack_subroutines.h'], include_dirs=['.'], libraries=['fwrappers'], extra_info=lapack_opt) config.add_extension('_decomp_update', sources=['_decomp_update.c']) # Add any license files config.add_data_files('src/id_dist/doc/doc.tex') config.add_data_files('src/lapack_deprecations/LICENSE') 
return config if __name__ == '__main__': from numpy.distutils.core import setup from linalg_version import linalg_version setup(version=linalg_version, **configuration(top_path='').todict())
5,845
32.988372
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/linalg_version.py
from __future__ import division, print_function, absolute_import major = 0 minor = 4 micro = 9 linalg_version = '%(major)d.%(minor)d.%(micro)d' % (locals())
159
19
64
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/_matfuncs_inv_ssq.py
""" Matrix functions that use Pade approximation with inverse scaling and squaring. """ from __future__ import division, print_function, absolute_import import warnings import numpy as np from scipy.linalg._matfuncs_sqrtm import SqrtmError, _sqrtm_triu from scipy.linalg.decomp_schur import schur, rsf2csf from scipy.linalg.matfuncs import funm from scipy.linalg import svdvals, solve_triangular from scipy.sparse.linalg.interface import LinearOperator from scipy.sparse.linalg import onenormest import scipy.special class LogmRankWarning(UserWarning): pass class LogmExactlySingularWarning(LogmRankWarning): pass class LogmNearlySingularWarning(LogmRankWarning): pass class LogmError(np.linalg.LinAlgError): pass class FractionalMatrixPowerError(np.linalg.LinAlgError): pass #TODO renovate or move this class when scipy operators are more mature class _MatrixM1PowerOperator(LinearOperator): """ A representation of the linear operator (A - I)^p. """ def __init__(self, A, p): if A.ndim != 2 or A.shape[0] != A.shape[1]: raise ValueError('expected A to be like a square matrix') if p < 0 or p != int(p): raise ValueError('expected p to be a non-negative integer') self._A = A self._p = p self.ndim = A.ndim self.shape = A.shape def _matvec(self, x): for i in range(self._p): x = self._A.dot(x) - x return x def _rmatvec(self, x): for i in range(self._p): x = x.dot(self._A) - x return x def _matmat(self, X): for i in range(self._p): X = self._A.dot(X) - X return X def _adjoint(self): return _MatrixM1PowerOperator(self._A.T, self._p) #TODO renovate or move this function when scipy operators are more mature def _onenormest_m1_power(A, p, t=2, itmax=5, compute_v=False, compute_w=False): """ Efficiently estimate the 1-norm of (A - I)^p. Parameters ---------- A : ndarray Matrix whose 1-norm of a power is to be computed. p : int Non-negative integer power. t : int, optional A positive parameter controlling the tradeoff between accuracy versus time and memory usage. 
Larger values take longer and use more memory but give more accurate output. itmax : int, optional Use at most this many iterations. compute_v : bool, optional Request a norm-maximizing linear operator input vector if True. compute_w : bool, optional Request a norm-maximizing linear operator output vector if True. Returns ------- est : float An underestimate of the 1-norm of the sparse matrix. v : ndarray, optional The vector such that ||Av||_1 == est*||v||_1. It can be thought of as an input to the linear operator that gives an output with particularly large norm. w : ndarray, optional The vector Av which has relatively large 1-norm. It can be thought of as an output of the linear operator that is relatively large in norm compared to the input. """ return onenormest(_MatrixM1PowerOperator(A, p), t=t, itmax=itmax, compute_v=compute_v, compute_w=compute_w) def _unwindk(z): """ Compute the scalar unwinding number. Uses Eq. (5.3) in [1]_, and should be equal to (z - log(exp(z)) / (2 pi i). Note that this definition differs in sign from the original definition in equations (5, 6) in [2]_. The sign convention is justified in [3]_. Parameters ---------- z : complex A complex number. Returns ------- unwinding_number : integer The scalar unwinding number of z. References ---------- .. [1] Nicholas J. Higham and Lijing lin (2011) "A Schur-Pade Algorithm for Fractional Powers of a Matrix." SIAM Journal on Matrix Analysis and Applications, 32 (3). pp. 1056-1078. ISSN 0895-4798 .. [2] Robert M. Corless and David J. Jeffrey, "The unwinding number." Newsletter ACM SIGSAM Bulletin Volume 30, Issue 2, June 1996, Pages 28-35. .. [3] Russell Bradford and Robert M. Corless and James H. Davenport and David J. Jeffrey and Stephen M. Watt, "Reasoning about the elementary functions of complex analysis" Annals of Mathematics and Artificial Intelligence, 36: 303-318, 2002. 
""" return int(np.ceil((z.imag - np.pi) / (2*np.pi))) def _briggs_helper_function(a, k): """ Computes r = a^(1 / (2^k)) - 1. This is algorithm (2) of [1]_. The purpose is to avoid a danger of subtractive cancellation. For more computational efficiency it should probably be cythonized. Parameters ---------- a : complex A complex number. k : integer A nonnegative integer. Returns ------- r : complex The value r = a^(1 / (2^k)) - 1 computed with less cancellation. Notes ----- The algorithm as formulated in the reference does not handle k=0 or k=1 correctly, so these are special-cased in this implementation. This function is intended to not allow `a` to belong to the closed negative real axis, but this constraint is relaxed. References ---------- .. [1] Awad H. Al-Mohy (2012) "A more accurate Briggs method for the logarithm", Numerical Algorithms, 59 : 393--402. """ if k < 0 or int(k) != k: raise ValueError('expected a nonnegative integer k') if k == 0: return a - 1 elif k == 1: return np.sqrt(a) - 1 else: k_hat = k if np.angle(a) >= np.pi / 2: a = np.sqrt(a) k_hat = k - 1 z0 = a - 1 a = np.sqrt(a) r = 1 + a for j in range(1, k_hat): a = np.sqrt(a) r = r * (1 + a) r = z0 / r return r def _fractional_power_superdiag_entry(l1, l2, t12, p): """ Compute a superdiagonal entry of a fractional matrix power. This is Eq. (5.6) in [1]_. Parameters ---------- l1 : complex A diagonal entry of the matrix. l2 : complex A diagonal entry of the matrix. t12 : complex A superdiagonal entry of the matrix. p : float A fractional power. Returns ------- f12 : complex A superdiagonal entry of the fractional matrix power. Notes ----- Care has been taken to return a real number if possible when all of the inputs are real numbers. References ---------- .. [1] Nicholas J. Higham and Lijing lin (2011) "A Schur-Pade Algorithm for Fractional Powers of a Matrix." SIAM Journal on Matrix Analysis and Applications, 32 (3). pp. 1056-1078. 
ISSN 0895-4798 """ if l1 == l2: f12 = t12 * p * l1**(p-1) elif abs(l2 - l1) > abs(l1 + l2) / 2: f12 = t12 * ((l2**p) - (l1**p)) / (l2 - l1) else: # This is Eq. (5.5) in [1]. z = (l2 - l1) / (l2 + l1) log_l1 = np.log(l1) log_l2 = np.log(l2) arctanh_z = np.arctanh(z) tmp_a = t12 * np.exp((p/2)*(log_l2 + log_l1)) tmp_u = _unwindk(log_l2 - log_l1) if tmp_u: tmp_b = p * (arctanh_z + np.pi * 1j * tmp_u) else: tmp_b = p * arctanh_z tmp_c = 2 * np.sinh(tmp_b) / (l2 - l1) f12 = tmp_a * tmp_c return f12 def _logm_superdiag_entry(l1, l2, t12): """ Compute a superdiagonal entry of a matrix logarithm. This is like Eq. (11.28) in [1]_, except the determination of whether l1 and l2 are sufficiently far apart has been modified. Parameters ---------- l1 : complex A diagonal entry of the matrix. l2 : complex A diagonal entry of the matrix. t12 : complex A superdiagonal entry of the matrix. Returns ------- f12 : complex A superdiagonal entry of the matrix logarithm. Notes ----- Care has been taken to return a real number if possible when all of the inputs are real numbers. References ---------- .. [1] Nicholas J. Higham (2008) "Functions of Matrices: Theory and Computation" ISBN 978-0-898716-46-7 """ if l1 == l2: f12 = t12 / l1 elif abs(l2 - l1) > abs(l1 + l2) / 2: f12 = t12 * (np.log(l2) - np.log(l1)) / (l2 - l1) else: z = (l2 - l1) / (l2 + l1) u = _unwindk(np.log(l2) - np.log(l1)) if u: f12 = t12 * 2 * (np.arctanh(z) + np.pi*1j*u) / (l2 - l1) else: f12 = t12 * 2 * np.arctanh(z) / (l2 - l1) return f12 def _inverse_squaring_helper(T0, theta): """ A helper function for inverse scaling and squaring for Pade approximation. Parameters ---------- T0 : (N, N) array_like upper triangular Matrix involved in inverse scaling and squaring. theta : indexable The values theta[1] .. theta[7] must be available. They represent bounds related to Pade approximation, and they depend on the matrix function which is being computed. 
For example, different values of theta are required for matrix logarithm than for fractional matrix power. Returns ------- R : (N, N) array_like upper triangular Composition of zero or more matrix square roots of T0, minus I. s : non-negative integer Number of square roots taken. m : positive integer The degree of the Pade approximation. Notes ----- This subroutine appears as a chunk of lines within a couple of published algorithms; for example it appears as lines 4--35 in algorithm (3.1) of [1]_, and as lines 3--34 in algorithm (4.1) of [2]_. The instances of 'goto line 38' in algorithm (3.1) of [1]_ probably mean 'goto line 36' and have been intepreted accordingly. References ---------- .. [1] Nicholas J. Higham and Lijing Lin (2013) "An Improved Schur-Pade Algorithm for Fractional Powers of a Matrix and their Frechet Derivatives." .. [2] Awad H. Al-Mohy and Nicholas J. Higham (2012) "Improved Inverse Scaling and Squaring Algorithms for the Matrix Logarithm." SIAM Journal on Scientific Computing, 34 (4). C152-C169. ISSN 1095-7197 """ if len(T0.shape) != 2 or T0.shape[0] != T0.shape[1]: raise ValueError('expected an upper triangular square matrix') n, n = T0.shape T = T0 # Find s0, the smallest s such that the spectral radius # of a certain diagonal matrix is at most theta[7]. # Note that because theta[7] < 1, # this search will not terminate if any diagonal entry of T is zero. s0 = 0 tmp_diag = np.diag(T) if np.count_nonzero(tmp_diag) != n: raise Exception('internal inconsistency') while np.max(np.absolute(tmp_diag - 1)) > theta[7]: tmp_diag = np.sqrt(tmp_diag) s0 += 1 # Take matrix square roots of T. for i in range(s0): T = _sqrtm_triu(T) # Flow control in this section is a little odd. # This is because I am translating algorithm descriptions # which have GOTOs in the publication. 
s = s0 k = 0 d2 = _onenormest_m1_power(T, 2) ** (1/2) d3 = _onenormest_m1_power(T, 3) ** (1/3) a2 = max(d2, d3) m = None for i in (1, 2): if a2 <= theta[i]: m = i break while m is None: if s > s0: d3 = _onenormest_m1_power(T, 3) ** (1/3) d4 = _onenormest_m1_power(T, 4) ** (1/4) a3 = max(d3, d4) if a3 <= theta[7]: j1 = min(i for i in (3, 4, 5, 6, 7) if a3 <= theta[i]) if j1 <= 6: m = j1 break elif a3 / 2 <= theta[5] and k < 2: k += 1 T = _sqrtm_triu(T) s += 1 continue d5 = _onenormest_m1_power(T, 5) ** (1/5) a4 = max(d4, d5) eta = min(a3, a4) for i in (6, 7): if eta <= theta[i]: m = i break if m is not None: break T = _sqrtm_triu(T) s += 1 # The subtraction of the identity is redundant here, # because the diagonal will be replaced for improved numerical accuracy, # but this formulation should help clarify the meaning of R. R = T - np.identity(n) # Replace the diagonal and first superdiagonal of T0^(1/(2^s)) - I # using formulas that have less subtractive cancellation. # Skip this step if the principal branch # does not exist at T0; this happens when a diagonal entry of T0 # is negative with imaginary part 0. has_principal_branch = all(x.real > 0 or x.imag != 0 for x in np.diag(T0)) if has_principal_branch: for j in range(n): a = T0[j, j] r = _briggs_helper_function(a, s) R[j, j] = r p = np.exp2(-s) for j in range(n-1): l1 = T0[j, j] l2 = T0[j+1, j+1] t12 = T0[j, j+1] f12 = _fractional_power_superdiag_entry(l1, l2, t12, p) R[j, j+1] = f12 # Return the T-I matrix, the number of square roots, and the Pade degree. if not np.array_equal(R, np.triu(R)): raise Exception('internal inconsistency') return R, s, m def _fractional_power_pade_constant(i, t): # A helper function for matrix fractional power. 
if i < 1: raise ValueError('expected a positive integer i') if not (-1 < t < 1): raise ValueError('expected -1 < t < 1') if i == 1: return -t elif i % 2 == 0: j = i // 2 return (-j + t) / (2 * (2*j - 1)) elif i % 2 == 1: j = (i - 1) // 2 return (-j - t) / (2 * (2*j + 1)) else: raise Exception('internal error') def _fractional_power_pade(R, t, m): """ Evaluate the Pade approximation of a fractional matrix power. Evaluate the degree-m Pade approximation of R to the fractional matrix power t using the continued fraction in bottom-up fashion using algorithm (4.1) in [1]_. Parameters ---------- R : (N, N) array_like Upper triangular matrix whose fractional power to evaluate. t : float Fractional power between -1 and 1 exclusive. m : positive integer Degree of Pade approximation. Returns ------- U : (N, N) array_like The degree-m Pade approximation of R to the fractional power t. This matrix will be upper triangular. References ---------- .. [1] Nicholas J. Higham and Lijing lin (2011) "A Schur-Pade Algorithm for Fractional Powers of a Matrix." SIAM Journal on Matrix Analysis and Applications, 32 (3). pp. 1056-1078. ISSN 0895-4798 """ if m < 1 or int(m) != m: raise ValueError('expected a positive integer m') if not (-1 < t < 1): raise ValueError('expected -1 < t < 1') R = np.asarray(R) if len(R.shape) != 2 or R.shape[0] != R.shape[1]: raise ValueError('expected an upper triangular square matrix') n, n = R.shape ident = np.identity(n) Y = R * _fractional_power_pade_constant(2*m, t) for j in range(2*m - 1, 0, -1): rhs = R * _fractional_power_pade_constant(j, t) Y = solve_triangular(ident + Y, rhs) U = ident + Y if not np.array_equal(U, np.triu(U)): raise Exception('internal inconsistency') return U def _remainder_matrix_power_triu(T, t): """ Compute a fractional power of an upper triangular matrix. The fractional power is restricted to fractions -1 < t < 1. This uses algorithm (3.1) of [1]_. The Pade approximation itself uses algorithm (4.1) of [2]_. 
Parameters ---------- T : (N, N) array_like Upper triangular matrix whose fractional power to evaluate. t : float Fractional power between -1 and 1 exclusive. Returns ------- X : (N, N) array_like The fractional power of the matrix. References ---------- .. [1] Nicholas J. Higham and Lijing Lin (2013) "An Improved Schur-Pade Algorithm for Fractional Powers of a Matrix and their Frechet Derivatives." .. [2] Nicholas J. Higham and Lijing lin (2011) "A Schur-Pade Algorithm for Fractional Powers of a Matrix." SIAM Journal on Matrix Analysis and Applications, 32 (3). pp. 1056-1078. ISSN 0895-4798 """ m_to_theta = { 1: 1.51e-5, 2: 2.24e-3, 3: 1.88e-2, 4: 6.04e-2, 5: 1.24e-1, 6: 2.00e-1, 7: 2.79e-1, } n, n = T.shape T0 = T T0_diag = np.diag(T0) if np.array_equal(T0, np.diag(T0_diag)): U = np.diag(T0_diag ** t) else: R, s, m = _inverse_squaring_helper(T0, m_to_theta) # Evaluate the Pade approximation. # Note that this function expects the negative of the matrix # returned by the inverse squaring helper. U = _fractional_power_pade(-R, t, m) # Undo the inverse scaling and squaring. # Be less clever about this # if the principal branch does not exist at T0; # this happens when a diagonal entry of T0 # is negative with imaginary part 0. eivals = np.diag(T0) has_principal_branch = all(x.real > 0 or x.imag != 0 for x in eivals) for i in range(s, -1, -1): if i < s: U = U.dot(U) else: if has_principal_branch: p = t * np.exp2(-i) U[np.diag_indices(n)] = T0_diag ** p for j in range(n-1): l1 = T0[j, j] l2 = T0[j+1, j+1] t12 = T0[j, j+1] f12 = _fractional_power_superdiag_entry(l1, l2, t12, p) U[j, j+1] = f12 if not np.array_equal(U, np.triu(U)): raise Exception('internal inconsistency') return U def _remainder_matrix_power(A, t): """ Compute the fractional power of a matrix, for fractions -1 < t < 1. This uses algorithm (3.1) of [1]_. The Pade approximation itself uses algorithm (4.1) of [2]_. Parameters ---------- A : (N, N) array_like Matrix whose fractional power to evaluate. 
t : float Fractional power between -1 and 1 exclusive. Returns ------- X : (N, N) array_like The fractional power of the matrix. References ---------- .. [1] Nicholas J. Higham and Lijing Lin (2013) "An Improved Schur-Pade Algorithm for Fractional Powers of a Matrix and their Frechet Derivatives." .. [2] Nicholas J. Higham and Lijing lin (2011) "A Schur-Pade Algorithm for Fractional Powers of a Matrix." SIAM Journal on Matrix Analysis and Applications, 32 (3). pp. 1056-1078. ISSN 0895-4798 """ # This code block is copied from numpy.matrix_power(). A = np.asarray(A) if len(A.shape) != 2 or A.shape[0] != A.shape[1]: raise ValueError('input must be a square array') # Get the number of rows and columns. n, n = A.shape # Triangularize the matrix if necessary, # attempting to preserve dtype if possible. if np.array_equal(A, np.triu(A)): Z = None T = A else: if np.isrealobj(A): T, Z = schur(A) if not np.array_equal(T, np.triu(T)): T, Z = rsf2csf(T, Z) else: T, Z = schur(A, output='complex') # Zeros on the diagonal of the triangular matrix are forbidden, # because the inverse scaling and squaring cannot deal with it. T_diag = np.diag(T) if np.count_nonzero(T_diag) != n: raise FractionalMatrixPowerError( 'cannot use inverse scaling and squaring to find ' 'the fractional matrix power of a singular matrix') # If the triangular matrix is real and has a negative # entry on the diagonal, then force the matrix to be complex. if np.isrealobj(T) and np.min(T_diag) < 0: T = T.astype(complex) # Get the fractional power of the triangular matrix, # and de-triangularize it if necessary. U = _remainder_matrix_power_triu(T, t) if Z is not None: ZH = np.conjugate(Z).T return Z.dot(U).dot(ZH) else: return U def _fractional_matrix_power(A, p): """ Compute the fractional power of a matrix. See the fractional_matrix_power docstring in matfuncs.py for more info. 
""" A = np.asarray(A) if len(A.shape) != 2 or A.shape[0] != A.shape[1]: raise ValueError('expected a square matrix') if p == int(p): return np.linalg.matrix_power(A, int(p)) # Compute singular values. s = svdvals(A) # Inverse scaling and squaring cannot deal with a singular matrix, # because the process of repeatedly taking square roots # would not converge to the identity matrix. if s[-1]: # Compute the condition number relative to matrix inversion, # and use this to decide between floor(p) and ceil(p). k2 = s[0] / s[-1] p1 = p - np.floor(p) p2 = p - np.ceil(p) if p1 * k2 ** (1 - p1) <= -p2 * k2: a = int(np.floor(p)) b = p1 else: a = int(np.ceil(p)) b = p2 try: R = _remainder_matrix_power(A, b) Q = np.linalg.matrix_power(A, a) return Q.dot(R) except np.linalg.LinAlgError: pass # If p is negative then we are going to give up. # If p is non-negative then we can fall back to generic funm. if p < 0: X = np.empty_like(A) X.fill(np.nan) return X else: p1 = p - np.floor(p) a = int(np.floor(p)) b = p1 R, info = funm(A, lambda x: pow(x, b), disp=False) Q = np.linalg.matrix_power(A, a) return Q.dot(R) def _logm_triu(T): """ Compute matrix logarithm of an upper triangular matrix. The matrix logarithm is the inverse of expm: expm(logm(`T`)) == `T` Parameters ---------- T : (N, N) array_like Upper triangular matrix whose logarithm to evaluate Returns ------- logm : (N, N) ndarray Matrix logarithm of `T` References ---------- .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2012) "Improved Inverse Scaling and Squaring Algorithms for the Matrix Logarithm." SIAM Journal on Scientific Computing, 34 (4). C152-C169. ISSN 1095-7197 .. [2] Nicholas J. Higham (2008) "Functions of Matrices: Theory and Computation" ISBN 978-0-898716-46-7 .. [3] Nicholas J. Higham and Lijing lin (2011) "A Schur-Pade Algorithm for Fractional Powers of a Matrix." SIAM Journal on Matrix Analysis and Applications, 32 (3). pp. 1056-1078. 
ISSN 0895-4798 """ T = np.asarray(T) if len(T.shape) != 2 or T.shape[0] != T.shape[1]: raise ValueError('expected an upper triangular square matrix') n, n = T.shape # Construct T0 with the appropriate type, # depending on the dtype and the spectrum of T. T_diag = np.diag(T) keep_it_real = np.isrealobj(T) and np.min(T_diag) >= 0 if keep_it_real: T0 = T else: T0 = T.astype(complex) # Define bounds given in Table (2.1). theta = (None, 1.59e-5, 2.31e-3, 1.94e-2, 6.21e-2, 1.28e-1, 2.06e-1, 2.88e-1, 3.67e-1, 4.39e-1, 5.03e-1, 5.60e-1, 6.09e-1, 6.52e-1, 6.89e-1, 7.21e-1, 7.49e-1) R, s, m = _inverse_squaring_helper(T0, theta) # Evaluate U = 2**s r_m(T - I) using the partial fraction expansion (1.1). # This requires the nodes and weights # corresponding to degree-m Gauss-Legendre quadrature. # These quadrature arrays need to be transformed from the [-1, 1] interval # to the [0, 1] interval. nodes, weights = scipy.special.p_roots(m) nodes = nodes.real if nodes.shape != (m,) or weights.shape != (m,): raise Exception('internal error') nodes = 0.5 + 0.5 * nodes weights = 0.5 * weights ident = np.identity(n) U = np.zeros_like(R) for alpha, beta in zip(weights, nodes): U += solve_triangular(ident + beta*R, alpha*R) U *= np.exp2(s) # Skip this step if the principal branch # does not exist at T0; this happens when a diagonal entry of T0 # is negative with imaginary part 0. has_principal_branch = all(x.real > 0 or x.imag != 0 for x in np.diag(T0)) if has_principal_branch: # Recompute diagonal entries of U. U[np.diag_indices(n)] = np.log(np.diag(T0)) # Recompute superdiagonal entries of U. # This indexing of this code should be renovated # when newer np.diagonal() becomes available. for i in range(n-1): l1 = T0[i, i] l2 = T0[i+1, i+1] t12 = T0[i, i+1] U[i, i+1] = _logm_superdiag_entry(l1, l2, t12) # Return the logm of the upper triangular matrix. 
if not np.array_equal(U, np.triu(U)): raise Exception('internal inconsistency') return U def _logm_force_nonsingular_triangular_matrix(T, inplace=False): # The input matrix should be upper triangular. # The eps is ad hoc and is not meant to be machine precision. tri_eps = 1e-20 abs_diag = np.absolute(np.diag(T)) if np.any(abs_diag == 0): exact_singularity_msg = 'The logm input matrix is exactly singular.' warnings.warn(exact_singularity_msg, LogmExactlySingularWarning) if not inplace: T = T.copy() n = T.shape[0] for i in range(n): if not T[i, i]: T[i, i] = tri_eps elif np.any(abs_diag < tri_eps): near_singularity_msg = 'The logm input matrix may be nearly singular.' warnings.warn(near_singularity_msg, LogmNearlySingularWarning) return T def _logm(A): """ Compute the matrix logarithm. See the logm docstring in matfuncs.py for more info. Notes ----- In this function we look at triangular matrices that are similar to the input matrix. If any diagonal entry of such a triangular matrix is exactly zero then the original matrix is singular. The matrix logarithm does not exist for such matrices, but in such cases we will pretend that the diagonal entries that are zero are actually slightly positive by an ad-hoc amount, in the interest of returning something more useful than NaN. This will cause a warning. """ A = np.asarray(A) if len(A.shape) != 2 or A.shape[0] != A.shape[1]: raise ValueError('expected a square matrix') # If the input matrix dtype is integer then copy to a float dtype matrix. 
if issubclass(A.dtype.type, np.integer): A = np.asarray(A, dtype=float) keep_it_real = np.isrealobj(A) try: if np.array_equal(A, np.triu(A)): A = _logm_force_nonsingular_triangular_matrix(A) if np.min(np.diag(A)) < 0: A = A.astype(complex) return _logm_triu(A) else: if keep_it_real: T, Z = schur(A) if not np.array_equal(T, np.triu(T)): T, Z = rsf2csf(T, Z) else: T, Z = schur(A, output='complex') T = _logm_force_nonsingular_triangular_matrix(T, inplace=True) U = _logm_triu(T) ZH = np.conjugate(Z).T return Z.dot(U).dot(ZH) except (SqrtmError, LogmError): X = np.empty_like(A) X.fill(np.nan) return X
28,050
30.553431
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/misc.py
from __future__ import division, print_function, absolute_import import numpy as np from numpy.linalg import LinAlgError from .blas import get_blas_funcs from .lapack import get_lapack_funcs __all__ = ['LinAlgError', 'LinAlgWarning', 'norm'] class LinAlgWarning(RuntimeWarning): """ The warning emitted when a linear algebra related operation is close to fail conditions of the algorithm or loss of accuracy is expected. """ pass def norm(a, ord=None, axis=None, keepdims=False): """ Matrix or vector norm. This function is able to return one of seven different matrix norms, or one of an infinite number of vector norms (described below), depending on the value of the ``ord`` parameter. Parameters ---------- a : (M,) or (M, N) array_like Input array. If `axis` is None, `a` must be 1-D or 2-D. ord : {non-zero int, inf, -inf, 'fro'}, optional Order of the norm (see table under ``Notes``). inf means numpy's `inf` object axis : {int, 2-tuple of ints, None}, optional If `axis` is an integer, it specifies the axis of `a` along which to compute the vector norms. If `axis` is a 2-tuple, it specifies the axes that hold 2-D matrices, and the matrix norms of these matrices are computed. If `axis` is None then either a vector norm (when `a` is 1-D) or a matrix norm (when `a` is 2-D) is returned. keepdims : bool, optional If this is set to True, the axes which are normed over are left in the result as dimensions with size one. With this option the result will broadcast correctly against the original `a`. Returns ------- n : float or ndarray Norm of the matrix or vector(s). Notes ----- For values of ``ord <= 0``, the result is, strictly speaking, not a mathematical 'norm', but it may still be useful for various numerical purposes. 
The following norms can be calculated: ===== ============================ ========================== ord norm for matrices norm for vectors ===== ============================ ========================== None Frobenius norm 2-norm 'fro' Frobenius norm -- inf max(sum(abs(x), axis=1)) max(abs(x)) -inf min(sum(abs(x), axis=1)) min(abs(x)) 0 -- sum(x != 0) 1 max(sum(abs(x), axis=0)) as below -1 min(sum(abs(x), axis=0)) as below 2 2-norm (largest sing. value) as below -2 smallest singular value as below other -- sum(abs(x)**ord)**(1./ord) ===== ============================ ========================== The Frobenius norm is given by [1]_: :math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}` The ``axis`` and ``keepdims`` arguments are passed directly to ``numpy.linalg.norm`` and are only usable if they are supported by the version of numpy in use. References ---------- .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*, Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15 Examples -------- >>> from scipy.linalg import norm >>> a = np.arange(9) - 4.0 >>> a array([-4., -3., -2., -1., 0., 1., 2., 3., 4.]) >>> b = a.reshape((3, 3)) >>> b array([[-4., -3., -2.], [-1., 0., 1.], [ 2., 3., 4.]]) >>> norm(a) 7.745966692414834 >>> norm(b) 7.745966692414834 >>> norm(b, 'fro') 7.745966692414834 >>> norm(a, np.inf) 4 >>> norm(b, np.inf) 9 >>> norm(a, -np.inf) 0 >>> norm(b, -np.inf) 2 >>> norm(a, 1) 20 >>> norm(b, 1) 7 >>> norm(a, -1) -4.6566128774142013e-010 >>> norm(b, -1) 6 >>> norm(a, 2) 7.745966692414834 >>> norm(b, 2) 7.3484692283495345 >>> norm(a, -2) 0 >>> norm(b, -2) 1.8570331885190563e-016 >>> norm(a, 3) 5.8480354764257312 >>> norm(a, -3) 0 """ # Differs from numpy only in non-finite handling and the use of blas. a = np.asarray_chkfinite(a) # Only use optimized norms if axis and keepdims are not specified. 
if a.dtype.char in 'fdFD' and axis is None and not keepdims: if ord in (None, 2) and (a.ndim == 1): # use blas for fast and stable euclidean norm nrm2 = get_blas_funcs('nrm2', dtype=a.dtype) return nrm2(a) if a.ndim == 2 and axis is None and not keepdims: # Use lapack for a couple fast matrix norms. # For some reason the *lange frobenius norm is slow. lange_args = None # Make sure this works if the user uses the axis keywords # to apply the norm to the transpose. if ord == 1: if np.isfortran(a): lange_args = '1', a elif np.isfortran(a.T): lange_args = 'i', a.T elif ord == np.inf: if np.isfortran(a): lange_args = 'i', a elif np.isfortran(a.T): lange_args = '1', a.T if lange_args: lange = get_lapack_funcs('lange', dtype=a.dtype) return lange(*lange_args) # Filter out the axis and keepdims arguments if they aren't used so they # are never inadvertently passed to a version of numpy that doesn't # support them. if axis is not None: if keepdims: return np.linalg.norm(a, ord=ord, axis=axis, keepdims=keepdims) return np.linalg.norm(a, ord=ord, axis=axis) return np.linalg.norm(a, ord=ord) def _datacopied(arr, original): """ Strict check for `arr` not sharing any data with `original`, under the assumption that arr = asarray(original) """ if arr is original: return False if not isinstance(original, np.ndarray) and hasattr(original, '__array__'): return False return arr.base is None
6,108
31.494681
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/decomp_qr.py
"""QR decomposition functions.""" from __future__ import division, print_function, absolute_import import numpy # Local imports from .lapack import get_lapack_funcs from .misc import _datacopied __all__ = ['qr', 'qr_multiply', 'rq'] def safecall(f, name, *args, **kwargs): """Call a LAPACK routine, determining lwork automatically and handling error return values""" lwork = kwargs.get("lwork", None) if lwork in (None, -1): kwargs['lwork'] = -1 ret = f(*args, **kwargs) kwargs['lwork'] = ret[-2][0].real.astype(numpy.int) ret = f(*args, **kwargs) if ret[-1] < 0: raise ValueError("illegal value in %d-th argument of internal %s" % (-ret[-1], name)) return ret[:-2] def qr(a, overwrite_a=False, lwork=None, mode='full', pivoting=False, check_finite=True): """ Compute QR decomposition of a matrix. Calculate the decomposition ``A = Q R`` where Q is unitary/orthogonal and R upper triangular. Parameters ---------- a : (M, N) array_like Matrix to be decomposed overwrite_a : bool, optional Whether data in a is overwritten (may improve performance) lwork : int, optional Work array size, lwork >= a.shape[1]. If None or -1, an optimal size is computed. mode : {'full', 'r', 'economic', 'raw'}, optional Determines what information is to be returned: either both Q and R ('full', default), only R ('r') or both Q and R but computed in economy-size ('economic', see Notes). The final option 'raw' (added in Scipy 0.11) makes the function return two matrices (Q, TAU) in the internal format used by LAPACK. pivoting : bool, optional Whether or not factorization should include pivoting for rank-revealing qr decomposition. If pivoting, compute the decomposition ``A P = Q R`` as above, but where P is chosen such that the diagonal of R is non-increasing. check_finite : bool, optional Whether to check that the input matrix contains only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. 
Returns ------- Q : float or complex ndarray Of shape (M, M), or (M, K) for ``mode='economic'``. Not returned if ``mode='r'``. R : float or complex ndarray Of shape (M, N), or (K, N) for ``mode='economic'``. ``K = min(M, N)``. P : int ndarray Of shape (N,) for ``pivoting=True``. Not returned if ``pivoting=False``. Raises ------ LinAlgError Raised if decomposition fails Notes ----- This is an interface to the LAPACK routines dgeqrf, zgeqrf, dorgqr, zungqr, dgeqp3, and zgeqp3. If ``mode=economic``, the shapes of Q and R are (M, K) and (K, N) instead of (M,M) and (M,N), with ``K=min(M,N)``. Examples -------- >>> from scipy import random, linalg, dot, diag, all, allclose >>> a = random.randn(9, 6) >>> q, r = linalg.qr(a) >>> allclose(a, np.dot(q, r)) True >>> q.shape, r.shape ((9, 9), (9, 6)) >>> r2 = linalg.qr(a, mode='r') >>> allclose(r, r2) True >>> q3, r3 = linalg.qr(a, mode='economic') >>> q3.shape, r3.shape ((9, 6), (6, 6)) >>> q4, r4, p4 = linalg.qr(a, pivoting=True) >>> d = abs(diag(r4)) >>> all(d[1:] <= d[:-1]) True >>> allclose(a[:, p4], dot(q4, r4)) True >>> q4.shape, r4.shape, p4.shape ((9, 9), (9, 6), (6,)) >>> q5, r5, p5 = linalg.qr(a, mode='economic', pivoting=True) >>> q5.shape, r5.shape, p5.shape ((9, 6), (6, 6), (6,)) """ # 'qr' was the old default, equivalent to 'full'. Neither 'full' nor # 'qr' are used below. 
# 'raw' is used internally by qr_multiply if mode not in ['full', 'qr', 'r', 'economic', 'raw']: raise ValueError("Mode argument should be one of ['full', 'r'," "'economic', 'raw']") if check_finite: a1 = numpy.asarray_chkfinite(a) else: a1 = numpy.asarray(a) if len(a1.shape) != 2: raise ValueError("expected 2D array") M, N = a1.shape overwrite_a = overwrite_a or (_datacopied(a1, a)) if pivoting: geqp3, = get_lapack_funcs(('geqp3',), (a1,)) qr, jpvt, tau = safecall(geqp3, "geqp3", a1, overwrite_a=overwrite_a) jpvt -= 1 # geqp3 returns a 1-based index array, so subtract 1 else: geqrf, = get_lapack_funcs(('geqrf',), (a1,)) qr, tau = safecall(geqrf, "geqrf", a1, lwork=lwork, overwrite_a=overwrite_a) if mode not in ['economic', 'raw'] or M < N: R = numpy.triu(qr) else: R = numpy.triu(qr[:N, :]) if pivoting: Rj = R, jpvt else: Rj = R, if mode == 'r': return Rj elif mode == 'raw': return ((qr, tau),) + Rj gor_un_gqr, = get_lapack_funcs(('orgqr',), (qr,)) if M < N: Q, = safecall(gor_un_gqr, "gorgqr/gungqr", qr[:, :M], tau, lwork=lwork, overwrite_a=1) elif mode == 'economic': Q, = safecall(gor_un_gqr, "gorgqr/gungqr", qr, tau, lwork=lwork, overwrite_a=1) else: t = qr.dtype.char qqr = numpy.empty((M, M), dtype=t) qqr[:, :N] = qr Q, = safecall(gor_un_gqr, "gorgqr/gungqr", qqr, tau, lwork=lwork, overwrite_a=1) return (Q,) + Rj def qr_multiply(a, c, mode='right', pivoting=False, conjugate=False, overwrite_a=False, overwrite_c=False): """ Calculate the QR decomposition and multiply Q with a matrix. Calculate the decomposition ``A = Q R`` where Q is unitary/orthogonal and R upper triangular. Multiply Q with a vector or a matrix c. Parameters ---------- a : (M, N), array_like Input array c : array_like Input array to be multiplied by ``q``. mode : {'left', 'right'}, optional ``Q @ c`` is returned if mode is 'left', ``c @ Q`` is returned if mode is 'right'. 
The shape of c must be appropriate for the matrix multiplications, if mode is 'left', ``min(a.shape) == c.shape[0]``, if mode is 'right', ``a.shape[0] == c.shape[1]``. pivoting : bool, optional Whether or not factorization should include pivoting for rank-revealing qr decomposition, see the documentation of qr. conjugate : bool, optional Whether Q should be complex-conjugated. This might be faster than explicit conjugation. overwrite_a : bool, optional Whether data in a is overwritten (may improve performance) overwrite_c : bool, optional Whether data in c is overwritten (may improve performance). If this is used, c must be big enough to keep the result, i.e. ``c.shape[0]`` = ``a.shape[0]`` if mode is 'left'. Returns ------- CQ : ndarray The product of ``Q`` and ``c``. R : (K, N), ndarray R array of the resulting QR factorization where ``K = min(M, N)``. P : (N,) ndarray Integer pivot array. Only returned when ``pivoting=True``. Raises ------ LinAlgError Raised if QR decomposition fails. Notes ----- This is an interface to the LAPACK routines ``?GEQRF``, ``?ORMQR``, ``?UNMQR``, and ``?GEQP3``. .. versionadded:: 0.11.0 Examples -------- >>> from scipy.linalg import qr_multiply, qr >>> A = np.array([[1, 3, 3], [2, 3, 2], [2, 3, 3], [1, 3, 2]]) >>> qc, r1, piv1 = qr_multiply(A, 2*np.eye(4), pivoting=1) >>> qc array([[-1., 1., -1.], [-1., -1., 1.], [-1., -1., -1.], [-1., 1., 1.]]) >>> r1 array([[-6., -3., -5. ], [ 0., -1., -1.11022302e-16], [ 0., 0., -1. 
]]) >>> piv1 array([1, 0, 2], dtype=int32) >>> q2, r2, piv2 = qr(A, mode='economic', pivoting=1) >>> np.allclose(2*q2 - qc, np.zeros((4, 3))) True """ if mode not in ['left', 'right']: raise ValueError("Mode argument can only be 'left' or 'right' but " "not '{}'".format(mode)) c = numpy.asarray_chkfinite(c) if c.ndim < 2: onedim = True c = numpy.atleast_2d(c) if mode == "left": c = c.T else: onedim = False a = numpy.atleast_2d(numpy.asarray(a)) # chkfinite done in qr M, N = a.shape if mode == 'left': if c.shape[0] != min(M, N + overwrite_c*(M-N)): raise ValueError('Array shapes are not compatible for Q @ c' ' operation: {} vs {}'.format(a.shape, c.shape)) else: if M != c.shape[1]: raise ValueError('Array shapes are not compatible for c @ Q' ' operation: {} vs {}'.format(c.shape, a.shape)) raw = qr(a, overwrite_a, None, "raw", pivoting) Q, tau = raw[0] gor_un_mqr, = get_lapack_funcs(('ormqr',), (Q,)) if gor_un_mqr.typecode in ('s', 'd'): trans = "T" else: trans = "C" Q = Q[:, :min(M, N)] if M > N and mode == "left" and not overwrite_c: if conjugate: cc = numpy.zeros((c.shape[1], M), dtype=c.dtype, order="F") cc[:, :N] = c.T else: cc = numpy.zeros((M, c.shape[1]), dtype=c.dtype, order="F") cc[:N, :] = c trans = "N" if conjugate: lr = "R" else: lr = "L" overwrite_c = True elif c.flags["C_CONTIGUOUS"] and trans == "T" or conjugate: cc = c.T if mode == "left": lr = "R" else: lr = "L" else: trans = "N" cc = c if mode == "left": lr = "L" else: lr = "R" cQ, = safecall(gor_un_mqr, "gormqr/gunmqr", lr, trans, Q, tau, cc, overwrite_c=overwrite_c) if trans != "N": cQ = cQ.T if mode == "right": cQ = cQ[:, :min(M, N)] if onedim: cQ = cQ.ravel() return (cQ,) + raw[1:] def rq(a, overwrite_a=False, lwork=None, mode='full', check_finite=True): """ Compute RQ decomposition of a matrix. Calculate the decomposition ``A = R Q`` where Q is unitary/orthogonal and R upper triangular. 
Parameters ---------- a : (M, N) array_like Matrix to be decomposed overwrite_a : bool, optional Whether data in a is overwritten (may improve performance) lwork : int, optional Work array size, lwork >= a.shape[1]. If None or -1, an optimal size is computed. mode : {'full', 'r', 'economic'}, optional Determines what information is to be returned: either both Q and R ('full', default), only R ('r') or both Q and R but computed in economy-size ('economic', see Notes). check_finite : bool, optional Whether to check that the input matrix contains only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- R : float or complex ndarray Of shape (M, N) or (M, K) for ``mode='economic'``. ``K = min(M, N)``. Q : float or complex ndarray Of shape (N, N) or (K, N) for ``mode='economic'``. Not returned if ``mode='r'``. Raises ------ LinAlgError If decomposition fails. Notes ----- This is an interface to the LAPACK routines sgerqf, dgerqf, cgerqf, zgerqf, sorgrq, dorgrq, cungrq and zungrq. If ``mode=economic``, the shapes of Q and R are (K, N) and (M, K) instead of (N,N) and (M,N), with ``K=min(M,N)``. 
Examples -------- >>> from scipy import linalg >>> a = np.random.randn(6, 9) >>> r, q = linalg.rq(a) >>> np.allclose(a, r @ q) True >>> r.shape, q.shape ((6, 9), (9, 9)) >>> r2 = linalg.rq(a, mode='r') >>> np.allclose(r, r2) True >>> r3, q3 = linalg.rq(a, mode='economic') >>> r3.shape, q3.shape ((6, 6), (6, 9)) """ if mode not in ['full', 'r', 'economic']: raise ValueError( "Mode argument should be one of ['full', 'r', 'economic']") if check_finite: a1 = numpy.asarray_chkfinite(a) else: a1 = numpy.asarray(a) if len(a1.shape) != 2: raise ValueError('expected matrix') M, N = a1.shape overwrite_a = overwrite_a or (_datacopied(a1, a)) gerqf, = get_lapack_funcs(('gerqf',), (a1,)) rq, tau = safecall(gerqf, 'gerqf', a1, lwork=lwork, overwrite_a=overwrite_a) if not mode == 'economic' or N < M: R = numpy.triu(rq, N-M) else: R = numpy.triu(rq[-M:, -M:]) if mode == 'r': return R gor_un_grq, = get_lapack_funcs(('orgrq',), (rq,)) if N < M: Q, = safecall(gor_un_grq, "gorgrq/gungrq", rq[-N:], tau, lwork=lwork, overwrite_a=1) elif mode == 'economic': Q, = safecall(gor_un_grq, "gorgrq/gungrq", rq, tau, lwork=lwork, overwrite_a=1) else: rq1 = numpy.empty((N, N), dtype=rq.dtype) rq1[-M:] = rq Q, = safecall(gor_un_grq, "gorgrq/gungrq", rq1, tau, lwork=lwork, overwrite_a=1) return R, Q
13,507
30.783529
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/_procrustes.py
""" Solve the orthogonal Procrustes problem. """ from __future__ import division, print_function, absolute_import import numpy as np from .decomp_svd import svd __all__ = ['orthogonal_procrustes'] def orthogonal_procrustes(A, B, check_finite=True): """ Compute the matrix solution of the orthogonal Procrustes problem. Given matrices A and B of equal shape, find an orthogonal matrix R that most closely maps A to B using the algorithm given in [1]_. Parameters ---------- A : (M, N) array_like Matrix to be mapped. B : (M, N) array_like Target matrix. check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- R : (N, N) ndarray The matrix solution of the orthogonal Procrustes problem. Minimizes the Frobenius norm of ``(A @ R) - B``, subject to ``R.T @ R = I``. scale : float Sum of the singular values of ``A.T @ B``. Raises ------ ValueError If the input array shapes don't match or if check_finite is True and the arrays contain Inf or NaN. Notes ----- Note that unlike higher level Procrustes analyses of spatial data, this function only uses orthogonal transformations like rotations and reflections, and it does not use scaling or translation. .. versionadded:: 0.15.0 References ---------- .. [1] Peter H. Schonemann, "A generalized solution of the orthogonal Procrustes problem", Psychometrica -- Vol. 31, No. 1, March, 1996. 
Examples -------- >>> from scipy.linalg import orthogonal_procrustes >>> A = np.array([[ 2, 0, 1], [-2, 0, 0]]) Flip the order of columns and check for the anti-diagonal mapping >>> R, sca = orthogonal_procrustes(A, np.fliplr(A)) >>> R array([[-5.34384992e-17, 0.00000000e+00, 1.00000000e+00], [ 0.00000000e+00, 1.00000000e+00, 0.00000000e+00], [ 1.00000000e+00, 0.00000000e+00, -7.85941422e-17]]) >>> sca 9.0 """ if check_finite: A = np.asarray_chkfinite(A) B = np.asarray_chkfinite(B) else: A = np.asanyarray(A) B = np.asanyarray(B) if A.ndim != 2: raise ValueError('expected ndim to be 2, but observed %s' % A.ndim) if A.shape != B.shape: raise ValueError('the shapes of A and B differ (%s vs %s)' % ( A.shape, B.shape)) # Be clever with transposes, with the intention to save memory. u, w, vt = svd(B.T.dot(A).T) R = u.dot(vt) scale = w.sum() return R, scale
2,787
29.304348
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/matfuncs.py
# # Author: Travis Oliphant, March 2002 # from __future__ import division, print_function, absolute_import __all__ = ['expm','cosm','sinm','tanm','coshm','sinhm', 'tanhm','logm','funm','signm','sqrtm', 'expm_frechet', 'expm_cond', 'fractional_matrix_power'] from numpy import (Inf, dot, diag, product, logical_not, ravel, transpose, conjugate, absolute, amax, sign, isfinite, single) import numpy as np # Local imports from .misc import norm from .basic import solve, inv from .special_matrices import triu from .decomp_svd import svd from .decomp_schur import schur, rsf2csf from ._expm_frechet import expm_frechet, expm_cond from ._matfuncs_sqrtm import sqrtm eps = np.finfo(float).eps feps = np.finfo(single).eps _array_precision = {'i': 1, 'l': 1, 'f': 0, 'd': 1, 'F': 0, 'D': 1} ############################################################################### # Utility functions. def _asarray_square(A): """ Wraps asarray with the extra requirement that the input be a square matrix. The motivation is that the matfuncs module has real functions that have been lifted to square matrix functions. Parameters ---------- A : array_like A square matrix. Returns ------- out : ndarray An ndarray copy or view or other representation of A. """ A = np.asarray(A) if len(A.shape) != 2 or A.shape[0] != A.shape[1]: raise ValueError('expected square array_like input') return A def _maybe_real(A, B, tol=None): """ Return either B or the real part of B, depending on properties of A and B. The motivation is that B has been computed as a complicated function of A, and B may be perturbed by negligible imaginary components. If A is real and B is complex with small imaginary components, then return a real copy of B. The assumption in that case would be that the imaginary components of B are numerical artifacts. Parameters ---------- A : ndarray Input array whose type is to be checked as real vs. complex. B : ndarray Array to be returned, possibly without its imaginary part. 
tol : float Absolute tolerance. Returns ------- out : real or complex array Either the input array B or only the real part of the input array B. """ # Note that booleans and integers compare as real. if np.isrealobj(A) and np.iscomplexobj(B): if tol is None: tol = {0:feps*1e3, 1:eps*1e6}[_array_precision[B.dtype.char]] if np.allclose(B.imag, 0.0, atol=tol): B = B.real return B ############################################################################### # Matrix functions. def fractional_matrix_power(A, t): """ Compute the fractional power of a matrix. Proceeds according to the discussion in section (6) of [1]_. Parameters ---------- A : (N, N) array_like Matrix whose fractional power to evaluate. t : float Fractional power. Returns ------- X : (N, N) array_like The fractional power of the matrix. References ---------- .. [1] Nicholas J. Higham and Lijing lin (2011) "A Schur-Pade Algorithm for Fractional Powers of a Matrix." SIAM Journal on Matrix Analysis and Applications, 32 (3). pp. 1056-1078. ISSN 0895-4798 Examples -------- >>> from scipy.linalg import fractional_matrix_power >>> a = np.array([[1.0, 3.0], [1.0, 4.0]]) >>> b = fractional_matrix_power(a, 0.5) >>> b array([[ 0.75592895, 1.13389342], [ 0.37796447, 1.88982237]]) >>> np.dot(b, b) # Verify square root array([[ 1., 3.], [ 1., 4.]]) """ # This fixes some issue with imports; # this function calls onenormest which is in scipy.sparse. A = _asarray_square(A) import scipy.linalg._matfuncs_inv_ssq return scipy.linalg._matfuncs_inv_ssq._fractional_matrix_power(A, t) def logm(A, disp=True): """ Compute matrix logarithm. The matrix logarithm is the inverse of expm: expm(logm(`A`)) == `A` Parameters ---------- A : (N, N) array_like Matrix whose logarithm to evaluate disp : bool, optional Print warning if error in the result is estimated large instead of returning estimated error. 
(Default: True) Returns ------- logm : (N, N) ndarray Matrix logarithm of `A` errest : float (if disp == False) 1-norm of the estimated error, ||err||_1 / ||A||_1 References ---------- .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2012) "Improved Inverse Scaling and Squaring Algorithms for the Matrix Logarithm." SIAM Journal on Scientific Computing, 34 (4). C152-C169. ISSN 1095-7197 .. [2] Nicholas J. Higham (2008) "Functions of Matrices: Theory and Computation" ISBN 978-0-898716-46-7 .. [3] Nicholas J. Higham and Lijing lin (2011) "A Schur-Pade Algorithm for Fractional Powers of a Matrix." SIAM Journal on Matrix Analysis and Applications, 32 (3). pp. 1056-1078. ISSN 0895-4798 Examples -------- >>> from scipy.linalg import logm, expm >>> a = np.array([[1.0, 3.0], [1.0, 4.0]]) >>> b = logm(a) >>> b array([[-1.02571087, 2.05142174], [ 0.68380725, 1.02571087]]) >>> expm(b) # Verify expm(logm(a)) returns a array([[ 1., 3.], [ 1., 4.]]) """ A = _asarray_square(A) # Avoid circular import ... this is OK, right? import scipy.linalg._matfuncs_inv_ssq F = scipy.linalg._matfuncs_inv_ssq._logm(A) F = _maybe_real(A, F) errtol = 1000*eps #TODO use a better error approximation errest = norm(expm(F)-A,1) / norm(A,1) if disp: if not isfinite(errest) or errest >= errtol: print("logm result may be inaccurate, approximate err =", errest) return F else: return F, errest def expm(A): """ Compute the matrix exponential using Pade approximation. Parameters ---------- A : (N, N) array_like or sparse matrix Matrix to be exponentiated. Returns ------- expm : (N, N) ndarray Matrix exponential of `A`. References ---------- .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2009) "A New Scaling and Squaring Algorithm for the Matrix Exponential." SIAM Journal on Matrix Analysis and Applications. 31 (3). pp. 970-989. 
ISSN 1095-7162 Examples -------- >>> from scipy.linalg import expm, sinm, cosm Matrix version of the formula exp(0) = 1: >>> expm(np.zeros((2,2))) array([[ 1., 0.], [ 0., 1.]]) Euler's identity (exp(i*theta) = cos(theta) + i*sin(theta)) applied to a matrix: >>> a = np.array([[1.0, 2.0], [-1.0, 3.0]]) >>> expm(1j*a) array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j], [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]]) >>> cosm(a) + 1j*sinm(a) array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j], [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]]) """ # Input checking and conversion is provided by sparse.linalg.expm(). import scipy.sparse.linalg return scipy.sparse.linalg.expm(A) def cosm(A): """ Compute the matrix cosine. This routine uses expm to compute the matrix exponentials. Parameters ---------- A : (N, N) array_like Input array Returns ------- cosm : (N, N) ndarray Matrix cosine of A Examples -------- >>> from scipy.linalg import expm, sinm, cosm Euler's identity (exp(i*theta) = cos(theta) + i*sin(theta)) applied to a matrix: >>> a = np.array([[1.0, 2.0], [-1.0, 3.0]]) >>> expm(1j*a) array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j], [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]]) >>> cosm(a) + 1j*sinm(a) array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j], [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]]) """ A = _asarray_square(A) if np.iscomplexobj(A): return 0.5*(expm(1j*A) + expm(-1j*A)) else: return expm(1j*A).real def sinm(A): """ Compute the matrix sine. This routine uses expm to compute the matrix exponentials. Parameters ---------- A : (N, N) array_like Input array. 
Returns ------- sinm : (N, N) ndarray Matrix sine of `A` Examples -------- >>> from scipy.linalg import expm, sinm, cosm Euler's identity (exp(i*theta) = cos(theta) + i*sin(theta)) applied to a matrix: >>> a = np.array([[1.0, 2.0], [-1.0, 3.0]]) >>> expm(1j*a) array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j], [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]]) >>> cosm(a) + 1j*sinm(a) array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j], [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]]) """ A = _asarray_square(A) if np.iscomplexobj(A): return -0.5j*(expm(1j*A) - expm(-1j*A)) else: return expm(1j*A).imag def tanm(A): """ Compute the matrix tangent. This routine uses expm to compute the matrix exponentials. Parameters ---------- A : (N, N) array_like Input array. Returns ------- tanm : (N, N) ndarray Matrix tangent of `A` Examples -------- >>> from scipy.linalg import tanm, sinm, cosm >>> a = np.array([[1.0, 3.0], [1.0, 4.0]]) >>> t = tanm(a) >>> t array([[ -2.00876993, -8.41880636], [ -2.80626879, -10.42757629]]) Verify tanm(a) = sinm(a).dot(inv(cosm(a))) >>> s = sinm(a) >>> c = cosm(a) >>> s.dot(np.linalg.inv(c)) array([[ -2.00876993, -8.41880636], [ -2.80626879, -10.42757629]]) """ A = _asarray_square(A) return _maybe_real(A, solve(cosm(A), sinm(A))) def coshm(A): """ Compute the hyperbolic matrix cosine. This routine uses expm to compute the matrix exponentials. Parameters ---------- A : (N, N) array_like Input array. 
Returns ------- coshm : (N, N) ndarray Hyperbolic matrix cosine of `A` Examples -------- >>> from scipy.linalg import tanhm, sinhm, coshm >>> a = np.array([[1.0, 3.0], [1.0, 4.0]]) >>> c = coshm(a) >>> c array([[ 11.24592233, 38.76236492], [ 12.92078831, 50.00828725]]) Verify tanhm(a) = sinhm(a).dot(inv(coshm(a))) >>> t = tanhm(a) >>> s = sinhm(a) >>> t - s.dot(np.linalg.inv(c)) array([[ 2.72004641e-15, 4.55191440e-15], [ 0.00000000e+00, -5.55111512e-16]]) """ A = _asarray_square(A) return _maybe_real(A, 0.5 * (expm(A) + expm(-A))) def sinhm(A): """ Compute the hyperbolic matrix sine. This routine uses expm to compute the matrix exponentials. Parameters ---------- A : (N, N) array_like Input array. Returns ------- sinhm : (N, N) ndarray Hyperbolic matrix sine of `A` Examples -------- >>> from scipy.linalg import tanhm, sinhm, coshm >>> a = np.array([[1.0, 3.0], [1.0, 4.0]]) >>> s = sinhm(a) >>> s array([[ 10.57300653, 39.28826594], [ 13.09608865, 49.86127247]]) Verify tanhm(a) = sinhm(a).dot(inv(coshm(a))) >>> t = tanhm(a) >>> c = coshm(a) >>> t - s.dot(np.linalg.inv(c)) array([[ 2.72004641e-15, 4.55191440e-15], [ 0.00000000e+00, -5.55111512e-16]]) """ A = _asarray_square(A) return _maybe_real(A, 0.5 * (expm(A) - expm(-A))) def tanhm(A): """ Compute the hyperbolic matrix tangent. This routine uses expm to compute the matrix exponentials. 
Parameters ---------- A : (N, N) array_like Input array Returns ------- tanhm : (N, N) ndarray Hyperbolic matrix tangent of `A` Examples -------- >>> from scipy.linalg import tanhm, sinhm, coshm >>> a = np.array([[1.0, 3.0], [1.0, 4.0]]) >>> t = tanhm(a) >>> t array([[ 0.3428582 , 0.51987926], [ 0.17329309, 0.86273746]]) Verify tanhm(a) = sinhm(a).dot(inv(coshm(a))) >>> s = sinhm(a) >>> c = coshm(a) >>> t - s.dot(np.linalg.inv(c)) array([[ 2.72004641e-15, 4.55191440e-15], [ 0.00000000e+00, -5.55111512e-16]]) """ A = _asarray_square(A) return _maybe_real(A, solve(coshm(A), sinhm(A))) def funm(A, func, disp=True): """ Evaluate a matrix function specified by a callable. Returns the value of matrix-valued function ``f`` at `A`. The function ``f`` is an extension of the scalar-valued function `func` to matrices. Parameters ---------- A : (N, N) array_like Matrix at which to evaluate the function func : callable Callable object that evaluates a scalar function f. Must be vectorized (eg. using vectorize). disp : bool, optional Print warning if error in the result is estimated large instead of returning estimated error. (Default: True) Returns ------- funm : (N, N) ndarray Value of the matrix function specified by func evaluated at `A` errest : float (if disp == False) 1-norm of the estimated error, ||err||_1 / ||A||_1 Examples -------- >>> from scipy.linalg import funm >>> a = np.array([[1.0, 3.0], [1.0, 4.0]]) >>> funm(a, lambda x: x*x) array([[ 4., 15.], [ 5., 19.]]) >>> a.dot(a) array([[ 4., 15.], [ 5., 19.]]) Notes ----- This function implements the general algorithm based on Schur decomposition (Algorithm 9.1.1. in [1]_). If the input matrix is known to be diagonalizable, then relying on the eigendecomposition is likely to be faster. For example, if your matrix is Hermitian, you can do >>> from scipy.linalg import eigh >>> def funm_herm(a, func, check_finite=False): ... w, v = eigh(a, check_finite=check_finite) ... 
## if you further know that your matrix is positive semidefinite, ... ## you can optionally guard against precision errors by doing ... # w = np.maximum(w, 0) ... w = func(w) ... return (v * w).dot(v.conj().T) References ---------- .. [1] Gene H. Golub, Charles F. van Loan, Matrix Computations 4th ed. """ A = _asarray_square(A) # Perform Shur decomposition (lapack ?gees) T, Z = schur(A) T, Z = rsf2csf(T,Z) n,n = T.shape F = diag(func(diag(T))) # apply function to diagonal elements F = F.astype(T.dtype.char) # e.g. when F is real but T is complex minden = abs(T[0,0]) # implement Algorithm 11.1.1 from Golub and Van Loan # "matrix Computations." for p in range(1,n): for i in range(1,n-p+1): j = i + p s = T[i-1,j-1] * (F[j-1,j-1] - F[i-1,i-1]) ksl = slice(i,j-1) val = dot(T[i-1,ksl],F[ksl,j-1]) - dot(F[i-1,ksl],T[ksl,j-1]) s = s + val den = T[j-1,j-1] - T[i-1,i-1] if den != 0.0: s = s / den F[i-1,j-1] = s minden = min(minden,abs(den)) F = dot(dot(Z, F), transpose(conjugate(Z))) F = _maybe_real(A, F) tol = {0:feps, 1:eps}[_array_precision[F.dtype.char]] if minden == 0.0: minden = tol err = min(1, max(tol,(tol/minden)*norm(triu(T,1),1))) if product(ravel(logical_not(isfinite(F))),axis=0): err = Inf if disp: if err > 1000*tol: print("funm result may be inaccurate, approximate err =", err) return F else: return F, err def signm(A, disp=True): """ Matrix sign function. Extension of the scalar sign(x) to matrices. Parameters ---------- A : (N, N) array_like Matrix at which to evaluate the sign function disp : bool, optional Print warning if error in the result is estimated large instead of returning estimated error. 
(Default: True) Returns ------- signm : (N, N) ndarray Value of the sign function at `A` errest : float (if disp == False) 1-norm of the estimated error, ||err||_1 / ||A||_1 Examples -------- >>> from scipy.linalg import signm, eigvals >>> a = [[1,2,3], [1,2,1], [1,1,1]] >>> eigvals(a) array([ 4.12488542+0.j, -0.76155718+0.j, 0.63667176+0.j]) >>> eigvals(signm(a)) array([-1.+0.j, 1.+0.j, 1.+0.j]) """ A = _asarray_square(A) def rounded_sign(x): rx = np.real(x) if rx.dtype.char == 'f': c = 1e3*feps*amax(x) else: c = 1e3*eps*amax(x) return sign((absolute(rx) > c) * rx) result, errest = funm(A, rounded_sign, disp=0) errtol = {0:1e3*feps, 1:1e3*eps}[_array_precision[result.dtype.char]] if errest < errtol: return result # Handle signm of defective matrices: # See "E.D.Denman and J.Leyva-Ramos, Appl.Math.Comp., # 8:237-250,1981" for how to improve the following (currently a # rather naive) iteration process: # a = result # sometimes iteration converges faster but where?? # Shifting to avoid zero eigenvalues. How to ensure that shifting does # not change the spectrum too much? vals = svd(A, compute_uv=0) max_sv = np.amax(vals) # min_nonzero_sv = vals[(vals>max_sv*errtol).tolist().count(1)-1] # c = 0.5/min_nonzero_sv c = 0.5/max_sv S0 = A + c*np.identity(A.shape[0]) prev_errest = errest for i in range(100): iS0 = inv(S0) S0 = 0.5*(S0 + iS0) Pp = 0.5*(dot(S0,S0)+S0) errest = norm(dot(Pp,Pp)-Pp,1) if errest < errtol or prev_errest == errest: break prev_errest = errest if disp: if not isfinite(errest) or errest >= errtol: print("signm result may be inaccurate, approximate err =", errest) return S0 else: return S0, errest
18,286
26.253353
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/decomp_svd.py
"""SVD decomposition functions.""" from __future__ import division, print_function, absolute_import import numpy from numpy import zeros, r_, diag, dot, arccos, arcsin, where, clip # Local imports. from .misc import LinAlgError, _datacopied from .lapack import get_lapack_funcs, _compute_lwork from .decomp import _asarray_validated from scipy._lib.six import string_types __all__ = ['svd', 'svdvals', 'diagsvd', 'orth', 'subspace_angles', 'null_space'] def svd(a, full_matrices=True, compute_uv=True, overwrite_a=False, check_finite=True, lapack_driver='gesdd'): """ Singular Value Decomposition. Factorizes the matrix `a` into two unitary matrices ``U`` and ``Vh``, and a 1-D array ``s`` of singular values (real, non-negative) such that ``a == U @ S @ Vh``, where ``S`` is a suitably shaped matrix of zeros with main diagonal ``s``. Parameters ---------- a : (M, N) array_like Matrix to decompose. full_matrices : bool, optional If True (default), `U` and `Vh` are of shape ``(M, M)``, ``(N, N)``. If False, the shapes are ``(M, K)`` and ``(K, N)``, where ``K = min(M, N)``. compute_uv : bool, optional Whether to compute also ``U`` and ``Vh`` in addition to ``s``. Default is True. overwrite_a : bool, optional Whether to overwrite `a`; may improve performance. Default is False. check_finite : bool, optional Whether to check that the input matrix contains only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. lapack_driver : {'gesdd', 'gesvd'}, optional Whether to use the more efficient divide-and-conquer approach (``'gesdd'``) or general rectangular approach (``'gesvd'``) to compute the SVD. MATLAB and Octave use the ``'gesvd'`` approach. Default is ``'gesdd'``. .. versionadded:: 0.18 Returns ------- U : ndarray Unitary matrix having left singular vectors as columns. Of shape ``(M, M)`` or ``(M, K)``, depending on `full_matrices`. 
s : ndarray The singular values, sorted in non-increasing order. Of shape (K,), with ``K = min(M, N)``. Vh : ndarray Unitary matrix having right singular vectors as rows. Of shape ``(N, N)`` or ``(K, N)`` depending on `full_matrices`. For ``compute_uv=False``, only ``s`` is returned. Raises ------ LinAlgError If SVD computation does not converge. See also -------- svdvals : Compute singular values of a matrix. diagsvd : Construct the Sigma matrix, given the vector s. Examples -------- >>> from scipy import linalg >>> m, n = 9, 6 >>> a = np.random.randn(m, n) + 1.j*np.random.randn(m, n) >>> U, s, Vh = linalg.svd(a) >>> U.shape, s.shape, Vh.shape ((9, 9), (6,), (6, 6)) Reconstruct the original matrix from the decomposition: >>> sigma = np.zeros((m, n)) >>> for i in range(min(m, n)): ... sigma[i, i] = s[i] >>> a1 = np.dot(U, np.dot(sigma, Vh)) >>> np.allclose(a, a1) True Alternatively, use ``full_matrices=False`` (notice that the shape of ``U`` is then ``(m, n)`` instead of ``(m, m)``): >>> U, s, Vh = linalg.svd(a, full_matrices=False) >>> U.shape, s.shape, Vh.shape ((9, 6), (6,), (6, 6)) >>> S = np.diag(s) >>> np.allclose(a, np.dot(U, np.dot(S, Vh))) True >>> s2 = linalg.svd(a, compute_uv=False) >>> np.allclose(s, s2) True """ a1 = _asarray_validated(a, check_finite=check_finite) if len(a1.shape) != 2: raise ValueError('expected matrix') m, n = a1.shape overwrite_a = overwrite_a or (_datacopied(a1, a)) if not isinstance(lapack_driver, string_types): raise TypeError('lapack_driver must be a string') if lapack_driver not in ('gesdd', 'gesvd'): raise ValueError('lapack_driver must be "gesdd" or "gesvd", not "%s"' % (lapack_driver,)) funcs = (lapack_driver, lapack_driver + '_lwork') gesXd, gesXd_lwork = get_lapack_funcs(funcs, (a1,)) # compute optimal lwork lwork = _compute_lwork(gesXd_lwork, a1.shape[0], a1.shape[1], compute_uv=compute_uv, full_matrices=full_matrices) # perform decomposition u, s, v, info = gesXd(a1, compute_uv=compute_uv, lwork=lwork, 
full_matrices=full_matrices, overwrite_a=overwrite_a) if info > 0: raise LinAlgError("SVD did not converge") if info < 0: raise ValueError('illegal value in %d-th argument of internal gesdd' % -info) if compute_uv: return u, s, v else: return s def svdvals(a, overwrite_a=False, check_finite=True): """ Compute singular values of a matrix. Parameters ---------- a : (M, N) array_like Matrix to decompose. overwrite_a : bool, optional Whether to overwrite `a`; may improve performance. Default is False. check_finite : bool, optional Whether to check that the input matrix contains only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- s : (min(M, N),) ndarray The singular values, sorted in decreasing order. Raises ------ LinAlgError If SVD computation does not converge. Notes ----- ``svdvals(a)`` only differs from ``svd(a, compute_uv=False)`` by its handling of the edge case of empty ``a``, where it returns an empty sequence: >>> a = np.empty((0, 2)) >>> from scipy.linalg import svdvals >>> svdvals(a) array([], dtype=float64) See Also -------- svd : Compute the full singular value decomposition of a matrix. diagsvd : Construct the Sigma matrix, given the vector s. Examples -------- >>> from scipy.linalg import svdvals >>> m = np.array([[1.0, 0.0], ... [2.0, 3.0], ... [1.0, 1.0], ... [0.0, 2.0], ... [1.0, 0.0]]) >>> svdvals(m) array([ 4.28091555, 1.63516424]) We can verify the maximum singular value of `m` by computing the maximum length of `m.dot(u)` over all the unit vectors `u` in the (x,y) plane. We approximate "all" the unit vectors with a large sample. Because of linearity, we only need the unit vectors with angles in [0, pi]. >>> t = np.linspace(0, np.pi, 2000) >>> u = np.array([np.cos(t), np.sin(t)]) >>> np.linalg.norm(m.dot(u), axis=0).max() 4.2809152422538475 `p` is a projection matrix with rank 1. 
With exact arithmetic, its singular values would be [1, 0, 0, 0]. >>> v = np.array([0.1, 0.3, 0.9, 0.3]) >>> p = np.outer(v, v) >>> svdvals(p) array([ 1.00000000e+00, 2.02021698e-17, 1.56692500e-17, 8.15115104e-34]) The singular values of an orthogonal matrix are all 1. Here we create a random orthogonal matrix by using the `rvs()` method of `scipy.stats.ortho_group`. >>> from scipy.stats import ortho_group >>> np.random.seed(123) >>> orth = ortho_group.rvs(4) >>> svdvals(orth) array([ 1., 1., 1., 1.]) """ a = _asarray_validated(a, check_finite=check_finite) if a.size: return svd(a, compute_uv=0, overwrite_a=overwrite_a, check_finite=False) elif len(a.shape) != 2: raise ValueError('expected matrix') else: return numpy.empty(0) def diagsvd(s, M, N): """ Construct the sigma matrix in SVD from singular values and size M, N. Parameters ---------- s : (M,) or (N,) array_like Singular values M : int Size of the matrix whose singular values are `s`. N : int Size of the matrix whose singular values are `s`. Returns ------- S : (M, N) ndarray The S-matrix in the singular value decomposition See Also -------- svd : Singular value decomposition of a matrix svdvals : Compute singular values of a matrix. Examples -------- >>> from scipy.linalg import diagsvd >>> vals = np.array([1, 2, 3]) # The array representing the computed svd >>> diagsvd(vals, 3, 4) array([[1, 0, 0, 0], [0, 2, 0, 0], [0, 0, 3, 0]]) >>> diagsvd(vals, 4, 3) array([[1, 0, 0], [0, 2, 0], [0, 0, 3], [0, 0, 0]]) """ part = diag(s) typ = part.dtype.char MorN = len(s) if MorN == M: return r_['-1', part, zeros((M, N-M), typ)] elif MorN == N: return r_[part, zeros((M-N, N), typ)] else: raise ValueError("Length of s must be M or N.") # Orthonormal decomposition def orth(A, rcond=None): """ Construct an orthonormal basis for the range of A using SVD Parameters ---------- A : (M, N) array_like Input array rcond : float, optional Relative condition number. 
Singular values ``s`` smaller than ``rcond * max(s)`` are considered zero. Default: floating point eps * max(M,N). Returns ------- Q : (M, K) ndarray Orthonormal basis for the range of A. K = effective rank of A, as determined by rcond See also -------- svd : Singular value decomposition of a matrix null_space : Matrix null space Examples -------- >>> from scipy.linalg import orth >>> A = np.array([[2, 0, 0], [0, 5, 0]]) # rank 2 array >>> orth(A) array([[0., 1.], [1., 0.]]) >>> orth(A.T) array([[0., 1.], [1., 0.], [0., 0.]]) """ u, s, vh = svd(A, full_matrices=False) M, N = u.shape[0], vh.shape[1] if rcond is None: rcond = numpy.finfo(s.dtype).eps * max(M, N) tol = numpy.amax(s) * rcond num = numpy.sum(s > tol, dtype=int) Q = u[:, :num] return Q def null_space(A, rcond=None): """ Construct an orthonormal basis for the null space of A using SVD Parameters ---------- A : (M, N) array_like Input array rcond : float, optional Relative condition number. Singular values ``s`` smaller than ``rcond * max(s)`` are considered zero. Default: floating point eps * max(M,N). Returns ------- Z : (N, K) ndarray Orthonormal basis for the null space of A. 
K = dimension of effective null space, as determined by rcond See also -------- svd : Singular value decomposition of a matrix orth : Matrix range Examples -------- One-dimensional null space: >>> from scipy.linalg import null_space >>> A = np.array([[1, 1], [1, 1]]) >>> ns = null_space(A) >>> ns * np.sign(ns[0,0]) # Remove the sign ambiguity of the vector array([[ 0.70710678], [-0.70710678]]) Two-dimensional null space: >>> B = np.random.rand(3, 5) >>> Z = null_space(B) >>> Z.shape (5, 2) >>> np.allclose(B.dot(Z), 0) True The basis vectors are orthonormal (up to rounding error): >>> Z.T.dot(Z) array([[ 1.00000000e+00, 6.92087741e-17], [ 6.92087741e-17, 1.00000000e+00]]) """ u, s, vh = svd(A, full_matrices=True) M, N = u.shape[0], vh.shape[1] if rcond is None: rcond = numpy.finfo(s.dtype).eps * max(M, N) tol = numpy.amax(s) * rcond num = numpy.sum(s > tol, dtype=int) Q = vh[num:,:].T.conj() return Q def subspace_angles(A, B): r""" Compute the subspace angles between two matrices. Parameters ---------- A : (M, N) array_like The first input array. B : (M, K) array_like The second input array. Returns ------- angles : ndarray, shape (min(N, K),) The subspace angles between the column spaces of `A` and `B`. See Also -------- orth svd Notes ----- This computes the subspace angles according to the formula provided in [1]_. For equivalence with MATLAB and Octave behavior, use ``angles[0]``. .. versionadded:: 1.0 References ---------- .. [1] Knyazev A, Argentati M (2002) Principal Angles between Subspaces in an A-Based Scalar Product: Algorithms and Perturbation Estimates. SIAM J. Sci. Comput. 23:2008-2040. 
Examples -------- A Hadamard matrix, which has orthogonal columns, so we expect that the suspace angle to be :math:`\frac{\pi}{2}`: >>> from scipy.linalg import hadamard, subspace_angles >>> H = hadamard(4) >>> print(H) [[ 1 1 1 1] [ 1 -1 1 -1] [ 1 1 -1 -1] [ 1 -1 -1 1]] >>> np.rad2deg(subspace_angles(H[:, :2], H[:, 2:])) array([ 90., 90.]) And the subspace angle of a matrix to itself should be zero: >>> subspace_angles(H[:, :2], H[:, :2]) <= 2 * np.finfo(float).eps array([ True, True], dtype=bool) The angles between non-orthogonal subspaces are in between these extremes: >>> x = np.random.RandomState(0).randn(4, 3) >>> np.rad2deg(subspace_angles(x[:, :2], x[:, [2]])) array([ 55.832]) """ # Steps here omit the U and V calculation steps from the paper # 1. Compute orthonormal bases of column-spaces A = _asarray_validated(A, check_finite=True) if len(A.shape) != 2: raise ValueError('expected 2D array, got shape %s' % (A.shape,)) QA = orth(A) del A B = _asarray_validated(B, check_finite=True) if len(B.shape) != 2: raise ValueError('expected 2D array, got shape %s' % (B.shape,)) if len(B) != len(QA): raise ValueError('A and B must have the same number of rows, got ' '%s and %s' % (QA.shape[0], B.shape[0])) QB = orth(B) del B # 2. Compute SVD for cosine QA_T_QB = dot(QA.T, QB) sigma = svdvals(QA_T_QB) # 3. Compute matrix B if QA.shape[1] >= QB.shape[1]: B = QB - dot(QA, QA_T_QB) else: B = QA - dot(QB, QA_T_QB.T) del QA, QB, QA_T_QB # 4. Compute SVD for sine mask = sigma ** 2 >= 0.5 if mask.any(): mu_arcsin = arcsin(clip(svdvals(B, overwrite_a=True), -1., 1.)) else: mu_arcsin = 0. # 5. Compute the principal angles theta = where(mask, mu_arcsin, arccos(clip(sigma, -1., 1.))) return theta
14,561
28.477733
80
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/_matfuncs_sqrtm.py
""" Matrix square root for general matrices and for upper triangular matrices. This module exists to avoid cyclic imports. """ from __future__ import division, print_function, absolute_import __all__ = ['sqrtm'] import numpy as np from scipy._lib._util import _asarray_validated # Local imports from .misc import norm from .lapack import ztrsyl, dtrsyl from .decomp_schur import schur, rsf2csf class SqrtmError(np.linalg.LinAlgError): pass def _sqrtm_triu(T, blocksize=64): """ Matrix square root of an upper triangular matrix. This is a helper function for `sqrtm` and `logm`. Parameters ---------- T : (N, N) array_like upper triangular Matrix whose square root to evaluate blocksize : int, optional If the blocksize is not degenerate with respect to the size of the input array, then use a blocked algorithm. (Default: 64) Returns ------- sqrtm : (N, N) ndarray Value of the sqrt function at `T` References ---------- .. [1] Edvin Deadman, Nicholas J. Higham, Rui Ralha (2013) "Blocked Schur Algorithms for Computing the Matrix Square Root, Lecture Notes in Computer Science, 7782. pp. 171-182. """ T_diag = np.diag(T) keep_it_real = np.isrealobj(T) and np.min(T_diag) >= 0 if not keep_it_real: T_diag = T_diag.astype(complex) R = np.diag(np.sqrt(T_diag)) # Compute the number of blocks to use; use at least one block. n, n = T.shape nblocks = max(n // blocksize, 1) # Compute the smaller of the two sizes of blocks that # we will actually use, and compute the number of large blocks. bsmall, nlarge = divmod(n, nblocks) blarge = bsmall + 1 nsmall = nblocks - nlarge if nsmall * bsmall + nlarge * blarge != n: raise Exception('internal inconsistency') # Define the index range covered by each block. start_stop_pairs = [] start = 0 for count, size in ((nsmall, bsmall), (nlarge, blarge)): for i in range(count): start_stop_pairs.append((start, start + size)) start += size # Within-block interactions. 
for start, stop in start_stop_pairs: for j in range(start, stop): for i in range(j-1, start-1, -1): s = 0 if j - i > 1: s = R[i, i+1:j].dot(R[i+1:j, j]) denom = R[i, i] + R[j, j] num = T[i, j] - s if denom != 0: R[i, j] = (T[i, j] - s) / denom elif denom == 0 and num == 0: R[i, j] = 0 else: raise SqrtmError('failed to find the matrix square root') # Between-block interactions. for j in range(nblocks): jstart, jstop = start_stop_pairs[j] for i in range(j-1, -1, -1): istart, istop = start_stop_pairs[i] S = T[istart:istop, jstart:jstop] if j - i > 1: S = S - R[istart:istop, istop:jstart].dot(R[istop:jstart, jstart:jstop]) # Invoke LAPACK. # For more details, see the solve_sylvester implemention # and the fortran dtrsyl and ztrsyl docs. Rii = R[istart:istop, istart:istop] Rjj = R[jstart:jstop, jstart:jstop] if keep_it_real: x, scale, info = dtrsyl(Rii, Rjj, S) else: x, scale, info = ztrsyl(Rii, Rjj, S) R[istart:istop, jstart:jstop] = x * scale # Return the matrix square root. return R def sqrtm(A, disp=True, blocksize=64): """ Matrix square root. Parameters ---------- A : (N, N) array_like Matrix whose square root to evaluate disp : bool, optional Print warning if error in the result is estimated large instead of returning estimated error. (Default: True) blocksize : integer, optional If the blocksize is not degenerate with respect to the size of the input array, then use a blocked algorithm. (Default: 64) Returns ------- sqrtm : (N, N) ndarray Value of the sqrt function at `A` errest : float (if disp == False) Frobenius norm of the estimated error, ||err||_F / ||A||_F References ---------- .. [1] Edvin Deadman, Nicholas J. Higham, Rui Ralha (2013) "Blocked Schur Algorithms for Computing the Matrix Square Root, Lecture Notes in Computer Science, 7782. pp. 171-182. 
Examples -------- >>> from scipy.linalg import sqrtm >>> a = np.array([[1.0, 3.0], [1.0, 4.0]]) >>> r = sqrtm(a) >>> r array([[ 0.75592895, 1.13389342], [ 0.37796447, 1.88982237]]) >>> r.dot(r) array([[ 1., 3.], [ 1., 4.]]) """ A = _asarray_validated(A, check_finite=True, as_inexact=True) if len(A.shape) != 2: raise ValueError("Non-matrix input to matrix function.") if blocksize < 1: raise ValueError("The blocksize should be at least 1.") keep_it_real = np.isrealobj(A) if keep_it_real: T, Z = schur(A) if not np.array_equal(T, np.triu(T)): T, Z = rsf2csf(T, Z) else: T, Z = schur(A, output='complex') failflag = False try: R = _sqrtm_triu(T, blocksize=blocksize) ZH = np.conjugate(Z).T X = Z.dot(R).dot(ZH) except SqrtmError: failflag = True X = np.empty_like(A) X.fill(np.nan) if disp: if failflag: print("Failed to find a square root.") return X else: try: arg2 = norm(X.dot(X) - A, 'fro')**2 / norm(A, 'fro') except ValueError: # NaNs in matrix arg2 = np.inf return X, arg2
5,874
28.822335
77
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/_decomp_ldl.py
from __future__ import division, print_function, absolute_import from warnings import warn import numpy as np from numpy import (atleast_2d, ComplexWarning, arange, zeros_like, imag, diag, iscomplexobj, tril, triu, argsort, empty_like) from .decomp import _asarray_validated from .lapack import get_lapack_funcs, _compute_lwork __all__ = ['ldl'] def ldl(A, lower=True, hermitian=True, overwrite_a=False, check_finite=True): """ Computes the LDLt or Bunch-Kaufman factorization of a symmetric/ hermitian matrix. This function returns a block diagonal matrix D consisting blocks of size at most 2x2 and also a possibly permuted unit lower triangular matrix ``L`` such that the factorization ``A = L D L^H`` or ``A = L D L^T`` holds. If ``lower`` is False then (again possibly permuted) upper triangular matrices are returned as outer factors. The permutation array can be used to triangularize the outer factors simply by a row shuffle, i.e., ``lu[perm, :]`` is an upper/lower triangular matrix. This is also equivalent to multiplication with a permutation matrix ``P.dot(lu)`` where ``P`` is a column-permuted identity matrix ``I[:, perm]``. Depending on the value of the boolean ``lower``, only upper or lower triangular part of the input array is referenced. Hence a triangular matrix on entry would give the same result as if the full matrix is supplied. Parameters ---------- a : array_like Square input array lower : bool, optional This switches between the lower and upper triangular outer factors of the factorization. Lower triangular (``lower=True``) is the default. hermitian : bool, optional For complex-valued arrays, this defines whether ``a = a.conj().T`` or ``a = a.T`` is assumed. For real-valued arrays, this switch has no effect. overwrite_a : bool, optional Allow overwriting data in ``a`` (may enhance performance). The default is False. check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. 
Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- lu : ndarray The (possibly) permuted upper/lower triangular outer factor of the factorization. d : ndarray The block diagonal multiplier of the factorization. perm : ndarray The row-permutation index array that brings lu into triangular form. Raises ------ ValueError If input array is not square. ComplexWarning If a complex-valued array with nonzero imaginary parts on the diagonal is given and hermitian is set to True. Examples -------- Given an upper triangular array `a` that represents the full symmetric array with its entries, obtain `l`, 'd' and the permutation vector `perm`: >>> import numpy as np >>> from scipy.linalg import ldl >>> a = np.array([[2, -1, 3], [0, 2, 0], [0, 0, 1]]) >>> lu, d, perm = ldl(a, lower=0) # Use the upper part >>> lu array([[ 0. , 0. , 1. ], [ 0. , 1. , -0.5], [ 1. , 1. , 1.5]]) >>> d array([[-5. , 0. , 0. ], [ 0. , 1.5, 0. ], [ 0. , 0. , 2. ]]) >>> perm array([2, 1, 0]) >>> lu[perm, :] array([[ 1. , 1. , 1.5], [ 0. , 1. , -0.5], [ 0. , 0. , 1. ]]) >>> lu.dot(d).dot(lu.T) array([[ 2., -1., 3.], [-1., 2., 0.], [ 3., 0., 1.]]) Notes ----- This function uses ``?SYTRF`` routines for symmetric matrices and ``?HETRF`` routines for Hermitian matrices from LAPACK. See [1]_ for the algorithm details. Depending on the ``lower`` keyword value, only lower or upper triangular part of the input array is referenced. Moreover, this keyword also defines the structure of the outer factors of the factorization. .. versionadded:: 1.1.0 See also -------- cholesky, lu References ---------- .. [1] J.R. Bunch, L. Kaufman, Some stable methods for calculating inertia and solving symmetric linear systems, Math. Comput. Vol.31, 1977. 
DOI: 10.2307/2005787 """ a = atleast_2d(_asarray_validated(A, check_finite=check_finite)) if a.shape[0] != a.shape[1]: raise ValueError('The input array "a" should be square.') # Return empty arrays for empty square input if a.size == 0: return empty_like(a), empty_like(a), np.array([], dtype=int) n = a.shape[0] r_or_c = complex if iscomplexobj(a) else float # Get the LAPACK routine if r_or_c is complex and hermitian: s, sl = 'hetrf', 'hetrf_lwork' if np.any(imag(diag(a))): warn('scipy.linalg.ldl():\nThe imaginary parts of the diagonal' 'are ignored. Use "hermitian=False" for factorization of' 'complex symmetric arrays.', ComplexWarning, stacklevel=2) else: s, sl = 'sytrf', 'sytrf_lwork' solver, solver_lwork = get_lapack_funcs((s, sl), (a,)) lwork = _compute_lwork(solver_lwork, n, lower=lower) ldu, piv, info = solver(a, lwork=lwork, lower=lower, overwrite_a=overwrite_a) if info < 0: raise ValueError('{} exited with the internal error "illegal value ' 'in argument number {}". See LAPACK documentation ' 'for the error codes.'.format(s.upper(), -info)) swap_arr, pivot_arr = _ldl_sanitize_ipiv(piv, lower=lower) d, lu = _ldl_get_d_and_l(ldu, pivot_arr, lower=lower, hermitian=hermitian) lu, perm = _ldl_construct_tri_factor(lu, swap_arr, pivot_arr, lower=lower) return lu, d, perm def _ldl_sanitize_ipiv(a, lower=True): """ This helper function takes the rather strangely encoded permutation array returned by the LAPACK routines ?(HE/SY)TRF and converts it into regularized permutation and diagonal pivot size format. Since FORTRAN uses 1-indexing and LAPACK uses different start points for upper and lower formats there are certain offsets in the indices used below. Let's assume a result where the matrix is 6x6 and there are two 2x2 and two 1x1 blocks reported by the routine. 
To ease the coding efforts, we still populate a 6-sized array and fill zeros as the following :: pivots = [2, 0, 2, 0, 1, 1] This denotes a diagonal matrix of the form :: [x x ] [x x ] [ x x ] [ x x ] [ x ] [ x] In other words, we write 2 when the 2x2 block is first encountered and automatically write 0 to the next entry and skip the next spin of the loop. Thus, a separate counter or array appends to keep track of block sizes are avoided. If needed, zeros can be filtered out later without losing the block structure. Parameters ---------- a : ndarray The permutation array ipiv returned by LAPACK lower : bool, optional The switch to select whether upper or lower triangle is chosen in the LAPACK call. Returns ------- swap_ : ndarray The array that defines the row/column swap operations. For example, if row two is swapped with row four, the result is [0, 3, 2, 3]. pivots : ndarray The array that defines the block diagonal structure as given above. """ n = a.size swap_ = arange(n) pivots = zeros_like(swap_, dtype=int) skip_2x2 = False # Some upper/lower dependent offset values # range (s)tart, r(e)nd, r(i)ncrement x, y, rs, re, ri = (1, 0, 0, n, 1) if lower else (-1, -1, n-1, -1, -1) for ind in range(rs, re, ri): # If previous spin belonged already to a 2x2 block if skip_2x2: skip_2x2 = False continue cur_val = a[ind] # do we have a 1x1 block or not? if cur_val > 0: if cur_val != ind+1: # Index value != array value --> permutation required swap_[ind] = swap_[cur_val-1] pivots[ind] = 1 # Not. elif cur_val < 0 and cur_val == a[ind+x]: # first neg entry of 2x2 block identifier if -cur_val != ind+2: # Index value != array value --> permutation required swap_[ind+x] = swap_[-cur_val-1] pivots[ind+y] = 2 skip_2x2 = True else: # Doesn't make sense, give up raise ValueError('While parsing the permutation array ' 'in "scipy.linalg.ldl", invalid entries ' 'found. 
The array syntax is invalid.') return swap_, pivots def _ldl_get_d_and_l(ldu, pivs, lower=True, hermitian=True): """ Helper function to extract the diagonal and triangular matrices for LDL.T factorization. Parameters ---------- ldu : ndarray The compact output returned by the LAPACK routing pivs : ndarray The sanitized array of {0, 1, 2} denoting the sizes of the pivots. For every 2 there is a succeeding 0. lower : bool, optional If set to False, upper triangular part is considered. hermitian : bool, optional If set to False a symmetric complex array is assumed. Returns ------- d : ndarray The block diagonal matrix. lu : ndarray The upper/lower triangular matrix """ is_c = iscomplexobj(ldu) d = diag(diag(ldu)) n = d.shape[0] blk_i = 0 # block index # row/column offsets for selecting sub-, super-diagonal x, y = (1, 0) if lower else (0, 1) lu = tril(ldu, -1) if lower else triu(ldu, 1) diag_inds = arange(n) lu[diag_inds, diag_inds] = 1 for blk in pivs[pivs != 0]: # increment the block index and check for 2s # if 2 then copy the off diagonals depending on uplo inc = blk_i + blk if blk == 2: d[blk_i+x, blk_i+y] = ldu[blk_i+x, blk_i+y] # If Hermitian matrix is factorized, the cross-offdiagonal element # should be conjugated. if is_c and hermitian: d[blk_i+y, blk_i+x] = ldu[blk_i+x, blk_i+y].conj() else: d[blk_i+y, blk_i+x] = ldu[blk_i+x, blk_i+y] lu[blk_i+x, blk_i+y] = 0. blk_i = inc return d, lu def _ldl_construct_tri_factor(lu, swap_vec, pivs, lower=True): """ Helper function to construct explicit outer factors of LDL factorization. If lower is True the permuted factors are multiplied as L(1)*L(2)*...*L(k). Otherwise, the permuted factors are multiplied as L(k)*...*L(2)*L(1). See LAPACK documentation for more details. Parameters ---------- lu : ndarray The triangular array that is extracted from LAPACK routine call with ones on the diagonals. swap_vec : ndarray The array that defines the row swapping indices. If k'th entry is m then rows k,m are swapped. 
Notice that m'th entry is not necessarily k to avoid undoing the swapping. pivs : ndarray The array that defines the block diagonal structure returned by _ldl_sanitize_ipiv(). lower : bool, optional The boolean to switch between lower and upper triangular structure. Returns ------- lu : ndarray The square outer factor which satisfies the L * D * L.T = A perm : ndarray The permutation vector that brings the lu to the triangular form Notes ----- Note that the original argument "lu" is overwritten. """ n = lu.shape[0] perm = arange(n) # Setup the reading order of the permutation matrix for upper/lower rs, re, ri = (n-1, -1, -1) if lower else (0, n, 1) for ind in range(rs, re, ri): s_ind = swap_vec[ind] if s_ind != ind: # Column start and end positions col_s = ind if lower else 0 col_e = n if lower else ind+1 # If we stumble upon a 2x2 block include both cols in the perm. if pivs[ind] == (0 if lower else 2): col_s += -1 if lower else 0 col_e += 0 if lower else 1 lu[[s_ind, ind], col_s:col_e] = lu[[ind, s_ind], col_s:col_e] perm[[s_ind, ind]] = perm[[ind, s_ind]] return lu, argsort(perm)
12,575
34.425352
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/_sketches.py
""" Sketching-based Matrix Computations """ # Author: Jordi Montes <[email protected]> # August 28, 2017 from __future__ import division, print_function, absolute_import import numpy as np from scipy._lib._util import check_random_state __all__ = ['clarkson_woodruff_transform'] def cwt_matrix(n_rows, n_columns, seed=None): r"""" Generate a matrix S for the Clarkson-Woodruff sketch. Given the desired size of matrix, the method returns a matrix S of size (n_rows, n_columns) where each column has all the entries set to 0 less one position which has been randomly set to +1 or -1 with equal probability. Parameters ---------- n_rows: int Number of rows of S n_columns: int Number of columns of S seed : None or int or `numpy.random.RandomState` instance, optional This parameter defines the ``RandomState`` object to use for drawing random variates. If None (or ``np.random``), the global ``np.random`` state is used. If integer, it is used to seed the local ``RandomState`` instance. Default is None. Returns ------- S : (n_rows, n_columns) array_like Notes ----- Given a matrix A, with probability at least 9/10, .. math:: ||SA|| == (1 \pm \epsilon)||A|| Where epsilon is related to the size of S """ S = np.zeros((n_rows, n_columns)) nz_positions = np.random.randint(0, n_rows, n_columns) rng = check_random_state(seed) values = rng.choice([1, -1], n_columns) for i in range(n_columns): S[nz_positions[i]][i] = values[i] return S def clarkson_woodruff_transform(input_matrix, sketch_size, seed=None): r"""" Find low-rank matrix approximation via the Clarkson-Woodruff Transform. Given an input_matrix ``A`` of size ``(n, d)``, compute a matrix ``A'`` of size (sketch_size, d) which holds: .. math:: ||Ax|| = (1 \pm \epsilon)||A'x|| with high probability. The error is related to the number of rows of the sketch and it is bounded .. math:: poly(r(\epsilon^{-1})) Parameters ---------- input_matrix: array_like Input matrix, of shape ``(n, d)``. sketch_size: int Number of rows for the sketch. 
seed : None or int or `numpy.random.RandomState` instance, optional This parameter defines the ``RandomState`` object to use for drawing random variates. If None (or ``np.random``), the global ``np.random`` state is used. If integer, it is used to seed the local ``RandomState`` instance. Default is None. Returns ------- A' : array_like Sketch of the input matrix ``A``, of size ``(sketch_size, d)``. Notes ----- This is an implementation of the Clarkson-Woodruff Transform (CountSketch). ``A'`` can be computed in principle in ``O(nnz(A))`` (with ``nnz`` meaning the number of nonzero entries), however we don't take advantage of sparse matrices in this implementation. Examples -------- Given a big dense matrix ``A``: >>> from scipy import linalg >>> n_rows, n_columns, sketch_n_rows = (2000, 100, 100) >>> threshold = 0.1 >>> tmp = np.random.normal(0, 0.1, n_rows*n_columns) >>> A = np.reshape(tmp, (n_rows, n_columns)) >>> sketch = linalg.clarkson_woodruff_transform(A, sketch_n_rows) >>> sketch.shape (100, 100) >>> normA = linalg.norm(A) >>> norm_sketch = linalg.norm(sketch) Now with high probability, the condition ``abs(normA-normSketch) < threshold`` holds. References ---------- .. [1] Kenneth L. Clarkson and David P. Woodruff. Low rank approximation and regression in input sparsity time. In STOC, 2013. """ S = cwt_matrix(sketch_size, input_matrix.shape[0], seed) return np.dot(S, input_matrix)
3,847
30.540984
80
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/_expm_frechet.py
"""Frechet derivative of the matrix exponential.""" from __future__ import division, print_function, absolute_import import numpy as np import scipy.linalg __all__ = ['expm_frechet', 'expm_cond'] def expm_frechet(A, E, method=None, compute_expm=True, check_finite=True): """ Frechet derivative of the matrix exponential of A in the direction E. Parameters ---------- A : (N, N) array_like Matrix of which to take the matrix exponential. E : (N, N) array_like Matrix direction in which to take the Frechet derivative. method : str, optional Choice of algorithm. Should be one of - `SPS` (default) - `blockEnlarge` compute_expm : bool, optional Whether to compute also `expm_A` in addition to `expm_frechet_AE`. Default is True. check_finite : bool, optional Whether to check that the input matrix contains only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- expm_A : ndarray Matrix exponential of A. expm_frechet_AE : ndarray Frechet derivative of the matrix exponential of A in the direction E. For ``compute_expm = False``, only `expm_frechet_AE` is returned. See also -------- expm : Compute the exponential of a matrix. Notes ----- This section describes the available implementations that can be selected by the `method` parameter. The default method is *SPS*. Method *blockEnlarge* is a naive algorithm. Method *SPS* is Scaling-Pade-Squaring [1]_. It is a sophisticated implementation which should take only about 3/8 as much time as the naive implementation. The asymptotics are the same. .. versionadded:: 0.13.0 References ---------- .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2009) Computing the Frechet Derivative of the Matrix Exponential, with an application to Condition Number Estimation. SIAM Journal On Matrix Analysis and Applications., 30 (4). pp. 1639-1657. 
ISSN 1095-7162 Examples -------- >>> import scipy.linalg >>> A = np.random.randn(3, 3) >>> E = np.random.randn(3, 3) >>> expm_A, expm_frechet_AE = scipy.linalg.expm_frechet(A, E) >>> expm_A.shape, expm_frechet_AE.shape ((3, 3), (3, 3)) >>> import scipy.linalg >>> A = np.random.randn(3, 3) >>> E = np.random.randn(3, 3) >>> expm_A, expm_frechet_AE = scipy.linalg.expm_frechet(A, E) >>> M = np.zeros((6, 6)) >>> M[:3, :3] = A; M[:3, 3:] = E; M[3:, 3:] = A >>> expm_M = scipy.linalg.expm(M) >>> np.allclose(expm_A, expm_M[:3, :3]) True >>> np.allclose(expm_frechet_AE, expm_M[:3, 3:]) True """ if check_finite: A = np.asarray_chkfinite(A) E = np.asarray_chkfinite(E) else: A = np.asarray(A) E = np.asarray(E) if A.ndim != 2 or A.shape[0] != A.shape[1]: raise ValueError('expected A to be a square matrix') if E.ndim != 2 or E.shape[0] != E.shape[1]: raise ValueError('expected E to be a square matrix') if A.shape != E.shape: raise ValueError('expected A and E to be the same shape') if method is None: method = 'SPS' if method == 'SPS': expm_A, expm_frechet_AE = expm_frechet_algo_64(A, E) elif method == 'blockEnlarge': expm_A, expm_frechet_AE = expm_frechet_block_enlarge(A, E) else: raise ValueError('Unknown implementation %s' % method) if compute_expm: return expm_A, expm_frechet_AE else: return expm_frechet_AE def expm_frechet_block_enlarge(A, E): """ This is a helper function, mostly for testing and profiling. Return expm(A), frechet(A, E) """ n = A.shape[0] M = np.vstack([ np.hstack([A, E]), np.hstack([np.zeros_like(A), A])]) expm_M = scipy.linalg.expm(M) return expm_M[:n, :n], expm_M[:n, n:] """ Maximal values ell_m of ||2**-s A|| such that the backward error bound does not exceed 2**-53. """ ell_table_61 = ( None, # 1 2.11e-8, 3.56e-4, 1.08e-2, 6.49e-2, 2.00e-1, 4.37e-1, 7.83e-1, 1.23e0, 1.78e0, 2.42e0, # 11 3.13e0, 3.90e0, 4.74e0, 5.63e0, 6.56e0, 7.52e0, 8.53e0, 9.56e0, 1.06e1, 1.17e1, ) # The b vectors and U and V are copypasted # from scipy.sparse.linalg.matfuncs.py. 
# M, Lu, Lv follow (6.11), (6.12), (6.13), (3.3) of Al-Mohy & Higham (2009).
# Each _diff_padeK helper evaluates the order-K Pade numerator/denominator
# (U, V) of expm together with their directional derivatives (Lu, Lv).


def _diff_pade3(A, E, ident):
    """Order-3 Pade approximant pieces U, V and derivatives Lu, Lv."""
    b = (120., 60., 12., 1.)
    A2 = A.dot(A)
    # M2 is the Frechet derivative of A2 in direction E (product rule).
    M2 = np.dot(A, E) + np.dot(E, A)
    U = A.dot(b[3]*A2 + b[1]*ident)
    V = b[2]*A2 + b[0]*ident
    Lu = A.dot(b[3]*M2) + E.dot(b[3]*A2 + b[1]*ident)
    Lv = b[2]*M2
    return U, V, Lu, Lv


def _diff_pade5(A, E, ident):
    """Order-5 Pade approximant pieces U, V and derivatives Lu, Lv."""
    b = (30240., 15120., 3360., 420., 30., 1.)
    A2 = A.dot(A)
    M2 = np.dot(A, E) + np.dot(E, A)
    A4 = np.dot(A2, A2)
    # Derivative of A4 via product rule on A2*A2.
    M4 = np.dot(A2, M2) + np.dot(M2, A2)
    U = A.dot(b[5]*A4 + b[3]*A2 + b[1]*ident)
    V = b[4]*A4 + b[2]*A2 + b[0]*ident
    Lu = (A.dot(b[5]*M4 + b[3]*M2) +
            E.dot(b[5]*A4 + b[3]*A2 + b[1]*ident))
    Lv = b[4]*M4 + b[2]*M2
    return U, V, Lu, Lv


def _diff_pade7(A, E, ident):
    """Order-7 Pade approximant pieces U, V and derivatives Lu, Lv."""
    b = (17297280., 8648640., 1995840., 277200., 25200., 1512., 56., 1.)
    A2 = A.dot(A)
    M2 = np.dot(A, E) + np.dot(E, A)
    A4 = np.dot(A2, A2)
    M4 = np.dot(A2, M2) + np.dot(M2, A2)
    A6 = np.dot(A2, A4)
    M6 = np.dot(A4, M2) + np.dot(M4, A2)
    U = A.dot(b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident)
    V = b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*ident
    Lu = (A.dot(b[7]*M6 + b[5]*M4 + b[3]*M2) +
            E.dot(b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident))
    Lv = b[6]*M6 + b[4]*M4 + b[2]*M2
    return U, V, Lu, Lv


def _diff_pade9(A, E, ident):
    """Order-9 Pade approximant pieces U, V and derivatives Lu, Lv."""
    b = (17643225600., 8821612800., 2075673600., 302702400., 30270240.,
            2162160., 110880., 3960., 90., 1.)
    A2 = A.dot(A)
    M2 = np.dot(A, E) + np.dot(E, A)
    A4 = np.dot(A2, A2)
    M4 = np.dot(A2, M2) + np.dot(M2, A2)
    A6 = np.dot(A2, A4)
    M6 = np.dot(A4, M2) + np.dot(M4, A2)
    A8 = np.dot(A4, A4)
    M8 = np.dot(A4, M4) + np.dot(M4, A4)
    U = A.dot(b[9]*A8 + b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident)
    V = b[8]*A8 + b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*ident
    Lu = (A.dot(b[9]*M8 + b[7]*M6 + b[5]*M4 + b[3]*M2) +
            E.dot(b[9]*A8 + b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident))
    Lv = b[8]*M8 + b[6]*M6 + b[4]*M4 + b[2]*M2
    return U, V, Lu, Lv


def expm_frechet_algo_64(A, E):
    """
    Scaling-Pade-squaring (SPS) evaluation of expm(A) and its Frechet
    derivative in direction E, Algorithm 6.4 of Al-Mohy & Higham (2009).

    Returns the pair ``(R, L)`` where ``R = expm(A)`` and ``L`` is the
    Frechet derivative.
    """
    n = A.shape[0]
    s = None
    ident = np.identity(n)
    A_norm_1 = scipy.linalg.norm(A, 1)
    # Try the cheapest Pade order whose backward-error bound (ell_table_61)
    # covers the 1-norm of A; no scaling (s=0) is needed in that case.
    m_pade_pairs = (
            (3, _diff_pade3),
            (5, _diff_pade5),
            (7, _diff_pade7),
            (9, _diff_pade9))
    for m, pade in m_pade_pairs:
        if A_norm_1 <= ell_table_61[m]:
            U, V, Lu, Lv = pade(A, E, ident)
            s = 0
            break
    if s is None:
        # scaling: shrink A (and E) by 2**-s so order-13 Pade is accurate
        s = max(0, int(np.ceil(np.log2(A_norm_1 / ell_table_61[13]))))
        A = A * 2.0**-s
        E = E * 2.0**-s
        # pade order 13
        A2 = np.dot(A, A)
        M2 = np.dot(A, E) + np.dot(E, A)
        A4 = np.dot(A2, A2)
        M4 = np.dot(A2, M2) + np.dot(M2, A2)
        A6 = np.dot(A2, A4)
        M6 = np.dot(A4, M2) + np.dot(M4, A2)
        b = (64764752532480000., 32382376266240000., 7771770303897600.,
                1187353796428800., 129060195264000., 10559470521600.,
                670442572800., 33522128640., 1323241920., 40840800., 960960.,
                16380., 182., 1.)
        W1 = b[13]*A6 + b[11]*A4 + b[9]*A2
        W2 = b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident
        Z1 = b[12]*A6 + b[10]*A4 + b[8]*A2
        Z2 = b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*ident
        W = np.dot(A6, W1) + W2
        U = np.dot(A, W)
        V = np.dot(A6, Z1) + Z2
        # L-prefixed quantities are the derivatives of the matching
        # unprefixed ones, obtained by the product rule.
        Lw1 = b[13]*M6 + b[11]*M4 + b[9]*M2
        Lw2 = b[7]*M6 + b[5]*M4 + b[3]*M2
        Lz1 = b[12]*M6 + b[10]*M4 + b[8]*M2
        Lz2 = b[6]*M6 + b[4]*M4 + b[2]*M2
        Lw = np.dot(A6, Lw1) + np.dot(M6, W1) + Lw2
        Lu = np.dot(A, Lw) + np.dot(E, W)
        Lv = np.dot(A6, Lz1) + np.dot(M6, Z1) + Lz2
    # factor once and solve twice: R solves (V-U) R = (V+U), and L solves
    # the corresponding derivative equation with the same LU factors.
    lu_piv = scipy.linalg.lu_factor(-U + V)
    R = scipy.linalg.lu_solve(lu_piv, U + V)
    L = scipy.linalg.lu_solve(lu_piv, Lu + Lv + np.dot((Lu - Lv), R))
    # squaring: undo the 2**-s scaling; the derivative follows the product
    # rule for R**2 at each step.
    for k in range(s):
        L = np.dot(R, L) + np.dot(L, R)
        R = np.dot(R, R)
    return R, L


def vec(M):
    """
    Stack columns of M to construct a single vector.

    This is somewhat standard notation in linear algebra.

    Parameters
    ----------
    M : 2d array_like
        Input matrix

    Returns
    -------
    v : 1d ndarray
        Output vector

    """
    return M.T.ravel()


def expm_frechet_kronform(A, method=None, check_finite=True):
    """
    Construct the Kronecker form of the Frechet derivative of expm.

    Parameters
    ----------
    A : array_like with shape (N, N)
        Matrix to be expm'd.
    method : str, optional
        Extra keyword to be passed to expm_frechet.
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    K : 2d ndarray with shape (N*N, N*N)
        Kronecker form of the Frechet derivative of the matrix exponential.

    Notes
    -----
    This function is used to help compute the condition number
    of the matrix exponential.

    See also
    --------
    expm : Compute a matrix exponential.
    expm_frechet : Compute the Frechet derivative of the matrix exponential.
    expm_cond : Compute the relative condition number of the matrix
                exponential in the Frobenius norm.
    """
    if check_finite:
        A = np.asarray_chkfinite(A)
    else:
        A = np.asarray(A)
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected a square matrix')

    n = A.shape[0]
    ident = np.identity(n)
    cols = []
    # Column (i, j) of K is vec of the derivative in the elementary
    # direction e_i e_j^T, one direction per pass.
    for i in range(n):
        for j in range(n):
            E = np.outer(ident[i], ident[j])
            F = expm_frechet(A, E,
                    method=method, compute_expm=False, check_finite=False)
            cols.append(vec(F))
    return np.vstack(cols).T


def expm_cond(A, check_finite=True):
    """
    Relative condition number of the matrix exponential in the
    Frobenius norm.

    Parameters
    ----------
    A : 2d array_like
        Square input matrix with shape (N, N).
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    kappa : float
        The relative condition number of the matrix exponential
        in the Frobenius norm

    Notes
    -----
    A faster estimate for the condition number in the 1-norm
    has been published but is not yet implemented in scipy.

    .. versionadded:: 0.14.0

    See also
    --------
    expm : Compute the exponential of a matrix.
    expm_frechet : Compute the Frechet derivative of the matrix exponential.

    Examples
    --------
    >>> from scipy.linalg import expm_cond
    >>> A = np.array([[-0.3, 0.2, 0.6], [0.6, 0.3, -0.1], [-0.7, 1.2, 0.9]])
    >>> k = expm_cond(A)
    >>> k
    1.7787805864469866

    """
    if check_finite:
        A = np.asarray_chkfinite(A)
    else:
        A = np.asarray(A)
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected a square matrix')

    X = scipy.linalg.expm(A)
    # Building the full N^2 x N^2 Kronecker form is O(N^5); fine for the
    # small matrices this function targets.
    K = expm_frechet_kronform(A, check_finite=False)

    # The following norm choices are deliberate.
    # The norms of A and X are Frobenius norms,
    # and the norm of K is the induced 2-norm.
    A_norm = scipy.linalg.norm(A, 'fro')
    X_norm = scipy.linalg.norm(X, 'fro')
    K_norm = scipy.linalg.norm(K, 2)

    kappa = (K_norm * A_norm) / X_norm
    return kappa
12,387
29.067961
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/decomp_schur.py
"""Schur decomposition functions.""" from __future__ import division, print_function, absolute_import import numpy from numpy import asarray_chkfinite, single, asarray, array from numpy.linalg import norm from scipy._lib.six import callable # Local imports. from .misc import LinAlgError, _datacopied from .lapack import get_lapack_funcs from .decomp import eigvals __all__ = ['schur', 'rsf2csf'] _double_precision = ['i', 'l', 'd'] def schur(a, output='real', lwork=None, overwrite_a=False, sort=None, check_finite=True): """ Compute Schur decomposition of a matrix. The Schur decomposition is:: A = Z T Z^H where Z is unitary and T is either upper-triangular, or for real Schur decomposition (output='real'), quasi-upper triangular. In the quasi-triangular form, 2x2 blocks describing complex-valued eigenvalue pairs may extrude from the diagonal. Parameters ---------- a : (M, M) array_like Matrix to decompose output : {'real', 'complex'}, optional Construct the real or complex Schur decomposition (for real matrices). lwork : int, optional Work array size. If None or -1, it is automatically computed. overwrite_a : bool, optional Whether to overwrite data in a (may improve performance). sort : {None, callable, 'lhp', 'rhp', 'iuc', 'ouc'}, optional Specifies whether the upper eigenvalues should be sorted. A callable may be passed that, given a eigenvalue, returns a boolean denoting whether the eigenvalue should be sorted to the top-left (True). Alternatively, string parameters may be used:: 'lhp' Left-hand plane (x.real < 0.0) 'rhp' Right-hand plane (x.real > 0.0) 'iuc' Inside the unit circle (x*x.conjugate() <= 1.0) 'ouc' Outside the unit circle (x*x.conjugate() > 1.0) Defaults to None (no sorting). check_finite : bool, optional Whether to check that the input matrix contains only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. 
Returns ------- T : (M, M) ndarray Schur form of A. It is real-valued for the real Schur decomposition. Z : (M, M) ndarray An unitary Schur transformation matrix for A. It is real-valued for the real Schur decomposition. sdim : int If and only if sorting was requested, a third return value will contain the number of eigenvalues satisfying the sort condition. Raises ------ LinAlgError Error raised under three conditions: 1. The algorithm failed due to a failure of the QR algorithm to compute all eigenvalues 2. If eigenvalue sorting was requested, the eigenvalues could not be reordered due to a failure to separate eigenvalues, usually because of poor conditioning 3. If eigenvalue sorting was requested, roundoff errors caused the leading eigenvalues to no longer satisfy the sorting condition See also -------- rsf2csf : Convert real Schur form to complex Schur form Examples -------- >>> from scipy.linalg import schur, eigvals >>> A = np.array([[0, 2, 2], [0, 1, 2], [1, 0, 1]]) >>> T, Z = schur(A) >>> T array([[ 2.65896708, 1.42440458, -1.92933439], [ 0. , -0.32948354, -0.49063704], [ 0. , 1.31178921, -0.32948354]]) >>> Z array([[0.72711591, -0.60156188, 0.33079564], [0.52839428, 0.79801892, 0.28976765], [0.43829436, 0.03590414, -0.89811411]]) >>> T2, Z2 = schur(A, output='complex') >>> T2 array([[ 2.65896708, -1.22839825+1.32378589j, 0.42590089+1.51937378j], [ 0. , -0.32948354+0.80225456j, -0.59877807+0.56192146j], [ 0. , 0. 
, -0.32948354-0.80225456j]]) >>> eigvals(T2) array([2.65896708, -0.32948354+0.80225456j, -0.32948354-0.80225456j]) An arbitrary custom eig-sorting condition, having positive imaginary part, which is satisfied by only one eigenvalue >>> T3, Z3, sdim = schur(A, output='complex', sort=lambda x: x.imag > 0) >>> sdim 1 """ if output not in ['real', 'complex', 'r', 'c']: raise ValueError("argument must be 'real', or 'complex'") if check_finite: a1 = asarray_chkfinite(a) else: a1 = asarray(a) if len(a1.shape) != 2 or (a1.shape[0] != a1.shape[1]): raise ValueError('expected square matrix') typ = a1.dtype.char if output in ['complex', 'c'] and typ not in ['F', 'D']: if typ in _double_precision: a1 = a1.astype('D') typ = 'D' else: a1 = a1.astype('F') typ = 'F' overwrite_a = overwrite_a or (_datacopied(a1, a)) gees, = get_lapack_funcs(('gees',), (a1,)) if lwork is None or lwork == -1: # get optimal work array result = gees(lambda x: None, a1, lwork=-1) lwork = result[-2][0].real.astype(numpy.int) if sort is None: sort_t = 0 sfunction = lambda x: None else: sort_t = 1 if callable(sort): sfunction = sort elif sort == 'lhp': sfunction = lambda x: (x.real < 0.0) elif sort == 'rhp': sfunction = lambda x: (x.real >= 0.0) elif sort == 'iuc': sfunction = lambda x: (abs(x) <= 1.0) elif sort == 'ouc': sfunction = lambda x: (abs(x) > 1.0) else: raise ValueError("'sort' parameter must either be 'None', or a " "callable, or one of ('lhp','rhp','iuc','ouc')") result = gees(sfunction, a1, lwork=lwork, overwrite_a=overwrite_a, sort_t=sort_t) info = result[-1] if info < 0: raise ValueError('illegal value in {}-th argument of internal gees' ''.format(-info)) elif info == a1.shape[0] + 1: raise LinAlgError('Eigenvalues could not be separated for reordering.') elif info == a1.shape[0] + 2: raise LinAlgError('Leading eigenvalues do not satisfy sort condition.') elif info > 0: raise LinAlgError("Schur form not found. 
Possibly ill-conditioned.") if sort_t == 0: return result[0], result[-3] else: return result[0], result[-3], result[1] eps = numpy.finfo(float).eps feps = numpy.finfo(single).eps _array_kind = {'b': 0, 'h': 0, 'B': 0, 'i': 0, 'l': 0, 'f': 0, 'd': 0, 'F': 1, 'D': 1} _array_precision = {'i': 1, 'l': 1, 'f': 0, 'd': 1, 'F': 0, 'D': 1} _array_type = [['f', 'd'], ['F', 'D']] def _commonType(*arrays): kind = 0 precision = 0 for a in arrays: t = a.dtype.char kind = max(kind, _array_kind[t]) precision = max(precision, _array_precision[t]) return _array_type[kind][precision] def _castCopy(type, *arrays): cast_arrays = () for a in arrays: if a.dtype.char == type: cast_arrays = cast_arrays + (a.copy(),) else: cast_arrays = cast_arrays + (a.astype(type),) if len(cast_arrays) == 1: return cast_arrays[0] else: return cast_arrays def rsf2csf(T, Z, check_finite=True): """ Convert real Schur form to complex Schur form. Convert a quasi-diagonal real-valued Schur form to the upper triangular complex-valued Schur form. Parameters ---------- T : (M, M) array_like Real Schur form of the original array Z : (M, M) array_like Schur transformation matrix check_finite : bool, optional Whether to check that the input arrays contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- T : (M, M) ndarray Complex Schur form of the original array Z : (M, M) ndarray Schur transformation matrix corresponding to the complex form See Also -------- schur : Schur decomposition of an array Examples -------- >>> from scipy.linalg import schur, rsf2csf >>> A = np.array([[0, 2, 2], [0, 1, 2], [1, 0, 1]]) >>> T, Z = schur(A) >>> T array([[ 2.65896708, 1.42440458, -1.92933439], [ 0. , -0.32948354, -0.49063704], [ 0. 
, 1.31178921, -0.32948354]]) >>> Z array([[0.72711591, -0.60156188, 0.33079564], [0.52839428, 0.79801892, 0.28976765], [0.43829436, 0.03590414, -0.89811411]]) >>> T2 , Z2 = rsf2csf(T, Z) >>> T2 array([[2.65896708+0.j, -1.64592781+0.743164187j, -1.21516887+1.00660462j], [0.+0.j , -0.32948354+8.02254558e-01j, -0.82115218-2.77555756e-17j], [0.+0.j , 0.+0.j, -0.32948354-0.802254558j]]) >>> Z2 array([[0.72711591+0.j, 0.28220393-0.31385693j, 0.51319638-0.17258824j], [0.52839428+0.j, 0.24720268+0.41635578j, -0.68079517-0.15118243j], [0.43829436+0.j, -0.76618703+0.01873251j, -0.03063006+0.46857912j]]) """ if check_finite: Z, T = map(asarray_chkfinite, (Z, T)) else: Z, T = map(asarray, (Z, T)) for ind, X in enumerate([Z, T]): if X.ndim != 2 or X.shape[0] != X.shape[1]: raise ValueError("Input '{}' must be square.".format('ZT'[ind])) if T.shape[0] != Z.shape[0]: raise ValueError("Input array shapes must match: Z: {} vs. T: {}" "".format(Z.shape, T.shape)) N = T.shape[0] t = _commonType(Z, T, array([3.0], 'F')) Z, T = _castCopy(t, Z, T) for m in range(N-1, 0, -1): if abs(T[m, m-1]) > eps*(abs(T[m-1, m-1]) + abs(T[m, m])): mu = eigvals(T[m-1:m+1, m-1:m+1]) - T[m, m] r = norm([mu[0], T[m, m-1]]) c = mu[0] / r s = T[m, m-1] / r G = array([[c.conj(), s], [-s, c]], dtype=t) T[m-1:m+1, m-1:] = G.dot(T[m-1:m+1, m-1:]) T[:m+1, m-1:m+1] = T[:m+1, m-1:m+1].dot(G.conj().T) Z[:, m-1:m+1] = Z[:, m-1:m+1].dot(G.conj().T) T[m, m-1] = 0.0 return T, Z
10,318
33.861486
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/_cython_signature_generator.py
""" A script that uses f2py to generate the signature files used to make the Cython BLAS and LAPACK wrappers from the fortran source code for LAPACK and the reference BLAS. To generate the BLAS wrapper signatures call: python _cython_signature_generator.py blas <blas_directory> <out_file> To generate the LAPACK wrapper signatures call: python _cython_signature_generator.py lapack <lapack_src_directory> <out_file> """ import glob from numpy.f2py import crackfortran sig_types = {'integer': 'int', 'complex': 'c', 'double precision': 'd', 'real': 's', 'complex*16': 'z', 'double complex': 'z', 'character': 'char', 'logical': 'bint'} def get_type(info, arg): argtype = sig_types[info['vars'][arg]['typespec']] if argtype == 'c' and info['vars'][arg].get('kindselector') is not None: argtype = 'z' return argtype def make_signature(filename): info = crackfortran.crackfortran(filename)[0] name = info['name'] if info['block'] == 'subroutine': return_type = 'void' else: return_type = get_type(info, name) arglist = [' *'.join([get_type(info, arg), arg]) for arg in info['args']] args = ', '.join(arglist) # Eliminate strange variable naming that replaces rank with rank_bn. 
args = args.replace('rank_bn', 'rank') return '{0} {1}({2})\n'.format(return_type, name, args) def get_sig_name(line): return line.split('(')[0].split(' ')[-1] def sigs_from_dir(directory, outfile, manual_wrappers=None, exclusions=None): if directory[-1] in ['/', '\\']: directory = directory[:-1] files = glob.glob(directory + '/*.f*') if exclusions is None: exclusions = [] if manual_wrappers is not None: exclusions += [get_sig_name(l) for l in manual_wrappers.split('\n')] signatures = [] for filename in files: name = filename.split('\\')[-1][:-2] if name in exclusions: continue signatures.append(make_signature(filename)) if manual_wrappers is not None: signatures += [l + '\n' for l in manual_wrappers.split('\n')] signatures.sort(key=get_sig_name) comment = ["# This file was generated by _cython_signature_generator.py.\n", "# Do not edit this file directly.\n\n"] with open(outfile, 'w') as f: f.writelines(comment) f.writelines(signatures) # The signature that is used for zcgesv in lapack 3.1.0 and 3.1.1 changed # in version 3.2.0. The version included in the clapack on OSX has the # more recent signature though. # slamch and dlamch are not in the lapack src directory, but,since they # already have Python wrappers, we'll wrap them as well. # The other manual signatures are used because the signature generating # functions don't work when function pointer arguments are used. 
# Hand-written signature lines appended verbatim to the generated LAPACK
# signature file; these routines take callback (function-pointer) arguments
# or otherwise cannot be generated automatically.  This string is runtime
# data: one signature per line, parsed by get_sig_name/sigs_from_dir.
lapack_manual_wrappers = '''void cgees(char *jobvs, char *sort, cselect1 *select, int *n, c *a, int *lda, int *sdim, c *w, c *vs, int *ldvs, c *work, int *lwork, s *rwork, bint *bwork, int *info)
void cgeesx(char *jobvs, char *sort, cselect1 *select, char *sense, int *n, c *a, int *lda, int *sdim, c *w, c *vs, int *ldvs, s *rconde, s *rcondv, c *work, int *lwork, s *rwork, bint *bwork, int *info)
void cgges(char *jobvsl, char *jobvsr, char *sort, cselect2 *selctg, int *n, c *a, int *lda, c *b, int *ldb, int *sdim, c *alpha, c *beta, c *vsl, int *ldvsl, c *vsr, int *ldvsr, c *work, int *lwork, s *rwork, bint *bwork, int *info)
void cggesx(char *jobvsl, char *jobvsr, char *sort, cselect2 *selctg, char *sense, int *n, c *a, int *lda, c *b, int *ldb, int *sdim, c *alpha, c *beta, c *vsl, int *ldvsl, c *vsr, int *ldvsr, s *rconde, s *rcondv, c *work, int *lwork, s *rwork, int *iwork, int *liwork, bint *bwork, int *info)
void dgees(char *jobvs, char *sort, dselect2 *select, int *n, d *a, int *lda, int *sdim, d *wr, d *wi, d *vs, int *ldvs, d *work, int *lwork, bint *bwork, int *info)
void dgeesx(char *jobvs, char *sort, dselect2 *select, char *sense, int *n, d *a, int *lda, int *sdim, d *wr, d *wi, d *vs, int *ldvs, d *rconde, d *rcondv, d *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info)
void dgges(char *jobvsl, char *jobvsr, char *sort, dselect3 *selctg, int *n, d *a, int *lda, d *b, int *ldb, int *sdim, d *alphar, d *alphai, d *beta, d *vsl, int *ldvsl, d *vsr, int *ldvsr, d *work, int *lwork, bint *bwork, int *info)
void dggesx(char *jobvsl, char *jobvsr, char *sort, dselect3 *selctg, char *sense, int *n, d *a, int *lda, d *b, int *ldb, int *sdim, d *alphar, d *alphai, d *beta, d *vsl, int *ldvsl, d *vsr, int *ldvsr, d *rconde, d *rcondv, d *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info)
d dlamch(char *cmach)
void ilaver(int *vers_major, int *vers_minor, int *vers_patch)
void sgees(char *jobvs, char *sort, sselect2 *select, int *n, s *a, int *lda, int *sdim, s *wr, s *wi, s *vs, int *ldvs, s *work, int *lwork, bint *bwork, int *info)
void sgeesx(char *jobvs, char *sort, sselect2 *select, char *sense, int *n, s *a, int *lda, int *sdim, s *wr, s *wi, s *vs, int *ldvs, s *rconde, s *rcondv, s *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info)
void sgges(char *jobvsl, char *jobvsr, char *sort, sselect3 *selctg, int *n, s *a, int *lda, s *b, int *ldb, int *sdim, s *alphar, s *alphai, s *beta, s *vsl, int *ldvsl, s *vsr, int *ldvsr, s *work, int *lwork, bint *bwork, int *info)
void sggesx(char *jobvsl, char *jobvsr, char *sort, sselect3 *selctg, char *sense, int *n, s *a, int *lda, s *b, int *ldb, int *sdim, s *alphar, s *alphai, s *beta, s *vsl, int *ldvsl, s *vsr, int *ldvsr, s *rconde, s *rcondv, s *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info)
s slamch(char *cmach)
void zgees(char *jobvs, char *sort, zselect1 *select, int *n, z *a, int *lda, int *sdim, z *w, z *vs, int *ldvs, z *work, int *lwork, d *rwork, bint *bwork, int *info)
void zgeesx(char *jobvs, char *sort, zselect1 *select, char *sense, int *n, z *a, int *lda, int *sdim, z *w, z *vs, int *ldvs, d *rconde, d *rcondv, z *work, int *lwork, d *rwork, bint *bwork, int *info)
void zgges(char *jobvsl, char *jobvsr, char *sort, zselect2 *selctg, int *n, z *a, int *lda, z *b, int *ldb, int *sdim, z *alpha, z *beta, z *vsl, int *ldvsl, z *vsr, int *ldvsr, z *work, int *lwork, d *rwork, bint *bwork, int *info)
void zggesx(char *jobvsl, char *jobvsr, char *sort, zselect2 *selctg, char *sense, int *n, z *a, int *lda, z *b, int *ldb, int *sdim, z *alpha, z *beta, z *vsl, int *ldvsl, z *vsr, int *ldvsr, d *rconde, d *rcondv, z *work, int *lwork, d *rwork, int *iwork, int *liwork, bint *bwork, int *info)'''


# Command-line driver: argv = (blas|lapack, source_dir, output_file).
if __name__ == '__main__':
    from sys import argv
    libname, src_dir, outfile = argv[1:]
    # Exclude scabs and sisnan since they aren't currently included
    # in the scipy-specific ABI wrappers.
    if libname.lower() == 'blas':
        sigs_from_dir(src_dir, outfile, exclusions=['scabs1', 'xerbla'])
    elif libname.lower() == 'lapack':
        # Exclude all routines that do not have consistent interfaces from
        # LAPACK 3.1.0 through 3.6.0.
        # Also exclude routines with string arguments to avoid
        # compatibility woes with different standards for string arguments.
        # Exclude sisnan and slaneg since they aren't currently included in
        # The ABI compatibility wrappers.
        exclusions = ['sisnan', 'csrot', 'zdrot', 'ilaenv', 'iparmq', 'lsamen',
                      'xerbla', 'zcgesv', 'dlaisnan', 'slaisnan', 'dlazq3',
                      'dlazq4', 'slazq3', 'slazq4', 'dlasq3', 'dlasq4',
                      'slasq3', 'slasq4', 'dlasq5', 'slasq5', 'slaneg',
                      # Routines deprecated in LAPACK 3.6.0
                      'cgegs', 'cgegv', 'cgelsx', 'cgeqpf', 'cggsvd', 'cggsvp',
                      'clahrd', 'clatzm', 'ctzrqf', 'dgegs', 'dgegv', 'dgelsx',
                      'dgeqpf', 'dggsvd', 'dggsvp', 'dlahrd', 'dlatzm',
                      'dtzrqf', 'sgegs', 'sgegv', 'sgelsx', 'sgeqpf', 'sggsvd',
                      'sggsvp', 'slahrd', 'slatzm', 'stzrqf', 'zgegs', 'zgegv',
                      'zgelsx', 'zgeqpf', 'zggsvd', 'zggsvp', 'zlahrd',
                      'zlatzm', 'ztzrqf']
        sigs_from_dir(src_dir, outfile, manual_wrappers=lapack_manual_wrappers,
                      exclusions=exclusions)
8,371
63.4
297
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/lapack.py
""" Low-level LAPACK functions (:mod:`scipy.linalg.lapack`) ======================================================= This module contains low-level functions from the LAPACK library. The `*gegv` family of routines have been removed from LAPACK 3.6.0 and have been deprecated in SciPy 0.17.0. They will be removed in a future release. .. versionadded:: 0.12.0 .. note:: The common ``overwrite_<>`` option in many routines, allows the input arrays to be overwritten to avoid extra memory allocation. However this requires the array to satisfy two conditions which are memory order and the data type to match exactly the order and the type expected by the routine. As an example, if you pass a double precision float array to any ``S....`` routine which expects single precision arguments, f2py will create an intermediate array to match the argument types and overwriting will be performed on that intermediate array. Similarly, if a C-contiguous array is passed, f2py will pass a FORTRAN-contiguous array internally. Please make sure that these details are satisfied. More information can be found in the f2py documentation. .. warning:: These functions do little to no error checking. It is possible to cause crashes by mis-using them, so prefer using the higher-level routines in `scipy.linalg`. Finding functions ----------------- .. autosummary:: get_lapack_funcs All functions ------------- .. 
autosummary:: :toctree: generated/ sgbsv dgbsv cgbsv zgbsv sgbtrf dgbtrf cgbtrf zgbtrf sgbtrs dgbtrs cgbtrs zgbtrs sgebal dgebal cgebal zgebal sgees dgees cgees zgees sgeev dgeev cgeev zgeev sgeev_lwork dgeev_lwork cgeev_lwork zgeev_lwork sgegv dgegv cgegv zgegv sgehrd dgehrd cgehrd zgehrd sgehrd_lwork dgehrd_lwork cgehrd_lwork zgehrd_lwork sgelss dgelss cgelss zgelss sgelss_lwork dgelss_lwork cgelss_lwork zgelss_lwork sgelsd dgelsd cgelsd zgelsd sgelsd_lwork dgelsd_lwork cgelsd_lwork zgelsd_lwork sgelsy dgelsy cgelsy zgelsy sgelsy_lwork dgelsy_lwork cgelsy_lwork zgelsy_lwork sgeqp3 dgeqp3 cgeqp3 zgeqp3 sgeqrf dgeqrf cgeqrf zgeqrf sgerqf dgerqf cgerqf zgerqf sgesdd dgesdd cgesdd zgesdd sgesdd_lwork dgesdd_lwork cgesdd_lwork zgesdd_lwork sgesvd dgesvd cgesvd zgesvd sgesvd_lwork dgesvd_lwork cgesvd_lwork zgesvd_lwork sgesv dgesv cgesv zgesv sgesvx dgesvx cgesvx zgesvx sgecon dgecon cgecon zgecon ssysv dsysv csysv zsysv ssysv_lwork dsysv_lwork csysv_lwork zsysv_lwork ssysvx dsysvx csysvx zsysvx ssysvx_lwork dsysvx_lwork csysvx_lwork zsysvx_lwork ssygst dsygst ssytrd dsytrd ssytrd_lwork dsytrd_lwork chetrd zhetrd chetrd_lwork zhetrd_lwork chesv zhesv chesv_lwork zhesv_lwork chesvx zhesvx chesvx_lwork zhesvx_lwork chegst zhegst sgetrf dgetrf cgetrf zgetrf sgetri dgetri cgetri zgetri sgetri_lwork dgetri_lwork cgetri_lwork zgetri_lwork sgetrs dgetrs cgetrs zgetrs sgges dgges cgges zgges sggev dggev cggev zggev chbevd zhbevd chbevx zhbevx cheev zheev cheevd zheevd cheevr zheevr chegv zhegv chegvd zhegvd chegvx zhegvx slarf dlarf clarf zlarf slarfg dlarfg clarfg zlarfg slartg dlartg clartg zlartg slasd4 dlasd4 slaswp dlaswp claswp zlaswp slauum dlauum clauum zlauum spbsv dpbsv cpbsv zpbsv spbtrf dpbtrf cpbtrf zpbtrf spbtrs dpbtrs cpbtrs zpbtrs sposv dposv cposv zposv sposvx dposvx cposvx zposvx spocon dpocon cpocon zpocon spotrf dpotrf cpotrf zpotrf spotri dpotri cpotri zpotri spotrs dpotrs cpotrs zpotrs crot zrot strsyl dtrsyl ctrsyl ztrsyl strtri dtrtri ctrtri ztrtri 
strtrs dtrtrs ctrtrs ztrtrs cunghr zunghr cungqr zungqr cungrq zungrq cunmqr zunmqr sgtsv dgtsv cgtsv zgtsv sptsv dptsv cptsv zptsv slamch dlamch sorghr dorghr sorgqr dorgqr sorgrq dorgrq sormqr dormqr ssbev dsbev ssbevd dsbevd ssbevx dsbevx sstebz dstebz sstemr dstemr ssterf dsterf sstein dstein sstev dstev ssyev dsyev ssyevd dsyevd ssyevr dsyevr ssygv dsygv ssygvd dsygvd ssygvx dsygvx slange dlange clange zlange ilaver """ # # Author: Pearu Peterson, March 2002 # from __future__ import division, print_function, absolute_import __all__ = ['get_lapack_funcs'] import numpy as _np from .blas import _get_funcs # Backward compatibility: from .blas import find_best_blas_type as find_best_lapack_type from scipy.linalg import _flapack try: from scipy.linalg import _clapack except ImportError: _clapack = None # Backward compatibility from scipy._lib._util import DeprecatedImport as _DeprecatedImport clapack = _DeprecatedImport("scipy.linalg.blas.clapack", "scipy.linalg.lapack") flapack = _DeprecatedImport("scipy.linalg.blas.flapack", "scipy.linalg.lapack") # Expose all functions (only flapack --- clapack is an implementation detail) empty_module = None from scipy.linalg._flapack import * del empty_module _dep_message = """The `*gegv` family of routines has been deprecated in LAPACK 3.6.0 in favor of the `*ggev` family of routines. The corresponding wrappers will be removed from SciPy in a future release.""" cgegv = _np.deprecate(cgegv, old_name='cgegv', message=_dep_message) dgegv = _np.deprecate(dgegv, old_name='dgegv', message=_dep_message) sgegv = _np.deprecate(sgegv, old_name='sgegv', message=_dep_message) zgegv = _np.deprecate(zgegv, old_name='zgegv', message=_dep_message) # Modyfy _flapack in this scope so the deprecation warnings apply to # functions returned by get_lapack_funcs. 
_flapack.cgegv = cgegv _flapack.dgegv = dgegv _flapack.sgegv = sgegv _flapack.zgegv = zgegv # some convenience alias for complex functions _lapack_alias = { 'corghr': 'cunghr', 'zorghr': 'zunghr', 'corghr_lwork': 'cunghr_lwork', 'zorghr_lwork': 'zunghr_lwork', 'corgqr': 'cungqr', 'zorgqr': 'zungqr', 'cormqr': 'cunmqr', 'zormqr': 'zunmqr', 'corgrq': 'cungrq', 'zorgrq': 'zungrq', } def get_lapack_funcs(names, arrays=(), dtype=None): """Return available LAPACK function objects from names. Arrays are used to determine the optimal prefix of LAPACK routines. Parameters ---------- names : str or sequence of str Name(s) of LAPACK functions without type prefix. arrays : sequence of ndarrays, optional Arrays can be given to determine optimal prefix of LAPACK routines. If not given, double-precision routines will be used, otherwise the most generic type in arrays will be used. dtype : str or dtype, optional Data-type specifier. Not used if `arrays` is non-empty. Returns ------- funcs : list List containing the found function(s). Notes ----- This routine automatically chooses between Fortran/C interfaces. Fortran code is used whenever possible for arrays with column major order. In all other cases, C code is preferred. In LAPACK, the naming convention is that all functions start with a type prefix, which depends on the type of the principal matrix. These can be one of {'s', 'd', 'c', 'z'} for the numpy types {float32, float64, complex64, complex128} respectively, and are stored in attribute ``typecode`` of the returned functions. Examples -------- Suppose we would like to use '?lange' routine which computes the selected norm of an array. We pass our array in order to get the correct 'lange' flavor. 
>>> import scipy.linalg as LA >>> a = np.random.rand(3,2) >>> x_lange = LA.get_lapack_funcs('lange', (a,)) >>> x_lange.typecode 'd' >>> x_lange = LA.get_lapack_funcs('lange',(a*1j,)) >>> x_lange.typecode 'z' Several LAPACK routines work best when its internal WORK array has the optimal size (big enough for fast computation and small enough to avoid waste of memory). This size is determined also by a dedicated query to the function which is often wrapped as a standalone function and commonly denoted as ``###_lwork``. Below is an example for ``?sysv`` >>> import scipy.linalg as LA >>> a = np.random.rand(1000,1000) >>> b = np.random.rand(1000,1)*1j >>> # We pick up zsysv and zsysv_lwork due to b array ... xsysv, xlwork = LA.get_lapack_funcs(('sysv', 'sysv_lwork'), (a, b)) >>> opt_lwork, _ = xlwork(a.shape[0]) # returns a complex for 'z' prefix >>> udut, ipiv, x, info = xsysv(a, b, lwork=int(opt_lwork.real)) """ return _get_funcs(names, arrays, dtype, "LAPACK", _flapack, _clapack, "flapack", "clapack", _lapack_alias) def _compute_lwork(routine, *args, **kwargs): """ Round floating-point lwork returned by lapack to integer. Several LAPACK routines compute optimal values for LWORK, which they return in a floating-point variable. However, for large values of LWORK, single-precision floating point is not sufficient to hold the exact value --- some LAPACK versions (<= 3.5.0 at least) truncate the returned integer to single precision and in some cases this can be smaller than the required value. 
Examples -------- >>> from scipy.linalg import lapack >>> n = 5000 >>> s_r, s_lw = lapack.get_lapack_funcs(('sysvx', 'sysvx_lwork')) >>> lwork = lapack._compute_lwork(s_lw, n) >>> lwork 32000 """ wi = routine(*args, **kwargs) if len(wi) < 2: raise ValueError('') info = wi[-1] if info != 0: raise ValueError("Internal work array size computation failed: " "%d" % (info,)) lwork = [w.real for w in wi[:-1]] dtype = getattr(routine, 'dtype', None) if dtype == _np.float32 or dtype == _np.complex64: # Single-precision routine -- take next fp value to work # around possible truncation in LAPACK code lwork = _np.nextafter(lwork, _np.inf, dtype=_np.float32) lwork = _np.array(lwork, _np.int64) if _np.any(_np.logical_or(lwork < 0, lwork > _np.iinfo(_np.int32).max)): raise ValueError("Too large work array required -- computation cannot " "be performed with standard 32-bit LAPACK.") lwork = lwork.astype(_np.int32) if lwork.size == 1: return lwork[0] return lwork
11,071
16.43622
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/_solvers.py
"""Matrix equation solver routines""" # Author: Jeffrey Armstrong <[email protected]> # February 24, 2012 # Modified: Chad Fulton <[email protected]> # June 19, 2014 # Modified: Ilhan Polat <[email protected]> # September 13, 2016 from __future__ import division, print_function, absolute_import import warnings import numpy as np from numpy.linalg import inv, LinAlgError, norm, cond, svd from .basic import solve, solve_triangular, matrix_balance from .lapack import get_lapack_funcs from .decomp_schur import schur from .decomp_lu import lu from .decomp_qr import qr from ._decomp_qz import ordqz from .decomp import _asarray_validated from .special_matrices import kron, block_diag __all__ = ['solve_sylvester', 'solve_continuous_lyapunov', 'solve_discrete_lyapunov', 'solve_lyapunov', 'solve_continuous_are', 'solve_discrete_are'] def solve_sylvester(a, b, q): """ Computes a solution (X) to the Sylvester equation :math:`AX + XB = Q`. Parameters ---------- a : (M, M) array_like Leading matrix of the Sylvester equation b : (N, N) array_like Trailing matrix of the Sylvester equation q : (M, N) array_like Right-hand side Returns ------- x : (M, N) ndarray The solution to the Sylvester equation. Raises ------ LinAlgError If solution was not found Notes ----- Computes a solution to the Sylvester matrix equation via the Bartels- Stewart algorithm. The A and B matrices first undergo Schur decompositions. The resulting matrices are used to construct an alternative Sylvester equation (``RY + YS^T = F``) where the R and S matrices are in quasi-triangular form (or, when R, S or F are complex, triangular form). The simplified equation is then solved using ``*TRSYL`` from LAPACK directly. .. 
versionadded:: 0.11.0 Examples -------- Given `a`, `b`, and `q` solve for `x`: >>> from scipy import linalg >>> a = np.array([[-3, -2, 0], [-1, -1, 3], [3, -5, -1]]) >>> b = np.array([[1]]) >>> q = np.array([[1],[2],[3]]) >>> x = linalg.solve_sylvester(a, b, q) >>> x array([[ 0.0625], [-0.5625], [ 0.6875]]) >>> np.allclose(a.dot(x) + x.dot(b), q) True """ # Compute the Schur decomp form of a r, u = schur(a, output='real') # Compute the Schur decomp of b s, v = schur(b.conj().transpose(), output='real') # Construct f = u'*q*v f = np.dot(np.dot(u.conj().transpose(), q), v) # Call the Sylvester equation solver trsyl, = get_lapack_funcs(('trsyl',), (r, s, f)) if trsyl is None: raise RuntimeError('LAPACK implementation does not contain a proper ' 'Sylvester equation solver (TRSYL)') y, scale, info = trsyl(r, s, f, tranb='C') y = scale*y if info < 0: raise LinAlgError("Illegal value encountered in " "the %d term" % (-info,)) return np.dot(np.dot(u, y), v.conj().transpose()) def solve_continuous_lyapunov(a, q): """ Solves the continuous Lyapunov equation :math:`AX + XA^H = Q`. Uses the Bartels-Stewart algorithm to find :math:`X`. Parameters ---------- a : array_like A square matrix q : array_like Right-hand side square matrix Returns ------- x : ndarray Solution to the continuous Lyapunov equation See Also -------- solve_discrete_lyapunov : computes the solution to the discrete-time Lyapunov equation solve_sylvester : computes the solution to the Sylvester equation Notes ----- The continuous Lyapunov equation is a special form of the Sylvester equation, hence this solver relies on LAPACK routine ?TRSYL. .. 
versionadded:: 0.11.0 Examples -------- Given `a` and `q` solve for `x`: >>> from scipy import linalg >>> a = np.array([[-3, -2, 0], [-1, -1, 0], [0, -5, -1]]) >>> b = np.array([2, 4, -1]) >>> q = np.eye(3) >>> x = linalg.solve_continuous_lyapunov(a, q) >>> x array([[ -0.75 , 0.875 , -3.75 ], [ 0.875 , -1.375 , 5.3125], [ -3.75 , 5.3125, -27.0625]]) >>> np.allclose(a.dot(x) + x.dot(a.T), q) True """ a = np.atleast_2d(_asarray_validated(a, check_finite=True)) q = np.atleast_2d(_asarray_validated(q, check_finite=True)) r_or_c = float for ind, _ in enumerate((a, q)): if np.iscomplexobj(_): r_or_c = complex if not np.equal(*_.shape): raise ValueError("Matrix {} should be square.".format("aq"[ind])) # Shape consistency check if a.shape != q.shape: raise ValueError("Matrix a and q should have the same shape.") # Compute the Schur decomp form of a r, u = schur(a, output='real') # Construct f = u'*q*u f = u.conj().T.dot(q.dot(u)) # Call the Sylvester equation solver trsyl = get_lapack_funcs('trsyl', (r, f)) dtype_string = 'T' if r_or_c == float else 'C' y, scale, info = trsyl(r, r, f, tranb=dtype_string) if info < 0: raise ValueError('?TRSYL exited with the internal error ' '"illegal value in argument number {}.". See ' 'LAPACK documentation for the ?TRSYL error codes.' ''.format(-info)) elif info == 1: warnings.warn('Input "a" has an eigenvalue pair whose sum is ' 'very close to or exactly zero. The solution is ' 'obtained via perturbing the coefficients.', RuntimeWarning) y *= scale return u.dot(y).dot(u.conj().T) # For backwards compatibility, keep the old name solve_lyapunov = solve_continuous_lyapunov def _solve_discrete_lyapunov_direct(a, q): """ Solves the discrete Lyapunov equation directly. This function is called by the `solve_discrete_lyapunov` function with `method=direct`. It is not supposed to be called directly. 
""" lhs = kron(a, a.conj()) lhs = np.eye(lhs.shape[0]) - lhs x = solve(lhs, q.flatten()) return np.reshape(x, q.shape) def _solve_discrete_lyapunov_bilinear(a, q): """ Solves the discrete Lyapunov equation using a bilinear transformation. This function is called by the `solve_discrete_lyapunov` function with `method=bilinear`. It is not supposed to be called directly. """ eye = np.eye(a.shape[0]) aH = a.conj().transpose() aHI_inv = inv(aH + eye) b = np.dot(aH - eye, aHI_inv) c = 2*np.dot(np.dot(inv(a + eye), q), aHI_inv) return solve_lyapunov(b.conj().transpose(), -c) def solve_discrete_lyapunov(a, q, method=None): """ Solves the discrete Lyapunov equation :math:`AXA^H - X + Q = 0`. Parameters ---------- a, q : (M, M) array_like Square matrices corresponding to A and Q in the equation above respectively. Must have the same shape. method : {'direct', 'bilinear'}, optional Type of solver. If not given, chosen to be ``direct`` if ``M`` is less than 10 and ``bilinear`` otherwise. Returns ------- x : ndarray Solution to the discrete Lyapunov equation See Also -------- solve_continuous_lyapunov : computes the solution to the continuous-time Lyapunov equation Notes ----- This section describes the available solvers that can be selected by the 'method' parameter. The default method is *direct* if ``M`` is less than 10 and ``bilinear`` otherwise. Method *direct* uses a direct analytical solution to the discrete Lyapunov equation. The algorithm is given in, for example, [1]_. However it requires the linear solution of a system with dimension :math:`M^2` so that performance degrades rapidly for even moderately sized matrices. Method *bilinear* uses a bilinear transformation to convert the discrete Lyapunov equation to a continuous Lyapunov equation :math:`(BX+XB'=-C)` where :math:`B=(A-I)(A+I)^{-1}` and :math:`C=2(A' + I)^{-1} Q (A + I)^{-1}`. The continuous equation can be efficiently solved since it is a special case of a Sylvester equation. 
The transformation algorithm is from Popov (1964) as described in [2]_. .. versionadded:: 0.11.0 References ---------- .. [1] Hamilton, James D. Time Series Analysis, Princeton: Princeton University Press, 1994. 265. Print. http://doc1.lbfl.li/aca/FLMF037168.pdf .. [2] Gajic, Z., and M.T.J. Qureshi. 2008. Lyapunov Matrix Equation in System Stability and Control. Dover Books on Engineering Series. Dover Publications. Examples -------- Given `a` and `q` solve for `x`: >>> from scipy import linalg >>> a = np.array([[0.2, 0.5],[0.7, -0.9]]) >>> q = np.eye(2) >>> x = linalg.solve_discrete_lyapunov(a, q) >>> x array([[ 0.70872893, 1.43518822], [ 1.43518822, -2.4266315 ]]) >>> np.allclose(a.dot(x).dot(a.T)-x, -q) True """ a = np.asarray(a) q = np.asarray(q) if method is None: # Select automatically based on size of matrices if a.shape[0] >= 10: method = 'bilinear' else: method = 'direct' meth = method.lower() if meth == 'direct': x = _solve_discrete_lyapunov_direct(a, q) elif meth == 'bilinear': x = _solve_discrete_lyapunov_bilinear(a, q) else: raise ValueError('Unknown solver %s' % method) return x def solve_continuous_are(a, b, q, r, e=None, s=None, balanced=True): r""" Solves the continuous-time algebraic Riccati equation (CARE). The CARE is defined as .. math:: X A + A^H X - X B R^{-1} B^H X + Q = 0 The limitations for a solution to exist are : * All eigenvalues of :math:`A` on the right half plane, should be controllable. * The associated hamiltonian pencil (See Notes), should have eigenvalues sufficiently away from the imaginary axis. Moreover, if ``e`` or ``s`` is not precisely ``None``, then the generalized version of CARE .. math:: E^HXA + A^HXE - (E^HXB + S) R^{-1} (B^HXE + S^H) + Q = 0 is solved. When omitted, ``e`` is assumed to be the identity and ``s`` is assumed to be the zero matrix with sizes compatible with ``a`` and ``b`` respectively. 
Parameters ---------- a : (M, M) array_like Square matrix b : (M, N) array_like Input q : (M, M) array_like Input r : (N, N) array_like Nonsingular square matrix e : (M, M) array_like, optional Nonsingular square matrix s : (M, N) array_like, optional Input balanced : bool, optional The boolean that indicates whether a balancing step is performed on the data. The default is set to True. Returns ------- x : (M, M) ndarray Solution to the continuous-time algebraic Riccati equation. Raises ------ LinAlgError For cases where the stable subspace of the pencil could not be isolated. See Notes section and the references for details. See Also -------- solve_discrete_are : Solves the discrete-time algebraic Riccati equation Notes ----- The equation is solved by forming the extended hamiltonian matrix pencil, as described in [1]_, :math:`H - \lambda J` given by the block matrices :: [ A 0 B ] [ E 0 0 ] [-Q -A^H -S ] - \lambda * [ 0 E^H 0 ] [ S^H B^H R ] [ 0 0 0 ] and using a QZ decomposition method. In this algorithm, the fail conditions are linked to the symmetry of the product :math:`U_2 U_1^{-1}` and condition number of :math:`U_1`. Here, :math:`U` is the 2m-by-m matrix that holds the eigenvectors spanning the stable subspace with 2m rows and partitioned into two m-row matrices. See [1]_ and [2]_ for more details. In order to improve the QZ decomposition accuracy, the pencil goes through a balancing step where the sum of absolute values of :math:`H` and :math:`J` entries (after removing the diagonal entries of the sum) is balanced following the recipe given in [3]_. .. versionadded:: 0.11.0 References ---------- .. [1] P. van Dooren , "A Generalized Eigenvalue Approach For Solving Riccati Equations.", SIAM Journal on Scientific and Statistical Computing, Vol.2(2), DOI: 10.1137/0902010 .. [2] A.J. Laub, "A Schur Method for Solving Algebraic Riccati Equations.", Massachusetts Institute of Technology. Laboratory for Information and Decision Systems. LIDS-R ; 859. 
Available online : http://hdl.handle.net/1721.1/1301 .. [3] P. Benner, "Symplectic Balancing of Hamiltonian Matrices", 2001, SIAM J. Sci. Comput., 2001, Vol.22(5), DOI: 10.1137/S1064827500367993 Examples -------- Given `a`, `b`, `q`, and `r` solve for `x`: >>> from scipy import linalg >>> a = np.array([[4, 3], [-4.5, -3.5]]) >>> b = np.array([[1], [-1]]) >>> q = np.array([[9, 6], [6, 4.]]) >>> r = 1 >>> x = linalg.solve_continuous_are(a, b, q, r) >>> x array([[ 21.72792206, 14.48528137], [ 14.48528137, 9.65685425]]) >>> np.allclose(a.T.dot(x) + x.dot(a)-x.dot(b).dot(b.T).dot(x), -q) True """ # Validate input arguments a, b, q, r, e, s, m, n, r_or_c, gen_are = _are_validate_args( a, b, q, r, e, s, 'care') H = np.empty((2*m+n, 2*m+n), dtype=r_or_c) H[:m, :m] = a H[:m, m:2*m] = 0. H[:m, 2*m:] = b H[m:2*m, :m] = -q H[m:2*m, m:2*m] = -a.conj().T H[m:2*m, 2*m:] = 0. if s is None else -s H[2*m:, :m] = 0. if s is None else s.conj().T H[2*m:, m:2*m] = b.conj().T H[2*m:, 2*m:] = r if gen_are and e is not None: J = block_diag(e, e.conj().T, np.zeros_like(r, dtype=r_or_c)) else: J = block_diag(np.eye(2*m), np.zeros_like(r, dtype=r_or_c)) if balanced: # xGEBAL does not remove the diagonals before scaling. Also # to avoid destroying the Symplectic structure, we follow Ref.3 M = np.abs(H) + np.abs(J) M[np.diag_indices_from(M)] = 0. _, (sca, _) = matrix_balance(M, separate=1, permute=0) # do we need to bother? if not np.allclose(sca, np.ones_like(sca)): # Now impose diag(D,inv(D)) from Benner where D is # square root of s_i/s_(n+i) for i=0,.... sca = np.log2(sca) # NOTE: Py3 uses "Bankers Rounding: round to the nearest even" !! s = np.round((sca[m:2*m] - sca[:m])/2) sca = 2 ** np.r_[s, -s, sca[2*m:]] # Elementwise multiplication via broadcasting. 
elwisescale = sca[:, None] * np.reciprocal(sca) H *= elwisescale J *= elwisescale # Deflate the pencil to 2m x 2m ala Ref.1, eq.(55) q, r = qr(H[:, -n:]) H = q[:, n:].conj().T.dot(H[:, :2*m]) J = q[:2*m, n:].conj().T.dot(J[:2*m, :2*m]) # Decide on which output type is needed for QZ out_str = 'real' if r_or_c == float else 'complex' _, _, _, _, _, u = ordqz(H, J, sort='lhp', overwrite_a=True, overwrite_b=True, check_finite=False, output=out_str) # Get the relevant parts of the stable subspace basis if e is not None: u, _ = qr(np.vstack((e.dot(u[:m, :m]), u[m:, :m]))) u00 = u[:m, :m] u10 = u[m:, :m] # Solve via back-substituion after checking the condition of u00 up, ul, uu = lu(u00) if 1/cond(uu) < np.spacing(1.): raise LinAlgError('Failed to find a finite solution.') # Exploit the triangular structure x = solve_triangular(ul.conj().T, solve_triangular(uu.conj().T, u10.conj().T, lower=True), unit_diagonal=True, ).conj().T.dot(up.conj().T) if balanced: x *= sca[:m, None] * sca[:m] # Check the deviation from symmetry for lack of success # See proof of Thm.5 item 3 in [2] u_sym = u00.conj().T.dot(u10) n_u_sym = norm(u_sym, 1) u_sym = u_sym - u_sym.conj().T sym_threshold = np.max([np.spacing(1000.), 0.1*n_u_sym]) if norm(u_sym, 1) > sym_threshold: raise LinAlgError('The associated Hamiltonian pencil has eigenvalues ' 'too close to the imaginary axis') return (x + x.conj().T)/2 def solve_discrete_are(a, b, q, r, e=None, s=None, balanced=True): r""" Solves the discrete-time algebraic Riccati equation (DARE). The DARE is defined as .. math:: A^HXA - X - (A^HXB) (R + B^HXB)^{-1} (B^HXA) + Q = 0 The limitations for a solution to exist are : * All eigenvalues of :math:`A` outside the unit disc, should be controllable. * The associated symplectic pencil (See Notes), should have eigenvalues sufficiently away from the unit circle. Moreover, if ``e`` and ``s`` are not both precisely ``None``, then the generalized version of DARE .. 
math:: A^HXA - E^HXE - (A^HXB+S) (R+B^HXB)^{-1} (B^HXA+S^H) + Q = 0 is solved. When omitted, ``e`` is assumed to be the identity and ``s`` is assumed to be the zero matrix. Parameters ---------- a : (M, M) array_like Square matrix b : (M, N) array_like Input q : (M, M) array_like Input r : (N, N) array_like Square matrix e : (M, M) array_like, optional Nonsingular square matrix s : (M, N) array_like, optional Input balanced : bool The boolean that indicates whether a balancing step is performed on the data. The default is set to True. Returns ------- x : (M, M) ndarray Solution to the discrete algebraic Riccati equation. Raises ------ LinAlgError For cases where the stable subspace of the pencil could not be isolated. See Notes section and the references for details. See Also -------- solve_continuous_are : Solves the continuous algebraic Riccati equation Notes ----- The equation is solved by forming the extended symplectic matrix pencil, as described in [1]_, :math:`H - \lambda J` given by the block matrices :: [ A 0 B ] [ E 0 B ] [ -Q E^H -S ] - \lambda * [ 0 A^H 0 ] [ S^H 0 R ] [ 0 -B^H 0 ] and using a QZ decomposition method. In this algorithm, the fail conditions are linked to the symmetry of the product :math:`U_2 U_1^{-1}` and condition number of :math:`U_1`. Here, :math:`U` is the 2m-by-m matrix that holds the eigenvectors spanning the stable subspace with 2m rows and partitioned into two m-row matrices. See [1]_ and [2]_ for more details. In order to improve the QZ decomposition accuracy, the pencil goes through a balancing step where the sum of absolute values of :math:`H` and :math:`J` rows/cols (after removing the diagonal entries) is balanced following the recipe given in [3]_. If the data has small numerical noise, balancing may amplify their effects and some clean up is required. .. versionadded:: 0.11.0 References ---------- .. [1] P. 
van Dooren , "A Generalized Eigenvalue Approach For Solving Riccati Equations.", SIAM Journal on Scientific and Statistical Computing, Vol.2(2), DOI: 10.1137/0902010 .. [2] A.J. Laub, "A Schur Method for Solving Algebraic Riccati Equations.", Massachusetts Institute of Technology. Laboratory for Information and Decision Systems. LIDS-R ; 859. Available online : http://hdl.handle.net/1721.1/1301 .. [3] P. Benner, "Symplectic Balancing of Hamiltonian Matrices", 2001, SIAM J. Sci. Comput., 2001, Vol.22(5), DOI: 10.1137/S1064827500367993 Examples -------- Given `a`, `b`, `q`, and `r` solve for `x`: >>> from scipy import linalg as la >>> a = np.array([[0, 1], [0, -1]]) >>> b = np.array([[1, 0], [2, 1]]) >>> q = np.array([[-4, -4], [-4, 7]]) >>> r = np.array([[9, 3], [3, 1]]) >>> x = la.solve_discrete_are(a, b, q, r) >>> x array([[-4., -4.], [-4., 7.]]) >>> R = la.solve(r + b.T.dot(x).dot(b), b.T.dot(x).dot(a)) >>> np.allclose(a.T.dot(x).dot(a) - x - a.T.dot(x).dot(b).dot(R), -q) True """ # Validate input arguments a, b, q, r, e, s, m, n, r_or_c, gen_are = _are_validate_args( a, b, q, r, e, s, 'dare') # Form the matrix pencil H = np.zeros((2*m+n, 2*m+n), dtype=r_or_c) H[:m, :m] = a H[:m, 2*m:] = b H[m:2*m, :m] = -q H[m:2*m, m:2*m] = np.eye(m) if e is None else e.conj().T H[m:2*m, 2*m:] = 0. if s is None else -s H[2*m:, :m] = 0. if s is None else s.conj().T H[2*m:, 2*m:] = r J = np.zeros_like(H, dtype=r_or_c) J[:m, :m] = np.eye(m) if e is None else e J[m:2*m, m:2*m] = a.conj().T J[2*m:, m:2*m] = -b.conj().T if balanced: # xGEBAL does not remove the diagonals before scaling. Also # to avoid destroying the Symplectic structure, we follow Ref.3 M = np.abs(H) + np.abs(J) M[np.diag_indices_from(M)] = 0. _, (sca, _) = matrix_balance(M, separate=1, permute=0) # do we need to bother? if not np.allclose(sca, np.ones_like(sca)): # Now impose diag(D,inv(D)) from Benner where D is # square root of s_i/s_(n+i) for i=0,.... 
sca = np.log2(sca) # NOTE: Py3 uses "Bankers Rounding: round to the nearest even" !! s = np.round((sca[m:2*m] - sca[:m])/2) sca = 2 ** np.r_[s, -s, sca[2*m:]] # Elementwise multiplication via broadcasting. elwisescale = sca[:, None] * np.reciprocal(sca) H *= elwisescale J *= elwisescale # Deflate the pencil by the R column ala Ref.1 q_of_qr, _ = qr(H[:, -n:]) H = q_of_qr[:, n:].conj().T.dot(H[:, :2*m]) J = q_of_qr[:, n:].conj().T.dot(J[:, :2*m]) # Decide on which output type is needed for QZ out_str = 'real' if r_or_c == float else 'complex' _, _, _, _, _, u = ordqz(H, J, sort='iuc', overwrite_a=True, overwrite_b=True, check_finite=False, output=out_str) # Get the relevant parts of the stable subspace basis if e is not None: u, _ = qr(np.vstack((e.dot(u[:m, :m]), u[m:, :m]))) u00 = u[:m, :m] u10 = u[m:, :m] # Solve via back-substituion after checking the condition of u00 up, ul, uu = lu(u00) if 1/cond(uu) < np.spacing(1.): raise LinAlgError('Failed to find a finite solution.') # Exploit the triangular structure x = solve_triangular(ul.conj().T, solve_triangular(uu.conj().T, u10.conj().T, lower=True), unit_diagonal=True, ).conj().T.dot(up.conj().T) if balanced: x *= sca[:m, None] * sca[:m] # Check the deviation from symmetry for lack of success # See proof of Thm.5 item 3 in [2] u_sym = u00.conj().T.dot(u10) n_u_sym = norm(u_sym, 1) u_sym = u_sym - u_sym.conj().T sym_threshold = np.max([np.spacing(1000.), 0.1*n_u_sym]) if norm(u_sym, 1) > sym_threshold: raise LinAlgError('The associated symplectic pencil has eigenvalues' 'too close to the unit circle') return (x + x.conj().T)/2 def _are_validate_args(a, b, q, r, e, s, eq_type='care'): """ A helper function to validate the arguments supplied to the Riccati equation solvers. Any discrepancy found in the input matrices leads to a ``ValueError`` exception. Essentially, it performs: - a check whether the input is free of NaN and Infs. 
- a pass for the data through ``numpy.atleast_2d()`` - squareness check of the relevant arrays, - shape consistency check of the arrays, - singularity check of the relevant arrays, - symmetricity check of the relevant matrices, - a check whether the regular or the generalized version is asked. This function is used by ``solve_continuous_are`` and ``solve_discrete_are``. Parameters ---------- a, b, q, r, e, s : array_like Input data eq_type : str Accepted arguments are 'care' and 'dare'. Returns ------- a, b, q, r, e, s : ndarray Regularized input data m, n : int shape of the problem r_or_c : type Data type of the problem, returns float or complex gen_or_not : bool Type of the equation, True for generalized and False for regular ARE. """ if not eq_type.lower() in ('dare', 'care'): raise ValueError("Equation type unknown. " "Only 'care' and 'dare' is understood") a = np.atleast_2d(_asarray_validated(a, check_finite=True)) b = np.atleast_2d(_asarray_validated(b, check_finite=True)) q = np.atleast_2d(_asarray_validated(q, check_finite=True)) r = np.atleast_2d(_asarray_validated(r, check_finite=True)) # Get the correct data types otherwise Numpy complains # about pushing complex numbers into real arrays. r_or_c = complex if np.iscomplexobj(b) else float for ind, mat in enumerate((a, q, r)): if np.iscomplexobj(mat): r_or_c = complex if not np.equal(*mat.shape): raise ValueError("Matrix {} should be square.".format("aqr"[ind])) # Shape consistency checks m, n = b.shape if m != a.shape[0]: raise ValueError("Matrix a and b should have the same number of rows.") if m != q.shape[0]: raise ValueError("Matrix a and q should have the same shape.") if n != r.shape[0]: raise ValueError("Matrix b and r should have the same number of cols.") # Check if the data matrices q, r are (sufficiently) hermitian for ind, mat in enumerate((q, r)): if norm(mat - mat.conj().T, 1) > np.spacing(norm(mat, 1))*100: raise ValueError("Matrix {} should be symmetric/hermitian." 
"".format("qr"[ind])) # Continuous time ARE should have a nonsingular r matrix. if eq_type == 'care': min_sv = svd(r, compute_uv=False)[-1] if min_sv == 0. or min_sv < np.spacing(1.)*norm(r, 1): raise ValueError('Matrix r is numerically singular.') # Check if the generalized case is required with omitted arguments # perform late shape checking etc. generalized_case = e is not None or s is not None if generalized_case: if e is not None: e = np.atleast_2d(_asarray_validated(e, check_finite=True)) if not np.equal(*e.shape): raise ValueError("Matrix e should be square.") if m != e.shape[0]: raise ValueError("Matrix a and e should have the same shape.") # numpy.linalg.cond doesn't check for exact zeros and # emits a runtime warning. Hence the following manual check. min_sv = svd(e, compute_uv=False)[-1] if min_sv == 0. or min_sv < np.spacing(1.) * norm(e, 1): raise ValueError('Matrix e is numerically singular.') if np.iscomplexobj(e): r_or_c = complex if s is not None: s = np.atleast_2d(_asarray_validated(s, check_finite=True)) if s.shape != b.shape: raise ValueError("Matrix b and s should have the same shape.") if np.iscomplexobj(s): r_or_c = complex return a, b, q, r, e, s, m, n, r_or_c, generalized_case
28,279
32.467456
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/__init__.py
""" ==================================== Linear algebra (:mod:`scipy.linalg`) ==================================== .. currentmodule:: scipy.linalg Linear algebra functions. .. seealso:: `numpy.linalg` for more linear algebra functions. Note that although `scipy.linalg` imports most of them, identically named functions from `scipy.linalg` may offer more or slightly differing functionality. Basics ====== .. autosummary:: :toctree: generated/ inv - Find the inverse of a square matrix solve - Solve a linear system of equations solve_banded - Solve a banded linear system solveh_banded - Solve a Hermitian or symmetric banded system solve_circulant - Solve a circulant system solve_triangular - Solve a triangular matrix solve_toeplitz - Solve a toeplitz matrix det - Find the determinant of a square matrix norm - Matrix and vector norm lstsq - Solve a linear least-squares problem pinv - Pseudo-inverse (Moore-Penrose) using lstsq pinv2 - Pseudo-inverse using svd pinvh - Pseudo-inverse of hermitian matrix kron - Kronecker product of two arrays tril - Construct a lower-triangular matrix from a given matrix triu - Construct an upper-triangular matrix from a given matrix orthogonal_procrustes - Solve an orthogonal Procrustes problem matrix_balance - Balance matrix entries with a similarity transformation subspace_angles - Compute the subspace angles between two matrices LinAlgError LinAlgWarning Eigenvalue Problems =================== .. 
autosummary:: :toctree: generated/ eig - Find the eigenvalues and eigenvectors of a square matrix eigvals - Find just the eigenvalues of a square matrix eigh - Find the e-vals and e-vectors of a Hermitian or symmetric matrix eigvalsh - Find just the eigenvalues of a Hermitian or symmetric matrix eig_banded - Find the eigenvalues and eigenvectors of a banded matrix eigvals_banded - Find just the eigenvalues of a banded matrix eigh_tridiagonal - Find the eigenvalues and eigenvectors of a tridiagonal matrix eigvalsh_tridiagonal - Find just the eigenvalues of a tridiagonal matrix Decompositions ============== .. autosummary:: :toctree: generated/ lu - LU decomposition of a matrix lu_factor - LU decomposition returning unordered matrix and pivots lu_solve - Solve Ax=b using back substitution with output of lu_factor svd - Singular value decomposition of a matrix svdvals - Singular values of a matrix diagsvd - Construct matrix of singular values from output of svd orth - Construct orthonormal basis for the range of A using svd null_space - Construct orthonormal basis for the null space of A using svd ldl - LDL.T decomposition of a Hermitian or a symmetric matrix. cholesky - Cholesky decomposition of a matrix cholesky_banded - Cholesky decomp. of a sym. or Hermitian banded matrix cho_factor - Cholesky decomposition for use in solving a linear system cho_solve - Solve previously factored linear system cho_solve_banded - Solve previously factored banded linear system polar - Compute the polar decomposition. 
qr - QR decomposition of a matrix qr_multiply - QR decomposition and multiplication by Q qr_update - Rank k QR update qr_delete - QR downdate on row or column deletion qr_insert - QR update on row or column insertion rq - RQ decomposition of a matrix qz - QZ decomposition of a pair of matrices ordqz - QZ decomposition of a pair of matrices with reordering schur - Schur decomposition of a matrix rsf2csf - Real to complex Schur form hessenberg - Hessenberg form of a matrix cdf2rdf - Complex diagonal form to real diagonal block form .. seealso:: `scipy.linalg.interpolative` -- Interpolative matrix decompositions Matrix Functions ================ .. autosummary:: :toctree: generated/ expm - Matrix exponential logm - Matrix logarithm cosm - Matrix cosine sinm - Matrix sine tanm - Matrix tangent coshm - Matrix hyperbolic cosine sinhm - Matrix hyperbolic sine tanhm - Matrix hyperbolic tangent signm - Matrix sign sqrtm - Matrix square root funm - Evaluating an arbitrary matrix function expm_frechet - Frechet derivative of the matrix exponential expm_cond - Relative condition number of expm in the Frobenius norm fractional_matrix_power - Fractional matrix power Matrix Equation Solvers ======================= .. autosummary:: :toctree: generated/ solve_sylvester - Solve the Sylvester matrix equation solve_continuous_are - Solve the continuous-time algebraic Riccati equation solve_discrete_are - Solve the discrete-time algebraic Riccati equation solve_continuous_lyapunov - Solve the continous-time Lyapunov equation solve_discrete_lyapunov - Solve the discrete-time Lyapunov equation Sketches and Random Projections =============================== .. autosummary:: :toctree: generated/ clarkson_woodruff_transform - Applies the Clarkson Woodruff Sketch (a.k.a CountMin Sketch) Special Matrices ================ .. 
autosummary:: :toctree: generated/ block_diag - Construct a block diagonal matrix from submatrices circulant - Circulant matrix companion - Companion matrix dft - Discrete Fourier transform matrix hadamard - Hadamard matrix of order 2**n hankel - Hankel matrix helmert - Helmert matrix hilbert - Hilbert matrix invhilbert - Inverse Hilbert matrix leslie - Leslie matrix pascal - Pascal matrix invpascal - Inverse Pascal matrix toeplitz - Toeplitz matrix tri - Construct a matrix filled with ones at and below a given diagonal Low-level routines ================== .. autosummary:: :toctree: generated/ get_blas_funcs get_lapack_funcs find_best_blas_type .. seealso:: `scipy.linalg.blas` -- Low-level BLAS functions `scipy.linalg.lapack` -- Low-level LAPACK functions `scipy.linalg.cython_blas` -- Low-level BLAS functions for Cython `scipy.linalg.cython_lapack` -- Low-level LAPACK functions for Cython """ # noqa: E501 from __future__ import division, print_function, absolute_import from .linalg_version import linalg_version as __version__ from .misc import * from .basic import * from .decomp import * from .decomp_lu import * from ._decomp_ldl import * from .decomp_cholesky import * from .decomp_qr import * from ._decomp_qz import * from .decomp_svd import * from .decomp_schur import * from ._decomp_polar import * from .matfuncs import * from .blas import * from .lapack import * from .special_matrices import * from ._solvers import * from ._procrustes import * from ._decomp_update import * from ._sketches import * __all__ = [s for s in dir() if not s.startswith('_')] from numpy.dual import register_func for k in ['norm', 'inv', 'svd', 'solve', 'det', 'eig', 'eigh', 'eigvals', 'eigvalsh', 'lstsq', 'cholesky']: try: register_func(k, eval(k)) except ValueError: pass try: register_func('pinv', pinv2) except ValueError: pass del k, register_func from scipy._lib._testutils import PytestTester test = PytestTester(__name__) del PytestTester
7,176
30.204348
93
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/flinalg.py
#
# Author: Pearu Peterson, March 2002
#
from __future__ import division, print_function, absolute_import

__all__ = ['get_flinalg_funcs']

# The following ensures that a possibly missing flavor (C or Fortran) is
# replaced with the available one.  If none is available, an exception
# is raised at the first attempt to use the resources.
try:
    from . import _flinalg
except ImportError:
    _flinalg = None
    # from numpy.distutils.misc_util import PostponedException
    # _flinalg = PostponedException()
    # print _flinalg.__doc__


def has_column_major_storage(arr):
    """Return True if ``arr`` uses Fortran (column-major) storage."""
    return arr.flags['FORTRAN']


# Maps array dtype characters to _flinalg routine prefixes.  Any dtype
# character not listed here (e.g. integer dtypes) falls back to double
# precision ('d').
_type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z'}


def get_flinalg_funcs(names, arrays=(), debug=0):
    """Return optimal available _flinalg function objects with names.

    Parameters
    ----------
    names : sequence of str
        Routine names without the type prefix, e.g. ``('det',)``.
    arrays : sequence of ndarray, optional
        Sample arrays used to determine the optimal type prefix and
        whether the Fortran- or C-ordered variant is preferred.  When
        empty, double precision ('d') is assumed.
    debug : int, optional
        Unused; retained for backward compatibility.

    Returns
    -------
    funcs : tuple of callable or None
        One entry per name in `names`; an entry is ``None`` when the
        routine (or the ``_flinalg`` extension itself) is unavailable.
    """
    # Pick the prefix from the smallest dtype character among the given
    # arrays (ties broken by argument position); unknown characters are
    # treated as double precision.
    ordering = []
    for i in range(len(arrays)):
        t = arrays[i].dtype.char
        if t not in _type_conv:
            t = 'd'
        ordering.append((t, i))
    if ordering:
        ordering.sort()
        required_prefix = _type_conv[ordering[0][0]]
    else:
        required_prefix = 'd'

    # Some routines may require special treatment.
    # Handle them here before the default lookup.

    # Default lookup: prefer the column-major ('_c') implementation when
    # the selected array is already Fortran-ordered, else row-major.
    if ordering and has_column_major_storage(arrays[ordering[0][1]]):
        suffix1, suffix2 = '_c', '_r'
    else:
        suffix1, suffix2 = '_r', '_c'

    funcs = []
    for name in names:
        func_name = required_prefix + name
        func = getattr(_flinalg, func_name + suffix1,
                       getattr(_flinalg, func_name + suffix2, None))
        funcs.append(func)
    return tuple(funcs)
1,728
28.305085
83
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/blas.py
""" Low-level BLAS functions (:mod:`scipy.linalg.blas`) =================================================== This module contains low-level functions from the BLAS library. .. versionadded:: 0.12.0 .. note:: The common ``overwrite_<>`` option in many routines, allows the input arrays to be overwritten to avoid extra memory allocation. However this requires the array to satisfy two conditions which are memory order and the data type to match exactly the order and the type expected by the routine. As an example, if you pass a double precision float array to any ``S....`` routine which expects single precision arguments, f2py will create an intermediate array to match the argument types and overwriting will be performed on that intermediate array. Similarly, if a C-contiguous array is passed, f2py will pass a FORTRAN-contiguous array internally. Please make sure that these details are satisfied. More information can be found in the f2py documentation. .. warning:: These functions do little to no error checking. It is possible to cause crashes by mis-using them, so prefer using the higher-level routines in `scipy.linalg`. Finding functions ----------------- .. autosummary:: :toctree: generated/ get_blas_funcs find_best_blas_type BLAS Level 1 functions ---------------------- .. autosummary:: :toctree: generated/ caxpy ccopy cdotc cdotu crotg cscal csrot csscal cswap dasum daxpy dcopy ddot dnrm2 drot drotg drotm drotmg dscal dswap dzasum dznrm2 icamax idamax isamax izamax sasum saxpy scasum scnrm2 scopy sdot snrm2 srot srotg srotm srotmg sscal sswap zaxpy zcopy zdotc zdotu zdrot zdscal zrotg zscal zswap BLAS Level 2 functions ---------------------- .. 
autosummary:: :toctree: generated/ sgbmv sgemv sger ssbmv sspr sspr2 ssymv ssyr ssyr2 stbmv stpsv strmv strsv dgbmv dgemv dger dsbmv dspr dspr2 dsymv dsyr dsyr2 dtbmv dtpsv dtrmv dtrsv cgbmv cgemv cgerc cgeru chbmv chemv cher cher2 chpmv chpr chpr2 ctbmv ctbsv ctpmv ctpsv ctrmv ctrsv csyr zgbmv zgemv zgerc zgeru zhbmv zhemv zher zher2 zhpmv zhpr zhpr2 ztbmv ztbsv ztpmv ztrmv ztrsv zsyr BLAS Level 3 functions ---------------------- .. autosummary:: :toctree: generated/ sgemm ssymm ssyr2k ssyrk strmm strsm dgemm dsymm dsyr2k dsyrk dtrmm dtrsm cgemm chemm cher2k cherk csymm csyr2k csyrk ctrmm ctrsm zgemm zhemm zher2k zherk zsymm zsyr2k zsyrk ztrmm ztrsm """ # # Author: Pearu Peterson, March 2002 # refactoring by Fabian Pedregosa, March 2010 # from __future__ import division, print_function, absolute_import __all__ = ['get_blas_funcs', 'find_best_blas_type'] import numpy as _np from scipy.linalg import _fblas try: from scipy.linalg import _cblas except ImportError: _cblas = None # Expose all functions (only fblas --- cblas is an implementation detail) empty_module = None from scipy.linalg._fblas import * del empty_module # 'd' will be default for 'i',.. _type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z', 'G': 'z'} # some convenience alias for complex functions _blas_alias = {'cnrm2': 'scnrm2', 'znrm2': 'dznrm2', 'cdot': 'cdotc', 'zdot': 'zdotc', 'cger': 'cgerc', 'zger': 'zgerc', 'sdotc': 'sdot', 'sdotu': 'sdot', 'ddotc': 'ddot', 'ddotu': 'ddot'} def find_best_blas_type(arrays=(), dtype=None): """Find best-matching BLAS/LAPACK type. Arrays are used to determine the optimal prefix of BLAS routines. Parameters ---------- arrays : sequence of ndarrays, optional Arrays can be given to determine optimal prefix of BLAS routines. If not given, double-precision routines will be used, otherwise the most generic type in arrays will be used. dtype : str or dtype, optional Data-type specifier. Not used if `arrays` is non-empty. 
Returns ------- prefix : str BLAS/LAPACK prefix character. dtype : dtype Inferred Numpy data type. prefer_fortran : bool Whether to prefer Fortran order routines over C order. Examples -------- >>> import scipy.linalg.blas as bla >>> a = np.random.rand(10,15) >>> b = np.asfortranarray(a) # Change the memory layout order >>> bla.find_best_blas_type((a,)) ('d', dtype('float64'), False) >>> bla.find_best_blas_type((a*1j,)) ('z', dtype('complex128'), False) >>> bla.find_best_blas_type((b,)) ('d', dtype('float64'), True) """ dtype = _np.dtype(dtype) prefer_fortran = False if arrays: # use the most generic type in arrays dtypes = [ar.dtype for ar in arrays] dtype = _np.find_common_type(dtypes, ()) try: index = dtypes.index(dtype) except ValueError: index = 0 if arrays[index].flags['FORTRAN']: # prefer Fortran for leading array with column major order prefer_fortran = True prefix = _type_conv.get(dtype.char, 'd') if dtype.char == 'G': # complex256 -> complex128 (i.e., C long double -> C double) dtype = _np.dtype('D') elif dtype.char not in 'fdFD': dtype = _np.dtype('d') return prefix, dtype, prefer_fortran def _get_funcs(names, arrays, dtype, lib_name, fmodule, cmodule, fmodule_name, cmodule_name, alias): """ Return available BLAS/LAPACK functions. Used also in lapack.py. See get_blas_funcs for docstring. 
""" funcs = [] unpack = False dtype = _np.dtype(dtype) module1 = (cmodule, cmodule_name) module2 = (fmodule, fmodule_name) if isinstance(names, str): names = (names,) unpack = True prefix, dtype, prefer_fortran = find_best_blas_type(arrays, dtype) if prefer_fortran: module1, module2 = module2, module1 for i, name in enumerate(names): func_name = prefix + name func_name = alias.get(func_name, func_name) func = getattr(module1[0], func_name, None) module_name = module1[1] if func is None: func = getattr(module2[0], func_name, None) module_name = module2[1] if func is None: raise ValueError( '%s function %s could not be found' % (lib_name, func_name)) func.module_name, func.typecode = module_name, prefix func.dtype = dtype func.prefix = prefix # Backward compatibility funcs.append(func) if unpack: return funcs[0] else: return funcs def get_blas_funcs(names, arrays=(), dtype=None): """Return available BLAS function objects from names. Arrays are used to determine the optimal prefix of BLAS routines. Parameters ---------- names : str or sequence of str Name(s) of BLAS functions without type prefix. arrays : sequence of ndarrays, optional Arrays can be given to determine optimal prefix of BLAS routines. If not given, double-precision routines will be used, otherwise the most generic type in arrays will be used. dtype : str or dtype, optional Data-type specifier. Not used if `arrays` is non-empty. Returns ------- funcs : list List containing the found function(s). Notes ----- This routine automatically chooses between Fortran/C interfaces. Fortran code is used whenever possible for arrays with column major order. In all other cases, C code is preferred. In BLAS, the naming convention is that all functions start with a type prefix, which depends on the type of the principal matrix. These can be one of {'s', 'd', 'c', 'z'} for the numpy types {float32, float64, complex64, complex128} respectively. 
The code and the dtype are stored in attributes `typecode` and `dtype` of the returned functions. Examples -------- >>> import scipy.linalg as LA >>> a = np.random.rand(3,2) >>> x_gemv = LA.get_blas_funcs('gemv', (a,)) >>> x_gemv.typecode 'd' >>> x_gemv = LA.get_blas_funcs('gemv',(a*1j,)) >>> x_gemv.typecode 'z' """ return _get_funcs(names, arrays, dtype, "BLAS", _fblas, _cblas, "fblas", "cblas", _blas_alias)
8,678
20.916667
76
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/special_matrices.py
from __future__ import division, print_function, absolute_import import math import numpy as np from scipy._lib.six import xrange from scipy._lib.six import string_types from numpy.lib.stride_tricks import as_strided __all__ = ['tri', 'tril', 'triu', 'toeplitz', 'circulant', 'hankel', 'hadamard', 'leslie', 'kron', 'block_diag', 'companion', 'helmert', 'hilbert', 'invhilbert', 'pascal', 'invpascal', 'dft'] #----------------------------------------------------------------------------- # matrix construction functions #----------------------------------------------------------------------------- # # *Note*: tri{,u,l} is implemented in numpy, but an important bug was fixed in # 2.0.0.dev-1af2f3, the following tri{,u,l} definitions are here for backwards # compatibility. def tri(N, M=None, k=0, dtype=None): """ Construct (N, M) matrix filled with ones at and below the k-th diagonal. The matrix has A[i,j] == 1 for i <= j + k Parameters ---------- N : int The size of the first dimension of the matrix. M : int or None, optional The size of the second dimension of the matrix. If `M` is None, `M = N` is assumed. k : int, optional Number of subdiagonal below which matrix is filled with ones. `k` = 0 is the main diagonal, `k` < 0 subdiagonal and `k` > 0 superdiagonal. dtype : dtype, optional Data type of the matrix. Returns ------- tri : (N, M) ndarray Tri matrix. Examples -------- >>> from scipy.linalg import tri >>> tri(3, 5, 2, dtype=int) array([[1, 1, 1, 0, 0], [1, 1, 1, 1, 0], [1, 1, 1, 1, 1]]) >>> tri(3, 5, -1, dtype=int) array([[0, 0, 0, 0, 0], [1, 0, 0, 0, 0], [1, 1, 0, 0, 0]]) """ if M is None: M = N if isinstance(M, string_types): #pearu: any objections to remove this feature? # As tri(N,'d') is equivalent to tri(N,dtype='d') dtype = M M = N m = np.greater_equal.outer(np.arange(k, N+k), np.arange(M)) if dtype is None: return m else: return m.astype(dtype) def tril(m, k=0): """ Make a copy of a matrix with elements above the k-th diagonal zeroed. 
Parameters ---------- m : array_like Matrix whose elements to return k : int, optional Diagonal above which to zero elements. `k` == 0 is the main diagonal, `k` < 0 subdiagonal and `k` > 0 superdiagonal. Returns ------- tril : ndarray Return is the same shape and type as `m`. Examples -------- >>> from scipy.linalg import tril >>> tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1) array([[ 0, 0, 0], [ 4, 0, 0], [ 7, 8, 0], [10, 11, 12]]) """ m = np.asarray(m) out = tri(m.shape[0], m.shape[1], k=k, dtype=m.dtype.char) * m return out def triu(m, k=0): """ Make a copy of a matrix with elements below the k-th diagonal zeroed. Parameters ---------- m : array_like Matrix whose elements to return k : int, optional Diagonal below which to zero elements. `k` == 0 is the main diagonal, `k` < 0 subdiagonal and `k` > 0 superdiagonal. Returns ------- triu : ndarray Return matrix with zeroed elements below the k-th diagonal and has same shape and type as `m`. Examples -------- >>> from scipy.linalg import triu >>> triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1) array([[ 1, 2, 3], [ 4, 5, 6], [ 0, 8, 9], [ 0, 0, 12]]) """ m = np.asarray(m) out = (1 - tri(m.shape[0], m.shape[1], k - 1, m.dtype.char)) * m return out def toeplitz(c, r=None): """ Construct a Toeplitz matrix. The Toeplitz matrix has constant diagonals, with c as its first column and r as its first row. If r is not given, ``r == conjugate(c)`` is assumed. Parameters ---------- c : array_like First column of the matrix. Whatever the actual shape of `c`, it will be converted to a 1-D array. r : array_like, optional First row of the matrix. If None, ``r = conjugate(c)`` is assumed; in this case, if c[0] is real, the result is a Hermitian matrix. r[0] is ignored; the first row of the returned matrix is ``[c[0], r[1:]]``. Whatever the actual shape of `r`, it will be converted to a 1-D array. Returns ------- A : (len(c), len(r)) ndarray The Toeplitz matrix. Dtype is the same as ``(c[0] + r[0]).dtype``. 
See Also -------- circulant : circulant matrix hankel : Hankel matrix solve_toeplitz : Solve a Toeplitz system. Notes ----- The behavior when `c` or `r` is a scalar, or when `c` is complex and `r` is None, was changed in version 0.8.0. The behavior in previous versions was undocumented and is no longer supported. Examples -------- >>> from scipy.linalg import toeplitz >>> toeplitz([1,2,3], [1,4,5,6]) array([[1, 4, 5, 6], [2, 1, 4, 5], [3, 2, 1, 4]]) >>> toeplitz([1.0, 2+3j, 4-1j]) array([[ 1.+0.j, 2.-3.j, 4.+1.j], [ 2.+3.j, 1.+0.j, 2.-3.j], [ 4.-1.j, 2.+3.j, 1.+0.j]]) """ c = np.asarray(c).ravel() if r is None: r = c.conjugate() else: r = np.asarray(r).ravel() # Form a 1D array containing a reversed c followed by r[1:] that could be # strided to give us toeplitz matrix. vals = np.concatenate((c[::-1], r[1:])) out_shp = len(c), len(r) n = vals.strides[0] return as_strided(vals[len(c)-1:], shape=out_shp, strides=(-n, n)).copy() def circulant(c): """ Construct a circulant matrix. Parameters ---------- c : (N,) array_like 1-D array, the first column of the matrix. Returns ------- A : (N, N) ndarray A circulant matrix whose first column is `c`. See Also -------- toeplitz : Toeplitz matrix hankel : Hankel matrix solve_circulant : Solve a circulant system. Notes ----- .. versionadded:: 0.8.0 Examples -------- >>> from scipy.linalg import circulant >>> circulant([1, 2, 3]) array([[1, 3, 2], [2, 1, 3], [3, 2, 1]]) """ c = np.asarray(c).ravel() # Form an extended array that could be strided to give circulant version c_ext = np.concatenate((c[::-1], c[:0:-1])) L = len(c) n = c_ext.strides[0] return as_strided(c_ext[L-1:], shape=(L, L), strides=(-n, n)).copy() def hankel(c, r=None): """ Construct a Hankel matrix. The Hankel matrix has constant anti-diagonals, with `c` as its first column and `r` as its last row. If `r` is not given, then `r = zeros_like(c)` is assumed. Parameters ---------- c : array_like First column of the matrix. 
Whatever the actual shape of `c`, it will be converted to a 1-D array. r : array_like, optional Last row of the matrix. If None, ``r = zeros_like(c)`` is assumed. r[0] is ignored; the last row of the returned matrix is ``[c[-1], r[1:]]``. Whatever the actual shape of `r`, it will be converted to a 1-D array. Returns ------- A : (len(c), len(r)) ndarray The Hankel matrix. Dtype is the same as ``(c[0] + r[0]).dtype``. See Also -------- toeplitz : Toeplitz matrix circulant : circulant matrix Examples -------- >>> from scipy.linalg import hankel >>> hankel([1, 17, 99]) array([[ 1, 17, 99], [17, 99, 0], [99, 0, 0]]) >>> hankel([1,2,3,4], [4,7,7,8,9]) array([[1, 2, 3, 4, 7], [2, 3, 4, 7, 7], [3, 4, 7, 7, 8], [4, 7, 7, 8, 9]]) """ c = np.asarray(c).ravel() if r is None: r = np.zeros_like(c) else: r = np.asarray(r).ravel() # Form a 1D array of values to be used in the matrix, containing `c` # followed by r[1:]. vals = np.concatenate((c, r[1:])) # Stride on concatenated array to get hankel matrix out_shp = len(c), len(r) n = vals.strides[0] return as_strided(vals, shape=out_shp, strides=(n, n)).copy() def hadamard(n, dtype=int): """ Construct a Hadamard matrix. Constructs an n-by-n Hadamard matrix, using Sylvester's construction. `n` must be a power of 2. Parameters ---------- n : int The order of the matrix. `n` must be a power of 2. dtype : dtype, optional The data type of the array to be constructed. Returns ------- H : (n, n) ndarray The Hadamard matrix. Notes ----- .. versionadded:: 0.8.0 Examples -------- >>> from scipy.linalg import hadamard >>> hadamard(2, dtype=complex) array([[ 1.+0.j, 1.+0.j], [ 1.+0.j, -1.-0.j]]) >>> hadamard(4) array([[ 1, 1, 1, 1], [ 1, -1, 1, -1], [ 1, 1, -1, -1], [ 1, -1, -1, 1]]) """ # This function is a slightly modified version of the # function contributed by Ivo in ticket #675. 
if n < 1: lg2 = 0 else: lg2 = int(math.log(n, 2)) if 2 ** lg2 != n: raise ValueError("n must be an positive integer, and n must be " "a power of 2") H = np.array([[1]], dtype=dtype) # Sylvester's construction for i in range(0, lg2): H = np.vstack((np.hstack((H, H)), np.hstack((H, -H)))) return H def leslie(f, s): """ Create a Leslie matrix. Given the length n array of fecundity coefficients `f` and the length n-1 array of survival coefficients `s`, return the associated Leslie matrix. Parameters ---------- f : (N,) array_like The "fecundity" coefficients. s : (N-1,) array_like The "survival" coefficients, has to be 1-D. The length of `s` must be one less than the length of `f`, and it must be at least 1. Returns ------- L : (N, N) ndarray The array is zero except for the first row, which is `f`, and the first sub-diagonal, which is `s`. The data-type of the array will be the data-type of ``f[0]+s[0]``. Notes ----- .. versionadded:: 0.8.0 The Leslie matrix is used to model discrete-time, age-structured population growth [1]_ [2]_. In a population with `n` age classes, two sets of parameters define a Leslie matrix: the `n` "fecundity coefficients", which give the number of offspring per-capita produced by each age class, and the `n` - 1 "survival coefficients", which give the per-capita survival rate of each age class. References ---------- .. [1] P. H. Leslie, On the use of matrices in certain population mathematics, Biometrika, Vol. 33, No. 3, 183--212 (Nov. 1945) .. [2] P. H. Leslie, Some further notes on the use of matrices in population mathematics, Biometrika, Vol. 35, No. 3/4, 213--245 (Dec. 1948) Examples -------- >>> from scipy.linalg import leslie >>> leslie([0.1, 2.0, 1.0, 0.1], [0.2, 0.8, 0.7]) array([[ 0.1, 2. , 1. , 0.1], [ 0.2, 0. , 0. , 0. ], [ 0. , 0.8, 0. , 0. ], [ 0. , 0. , 0.7, 0. ]]) """ f = np.atleast_1d(f) s = np.atleast_1d(s) if f.ndim != 1: raise ValueError("Incorrect shape for f. 
f must be one-dimensional") if s.ndim != 1: raise ValueError("Incorrect shape for s. s must be one-dimensional") if f.size != s.size + 1: raise ValueError("Incorrect lengths for f and s. The length" " of s must be one less than the length of f.") if s.size == 0: raise ValueError("The length of s must be at least 1.") tmp = f[0] + s[0] n = f.size a = np.zeros((n, n), dtype=tmp.dtype) a[0] = f a[list(range(1, n)), list(range(0, n - 1))] = s return a def kron(a, b): """ Kronecker product. The result is the block matrix:: a[0,0]*b a[0,1]*b ... a[0,-1]*b a[1,0]*b a[1,1]*b ... a[1,-1]*b ... a[-1,0]*b a[-1,1]*b ... a[-1,-1]*b Parameters ---------- a : (M, N) ndarray Input array b : (P, Q) ndarray Input array Returns ------- A : (M*P, N*Q) ndarray Kronecker product of `a` and `b`. Examples -------- >>> from numpy import array >>> from scipy.linalg import kron >>> kron(array([[1,2],[3,4]]), array([[1,1,1]])) array([[1, 1, 1, 2, 2, 2], [3, 3, 3, 4, 4, 4]]) """ if not a.flags['CONTIGUOUS']: a = np.reshape(a, a.shape) if not b.flags['CONTIGUOUS']: b = np.reshape(b, b.shape) o = np.outer(a, b) o = o.reshape(a.shape + b.shape) return np.concatenate(np.concatenate(o, axis=1), axis=1) def block_diag(*arrs): """ Create a block diagonal matrix from provided arrays. Given the inputs `A`, `B` and `C`, the output will have these arrays arranged on the diagonal:: [[A, 0, 0], [0, B, 0], [0, 0, C]] Parameters ---------- A, B, C, ... : array_like, up to 2-D Input arrays. A 1-D array or array_like sequence of length `n` is treated as a 2-D array with shape ``(1,n)``. Returns ------- D : ndarray Array with `A`, `B`, `C`, ... on the diagonal. `D` has the same dtype as `A`. Notes ----- If all the input arrays are square, the output is known as a block diagonal matrix. Empty sequences (i.e., array-likes of zero size) will not be ignored. Noteworthy, both [] and [[]] are treated as matrices with shape ``(1,0)``. Examples -------- >>> from scipy.linalg import block_diag >>> A = [[1, 0], ... 
[0, 1]] >>> B = [[3, 4, 5], ... [6, 7, 8]] >>> C = [[7]] >>> P = np.zeros((2, 0), dtype='int32') >>> block_diag(A, B, C) array([[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], [0, 0, 3, 4, 5, 0], [0, 0, 6, 7, 8, 0], [0, 0, 0, 0, 0, 7]]) >>> block_diag(A, P, B, C) array([[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 3, 4, 5, 0], [0, 0, 6, 7, 8, 0], [0, 0, 0, 0, 0, 7]]) >>> block_diag(1.0, [2, 3], [[4, 5], [6, 7]]) array([[ 1., 0., 0., 0., 0.], [ 0., 2., 3., 0., 0.], [ 0., 0., 0., 4., 5.], [ 0., 0., 0., 6., 7.]]) """ if arrs == (): arrs = ([],) arrs = [np.atleast_2d(a) for a in arrs] bad_args = [k for k in range(len(arrs)) if arrs[k].ndim > 2] if bad_args: raise ValueError("arguments in the following positions have dimension " "greater than 2: %s" % bad_args) shapes = np.array([a.shape for a in arrs]) out_dtype = np.find_common_type([arr.dtype for arr in arrs], []) out = np.zeros(np.sum(shapes, axis=0), dtype=out_dtype) r, c = 0, 0 for i, (rr, cc) in enumerate(shapes): out[r:r + rr, c:c + cc] = arrs[i] r += rr c += cc return out def companion(a): """ Create a companion matrix. Create the companion matrix [1]_ associated with the polynomial whose coefficients are given in `a`. Parameters ---------- a : (N,) array_like 1-D array of polynomial coefficients. The length of `a` must be at least two, and ``a[0]`` must not be zero. Returns ------- c : (N-1, N-1) ndarray The first row of `c` is ``-a[1:]/a[0]``, and the first sub-diagonal is all ones. The data-type of the array is the same as the data-type of ``1.0*a[0]``. Raises ------ ValueError If any of the following are true: a) ``a.ndim != 1``; b) ``a.size < 2``; c) ``a[0] == 0``. Notes ----- .. versionadded:: 0.8.0 References ---------- .. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK: Cambridge University Press, 1999, pp. 146-7. 
Examples -------- >>> from scipy.linalg import companion >>> companion([1, -10, 31, -30]) array([[ 10., -31., 30.], [ 1., 0., 0.], [ 0., 1., 0.]]) """ a = np.atleast_1d(a) if a.ndim != 1: raise ValueError("Incorrect shape for `a`. `a` must be " "one-dimensional.") if a.size < 2: raise ValueError("The length of `a` must be at least 2.") if a[0] == 0: raise ValueError("The first coefficient in `a` must not be zero.") first_row = -a[1:] / (1.0 * a[0]) n = a.size c = np.zeros((n - 1, n - 1), dtype=first_row.dtype) c[0] = first_row c[list(range(1, n - 1)), list(range(0, n - 2))] = 1 return c def helmert(n, full=False): """ Create a Helmert matrix of order `n`. This has applications in statistics, compositional or simplicial analysis, and in Aitchison geometry. Parameters ---------- n : int The size of the array to create. full : bool, optional If True the (n, n) ndarray will be returned. Otherwise the submatrix that does not include the first row will be returned. Default: False. Returns ------- M : ndarray The Helmert matrix. The shape is (n, n) or (n-1, n) depending on the `full` argument. Examples -------- >>> from scipy.linalg import helmert >>> helmert(5, full=True) array([[ 0.4472136 , 0.4472136 , 0.4472136 , 0.4472136 , 0.4472136 ], [ 0.70710678, -0.70710678, 0. , 0. , 0. ], [ 0.40824829, 0.40824829, -0.81649658, 0. , 0. ], [ 0.28867513, 0.28867513, 0.28867513, -0.8660254 , 0. ], [ 0.2236068 , 0.2236068 , 0.2236068 , 0.2236068 , -0.89442719]]) """ H = np.tril(np.ones((n, n)), -1) - np.diag(np.arange(n)) d = np.arange(n) * np.arange(1, n+1) H[0] = 1 d[0] = n H_full = H / np.sqrt(d)[:, np.newaxis] if full: return H_full else: return H_full[1:] def hilbert(n): """ Create a Hilbert matrix of order `n`. Returns the `n` by `n` array with entries `h[i,j] = 1 / (i + j + 1)`. Parameters ---------- n : int The size of the array to create. Returns ------- h : (n, n) ndarray The Hilbert matrix. See Also -------- invhilbert : Compute the inverse of a Hilbert matrix. 
Notes ----- .. versionadded:: 0.10.0 Examples -------- >>> from scipy.linalg import hilbert >>> hilbert(3) array([[ 1. , 0.5 , 0.33333333], [ 0.5 , 0.33333333, 0.25 ], [ 0.33333333, 0.25 , 0.2 ]]) """ values = 1.0 / (1.0 + np.arange(2 * n - 1)) h = hankel(values[:n], r=values[n - 1:]) return h def invhilbert(n, exact=False): """ Compute the inverse of the Hilbert matrix of order `n`. The entries in the inverse of a Hilbert matrix are integers. When `n` is greater than 14, some entries in the inverse exceed the upper limit of 64 bit integers. The `exact` argument provides two options for dealing with these large integers. Parameters ---------- n : int The order of the Hilbert matrix. exact : bool, optional If False, the data type of the array that is returned is np.float64, and the array is an approximation of the inverse. If True, the array is the exact integer inverse array. To represent the exact inverse when n > 14, the returned array is an object array of long integers. For n <= 14, the exact inverse is returned as an array with data type np.int64. Returns ------- invh : (n, n) ndarray The data type of the array is np.float64 if `exact` is False. If `exact` is True, the data type is either np.int64 (for n <= 14) or object (for n > 14). In the latter case, the objects in the array will be long integers. See Also -------- hilbert : Create a Hilbert matrix. Notes ----- .. 
versionadded:: 0.10.0 Examples -------- >>> from scipy.linalg import invhilbert >>> invhilbert(4) array([[ 16., -120., 240., -140.], [ -120., 1200., -2700., 1680.], [ 240., -2700., 6480., -4200.], [ -140., 1680., -4200., 2800.]]) >>> invhilbert(4, exact=True) array([[ 16, -120, 240, -140], [ -120, 1200, -2700, 1680], [ 240, -2700, 6480, -4200], [ -140, 1680, -4200, 2800]], dtype=int64) >>> invhilbert(16)[7,7] 4.2475099528537506e+19 >>> invhilbert(16, exact=True)[7,7] 42475099528537378560L """ from scipy.special import comb if exact: if n > 14: dtype = object else: dtype = np.int64 else: dtype = np.float64 invh = np.empty((n, n), dtype=dtype) for i in xrange(n): for j in xrange(0, i + 1): s = i + j invh[i, j] = ((-1) ** s * (s + 1) * comb(n + i, n - j - 1, exact) * comb(n + j, n - i - 1, exact) * comb(s, i, exact) ** 2) if i != j: invh[j, i] = invh[i, j] return invh def pascal(n, kind='symmetric', exact=True): """ Returns the n x n Pascal matrix. The Pascal matrix is a matrix containing the binomial coefficients as its elements. Parameters ---------- n : int The size of the matrix to create; that is, the result is an n x n matrix. kind : str, optional Must be one of 'symmetric', 'lower', or 'upper'. Default is 'symmetric'. exact : bool, optional If `exact` is True, the result is either an array of type numpy.uint64 (if n < 35) or an object array of Python long integers. If `exact` is False, the coefficients in the matrix are computed using `scipy.special.comb` with `exact=False`. The result will be a floating point array, and the values in the array will not be the exact coefficients, but this version is much faster than `exact=True`. Returns ------- p : (n, n) ndarray The Pascal matrix. See Also -------- invpascal Notes ----- See http://en.wikipedia.org/wiki/Pascal_matrix for more information about Pascal matrices. .. 
versionadded:: 0.11.0 Examples -------- >>> from scipy.linalg import pascal >>> pascal(4) array([[ 1, 1, 1, 1], [ 1, 2, 3, 4], [ 1, 3, 6, 10], [ 1, 4, 10, 20]], dtype=uint64) >>> pascal(4, kind='lower') array([[1, 0, 0, 0], [1, 1, 0, 0], [1, 2, 1, 0], [1, 3, 3, 1]], dtype=uint64) >>> pascal(50)[-1, -1] 25477612258980856902730428600L >>> from scipy.special import comb >>> comb(98, 49, exact=True) 25477612258980856902730428600L """ from scipy.special import comb if kind not in ['symmetric', 'lower', 'upper']: raise ValueError("kind must be 'symmetric', 'lower', or 'upper'") if exact: if n >= 35: L_n = np.empty((n, n), dtype=object) L_n.fill(0) else: L_n = np.zeros((n, n), dtype=np.uint64) for i in range(n): for j in range(i + 1): L_n[i, j] = comb(i, j, exact=True) else: L_n = comb(*np.ogrid[:n, :n]) if kind == 'lower': p = L_n elif kind == 'upper': p = L_n.T else: p = np.dot(L_n, L_n.T) return p def invpascal(n, kind='symmetric', exact=True): """ Returns the inverse of the n x n Pascal matrix. The Pascal matrix is a matrix containing the binomial coefficients as its elements. Parameters ---------- n : int The size of the matrix to create; that is, the result is an n x n matrix. kind : str, optional Must be one of 'symmetric', 'lower', or 'upper'. Default is 'symmetric'. exact : bool, optional If `exact` is True, the result is either an array of type `numpy.int64` (if `n` <= 35) or an object array of Python integers. If `exact` is False, the coefficients in the matrix are computed using `scipy.special.comb` with `exact=False`. The result will be a floating point array, and for large `n`, the values in the array will not be the exact coefficients. Returns ------- invp : (n, n) ndarray The inverse of the Pascal matrix. See Also -------- pascal Notes ----- .. versionadded:: 0.16.0 References ---------- .. [1] "Pascal matrix", http://en.wikipedia.org/wiki/Pascal_matrix .. [2] Cohen, A. M., "The inverse of a Pascal matrix", Mathematical Gazette, 59(408), pp. 111-112, 1975. 
Examples -------- >>> from scipy.linalg import invpascal, pascal >>> invp = invpascal(5) >>> invp array([[ 5, -10, 10, -5, 1], [-10, 30, -35, 19, -4], [ 10, -35, 46, -27, 6], [ -5, 19, -27, 17, -4], [ 1, -4, 6, -4, 1]]) >>> p = pascal(5) >>> p.dot(invp) array([[ 1., 0., 0., 0., 0.], [ 0., 1., 0., 0., 0.], [ 0., 0., 1., 0., 0.], [ 0., 0., 0., 1., 0.], [ 0., 0., 0., 0., 1.]]) An example of the use of `kind` and `exact`: >>> invpascal(5, kind='lower', exact=False) array([[ 1., -0., 0., -0., 0.], [-1., 1., -0., 0., -0.], [ 1., -2., 1., -0., 0.], [-1., 3., -3., 1., -0.], [ 1., -4., 6., -4., 1.]]) """ from scipy.special import comb if kind not in ['symmetric', 'lower', 'upper']: raise ValueError("'kind' must be 'symmetric', 'lower' or 'upper'.") if kind == 'symmetric': if exact: if n > 34: dt = object else: dt = np.int64 else: dt = np.float64 invp = np.empty((n, n), dtype=dt) for i in range(n): for j in range(0, i + 1): v = 0 for k in range(n - i): v += comb(i + k, k, exact=exact) * comb(i + k, i + k - j, exact=exact) invp[i, j] = (-1)**(i - j) * v if i != j: invp[j, i] = invp[i, j] else: # For the 'lower' and 'upper' cases, we computer the inverse by # changing the sign of every other diagonal of the pascal matrix. invp = pascal(n, kind=kind, exact=exact) if invp.dtype == np.uint64: # This cast from np.uint64 to int64 OK, because if `kind` is not # "symmetric", the values in invp are all much less than 2**63. invp = invp.view(np.int64) # The toeplitz matrix has alternating bands of 1 and -1. invp *= toeplitz((-1)**np.arange(n)).astype(invp.dtype) return invp def dft(n, scale=None): """ Discrete Fourier transform matrix. Create the matrix that computes the discrete Fourier transform of a sequence [1]_. The n-th primitive root of unity used to generate the matrix is exp(-2*pi*i/n), where i = sqrt(-1). Parameters ---------- n : int Size the matrix to create. scale : str, optional Must be None, 'sqrtn', or 'n'. If `scale` is 'sqrtn', the matrix is divided by `sqrt(n)`. 
If `scale` is 'n', the matrix is divided by `n`. If `scale` is None (the default), the matrix is not normalized, and the return value is simply the Vandermonde matrix of the roots of unity. Returns ------- m : (n, n) ndarray The DFT matrix. Notes ----- When `scale` is None, multiplying a vector by the matrix returned by `dft` is mathematically equivalent to (but much less efficient than) the calculation performed by `scipy.fftpack.fft`. .. versionadded:: 0.14.0 References ---------- .. [1] "DFT matrix", http://en.wikipedia.org/wiki/DFT_matrix Examples -------- >>> from scipy.linalg import dft >>> np.set_printoptions(precision=5, suppress=True) >>> x = np.array([1, 2, 3, 0, 3, 2, 1, 0]) >>> m = dft(8) >>> m.dot(x) # Compute the DFT of x array([ 12.+0.j, -2.-2.j, 0.-4.j, -2.+2.j, 4.+0.j, -2.-2.j, -0.+4.j, -2.+2.j]) Verify that ``m.dot(x)`` is the same as ``fft(x)``. >>> from scipy.fftpack import fft >>> fft(x) # Same result as m.dot(x) array([ 12.+0.j, -2.-2.j, 0.-4.j, -2.+2.j, 4.+0.j, -2.-2.j, 0.+4.j, -2.+2.j]) """ if scale not in [None, 'sqrtn', 'n']: raise ValueError("scale must be None, 'sqrtn', or 'n'; " "%r is not valid." % (scale,)) omegas = np.exp(-2j * np.pi * np.arange(n) / n).reshape(-1, 1) m = omegas ** np.arange(n) if scale == 'sqrtn': m /= math.sqrt(n) elif scale == 'n': m /= n return m
29,660
27.547642
80
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/decomp_cholesky.py
"""Cholesky decomposition functions.""" from __future__ import division, print_function, absolute_import from numpy import asarray_chkfinite, asarray, atleast_2d # Local imports from .misc import LinAlgError, _datacopied from .lapack import get_lapack_funcs __all__ = ['cholesky', 'cho_factor', 'cho_solve', 'cholesky_banded', 'cho_solve_banded'] def _cholesky(a, lower=False, overwrite_a=False, clean=True, check_finite=True): """Common code for cholesky() and cho_factor().""" a1 = asarray_chkfinite(a) if check_finite else asarray(a) a1 = atleast_2d(a1) # Dimension check if a1.ndim != 2: raise ValueError('Input array needs to be 2 dimensional but received ' 'a {}d-array.'.format(a1.ndim)) # Squareness check if a1.shape[0] != a1.shape[1]: raise ValueError('Input array is expected to be square but has ' 'the shape: {}.'.format(a1.shape)) # Quick return for square empty array if a1.size == 0: return a1.copy(), lower overwrite_a = overwrite_a or _datacopied(a1, a) potrf, = get_lapack_funcs(('potrf',), (a1,)) c, info = potrf(a1, lower=lower, overwrite_a=overwrite_a, clean=clean) if info > 0: raise LinAlgError("%d-th leading minor of the array is not positive " "definite" % info) if info < 0: raise ValueError('LAPACK reported an illegal value in {}-th argument' 'on entry to "POTRF".'.format(-info)) return c, lower def cholesky(a, lower=False, overwrite_a=False, check_finite=True): """ Compute the Cholesky decomposition of a matrix. Returns the Cholesky decomposition, :math:`A = L L^*` or :math:`A = U^* U` of a Hermitian positive-definite matrix A. Parameters ---------- a : (M, M) array_like Matrix to be decomposed lower : bool, optional Whether to compute the upper or lower triangular Cholesky factorization. Default is upper-triangular. overwrite_a : bool, optional Whether to overwrite data in `a` (may improve performance). check_finite : bool, optional Whether to check that the input matrix contains only finite numbers. 
Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- c : (M, M) ndarray Upper- or lower-triangular Cholesky factor of `a`. Raises ------ LinAlgError : if decomposition fails. Examples -------- >>> from scipy.linalg import cholesky >>> a = np.array([[1,-2j],[2j,5]]) >>> L = cholesky(a, lower=True) >>> L array([[ 1.+0.j, 0.+0.j], [ 0.+2.j, 1.+0.j]]) >>> L @ L.T.conj() array([[ 1.+0.j, 0.-2.j], [ 0.+2.j, 5.+0.j]]) """ c, lower = _cholesky(a, lower=lower, overwrite_a=overwrite_a, clean=True, check_finite=check_finite) return c def cho_factor(a, lower=False, overwrite_a=False, check_finite=True): """ Compute the Cholesky decomposition of a matrix, to use in cho_solve Returns a matrix containing the Cholesky decomposition, ``A = L L*`` or ``A = U* U`` of a Hermitian positive-definite matrix `a`. The return value can be directly used as the first parameter to cho_solve. .. warning:: The returned matrix also contains random data in the entries not used by the Cholesky decomposition. If you need to zero these entries, use the function `cholesky` instead. Parameters ---------- a : (M, M) array_like Matrix to be decomposed lower : bool, optional Whether to compute the upper or lower triangular Cholesky factorization (Default: upper-triangular) overwrite_a : bool, optional Whether to overwrite data in a (may improve performance) check_finite : bool, optional Whether to check that the input matrix contains only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- c : (M, M) ndarray Matrix whose upper or lower triangle contains the Cholesky factor of `a`. Other parts of the matrix contain random data. lower : bool Flag indicating whether the factor is in the lower or upper triangle Raises ------ LinAlgError Raised if decomposition fails. 
See also -------- cho_solve : Solve a linear set equations using the Cholesky factorization of a matrix. Examples -------- >>> from scipy.linalg import cho_factor >>> A = np.array([[9, 3, 1, 5], [3, 7, 5, 1], [1, 5, 9, 2], [5, 1, 2, 6]]) >>> c, low = cho_factor(A) >>> c array([[3. , 1. , 0.33333333, 1.66666667], [3. , 2.44948974, 1.90515869, -0.27216553], [1. , 5. , 2.29330749, 0.8559528 ], [5. , 1. , 2. , 1.55418563]]) >>> np.allclose(np.triu(c).T @ np. triu(c) - A, np.zeros((4, 4))) True """ c, lower = _cholesky(a, lower=lower, overwrite_a=overwrite_a, clean=False, check_finite=check_finite) return c, lower def cho_solve(c_and_lower, b, overwrite_b=False, check_finite=True): """Solve the linear equations A x = b, given the Cholesky factorization of A. Parameters ---------- (c, lower) : tuple, (array, bool) Cholesky factorization of a, as given by cho_factor b : array Right-hand side overwrite_b : bool, optional Whether to overwrite data in b (may improve performance) check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. 
Returns ------- x : array The solution to the system A x = b See also -------- cho_factor : Cholesky factorization of a matrix Examples -------- >>> from scipy.linalg import cho_factor, cho_solve >>> A = np.array([[9, 3, 1, 5], [3, 7, 5, 1], [1, 5, 9, 2], [5, 1, 2, 6]]) >>> c, low = cho_factor(A) >>> x = cho_solve((c, low), [1, 1, 1, 1]) >>> np.allclose(A @ x - [1, 1, 1, 1], np.zeros(4)) True """ (c, lower) = c_and_lower if check_finite: b1 = asarray_chkfinite(b) c = asarray_chkfinite(c) else: b1 = asarray(b) c = asarray(c) if c.ndim != 2 or c.shape[0] != c.shape[1]: raise ValueError("The factored matrix c is not square.") if c.shape[1] != b1.shape[0]: raise ValueError("incompatible dimensions.") overwrite_b = overwrite_b or _datacopied(b1, b) potrs, = get_lapack_funcs(('potrs',), (c, b1)) x, info = potrs(c, b1, lower=lower, overwrite_b=overwrite_b) if info != 0: raise ValueError('illegal value in %d-th argument of internal potrs' % -info) return x def cholesky_banded(ab, overwrite_ab=False, lower=False, check_finite=True): """ Cholesky decompose a banded Hermitian positive-definite matrix The matrix a is stored in ab either in lower diagonal or upper diagonal ordered form:: ab[u + i - j, j] == a[i,j] (if upper form; i <= j) ab[ i - j, j] == a[i,j] (if lower form; i >= j) Example of ab (shape of a is (6,6), u=2):: upper form: * * a02 a13 a24 a35 * a01 a12 a23 a34 a45 a00 a11 a22 a33 a44 a55 lower form: a00 a11 a22 a33 a44 a55 a10 a21 a32 a43 a54 * a20 a31 a42 a53 * * Parameters ---------- ab : (u + 1, M) array_like Banded matrix overwrite_ab : bool, optional Discard data in ab (may enhance performance) lower : bool, optional Is the matrix in the lower form. (Default is upper form) check_finite : bool, optional Whether to check that the input matrix contains only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. 
Returns ------- c : (u + 1, M) ndarray Cholesky factorization of a, in the same banded format as ab Examples -------- >>> from scipy.linalg import cholesky_banded >>> from numpy import allclose, zeros, diag >>> Ab = np.array([[0, 0, 1j, 2, 3j], [0, -1, -2, 3, 4], [9, 8, 7, 6, 9]]) >>> A = np.diag(Ab[0,2:], k=2) + np.diag(Ab[1,1:], k=1) >>> A = A + A.conj().T + np.diag(Ab[2, :]) >>> c = cholesky_banded(Ab) >>> C = np.diag(c[0, 2:], k=2) + np.diag(c[1, 1:], k=1) + np.diag(c[2, :]) >>> np.allclose(C.conj().T @ C - A, np.zeros((5, 5))) True """ if check_finite: ab = asarray_chkfinite(ab) else: ab = asarray(ab) pbtrf, = get_lapack_funcs(('pbtrf',), (ab,)) c, info = pbtrf(ab, lower=lower, overwrite_ab=overwrite_ab) if info > 0: raise LinAlgError("%d-th leading minor not positive definite" % info) if info < 0: raise ValueError('illegal value in %d-th argument of internal pbtrf' % -info) return c def cho_solve_banded(cb_and_lower, b, overwrite_b=False, check_finite=True): """ Solve the linear equations ``A x = b``, given the Cholesky factorization of the banded hermitian ``A``. Parameters ---------- (cb, lower) : tuple, (ndarray, bool) `cb` is the Cholesky factorization of A, as given by cholesky_banded. `lower` must be the same value that was given to cholesky_banded. b : array_like Right-hand side overwrite_b : bool, optional If True, the function will overwrite the values in `b`. check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- x : array The solution to the system A x = b See also -------- cholesky_banded : Cholesky factorization of a banded matrix Notes ----- .. 
versionadded:: 0.8.0 Examples -------- >>> from scipy.linalg import cholesky_banded, cho_solve_banded >>> Ab = np.array([[0, 0, 1j, 2, 3j], [0, -1, -2, 3, 4], [9, 8, 7, 6, 9]]) >>> A = np.diag(Ab[0,2:], k=2) + np.diag(Ab[1,1:], k=1) >>> A = A + A.conj().T + np.diag(Ab[2, :]) >>> c = cholesky_banded(Ab) >>> x = cho_solve_banded((c, False), np.ones(5)) >>> np.allclose(A @ x - np.ones(5), np.zeros(5)) True """ (cb, lower) = cb_and_lower if check_finite: cb = asarray_chkfinite(cb) b = asarray_chkfinite(b) else: cb = asarray(cb) b = asarray(b) # Validate shapes. if cb.shape[-1] != b.shape[0]: raise ValueError("shapes of cb and b are not compatible.") pbtrs, = get_lapack_funcs(('pbtrs',), (cb, b)) x, info = pbtrs(cb, b, lower=lower, overwrite_b=overwrite_b) if info > 0: raise LinAlgError("%d-th leading minor not positive definite" % info) if info < 0: raise ValueError('illegal value in %d-th argument of internal pbtrs' % -info) return x
11,630
32.326648
81
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/basic.py
# # Author: Pearu Peterson, March 2002 # # w/ additions by Travis Oliphant, March 2002 # and Jake Vanderplas, August 2012 from __future__ import division, print_function, absolute_import from warnings import warn import numpy as np from numpy import atleast_1d, atleast_2d from .flinalg import get_flinalg_funcs from .lapack import get_lapack_funcs, _compute_lwork from .misc import LinAlgError, _datacopied, LinAlgWarning from .decomp import _asarray_validated from . import decomp, decomp_svd from ._solve_toeplitz import levinson __all__ = ['solve', 'solve_triangular', 'solveh_banded', 'solve_banded', 'solve_toeplitz', 'solve_circulant', 'inv', 'det', 'lstsq', 'pinv', 'pinv2', 'pinvh', 'matrix_balance'] # Linear equations def _solve_check(n, info, lamch=None, rcond=None): """ Check arguments during the different steps of the solution phase """ if info < 0: raise ValueError('LAPACK reported an illegal value in {}-th argument' '.'.format(-info)) elif 0 < info: raise LinAlgError('Matrix is singular.') if lamch is None: return E = lamch('E') if rcond < E: warn('scipy.linalg.solve\nIll-conditioned matrix detected. Result ' 'is not guaranteed to be accurate.\nReciprocal condition ' 'number{:.6e}'.format(rcond), LinAlgWarning, stacklevel=3) def solve(a, b, sym_pos=False, lower=False, overwrite_a=False, overwrite_b=False, debug=None, check_finite=True, assume_a='gen', transposed=False): """ Solves the linear equation set ``a * x = b`` for the unknown ``x`` for square ``a`` matrix. If the data matrix is known to be a particular type then supplying the corresponding string to ``assume_a`` key chooses the dedicated solver. The available options are =================== ======== generic matrix 'gen' symmetric 'sym' hermitian 'her' positive definite 'pos' =================== ======== If omitted, ``'gen'`` is the default structure. The datatype of the arrays define which solver is called regardless of the values. 
In other words, even when the complex array entries have precisely zero imaginary parts, the complex solver will be called based on the data type of the array. Parameters ---------- a : (N, N) array_like Square input data b : (N, NRHS) array_like Input data for the right hand side. sym_pos : bool, optional Assume `a` is symmetric and positive definite. This key is deprecated and assume_a = 'pos' keyword is recommended instead. The functionality is the same. It will be removed in the future. lower : bool, optional If True, only the data contained in the lower triangle of `a`. Default is to use upper triangle. (ignored for ``'gen'``) overwrite_a : bool, optional Allow overwriting data in `a` (may enhance performance). Default is False. overwrite_b : bool, optional Allow overwriting data in `b` (may enhance performance). Default is False. check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. assume_a : str, optional Valid entries are explained above. transposed: bool, optional If True, ``a^T x = b`` for real matrices, raises `NotImplementedError` for complex matrices (only for True). Returns ------- x : (N, NRHS) ndarray The solution array. Raises ------ ValueError If size mismatches detected or input a is not square. LinAlgError If the matrix is singular. LinAlgWarning If an ill-conditioned input a is detected. NotImplementedError If transposed is True and input a is a complex matrix. 
Examples -------- Given `a` and `b`, solve for `x`: >>> a = np.array([[3, 2, 0], [1, -1, 0], [0, 5, 1]]) >>> b = np.array([2, 4, -1]) >>> from scipy import linalg >>> x = linalg.solve(a, b) >>> x array([ 2., -2., 9.]) >>> np.dot(a, x) == b array([ True, True, True], dtype=bool) Notes ----- If the input b matrix is a 1D array with N elements, when supplied together with an NxN input a, it is assumed as a valid column vector despite the apparent size mismatch. This is compatible with the numpy.dot() behavior and the returned result is still 1D array. The generic, symmetric, hermitian and positive definite solutions are obtained via calling ?GESV, ?SYSV, ?HESV, and ?POSV routines of LAPACK respectively. """ # Flags for 1D or nD right hand side b_is_1D = False a1 = atleast_2d(_asarray_validated(a, check_finite=check_finite)) b1 = atleast_1d(_asarray_validated(b, check_finite=check_finite)) n = a1.shape[0] overwrite_a = overwrite_a or _datacopied(a1, a) overwrite_b = overwrite_b or _datacopied(b1, b) if a1.shape[0] != a1.shape[1]: raise ValueError('Input a needs to be a square matrix.') if n != b1.shape[0]: # Last chance to catch 1x1 scalar a and 1D b arrays if not (n == 1 and b1.size != 0): raise ValueError('Input b has to have same number of rows as ' 'input a') # accommodate empty arrays if b1.size == 0: return np.asfortranarray(b1.copy()) # regularize 1D b arrays to 2D if b1.ndim == 1: if n == 1: b1 = b1[None, :] else: b1 = b1[:, None] b_is_1D = True # Backwards compatibility - old keyword. if sym_pos: assume_a = 'pos' if assume_a not in ('gen', 'sym', 'her', 'pos'): raise ValueError('{} is not a recognized matrix structure' ''.format(assume_a)) # Deprecate keyword "debug" if debug is not None: warn('Use of the "debug" keyword is deprecated ' 'and this keyword will be removed in future ' 'versions of SciPy.', DeprecationWarning, stacklevel=2) # Get the correct lamch function. 
# The LAMCH functions only exists for S and D # So for complex values we have to convert to real/double. if a1.dtype.char in 'fF': # single precision lamch = get_lapack_funcs('lamch', dtype='f') else: lamch = get_lapack_funcs('lamch', dtype='d') # Currently we do not have the other forms of the norm calculators # lansy, lanpo, lanhe. # However, in any case they only reduce computations slightly... lange = get_lapack_funcs('lange', (a1,)) # Since the I-norm and 1-norm are the same for symmetric matrices # we can collect them all in this one call # Note however, that when issuing 'gen' and form!='none', then # the I-norm should be used if transposed: trans = 1 norm = 'I' if np.iscomplexobj(a1): raise NotImplementedError('scipy.linalg.solve can currently ' 'not solve a^T x = b or a^H x = b ' 'for complex matrices.') else: trans = 0 norm = '1' anorm = lange(norm, a1) # Generalized case 'gesv' if assume_a == 'gen': gecon, getrf, getrs = get_lapack_funcs(('gecon', 'getrf', 'getrs'), (a1, b1)) lu, ipvt, info = getrf(a1, overwrite_a=overwrite_a) _solve_check(n, info) x, info = getrs(lu, ipvt, b1, trans=trans, overwrite_b=overwrite_b) _solve_check(n, info) rcond, info = gecon(lu, anorm, norm=norm) # Hermitian case 'hesv' elif assume_a == 'her': hecon, hesv, hesv_lw = get_lapack_funcs(('hecon', 'hesv', 'hesv_lwork'), (a1, b1)) lwork = _compute_lwork(hesv_lw, n, lower) lu, ipvt, x, info = hesv(a1, b1, lwork=lwork, lower=lower, overwrite_a=overwrite_a, overwrite_b=overwrite_b) _solve_check(n, info) rcond, info = hecon(lu, ipvt, anorm) # Symmetric case 'sysv' elif assume_a == 'sym': sycon, sysv, sysv_lw = get_lapack_funcs(('sycon', 'sysv', 'sysv_lwork'), (a1, b1)) lwork = _compute_lwork(sysv_lw, n, lower) lu, ipvt, x, info = sysv(a1, b1, lwork=lwork, lower=lower, overwrite_a=overwrite_a, overwrite_b=overwrite_b) _solve_check(n, info) rcond, info = sycon(lu, ipvt, anorm) # Positive definite case 'posv' else: pocon, posv = get_lapack_funcs(('pocon', 'posv'), (a1, b1)) lu, x, info 
= posv(a1, b1, lower=lower, overwrite_a=overwrite_a, overwrite_b=overwrite_b) _solve_check(n, info) rcond, info = pocon(lu, anorm) _solve_check(n, info, lamch, rcond) if b_is_1D: x = x.ravel() return x def solve_triangular(a, b, trans=0, lower=False, unit_diagonal=False, overwrite_b=False, debug=None, check_finite=True): """ Solve the equation `a x = b` for `x`, assuming a is a triangular matrix. Parameters ---------- a : (M, M) array_like A triangular matrix b : (M,) or (M, N) array_like Right-hand side matrix in `a x = b` lower : bool, optional Use only data contained in the lower triangle of `a`. Default is to use upper triangle. trans : {0, 1, 2, 'N', 'T', 'C'}, optional Type of system to solve: ======== ========= trans system ======== ========= 0 or 'N' a x = b 1 or 'T' a^T x = b 2 or 'C' a^H x = b ======== ========= unit_diagonal : bool, optional If True, diagonal elements of `a` are assumed to be 1 and will not be referenced. overwrite_b : bool, optional Allow overwriting data in `b` (may enhance performance) check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- x : (M,) or (M, N) ndarray Solution to the system `a x = b`. Shape of return matches `b`. Raises ------ LinAlgError If `a` is singular Notes ----- .. 
versionadded:: 0.9.0 Examples -------- Solve the lower triangular system a x = b, where:: [3 0 0 0] [4] a = [2 1 0 0] b = [2] [1 0 1 0] [4] [1 1 1 1] [2] >>> from scipy.linalg import solve_triangular >>> a = np.array([[3, 0, 0, 0], [2, 1, 0, 0], [1, 0, 1, 0], [1, 1, 1, 1]]) >>> b = np.array([4, 2, 4, 2]) >>> x = solve_triangular(a, b, lower=True) >>> x array([ 1.33333333, -0.66666667, 2.66666667, -1.33333333]) >>> a.dot(x) # Check the result array([ 4., 2., 4., 2.]) """ # Deprecate keyword "debug" if debug is not None: warn('Use of the "debug" keyword is deprecated ' 'and this keyword will be removed in the future ' 'versions of SciPy.', DeprecationWarning, stacklevel=2) a1 = _asarray_validated(a, check_finite=check_finite) b1 = _asarray_validated(b, check_finite=check_finite) if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]: raise ValueError('expected square matrix') if a1.shape[0] != b1.shape[0]: raise ValueError('incompatible dimensions') overwrite_b = overwrite_b or _datacopied(b1, b) if debug: print('solve:overwrite_b=', overwrite_b) trans = {'N': 0, 'T': 1, 'C': 2}.get(trans, trans) trtrs, = get_lapack_funcs(('trtrs',), (a1, b1)) x, info = trtrs(a1, b1, overwrite_b=overwrite_b, lower=lower, trans=trans, unitdiag=unit_diagonal) if info == 0: return x if info > 0: raise LinAlgError("singular matrix: resolution failed at diagonal %d" % (info-1)) raise ValueError('illegal value in %d-th argument of internal trtrs' % (-info)) def solve_banded(l_and_u, ab, b, overwrite_ab=False, overwrite_b=False, debug=None, check_finite=True): """ Solve the equation a x = b for x, assuming a is banded matrix. 
The matrix a is stored in `ab` using the matrix diagonal ordered form:: ab[u + i - j, j] == a[i,j] Example of `ab` (shape of a is (6,6), `u` =1, `l` =2):: * a01 a12 a23 a34 a45 a00 a11 a22 a33 a44 a55 a10 a21 a32 a43 a54 * a20 a31 a42 a53 * * Parameters ---------- (l, u) : (integer, integer) Number of non-zero lower and upper diagonals ab : (`l` + `u` + 1, M) array_like Banded matrix b : (M,) or (M, K) array_like Right-hand side overwrite_ab : bool, optional Discard data in `ab` (may enhance performance) overwrite_b : bool, optional Discard data in `b` (may enhance performance) check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- x : (M,) or (M, K) ndarray The solution to the system a x = b. Returned shape depends on the shape of `b`. Examples -------- Solve the banded system a x = b, where:: [5 2 -1 0 0] [0] [1 4 2 -1 0] [1] a = [0 1 3 2 -1] b = [2] [0 0 1 2 2] [2] [0 0 0 1 1] [3] There is one nonzero diagonal below the main diagonal (l = 1), and two above (u = 2). The diagonal banded form of the matrix is:: [* * -1 -1 -1] ab = [* 2 2 2 2] [5 4 3 2 1] [1 1 1 1 *] >>> from scipy.linalg import solve_banded >>> ab = np.array([[0, 0, -1, -1, -1], ... [0, 2, 2, 2, 2], ... [5, 4, 3, 2, 1], ... [1, 1, 1, 1, 0]]) >>> b = np.array([0, 1, 2, 2, 3]) >>> x = solve_banded((1, 2), ab, b) >>> x array([-2.37288136, 3.93220339, -4. , 4.3559322 , -1.3559322 ]) """ # Deprecate keyword "debug" if debug is not None: warn('Use of the "debug" keyword is deprecated ' 'and this keyword will be removed in the future ' 'versions of SciPy.', DeprecationWarning, stacklevel=2) a1 = _asarray_validated(ab, check_finite=check_finite, as_inexact=True) b1 = _asarray_validated(b, check_finite=check_finite, as_inexact=True) # Validate shapes. 
if a1.shape[-1] != b1.shape[0]: raise ValueError("shapes of ab and b are not compatible.") (nlower, nupper) = l_and_u if nlower + nupper + 1 != a1.shape[0]: raise ValueError("invalid values for the number of lower and upper " "diagonals: l+u+1 (%d) does not equal ab.shape[0] " "(%d)" % (nlower + nupper + 1, ab.shape[0])) overwrite_b = overwrite_b or _datacopied(b1, b) if a1.shape[-1] == 1: b2 = np.array(b1, copy=(not overwrite_b)) b2 /= a1[1, 0] return b2 if nlower == nupper == 1: overwrite_ab = overwrite_ab or _datacopied(a1, ab) gtsv, = get_lapack_funcs(('gtsv',), (a1, b1)) du = a1[0, 1:] d = a1[1, :] dl = a1[2, :-1] du2, d, du, x, info = gtsv(dl, d, du, b1, overwrite_ab, overwrite_ab, overwrite_ab, overwrite_b) else: gbsv, = get_lapack_funcs(('gbsv',), (a1, b1)) a2 = np.zeros((2*nlower + nupper + 1, a1.shape[1]), dtype=gbsv.dtype) a2[nlower:, :] = a1 lu, piv, x, info = gbsv(nlower, nupper, a2, b1, overwrite_ab=True, overwrite_b=overwrite_b) if info == 0: return x if info > 0: raise LinAlgError("singular matrix") raise ValueError('illegal value in %d-th argument of internal ' 'gbsv/gtsv' % -info) def solveh_banded(ab, b, overwrite_ab=False, overwrite_b=False, lower=False, check_finite=True): """ Solve equation a x = b. a is Hermitian positive-definite banded matrix. The matrix a is stored in `ab` either in lower diagonal or upper diagonal ordered form: ab[u + i - j, j] == a[i,j] (if upper form; i <= j) ab[ i - j, j] == a[i,j] (if lower form; i >= j) Example of `ab` (shape of a is (6, 6), `u` =2):: upper form: * * a02 a13 a24 a35 * a01 a12 a23 a34 a45 a00 a11 a22 a33 a44 a55 lower form: a00 a11 a22 a33 a44 a55 a10 a21 a32 a43 a54 * a20 a31 a42 a53 * * Cells marked with * are not used. 
Parameters ---------- ab : (`u` + 1, M) array_like Banded matrix b : (M,) or (M, K) array_like Right-hand side overwrite_ab : bool, optional Discard data in `ab` (may enhance performance) overwrite_b : bool, optional Discard data in `b` (may enhance performance) lower : bool, optional Is the matrix in the lower form. (Default is upper form) check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- x : (M,) or (M, K) ndarray The solution to the system a x = b. Shape of return matches shape of `b`. Examples -------- Solve the banded system A x = b, where:: [ 4 2 -1 0 0 0] [1] [ 2 5 2 -1 0 0] [2] A = [-1 2 6 2 -1 0] b = [2] [ 0 -1 2 7 2 -1] [3] [ 0 0 -1 2 8 2] [3] [ 0 0 0 -1 2 9] [3] >>> from scipy.linalg import solveh_banded `ab` contains the main diagonal and the nonzero diagonals below the main diagonal. That is, we use the lower form: >>> ab = np.array([[ 4, 5, 6, 7, 8, 9], ... [ 2, 2, 2, 2, 2, 0], ... [-1, -1, -1, -1, 0, 0]]) >>> b = np.array([1, 2, 2, 3, 3, 3]) >>> x = solveh_banded(ab, b, lower=True) >>> x array([ 0.03431373, 0.45938375, 0.05602241, 0.47759104, 0.17577031, 0.34733894]) Solve the Hermitian banded system H x = b, where:: [ 8 2-1j 0 0 ] [ 1 ] H = [2+1j 5 1j 0 ] b = [1+1j] [ 0 -1j 9 -2-1j] [1-2j] [ 0 0 -2+1j 6 ] [ 0 ] In this example, we put the upper diagonals in the array `hb`: >>> hb = np.array([[0, 2-1j, 1j, -2-1j], ... [8, 5, 9, 6 ]]) >>> b = np.array([1, 1+1j, 1-2j, 0]) >>> x = solveh_banded(hb, b) >>> x array([ 0.07318536-0.02939412j, 0.11877624+0.17696461j, 0.10077984-0.23035393j, -0.00479904-0.09358128j]) """ a1 = _asarray_validated(ab, check_finite=check_finite) b1 = _asarray_validated(b, check_finite=check_finite) # Validate shapes. 
if a1.shape[-1] != b1.shape[0]: raise ValueError("shapes of ab and b are not compatible.") overwrite_b = overwrite_b or _datacopied(b1, b) overwrite_ab = overwrite_ab or _datacopied(a1, ab) if a1.shape[0] == 2: ptsv, = get_lapack_funcs(('ptsv',), (a1, b1)) if lower: d = a1[0, :].real e = a1[1, :-1] else: d = a1[1, :].real e = a1[0, 1:].conj() d, du, x, info = ptsv(d, e, b1, overwrite_ab, overwrite_ab, overwrite_b) else: pbsv, = get_lapack_funcs(('pbsv',), (a1, b1)) c, x, info = pbsv(a1, b1, lower=lower, overwrite_ab=overwrite_ab, overwrite_b=overwrite_b) if info > 0: raise LinAlgError("%d-th leading minor not positive definite" % info) if info < 0: raise ValueError('illegal value in %d-th argument of internal ' 'pbsv' % -info) return x def solve_toeplitz(c_or_cr, b, check_finite=True): """Solve a Toeplitz system using Levinson Recursion The Toeplitz matrix has constant diagonals, with c as its first column and r as its first row. If r is not given, ``r == conjugate(c)`` is assumed. Parameters ---------- c_or_cr : array_like or tuple of (array_like, array_like) The vector ``c``, or a tuple of arrays (``c``, ``r``). Whatever the actual shape of ``c``, it will be converted to a 1-D array. If not supplied, ``r = conjugate(c)`` is assumed; in this case, if c[0] is real, the Toeplitz matrix is Hermitian. r[0] is ignored; the first row of the Toeplitz matrix is ``[c[0], r[1:]]``. Whatever the actual shape of ``r``, it will be converted to a 1-D array. b : (M,) or (M, K) array_like Right-hand side in ``T x = b``. check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (result entirely NaNs) if the inputs do contain infinities or NaNs. Returns ------- x : (M,) or (M, K) ndarray The solution to the system ``T x = b``. Shape of return matches shape of `b`. 
See Also -------- toeplitz : Toeplitz matrix Notes ----- The solution is computed using Levinson-Durbin recursion, which is faster than generic least-squares methods, but can be less numerically stable. Examples -------- Solve the Toeplitz system T x = b, where:: [ 1 -1 -2 -3] [1] T = [ 3 1 -1 -2] b = [2] [ 6 3 1 -1] [2] [10 6 3 1] [5] To specify the Toeplitz matrix, only the first column and the first row are needed. >>> c = np.array([1, 3, 6, 10]) # First column of T >>> r = np.array([1, -1, -2, -3]) # First row of T >>> b = np.array([1, 2, 2, 5]) >>> from scipy.linalg import solve_toeplitz, toeplitz >>> x = solve_toeplitz((c, r), b) >>> x array([ 1.66666667, -1. , -2.66666667, 2.33333333]) Check the result by creating the full Toeplitz matrix and multiplying it by `x`. We should get `b`. >>> T = toeplitz(c, r) >>> T.dot(x) array([ 1., 2., 2., 5.]) """ # If numerical stability of this algorithm is a problem, a future # developer might consider implementing other O(N^2) Toeplitz solvers, # such as GKO (http://www.jstor.org/stable/2153371) or Bareiss. if isinstance(c_or_cr, tuple): c, r = c_or_cr c = _asarray_validated(c, check_finite=check_finite).ravel() r = _asarray_validated(r, check_finite=check_finite).ravel() else: c = _asarray_validated(c_or_cr, check_finite=check_finite).ravel() r = c.conjugate() # Form a 1D array of values to be used in the matrix, containing a reversed # copy of r[1:], followed by c. 
vals = np.concatenate((r[-1:0:-1], c)) if b is None: raise ValueError('illegal value, `b` is a required argument') b = _asarray_validated(b) if vals.shape[0] != (2*b.shape[0] - 1): raise ValueError('incompatible dimensions') if np.iscomplexobj(vals) or np.iscomplexobj(b): vals = np.asarray(vals, dtype=np.complex128, order='c') b = np.asarray(b, dtype=np.complex128) else: vals = np.asarray(vals, dtype=np.double, order='c') b = np.asarray(b, dtype=np.double) if b.ndim == 1: x, _ = levinson(vals, np.ascontiguousarray(b)) else: b_shape = b.shape b = b.reshape(b.shape[0], -1) x = np.column_stack( (levinson(vals, np.ascontiguousarray(b[:, i]))[0]) for i in range(b.shape[1])) x = x.reshape(*b_shape) return x def _get_axis_len(aname, a, axis): ax = axis if ax < 0: ax += a.ndim if 0 <= ax < a.ndim: return a.shape[ax] raise ValueError("'%saxis' entry is out of bounds" % (aname,)) def solve_circulant(c, b, singular='raise', tol=None, caxis=-1, baxis=0, outaxis=0): """Solve C x = b for x, where C is a circulant matrix. `C` is the circulant matrix associated with the vector `c`. The system is solved by doing division in Fourier space. The calculation is:: x = ifft(fft(b) / fft(c)) where `fft` and `ifft` are the fast Fourier transform and its inverse, respectively. For a large vector `c`, this is *much* faster than solving the system with the full circulant matrix. Parameters ---------- c : array_like The coefficients of the circulant matrix. b : array_like Right-hand side matrix in ``a x = b``. singular : str, optional This argument controls how a near singular circulant matrix is handled. If `singular` is "raise" and the circulant matrix is near singular, a `LinAlgError` is raised. If `singular` is "lstsq", the least squares solution is returned. Default is "raise". tol : float, optional If any eigenvalue of the circulant matrix has an absolute value that is less than or equal to `tol`, the matrix is considered to be near singular. 
def solve_circulant(c, b, singular='raise', tol=None,
                    caxis=-1, baxis=0, outaxis=0):
    """Solve C x = b for x, where C is a circulant matrix.

    `C` is the circulant matrix associated with the vector `c`. The
    system is solved by doing division in Fourier space::

        x = ifft(fft(b) / fft(c))

    where `fft` and `ifft` are the fast Fourier transform and its inverse,
    respectively. For a large vector `c`, this is *much* faster than
    solving the system with the full circulant matrix.

    Parameters
    ----------
    c : array_like
        The coefficients of the circulant matrix.
    b : array_like
        Right-hand side matrix in ``a x = b``.
    singular : str, optional
        Controls how a near singular circulant matrix is handled. If
        "raise", a `LinAlgError` is raised; if "lstsq", the least squares
        solution is returned. Default is "raise".
    tol : float, optional
        Eigenvalues of the circulant matrix whose absolute value is at
        most `tol` make the matrix "near singular". If not given,
        ``tol = abs_eigs.max() * abs_eigs.size * np.finfo(np.float64).eps``
        where `abs_eigs` holds the absolute eigenvalues.
    caxis : int
        When `c` has dimension greater than 1, the axis of `c` that holds
        the vectors of circulant coefficients.
    baxis : int
        When `b` has dimension greater than 1, the axis of `b` that holds
        the right-hand side vectors.
    outaxis : int
        When the result is multidimensional, the axis of the result that
        holds the solution vectors.

    Returns
    -------
    x : ndarray
        Solution to the system ``C x = b``.

    Raises
    ------
    LinAlgError
        If the circulant matrix associated with `c` is near singular.

    See Also
    --------
    circulant : circulant matrix

    Notes
    -----
    For a one-dimensional vector `c` with length `m`, and an array `b`
    with shape ``(m, ...)``, ``solve_circulant(c, b)`` returns the same
    result as ``solve(circulant(c), b)``.

    .. versionadded:: 0.16.0

    Examples
    --------
    >>> from scipy.linalg import solve_circulant
    >>> solve_circulant(np.array([2, 2, 4]), np.array([1, 2, 3]))
    array([ 0.75, -0.25,  0.25])
    """
    c = np.atleast_1d(c)
    b = np.atleast_1d(b)
    n_coeff = _get_axis_len("c", c, caxis)
    n_rhs = _get_axis_len("b", b, baxis)
    if n_coeff != n_rhs:
        raise ValueError('Incompatible c and b axis lengths')

    # The eigenvalues of a circulant matrix are the DFT of its defining
    # vector; move the coefficient axis last and transform along it.
    eigs = np.fft.fft(np.rollaxis(c, caxis, c.ndim), axis=-1)
    abs_eigs = np.abs(eigs)
    if tol is None:
        # Same tolerance convention as np.linalg.matrix_rank.
        tol = abs_eigs.max(axis=-1) * n_coeff * np.finfo(np.float64).eps
        if tol.shape != ():
            tol.shape = tol.shape + (1,)
        else:
            tol = np.atleast_1d(tol)

    near_zeros = abs_eigs <= tol
    is_near_singular = np.any(near_zeros)
    if is_near_singular:
        if singular == 'raise':
            raise LinAlgError("near singular circulant matrix.")
        # 'lstsq' mode: substitute 1 for the tiny eigenvalues so the
        # division below is well-defined; those components are zeroed
        # out afterwards, which yields the least-squares solution.
        eigs[near_zeros] = 1

    rhs_fft = np.fft.fft(np.rollaxis(b, baxis, b.ndim), axis=-1)
    q = rhs_fft / eigs

    if is_near_singular:
        # Broadcast `near_zeros` against a b-shaped boolean array so the
        # mask matches the broadcast shape of `q`, then drop the
        # components belonging to (near) zero eigenvalues.
        mask = np.ones_like(b, dtype=bool) & near_zeros
        q[mask] = 0

    x = np.fft.ifft(q, axis=-1)
    if not (np.iscomplexobj(c) or np.iscomplexobj(b)):
        x = x.real
    if outaxis != -1:
        x = np.rollaxis(x, -1, outaxis)
    return x
def inv(a, overwrite_a=False, check_finite=True):
    """Compute the inverse of a matrix.

    Parameters
    ----------
    a : array_like
        Square matrix to be inverted.
    overwrite_a : bool, optional
        Discard data in `a` (may improve performance). Default is False.
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite
        numbers. Disabling may give a performance gain, but may result in
        problems (crashes, non-termination) if the inputs do contain
        infinities or NaNs.

    Returns
    -------
    ainv : ndarray
        Inverse of the matrix `a`.

    Raises
    ------
    LinAlgError
        If `a` is singular.
    ValueError
        If `a` is not square, or not 2-dimensional.

    Examples
    --------
    >>> from scipy import linalg
    >>> a = np.array([[1., 2.], [3., 4.]])
    >>> linalg.inv(a)
    array([[-2. ,  1. ],
           [ 1.5, -0.5]])
    """
    arr = _asarray_validated(a, check_finite=check_finite)
    if arr.ndim != 2 or arr.shape[0] != arr.shape[1]:
        raise ValueError('expected square matrix')
    overwrite_a = overwrite_a or _datacopied(arr, a)

    getrf, getri, getri_lwork = get_lapack_funcs(
        ('getrf', 'getri', 'getri_lwork'), (arr,))

    # LU-factorize first; the inverse is then formed from the LU factors.
    lu, piv, info = getrf(arr, overwrite_a=overwrite_a)
    if info == 0:
        lwork = _compute_lwork(getri_lwork, arr.shape[0])
        # Pad the workspace slightly: with the minimal lwork, some LAPACK
        # ?getri builds have been observed to segfault (seen while
        # benchmarking 500x500 inverses), so request 1% extra.
        lwork = int(1.01 * lwork)
        inv_a, info = getri(lu, piv, lwork=lwork, overwrite_lu=1)
    if info > 0:
        raise LinAlgError("singular matrix")
    if info < 0:
        raise ValueError('illegal value in %d-th argument of internal '
                         'getrf|getri' % -info)
    return inv_a
def det(a, overwrite_a=False, check_finite=True):
    """Compute the determinant of a matrix.

    The determinant is computed via LU factorization, LAPACK routine
    z/dgetrf (reached through the Fortran ``det`` helper).

    Parameters
    ----------
    a : (M, M) array_like
        A square matrix.
    overwrite_a : bool, optional
        Allow overwriting data in a (may enhance performance).
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite
        numbers. Disabling may give a performance gain, but may result in
        problems (crashes, non-termination) if the inputs do contain
        infinities or NaNs.

    Returns
    -------
    det : float or complex
        Determinant of `a`.

    Examples
    --------
    >>> from scipy import linalg
    >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    >>> linalg.det(a)
    0.0
    """
    a1 = _asarray_validated(a, check_finite=check_finite)
    if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
        raise ValueError('expected square matrix')
    overwrite_a = overwrite_a or _datacopied(a1, a)
    # `fdet` LU-factorizes and multiplies the diagonal of U (with the
    # permutation sign) to obtain the determinant.
    fdet, = get_flinalg_funcs(('det',), (a1,))
    a_det, info = fdet(a1, overwrite_a=overwrite_a)
    if info < 0:
        raise ValueError('illegal value in %d-th argument of internal '
                         'det.getrf' % -info)
    return a_det


# Linear Least Squares
class LstsqLapackError(LinAlgError):
    # Raised by lstsq when the chosen LAPACK driver cannot be used; at
    # present only for the gelsd workspace-query bug (LAPACK bug 0038).
    pass


def lstsq(a, b, cond=None, overwrite_a=False, overwrite_b=False,
          check_finite=True, lapack_driver=None):
    """Compute least-squares solution to equation Ax = b.

    Compute a vector x such that the 2-norm ``|b - A x|`` is minimized.

    Parameters
    ----------
    a : (M, N) array_like
        Left hand side matrix (2-D array).
    b : (M,) or (M, K) array_like
        Right hand side matrix or vector (1-D or 2-D array).
    cond : float, optional
        Cutoff for 'small' singular values; used to determine effective
        rank of a. Singular values smaller than
        ``rcond * largest_singular_value`` are considered zero.
    overwrite_a : bool, optional
        Discard data in `a` (may enhance performance). Default is False.
    overwrite_b : bool, optional
        Discard data in `b` (may enhance performance). Default is False.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite
        numbers. Disabling may give a performance gain, but may result in
        problems (crashes, non-termination) if the inputs do contain
        infinities or NaNs.
    lapack_driver : str, optional
        Which LAPACK driver is used to solve the least-squares problem.
        Options are ``'gelsd'``, ``'gelsy'``, ``'gelss'``. Default
        (``'gelsd'``) is a good choice. However, ``'gelsy'`` can be
        slightly faster on many problems. ``'gelss'`` was used
        historically; it is generally slow but uses less memory.

        .. versionadded:: 0.17.0

    Returns
    -------
    x : (N,) or (N, K) ndarray
        Least-squares solution. Return shape matches shape of `b`.
    residues : (0,) or () or (K,) ndarray
        Sums of residues, squared 2-norm for each column in ``b - a x``.
        If rank of matrix a is ``< N`` or ``N > M``, or ``'gelsy'`` is
        used, this is a length zero array. If b was 1-D, this is a ()
        shape array (numpy scalar), otherwise the shape is (K,).
    rank : int
        Effective rank of matrix `a`.
    s : (min(M,N),) ndarray or None
        Singular values of `a`. The condition number of a is
        ``abs(s[0] / s[-1])``. None is returned when ``'gelsy'`` is used.

    Raises
    ------
    LinAlgError
        If computation does not converge.
    ValueError
        When parameters are wrong.

    See Also
    --------
    optimize.nnls : linear least squares with non-negativity constraint
    """
    a1 = _asarray_validated(a, check_finite=check_finite)
    b1 = _asarray_validated(b, check_finite=check_finite)
    if len(a1.shape) != 2:
        raise ValueError('expected matrix')
    m, n = a1.shape
    if len(b1.shape) == 2:
        nrhs = b1.shape[1]
    else:
        nrhs = 1
    if m != b1.shape[0]:
        raise ValueError('incompatible dimensions')
    if m == 0 or n == 0:  # Zero-sized problem, confuses LAPACK
        x = np.zeros((n,) + b1.shape[1:], dtype=np.common_type(a1, b1))
        if n == 0:
            residues = np.linalg.norm(b1, axis=0)**2
        else:
            residues = np.empty((0,))
        return x, residues, 0, np.empty((0,))

    driver = lapack_driver
    if driver is None:
        # The default can be switched to 'gelss' at runtime by the LAPACK
        # bug 0038 fallback below.
        driver = lstsq.default_lapack_driver
    if driver not in ('gelsd', 'gelsy', 'gelss'):
        raise ValueError('LAPACK driver "%s" is not found' % driver)

    lapack_func, lapack_lwork = get_lapack_funcs((driver,
                                                  '%s_lwork' % driver),
                                                 (a1, b1))
    real_data = True if (lapack_func.dtype.kind == 'f') else False

    if m < n:
        # need to extend b matrix as it will be filled with
        # a larger solution matrix
        if len(b1.shape) == 2:
            b2 = np.zeros((n, nrhs), dtype=lapack_func.dtype)
            b2[:m, :] = b1
        else:
            b2 = np.zeros(n, dtype=lapack_func.dtype)
            b2[:m] = b1
        b1 = b2

    overwrite_a = overwrite_a or _datacopied(a1, a)
    overwrite_b = overwrite_b or _datacopied(b1, b)

    if cond is None:
        cond = np.finfo(lapack_func.dtype).eps

    if driver in ('gelss', 'gelsd'):
        if driver == 'gelss':
            lwork = _compute_lwork(lapack_lwork, m, n, nrhs, cond)
            v, x, s, rank, work, info = lapack_func(a1, b1, cond, lwork,
                                                    overwrite_a=overwrite_a,
                                                    overwrite_b=overwrite_b)

        elif driver == 'gelsd':
            if real_data:
                lwork, iwork = _compute_lwork(lapack_lwork, m, n, nrhs, cond)
                if iwork == 0:
                    # this is LAPACK bug 0038: dgelsd does not provide the
                    # size of the iwork array in query mode. This bug was
                    # fixed in LAPACK 3.2.2, released July 21, 2010.
                    mesg = ("internal gelsd driver lwork query error, "
                            "required iwork dimension not returned. "
                            "This is likely the result of LAPACK bug "
                            "0038, fixed in LAPACK 3.2.2 (released "
                            "July 21, 2010). ")

                    if lapack_driver is None:
                        # restart with gelss: also flip the module-wide
                        # default so subsequent calls skip the broken path.
                        lstsq.default_lapack_driver = 'gelss'
                        mesg += "Falling back to 'gelss' driver."
                        warn(mesg, RuntimeWarning, stacklevel=2)
                        return lstsq(a, b, cond, overwrite_a, overwrite_b,
                                     check_finite, lapack_driver='gelss')

                    # can't proceed, bail out
                    mesg += ("Use a different lapack_driver when calling lstsq"
                             " or upgrade LAPACK.")
                    raise LstsqLapackError(mesg)

                x, s, rank, info = lapack_func(a1, b1, lwork,
                                               iwork, cond, False, False)
            else:  # complex data
                lwork, rwork, iwork = _compute_lwork(lapack_lwork, m, n,
                                                     nrhs, cond)
                x, s, rank, info = lapack_func(a1, b1, lwork, rwork, iwork,
                                               cond, False, False)
        if info > 0:
            raise LinAlgError("SVD did not converge in Linear Least Squares")
        if info < 0:
            raise ValueError('illegal value in %d-th argument of internal %s'
                             % (-info, lapack_driver))
        resids = np.asarray([], dtype=x.dtype)
        if m > n:
            # Overdetermined system: the first n rows are the solution and
            # (at full rank) the tail rows carry the residues.
            x1 = x[:n]
            if rank == n:
                resids = np.sum(np.abs(x[n:])**2, axis=0)
            x = x1
        return x, resids, rank, s

    elif driver == 'gelsy':
        lwork = _compute_lwork(lapack_lwork, m, n, nrhs, cond)
        jptv = np.zeros((a1.shape[1], 1), dtype=np.int32)
        v, x, j, rank, info = lapack_func(a1, b1, jptv, cond,
                                          lwork, False, False)
        if info < 0:
            raise ValueError("illegal value in %d-th argument of internal "
                             "gelsy" % -info)
        if m > n:
            x1 = x[:n]
            x = x1
        # gelsy provides neither residues nor singular values.
        return x, np.array([], x.dtype), rank, None


lstsq.default_lapack_driver = 'gelsd'
def pinv(a, cond=None, rcond=None, return_rank=False, check_finite=True):
    """Compute the (Moore-Penrose) pseudo-inverse of a matrix.

    Calculate a generalized inverse of a matrix using a least-squares
    solver: the pseudo-inverse is the least-squares solution of
    ``a x = I``.

    Parameters
    ----------
    a : (M, N) array_like
        Matrix to be pseudo-inverted.
    cond, rcond : float, optional
        Cutoff for 'small' singular values in the least-squares solver.
        Singular values smaller than ``rcond * largest_singular_value``
        are considered zero. If both are given, `rcond` takes precedence.
    return_rank : bool, optional
        if True, return the effective rank of the matrix
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite
        numbers. Disabling may give a performance gain, but may result in
        problems (crashes, non-termination) if the inputs do contain
        infinities or NaNs.

    Returns
    -------
    B : (N, M) ndarray
        The pseudo-inverse of matrix `a`.
    rank : int
        The effective rank of the matrix. Returned if return_rank == True

    Raises
    ------
    LinAlgError
        If computation does not converge.

    Examples
    --------
    >>> from scipy import linalg
    >>> a = np.random.randn(9, 6)
    >>> B = linalg.pinv(a)
    >>> np.allclose(a, np.dot(a, np.dot(B, a)))
    True
    """
    a = _asarray_validated(a, check_finite=check_finite)
    # Solving a x = I in the least-squares sense yields the pseudo-inverse.
    eye = np.identity(a.shape[0], dtype=a.dtype)
    effective_cond = cond if rcond is None else rcond
    x, _resids, rank, _s = lstsq(a, eye, cond=effective_cond,
                                 check_finite=False)
    return (x, rank) if return_rank else x
def pinv2(a, cond=None, rcond=None, return_rank=False, check_finite=True):
    """Compute the (Moore-Penrose) pseudo-inverse of a matrix.

    Calculate a generalized inverse of a matrix using its singular-value
    decomposition and including all 'large' singular values.

    Parameters
    ----------
    a : (M, N) array_like
        Matrix to be pseudo-inverted.
    cond, rcond : float or None
        Cutoff for 'small' singular values. Singular values smaller than
        ``rcond*largest_singular_value`` are considered zero. If None or
        -1, suitable machine precision is used. If both are given,
        `rcond` takes precedence.
    return_rank : bool, optional
        if True, return the effective rank of the matrix
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite
        numbers. Disabling may give a performance gain, but may result in
        problems (crashes, non-termination) if the inputs do contain
        infinities or NaNs.

    Returns
    -------
    B : (N, M) ndarray
        The pseudo-inverse of matrix `a`.
    rank : int
        The effective rank of the matrix. Returned if return_rank == True

    Raises
    ------
    LinAlgError
        If SVD computation does not converge.

    Examples
    --------
    >>> from scipy import linalg
    >>> a = np.random.randn(9, 6)
    >>> B = linalg.pinv2(a)
    >>> np.allclose(a, np.dot(a, np.dot(B, a)))
    True
    """
    a = _asarray_validated(a, check_finite=check_finite)
    left, sigma, right = decomp_svd.svd(a, full_matrices=False,
                                        check_finite=False)

    if rcond is not None:
        cond = rcond
    if cond in [None, -1]:
        # Default cutoff scales machine epsilon by a dtype-dependent factor.
        t = left.dtype.char.lower()
        cond = {'f': 1E3, 'd': 1E6}[t] * np.finfo(t).eps

    # Number of singular values above the relative cutoff.
    rank = np.sum(sigma > cond * np.max(sigma))

    # B = V diag(1/s) U^H, restricted to the retained singular values.
    scaled = left[:, :rank] / sigma[:rank]
    B = np.dot(scaled, right[:rank]).conj().T

    if return_rank:
        return B, rank
    return B
def pinvh(a, cond=None, rcond=None, lower=True, return_rank=False,
          check_finite=True):
    """Compute the (Moore-Penrose) pseudo-inverse of a Hermitian matrix.

    Calculate a generalized inverse of a Hermitian or real symmetric
    matrix using its eigenvalue decomposition and including all
    eigenvalues with 'large' absolute value.

    Parameters
    ----------
    a : (N, N) array_like
        Real symmetric or complex hermetian matrix to be pseudo-inverted
    cond, rcond : float or None
        Cutoff for 'small' eigenvalues. Singular values smaller than
        rcond * largest_eigenvalue are considered zero. If None or -1,
        suitable machine precision is used. If both are given, `rcond`
        takes precedence.
    lower : bool, optional
        Whether the pertinent array data is taken from the lower or upper
        triangle of a. (Default: lower)
    return_rank : bool, optional
        if True, return the effective rank of the matrix
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite
        numbers. Disabling may give a performance gain, but may result in
        problems (crashes, non-termination) if the inputs do contain
        infinities or NaNs.

    Returns
    -------
    B : (N, N) ndarray
        The pseudo-inverse of matrix `a`.
    rank : int
        The effective rank of the matrix. Returned if return_rank == True

    Raises
    ------
    LinAlgError
        If eigenvalue does not converge

    Examples
    --------
    >>> from scipy.linalg import pinvh
    >>> a = np.random.randn(9, 6)
    >>> a = np.dot(a, a.T)
    >>> B = pinvh(a)
    >>> np.allclose(a, np.dot(a, np.dot(B, a)))
    True
    """
    a = _asarray_validated(a, check_finite=check_finite)
    eigvals, eigvecs = decomp.eigh(a, lower=lower, check_finite=False)

    if rcond is not None:
        cond = rcond
    if cond in [None, -1]:
        # Default cutoff scales machine epsilon by a dtype-dependent factor.
        t = eigvecs.dtype.char.lower()
        cond = {'f': 1E3, 'd': 1E6}[t] * np.finfo(t).eps

    # For Hermitian matrices, singular values equal abs(eigenvalues), so
    # the rank cutoff is applied to the absolute eigenvalues.
    keep = abs(eigvals) > cond * np.max(abs(eigvals))
    inv_sigma = 1.0 / eigvals[keep]
    basis = eigvecs[:, keep]

    # B = Q diag(1/lambda) Q^H over the retained eigenpairs.
    B = np.dot(basis * inv_sigma, np.conjugate(basis).T)

    if return_rank:
        return B, len(inv_sigma)
    return B
def matrix_balance(A, permute=True, scale=True, separate=False,
                   overwrite_a=False):
    """Compute a diagonal similarity transformation for row/column balancing.

    The balancing tries to equalize the row and column 1-norms by applying
    a similarity transformation such that the magnitude variation of the
    matrix entries is reflected to the scaling matrices. Moreover, if
    enabled, the matrix is first permuted to isolate the upper triangular
    parts of the matrix and, again if scaling is also enabled, only the
    remaining subblocks are subjected to scaling.

    The balanced matrix satisfies the following equality

    .. math::

        B = T^{-1} A T

    The scaling coefficients are approximated to the nearest power of 2
    to avoid round-off errors.

    Parameters
    ----------
    A : (n, n) array_like
        Square data matrix for the balancing.
    permute : bool, optional
        The selector to define whether permutation of A is also performed
        prior to scaling.
    scale : bool, optional
        The selector to turn on and off the scaling. If False, the matrix
        will not be scaled.
    separate : bool, optional
        This switches from returning a full matrix of the transformation
        to a tuple of two separate 1D permutation and scaling arrays.
    overwrite_a : bool, optional
        This is passed to xGEBAL directly. Essentially, overwrites the
        result to the data. It might increase the space efficiency. See
        LAPACK manual for details. This is False by default.

    Returns
    -------
    B : (n, n) ndarray
        Balanced matrix
    T : (n, n) ndarray
        A possibly permuted diagonal matrix whose nonzero entries are
        integer powers of 2 to avoid numerical truncation errors.
    scale, perm : (n,) ndarray
        If ``separate`` keyword is set to True then instead of the array
        ``T`` above, the scaling and the permutation vectors are given
        separately as a tuple without allocating the full array ``T``.

        .. versionadded:: 0.19.0

    Notes
    -----
    The code is a wrapper around LAPACK's xGEBAL routine family for matrix
    balancing. The algorithm is based on the technique of Parlett and
    Reinsch, with the modifications implemented since LAPACK v3.5.0.

    References
    ----------
    .. [1] : B.N. Parlett and C. Reinsch, "Balancing a Matrix for
       Calculation of Eigenvalues and Eigenvectors", Numerische Mathematik,
       Vol.13(4), 1969, DOI:10.1007/BF02165404
    .. [2] : R. James, J. Langou, B.R. Lowery, "On matrix balancing and
       eigenvector computation", 2014, http://arxiv.org/abs/1401.5766
    .. [3] : D.S. Watkins. A case where balancing is harmful. Electron.
       Trans. Numer. Anal, Vol.23, 2006.
    """
    A = np.atleast_2d(_asarray_validated(A, check_finite=True))

    if not np.equal(*A.shape):
        raise ValueError('The data matrix for balancing should be square.')

    gebal = get_lapack_funcs(('gebal'), (A,))
    # gebal returns the balanced matrix B, the index range [lo, hi] of the
    # block that was actually scaled, and `ps`, a single array that encodes
    # BOTH scale factors (inside [lo, hi]) and 1-based permutation indices
    # (outside it) -- this is LAPACK's xGEBAL output convention.
    B, lo, hi, ps, info = gebal(A, scale=scale, permute=permute,
                                overwrite_a=overwrite_a)

    if info < 0:
        raise ValueError('xGEBAL exited with the internal error '
                         '"illegal value in argument number {}.". See '
                         'LAPACK documentation for the xGEBAL error codes.'
                         ''.format(-info))

    # Separate the permutations from the scalings and then convert to int
    # (entries outside [lo, hi] are permutation targets, so the scaling
    # there is the identity).
    scaling = np.ones_like(ps, dtype=float)
    scaling[lo:hi+1] = ps[lo:hi+1]

    # gebal uses 1-indexing
    ps = ps.astype(int, copy=False) - 1
    n = A.shape[0]
    perm = np.arange(n)

    # LAPACK permutes with the ordering n --> hi, then 0--> lo
    # NOTE(review): the two loops below replay xGEBAL's sequence of row
    # swaps to reconstruct the composite permutation; the replay order
    # (tail reversed first, then head forward) mirrors the order in which
    # xGEBAL applied them -- subtle, verify against the xGEBAL docs before
    # touching.
    if hi < n:
        for ind, x in enumerate(ps[hi+1:][::-1], 1):
            if n-ind == x:
                continue
            perm[[x, n-ind]] = perm[[n-ind, x]]

    if lo > 0:
        for ind, x in enumerate(ps[:lo]):
            if ind == x:
                continue
            perm[[x, ind]] = perm[[ind, x]]

    if separate:
        return B, (scaling, perm)

    # get the inverse permutation (iperm[perm] == identity) to build the
    # full, possibly permuted, diagonal transformation matrix T.
    iperm = np.empty_like(perm)
    iperm[perm] = np.arange(n)

    return B, np.diag(scaling)[iperm, :]
56,574
33.901295
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/_decomp_polar.py
from __future__ import division, print_function, absolute_import import numpy as np from scipy.linalg import svd __all__ = ['polar'] def polar(a, side="right"): """ Compute the polar decomposition. Returns the factors of the polar decomposition [1]_ `u` and `p` such that ``a = up`` (if `side` is "right") or ``a = pu`` (if `side` is "left"), where `p` is positive semidefinite. Depending on the shape of `a`, either the rows or columns of `u` are orthonormal. When `a` is a square array, `u` is a square unitary array. When `a` is not square, the "canonical polar decomposition" [2]_ is computed. Parameters ---------- a : (m, n) array_like The array to be factored. side : {'left', 'right'}, optional Determines whether a right or left polar decomposition is computed. If `side` is "right", then ``a = up``. If `side` is "left", then ``a = pu``. The default is "right". Returns ------- u : (m, n) ndarray If `a` is square, then `u` is unitary. If m > n, then the columns of `a` are orthonormal, and if m < n, then the rows of `u` are orthonormal. p : ndarray `p` is Hermitian positive semidefinite. If `a` is nonsingular, `p` is positive definite. The shape of `p` is (n, n) or (m, m), depending on whether `side` is "right" or "left", respectively. References ---------- .. [1] R. A. Horn and C. R. Johnson, "Matrix Analysis", Cambridge University Press, 1985. .. [2] N. J. Higham, "Functions of Matrices: Theory and Computation", SIAM, 2008. 
Examples -------- >>> from scipy.linalg import polar >>> a = np.array([[1, -1], [2, 4]]) >>> u, p = polar(a) >>> u array([[ 0.85749293, -0.51449576], [ 0.51449576, 0.85749293]]) >>> p array([[ 1.88648444, 1.2004901 ], [ 1.2004901 , 3.94446746]]) A non-square example, with m < n: >>> b = np.array([[0.5, 1, 2], [1.5, 3, 4]]) >>> u, p = polar(b) >>> u array([[-0.21196618, -0.42393237, 0.88054056], [ 0.39378971, 0.78757942, 0.4739708 ]]) >>> p array([[ 0.48470147, 0.96940295, 1.15122648], [ 0.96940295, 1.9388059 , 2.30245295], [ 1.15122648, 2.30245295, 3.65696431]]) >>> u.dot(p) # Verify the decomposition. array([[ 0.5, 1. , 2. ], [ 1.5, 3. , 4. ]]) >>> u.dot(u.T) # The rows of u are orthonormal. array([[ 1.00000000e+00, -2.07353665e-17], [ -2.07353665e-17, 1.00000000e+00]]) Another non-square example, with m > n: >>> c = b.T >>> u, p = polar(c) >>> u array([[-0.21196618, 0.39378971], [-0.42393237, 0.78757942], [ 0.88054056, 0.4739708 ]]) >>> p array([[ 1.23116567, 1.93241587], [ 1.93241587, 4.84930602]]) >>> u.dot(p) # Verify the decomposition. array([[ 0.5, 1.5], [ 1. , 3. ], [ 2. , 4. ]]) >>> u.T.dot(u) # The columns of u are orthonormal. array([[ 1.00000000e+00, -1.26363763e-16], [ -1.26363763e-16, 1.00000000e+00]]) """ if side not in ['right', 'left']: raise ValueError("`side` must be either 'right' or 'left'") a = np.asarray(a) if a.ndim != 2: raise ValueError("`a` must be a 2-D array.") w, s, vh = svd(a, full_matrices=False) u = w.dot(vh) if side == 'right': # a = up p = (vh.T.conj() * s).dot(vh) else: # a = pu p = (w * s).dot(w.T.conj()) return u, p
3,623
31.070796
78
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/tests/test_interpolative.py
#****************************************************************************** # Copyright (C) 2013 Kenneth L. Ho # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. Redistributions in binary # form must reproduce the above copyright notice, this list of conditions and # the following disclaimer in the documentation and/or other materials # provided with the distribution. # # None of the names of the copyright holders may be used to endorse or # promote products derived from this software without specific prior written # permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
#****************************************************************************** import scipy.linalg.interpolative as pymatrixid import numpy as np from scipy.linalg import hilbert, svdvals, norm from scipy.sparse.linalg import aslinearoperator import time from numpy.testing import assert_, assert_allclose from pytest import raises as assert_raises def _debug_print(s): if 0: print(s) class TestInterpolativeDecomposition(object): def test_id(self): for dtype in [np.float64, np.complex128]: self.check_id(dtype) def check_id(self, dtype): # Test ID routines on a Hilbert matrix. # set parameters n = 300 eps = 1e-12 # construct Hilbert matrix A = hilbert(n).astype(dtype) if np.issubdtype(dtype, np.complexfloating): A = A * (1 + 1j) L = aslinearoperator(A) # find rank S = np.linalg.svd(A, compute_uv=False) try: rank = np.nonzero(S < eps)[0][0] except: rank = n # print input summary _debug_print("Hilbert matrix dimension: %8i" % n) _debug_print("Working precision: %8.2e" % eps) _debug_print("Rank to working precision: %8i" % rank) # set print format fmt = "%8.2e (s) / %5s" # test real ID routines _debug_print("-----------------------------------------") _debug_print("Real ID routines") _debug_print("-----------------------------------------") # fixed precision _debug_print("Calling iddp_id / idzp_id ...",) t0 = time.clock() k, idx, proj = pymatrixid.interp_decomp(A, eps, rand=False) t = time.clock() - t0 B = pymatrixid.reconstruct_matrix_from_id(A[:, idx[:k]], idx, proj) _debug_print(fmt % (t, np.allclose(A, B, eps))) assert_(np.allclose(A, B, eps)) _debug_print("Calling iddp_aid / idzp_aid ...",) t0 = time.clock() k, idx, proj = pymatrixid.interp_decomp(A, eps) t = time.clock() - t0 B = pymatrixid.reconstruct_matrix_from_id(A[:, idx[:k]], idx, proj) _debug_print(fmt % (t, np.allclose(A, B, eps))) assert_(np.allclose(A, B, eps)) _debug_print("Calling iddp_rid / idzp_rid ...",) t0 = time.clock() k, idx, proj = pymatrixid.interp_decomp(L, eps) t = time.clock() - t0 B = 
pymatrixid.reconstruct_matrix_from_id(A[:, idx[:k]], idx, proj) _debug_print(fmt % (t, np.allclose(A, B, eps))) assert_(np.allclose(A, B, eps)) # fixed rank k = rank _debug_print("Calling iddr_id / idzr_id ...",) t0 = time.clock() idx, proj = pymatrixid.interp_decomp(A, k, rand=False) t = time.clock() - t0 B = pymatrixid.reconstruct_matrix_from_id(A[:, idx[:k]], idx, proj) _debug_print(fmt % (t, np.allclose(A, B, eps))) assert_(np.allclose(A, B, eps)) _debug_print("Calling iddr_aid / idzr_aid ...",) t0 = time.clock() idx, proj = pymatrixid.interp_decomp(A, k) t = time.clock() - t0 B = pymatrixid.reconstruct_matrix_from_id(A[:, idx[:k]], idx, proj) _debug_print(fmt % (t, np.allclose(A, B, eps))) assert_(np.allclose(A, B, eps)) _debug_print("Calling iddr_rid / idzr_rid ...",) t0 = time.clock() idx, proj = pymatrixid.interp_decomp(L, k) t = time.clock() - t0 B = pymatrixid.reconstruct_matrix_from_id(A[:, idx[:k]], idx, proj) _debug_print(fmt % (t, np.allclose(A, B, eps))) assert_(np.allclose(A, B, eps)) # check skeleton and interpolation matrices idx, proj = pymatrixid.interp_decomp(A, k, rand=False) P = pymatrixid.reconstruct_interp_matrix(idx, proj) B = pymatrixid.reconstruct_skel_matrix(A, k, idx) assert_(np.allclose(B, A[:,idx[:k]], eps)) assert_(np.allclose(B.dot(P), A, eps)) # test SVD routines _debug_print("-----------------------------------------") _debug_print("SVD routines") _debug_print("-----------------------------------------") # fixed precision _debug_print("Calling iddp_svd / idzp_svd ...",) t0 = time.clock() U, S, V = pymatrixid.svd(A, eps, rand=False) t = time.clock() - t0 B = np.dot(U, np.dot(np.diag(S), V.T.conj())) _debug_print(fmt % (t, np.allclose(A, B, eps))) assert_(np.allclose(A, B, eps)) _debug_print("Calling iddp_asvd / idzp_asvd...",) t0 = time.clock() U, S, V = pymatrixid.svd(A, eps) t = time.clock() - t0 B = np.dot(U, np.dot(np.diag(S), V.T.conj())) _debug_print(fmt % (t, np.allclose(A, B, eps))) assert_(np.allclose(A, B, eps)) 
_debug_print("Calling iddp_rsvd / idzp_rsvd...",) t0 = time.clock() U, S, V = pymatrixid.svd(L, eps) t = time.clock() - t0 B = np.dot(U, np.dot(np.diag(S), V.T.conj())) _debug_print(fmt % (t, np.allclose(A, B, eps))) assert_(np.allclose(A, B, eps)) # fixed rank k = rank _debug_print("Calling iddr_svd / idzr_svd ...",) t0 = time.clock() U, S, V = pymatrixid.svd(A, k, rand=False) t = time.clock() - t0 B = np.dot(U, np.dot(np.diag(S), V.T.conj())) _debug_print(fmt % (t, np.allclose(A, B, eps))) assert_(np.allclose(A, B, eps)) _debug_print("Calling iddr_asvd / idzr_asvd ...",) t0 = time.clock() U, S, V = pymatrixid.svd(A, k) t = time.clock() - t0 B = np.dot(U, np.dot(np.diag(S), V.T.conj())) _debug_print(fmt % (t, np.allclose(A, B, eps))) assert_(np.allclose(A, B, eps)) _debug_print("Calling iddr_rsvd / idzr_rsvd ...",) t0 = time.clock() U, S, V = pymatrixid.svd(L, k) t = time.clock() - t0 B = np.dot(U, np.dot(np.diag(S), V.T.conj())) _debug_print(fmt % (t, np.allclose(A, B, eps))) assert_(np.allclose(A, B, eps)) # ID to SVD idx, proj = pymatrixid.interp_decomp(A, k, rand=False) Up, Sp, Vp = pymatrixid.id_to_svd(A[:, idx[:k]], idx, proj) B = U.dot(np.diag(S).dot(V.T.conj())) assert_(np.allclose(A, B, eps)) # Norm estimates s = svdvals(A) norm_2_est = pymatrixid.estimate_spectral_norm(A) assert_(np.allclose(norm_2_est, s[0], 1e-6)) B = A.copy() B[:,0] *= 1.2 s = svdvals(A - B) norm_2_est = pymatrixid.estimate_spectral_norm_diff(A, B) assert_(np.allclose(norm_2_est, s[0], 1e-6)) # Rank estimates B = np.array([[1, 1, 0], [0, 0, 1], [0, 0, 1]], dtype=dtype) for M in [A, B]: ML = aslinearoperator(M) rank_tol = 1e-9 rank_np = np.linalg.matrix_rank(M, norm(M, 2)*rank_tol) rank_est = pymatrixid.estimate_rank(M, rank_tol) rank_est_2 = pymatrixid.estimate_rank(ML, rank_tol) assert_(rank_est >= rank_np) assert_(rank_est <= rank_np + 10) assert_(rank_est_2 >= rank_np - 4) assert_(rank_est_2 <= rank_np + 4) def test_rand(self): pymatrixid.seed('default') 
assert_(np.allclose(pymatrixid.rand(2), [0.8932059, 0.64500803], 1e-4)) pymatrixid.seed(1234) x1 = pymatrixid.rand(2) assert_(np.allclose(x1, [0.7513823, 0.06861718], 1e-4)) np.random.seed(1234) pymatrixid.seed() x2 = pymatrixid.rand(2) np.random.seed(1234) pymatrixid.seed(np.random.rand(55)) x3 = pymatrixid.rand(2) assert_allclose(x1, x2) assert_allclose(x1, x3) def test_badcall(self): A = hilbert(5).astype(np.float32) assert_raises(ValueError, pymatrixid.interp_decomp, A, 1e-6, rand=False) def test_rank_too_large(self): # svd(array, k) should not segfault a = np.ones((4, 3)) with assert_raises(ValueError): pymatrixid.svd(a, 4)
9,720
36.532819
80
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/tests/test_matfuncs.py
# # Created by: Pearu Peterson, March 2002 # """ Test functions for linalg.matfuncs module """ from __future__ import division, print_function, absolute_import import random import functools import numpy as np from numpy import array, matrix, identity, dot, sqrt, double from numpy.testing import ( assert_array_equal, assert_array_less, assert_equal, assert_array_almost_equal, assert_array_almost_equal_nulp, assert_allclose, assert_) import pytest from scipy._lib._numpy_compat import _assert_warns, suppress_warnings import scipy.linalg from scipy.linalg import (funm, signm, logm, sqrtm, fractional_matrix_power, expm, expm_frechet, expm_cond, norm) from scipy.linalg import _matfuncs_inv_ssq import scipy.linalg._expm_frechet from scipy.optimize import minimize def _get_al_mohy_higham_2012_experiment_1(): """ Return the test matrix from Experiment (1) of [1]_. References ---------- .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2012) "Improved Inverse Scaling and Squaring Algorithms for the Matrix Logarithm." SIAM Journal on Scientific Computing, 34 (4). C152-C169. ISSN 1095-7197 """ A = np.array([ [3.2346e-1, 3e4, 3e4, 3e4], [0, 3.0089e-1, 3e4, 3e4], [0, 0, 3.2210e-1, 3e4], [0, 0, 0, 3.0744e-1]], dtype=float) return A class TestSignM(object): def test_nils(self): a = array([[29.2, -24.2, 69.5, 49.8, 7.], [-9.2, 5.2, -18., -16.8, -2.], [-10., 6., -20., -18., -2.], [-9.6, 9.6, -25.5, -15.4, -2.], [9.8, -4.8, 18., 18.2, 2.]]) cr = array([[11.94933333,-2.24533333,15.31733333,21.65333333,-2.24533333], [-3.84266667,0.49866667,-4.59066667,-7.18666667,0.49866667], [-4.08,0.56,-4.92,-7.6,0.56], [-4.03466667,1.04266667,-5.59866667,-7.02666667,1.04266667], [4.15733333,-0.50133333,4.90933333,7.81333333,-0.50133333]]) r = signm(a) assert_array_almost_equal(r,cr) def test_defective1(self): a = array([[0.0,1,0,0],[1,0,1,0],[0,0,0,1],[0,0,1,0]]) r = signm(a, disp=False) #XXX: what would be the correct result? 
def test_defective2(self): a = array(( [29.2,-24.2,69.5,49.8,7.0], [-9.2,5.2,-18.0,-16.8,-2.0], [-10.0,6.0,-20.0,-18.0,-2.0], [-9.6,9.6,-25.5,-15.4,-2.0], [9.8,-4.8,18.0,18.2,2.0])) r = signm(a, disp=False) #XXX: what would be the correct result? def test_defective3(self): a = array([[-2., 25., 0., 0., 0., 0., 0.], [0., -3., 10., 3., 3., 3., 0.], [0., 0., 2., 15., 3., 3., 0.], [0., 0., 0., 0., 15., 3., 0.], [0., 0., 0., 0., 3., 10., 0.], [0., 0., 0., 0., 0., -2., 25.], [0., 0., 0., 0., 0., 0., -3.]]) r = signm(a, disp=False) #XXX: what would be the correct result? class TestLogM(object): def test_nils(self): a = array([[-2., 25., 0., 0., 0., 0., 0.], [0., -3., 10., 3., 3., 3., 0.], [0., 0., 2., 15., 3., 3., 0.], [0., 0., 0., 0., 15., 3., 0.], [0., 0., 0., 0., 3., 10., 0.], [0., 0., 0., 0., 0., -2., 25.], [0., 0., 0., 0., 0., 0., -3.]]) m = (identity(7)*3.1+0j)-a logm(m, disp=False) #XXX: what would be the correct result? def test_al_mohy_higham_2012_experiment_1_logm(self): # The logm completes the round trip successfully. # Note that the expm leg of the round trip is badly conditioned. A = _get_al_mohy_higham_2012_experiment_1() A_logm, info = logm(A, disp=False) A_round_trip = expm(A_logm) assert_allclose(A_round_trip, A, rtol=1e-5, atol=1e-14) def test_al_mohy_higham_2012_experiment_1_funm_log(self): # The raw funm with np.log does not complete the round trip. # Note that the expm leg of the round trip is badly conditioned. A = _get_al_mohy_higham_2012_experiment_1() A_funm_log, info = funm(A, np.log, disp=False) A_round_trip = expm(A_funm_log) assert_(not np.allclose(A_round_trip, A, rtol=1e-5, atol=1e-14)) def test_round_trip_random_float(self): np.random.seed(1234) for n in range(1, 6): M_unscaled = np.random.randn(n, n) for scale in np.logspace(-4, 4, 9): M = M_unscaled * scale # Eigenvalues are related to the branch cut. W = np.linalg.eigvals(M) err_msg = 'M:{0} eivals:{1}'.format(M, W) # Check sqrtm round trip because it is used within logm. 
M_sqrtm, info = sqrtm(M, disp=False) M_sqrtm_round_trip = M_sqrtm.dot(M_sqrtm) assert_allclose(M_sqrtm_round_trip, M) # Check logm round trip. M_logm, info = logm(M, disp=False) M_logm_round_trip = expm(M_logm) assert_allclose(M_logm_round_trip, M, err_msg=err_msg) def test_round_trip_random_complex(self): np.random.seed(1234) for n in range(1, 6): M_unscaled = np.random.randn(n, n) + 1j * np.random.randn(n, n) for scale in np.logspace(-4, 4, 9): M = M_unscaled * scale M_logm, info = logm(M, disp=False) M_round_trip = expm(M_logm) assert_allclose(M_round_trip, M) def test_logm_type_preservation_and_conversion(self): # The logm matrix function should preserve the type of a matrix # whose eigenvalues are positive with zero imaginary part. # Test this preservation for variously structured matrices. complex_dtype_chars = ('F', 'D', 'G') for matrix_as_list in ( [[1, 0], [0, 1]], [[1, 0], [1, 1]], [[2, 1], [1, 1]], [[2, 3], [1, 2]]): # check that the spectrum has the expected properties W = scipy.linalg.eigvals(matrix_as_list) assert_(not any(w.imag or w.real < 0 for w in W)) # check float type preservation A = np.array(matrix_as_list, dtype=float) A_logm, info = logm(A, disp=False) assert_(A_logm.dtype.char not in complex_dtype_chars) # check complex type preservation A = np.array(matrix_as_list, dtype=complex) A_logm, info = logm(A, disp=False) assert_(A_logm.dtype.char in complex_dtype_chars) # check float->complex type conversion for the matrix negation A = -np.array(matrix_as_list, dtype=float) A_logm, info = logm(A, disp=False) assert_(A_logm.dtype.char in complex_dtype_chars) def test_complex_spectrum_real_logm(self): # This matrix has complex eigenvalues and real logm. # Its output dtype depends on its input dtype. 
M = [[1, 1, 2], [2, 1, 1], [1, 2, 1]] for dt in float, complex: X = np.array(M, dtype=dt) w = scipy.linalg.eigvals(X) assert_(1e-2 < np.absolute(w.imag).sum()) Y, info = logm(X, disp=False) assert_(np.issubdtype(Y.dtype, np.inexact)) assert_allclose(expm(Y), X) def test_real_mixed_sign_spectrum(self): # These matrices have real eigenvalues with mixed signs. # The output logm dtype is complex, regardless of input dtype. for M in ( [[1, 0], [0, -1]], [[0, 1], [1, 0]]): for dt in float, complex: A = np.array(M, dtype=dt) A_logm, info = logm(A, disp=False) assert_(np.issubdtype(A_logm.dtype, np.complexfloating)) def test_exactly_singular(self): A = np.array([[0, 0], [1j, 1j]]) B = np.asarray([[1, 1], [0, 0]]) for M in A, A.T, B, B.T: expected_warning = _matfuncs_inv_ssq.LogmExactlySingularWarning L, info = _assert_warns(expected_warning, logm, M, disp=False) E = expm(L) assert_allclose(E, M, atol=1e-14) def test_nearly_singular(self): M = np.array([[1e-100]]) expected_warning = _matfuncs_inv_ssq.LogmNearlySingularWarning L, info = _assert_warns(expected_warning, logm, M, disp=False) E = expm(L) assert_allclose(E, M, atol=1e-14) def test_opposite_sign_complex_eigenvalues(self): # See gh-6113 E = [[0, 1], [-1, 0]] L = [[0, np.pi*0.5], [-np.pi*0.5, 0]] assert_allclose(expm(L), E, atol=1e-14) assert_allclose(logm(E), L, atol=1e-14) E = [[1j, 4], [0, -1j]] L = [[1j*np.pi*0.5, 2*np.pi], [0, -1j*np.pi*0.5]] assert_allclose(expm(L), E, atol=1e-14) assert_allclose(logm(E), L, atol=1e-14) E = [[1j, 0], [0, -1j]] L = [[1j*np.pi*0.5, 0], [0, -1j*np.pi*0.5]] assert_allclose(expm(L), E, atol=1e-14) assert_allclose(logm(E), L, atol=1e-14) class TestSqrtM(object): def test_round_trip_random_float(self): np.random.seed(1234) for n in range(1, 6): M_unscaled = np.random.randn(n, n) for scale in np.logspace(-4, 4, 9): M = M_unscaled * scale M_sqrtm, info = sqrtm(M, disp=False) M_sqrtm_round_trip = M_sqrtm.dot(M_sqrtm) assert_allclose(M_sqrtm_round_trip, M) def 
test_round_trip_random_complex(self): np.random.seed(1234) for n in range(1, 6): M_unscaled = np.random.randn(n, n) + 1j * np.random.randn(n, n) for scale in np.logspace(-4, 4, 9): M = M_unscaled * scale M_sqrtm, info = sqrtm(M, disp=False) M_sqrtm_round_trip = M_sqrtm.dot(M_sqrtm) assert_allclose(M_sqrtm_round_trip, M) def test_bad(self): # See http://www.maths.man.ac.uk/~nareports/narep336.ps.gz e = 2**-5 se = sqrt(e) a = array([[1.0,0,0,1], [0,e,0,0], [0,0,e,0], [0,0,0,1]]) sa = array([[1,0,0,0.5], [0,se,0,0], [0,0,se,0], [0,0,0,1]]) n = a.shape[0] assert_array_almost_equal(dot(sa,sa),a) # Check default sqrtm. esa = sqrtm(a, disp=False, blocksize=n)[0] assert_array_almost_equal(dot(esa,esa),a) # Check sqrtm with 2x2 blocks. esa = sqrtm(a, disp=False, blocksize=2)[0] assert_array_almost_equal(dot(esa,esa),a) def test_sqrtm_type_preservation_and_conversion(self): # The sqrtm matrix function should preserve the type of a matrix # whose eigenvalues are nonnegative with zero imaginary part. # Test this preservation for variously structured matrices. 
complex_dtype_chars = ('F', 'D', 'G') for matrix_as_list in ( [[1, 0], [0, 1]], [[1, 0], [1, 1]], [[2, 1], [1, 1]], [[2, 3], [1, 2]], [[1, 1], [1, 1]]): # check that the spectrum has the expected properties W = scipy.linalg.eigvals(matrix_as_list) assert_(not any(w.imag or w.real < 0 for w in W)) # check float type preservation A = np.array(matrix_as_list, dtype=float) A_sqrtm, info = sqrtm(A, disp=False) assert_(A_sqrtm.dtype.char not in complex_dtype_chars) # check complex type preservation A = np.array(matrix_as_list, dtype=complex) A_sqrtm, info = sqrtm(A, disp=False) assert_(A_sqrtm.dtype.char in complex_dtype_chars) # check float->complex type conversion for the matrix negation A = -np.array(matrix_as_list, dtype=float) A_sqrtm, info = sqrtm(A, disp=False) assert_(A_sqrtm.dtype.char in complex_dtype_chars) def test_sqrtm_type_conversion_mixed_sign_or_complex_spectrum(self): complex_dtype_chars = ('F', 'D', 'G') for matrix_as_list in ( [[1, 0], [0, -1]], [[0, 1], [1, 0]], [[0, 1, 0], [0, 0, 1], [1, 0, 0]]): # check that the spectrum has the expected properties W = scipy.linalg.eigvals(matrix_as_list) assert_(any(w.imag or w.real < 0 for w in W)) # check complex->complex A = np.array(matrix_as_list, dtype=complex) A_sqrtm, info = sqrtm(A, disp=False) assert_(A_sqrtm.dtype.char in complex_dtype_chars) # check float->complex A = np.array(matrix_as_list, dtype=float) A_sqrtm, info = sqrtm(A, disp=False) assert_(A_sqrtm.dtype.char in complex_dtype_chars) def test_blocksizes(self): # Make sure I do not goof up the blocksizes when they do not divide n. 
np.random.seed(1234) for n in range(1, 8): A = np.random.rand(n, n) + 1j*np.random.randn(n, n) A_sqrtm_default, info = sqrtm(A, disp=False, blocksize=n) assert_allclose(A, np.linalg.matrix_power(A_sqrtm_default, 2)) for blocksize in range(1, 10): A_sqrtm_new, info = sqrtm(A, disp=False, blocksize=blocksize) assert_allclose(A_sqrtm_default, A_sqrtm_new) def test_al_mohy_higham_2012_experiment_1(self): # Matrix square root of a tricky upper triangular matrix. A = _get_al_mohy_higham_2012_experiment_1() A_sqrtm, info = sqrtm(A, disp=False) A_round_trip = A_sqrtm.dot(A_sqrtm) assert_allclose(A_round_trip, A, rtol=1e-5) assert_allclose(np.tril(A_round_trip), np.tril(A)) def test_strict_upper_triangular(self): # This matrix has no square root. for dt in int, float: A = np.array([ [0, 3, 0, 0], [0, 0, 3, 0], [0, 0, 0, 3], [0, 0, 0, 0]], dtype=dt) A_sqrtm, info = sqrtm(A, disp=False) assert_(np.isnan(A_sqrtm).all()) def test_weird_matrix(self): # The square root of matrix B exists. for dt in int, float: A = np.array([ [0, 0, 1], [0, 0, 0], [0, 1, 0]], dtype=dt) B = np.array([ [0, 1, 0], [0, 0, 0], [0, 0, 0]], dtype=dt) assert_array_equal(B, A.dot(A)) # But scipy sqrtm is not clever enough to find it. 
B_sqrtm, info = sqrtm(B, disp=False) assert_(np.isnan(B_sqrtm).all()) def test_disp(self): from io import StringIO np.random.seed(1234) A = np.random.rand(3, 3) B = sqrtm(A, disp=True) assert_allclose(B.dot(B), A) def test_opposite_sign_complex_eigenvalues(self): M = [[2j, 4], [0, -2j]] R = [[1+1j, 2], [0, 1-1j]] assert_allclose(np.dot(R, R), M, atol=1e-14) assert_allclose(sqrtm(M), R, atol=1e-14) def test_gh4866(self): M = np.array([[1, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 1]]) R = np.array([[sqrt(0.5), 0, 0, sqrt(0.5)], [0, 0, 0, 0], [0, 0, 0, 0], [sqrt(0.5), 0, 0, sqrt(0.5)]]) assert_allclose(np.dot(R, R), M, atol=1e-14) assert_allclose(sqrtm(M), R, atol=1e-14) def test_gh5336(self): M = np.diag([2, 1, 0]) R = np.diag([sqrt(2), 1, 0]) assert_allclose(np.dot(R, R), M, atol=1e-14) assert_allclose(sqrtm(M), R, atol=1e-14) def test_gh7839(self): M = np.zeros((2, 2)) R = np.zeros((2, 2)) assert_allclose(np.dot(R, R), M, atol=1e-14) assert_allclose(sqrtm(M), R, atol=1e-14) class TestFractionalMatrixPower(object): def test_round_trip_random_complex(self): np.random.seed(1234) for p in range(1, 5): for n in range(1, 5): M_unscaled = np.random.randn(n, n) + 1j * np.random.randn(n, n) for scale in np.logspace(-4, 4, 9): M = M_unscaled * scale M_root = fractional_matrix_power(M, 1/p) M_round_trip = np.linalg.matrix_power(M_root, p) assert_allclose(M_round_trip, M) def test_round_trip_random_float(self): # This test is more annoying because it can hit the branch cut; # this happens when the matrix has an eigenvalue # with no imaginary component and with a real negative component, # and it means that the principal branch does not exist. 
np.random.seed(1234) for p in range(1, 5): for n in range(1, 5): M_unscaled = np.random.randn(n, n) for scale in np.logspace(-4, 4, 9): M = M_unscaled * scale M_root = fractional_matrix_power(M, 1/p) M_round_trip = np.linalg.matrix_power(M_root, p) assert_allclose(M_round_trip, M) def test_larger_abs_fractional_matrix_powers(self): np.random.seed(1234) for n in (2, 3, 5): for i in range(10): M = np.random.randn(n, n) + 1j * np.random.randn(n, n) M_one_fifth = fractional_matrix_power(M, 0.2) # Test the round trip. M_round_trip = np.linalg.matrix_power(M_one_fifth, 5) assert_allclose(M, M_round_trip) # Test a large abs fractional power. X = fractional_matrix_power(M, -5.4) Y = np.linalg.matrix_power(M_one_fifth, -27) assert_allclose(X, Y) # Test another large abs fractional power. X = fractional_matrix_power(M, 3.8) Y = np.linalg.matrix_power(M_one_fifth, 19) assert_allclose(X, Y) def test_random_matrices_and_powers(self): # Each independent iteration of this fuzz test picks random parameters. # It tries to hit some edge cases. np.random.seed(1234) nsamples = 20 for i in range(nsamples): # Sample a matrix size and a random real power. n = random.randrange(1, 5) p = np.random.randn() # Sample a random real or complex matrix. matrix_scale = np.exp(random.randrange(-4, 5)) A = np.random.randn(n, n) if random.choice((True, False)): A = A + 1j * np.random.randn(n, n) A = A * matrix_scale # Check a couple of analytically equivalent ways # to compute the fractional matrix power. # These can be compared because they both use the principal branch. A_power = fractional_matrix_power(A, p) A_logm, info = logm(A, disp=False) A_power_expm_logm = expm(A_logm * p) assert_allclose(A_power, A_power_expm_logm) def test_al_mohy_higham_2012_experiment_1(self): # Fractional powers of a tricky upper triangular matrix. A = _get_al_mohy_higham_2012_experiment_1() # Test remainder matrix power. 
A_funm_sqrt, info = funm(A, np.sqrt, disp=False) A_sqrtm, info = sqrtm(A, disp=False) A_rem_power = _matfuncs_inv_ssq._remainder_matrix_power(A, 0.5) A_power = fractional_matrix_power(A, 0.5) assert_array_equal(A_rem_power, A_power) assert_allclose(A_sqrtm, A_power) assert_allclose(A_sqrtm, A_funm_sqrt) # Test more fractional powers. for p in (1/2, 5/3): A_power = fractional_matrix_power(A, p) A_round_trip = fractional_matrix_power(A_power, 1/p) assert_allclose(A_round_trip, A, rtol=1e-2) assert_allclose(np.tril(A_round_trip, 1), np.tril(A, 1)) def test_briggs_helper_function(self): np.random.seed(1234) for a in np.random.randn(10) + 1j * np.random.randn(10): for k in range(5): x_observed = _matfuncs_inv_ssq._briggs_helper_function(a, k) x_expected = a ** np.exp2(-k) - 1 assert_allclose(x_observed, x_expected) def test_type_preservation_and_conversion(self): # The fractional_matrix_power matrix function should preserve # the type of a matrix whose eigenvalues # are positive with zero imaginary part. # Test this preservation for variously structured matrices. complex_dtype_chars = ('F', 'D', 'G') for matrix_as_list in ( [[1, 0], [0, 1]], [[1, 0], [1, 1]], [[2, 1], [1, 1]], [[2, 3], [1, 2]]): # check that the spectrum has the expected properties W = scipy.linalg.eigvals(matrix_as_list) assert_(not any(w.imag or w.real < 0 for w in W)) # Check various positive and negative powers # with absolute values bigger and smaller than 1. 
for p in (-2.4, -0.9, 0.2, 3.3): # check float type preservation A = np.array(matrix_as_list, dtype=float) A_power = fractional_matrix_power(A, p) assert_(A_power.dtype.char not in complex_dtype_chars) # check complex type preservation A = np.array(matrix_as_list, dtype=complex) A_power = fractional_matrix_power(A, p) assert_(A_power.dtype.char in complex_dtype_chars) # check float->complex for the matrix negation A = -np.array(matrix_as_list, dtype=float) A_power = fractional_matrix_power(A, p) assert_(A_power.dtype.char in complex_dtype_chars) def test_type_conversion_mixed_sign_or_complex_spectrum(self): complex_dtype_chars = ('F', 'D', 'G') for matrix_as_list in ( [[1, 0], [0, -1]], [[0, 1], [1, 0]], [[0, 1, 0], [0, 0, 1], [1, 0, 0]]): # check that the spectrum has the expected properties W = scipy.linalg.eigvals(matrix_as_list) assert_(any(w.imag or w.real < 0 for w in W)) # Check various positive and negative powers # with absolute values bigger and smaller than 1. for p in (-2.4, -0.9, 0.2, 3.3): # check complex->complex A = np.array(matrix_as_list, dtype=complex) A_power = fractional_matrix_power(A, p) assert_(A_power.dtype.char in complex_dtype_chars) # check float->complex A = np.array(matrix_as_list, dtype=float) A_power = fractional_matrix_power(A, p) assert_(A_power.dtype.char in complex_dtype_chars) @pytest.mark.xfail(reason='Too unstable across LAPACKs.') def test_singular(self): # Negative fractional powers do not work with singular matrices. for matrix_as_list in ( [[0, 0], [0, 0]], [[1, 1], [1, 1]], [[1, 2], [3, 6]], [[0, 0, 0], [0, 1, 1], [0, -1, 1]]): # Check fractional powers both for float and for complex types. 
for newtype in (float, complex): A = np.array(matrix_as_list, dtype=newtype) for p in (-0.7, -0.9, -2.4, -1.3): A_power = fractional_matrix_power(A, p) assert_(np.isnan(A_power).all()) for p in (0.2, 1.43): A_power = fractional_matrix_power(A, p) A_round_trip = fractional_matrix_power(A_power, 1/p) assert_allclose(A_round_trip, A) def test_opposite_sign_complex_eigenvalues(self): M = [[2j, 4], [0, -2j]] R = [[1+1j, 2], [0, 1-1j]] assert_allclose(np.dot(R, R), M, atol=1e-14) assert_allclose(fractional_matrix_power(M, 0.5), R, atol=1e-14) class TestExpM(object): def test_zero(self): a = array([[0.,0],[0,0]]) assert_array_almost_equal(expm(a),[[1,0],[0,1]]) def test_single_elt(self): # See gh-5853 from scipy.sparse import csc_matrix vOne = -2.02683397006j vTwo = -2.12817566856j mOne = csc_matrix([[vOne]], dtype='complex') mTwo = csc_matrix([[vTwo]], dtype='complex') outOne = expm(mOne) outTwo = expm(mTwo) assert_equal(type(outOne), type(mOne)) assert_equal(type(outTwo), type(mTwo)) assert_allclose(outOne[0, 0], complex(-0.44039415155949196, -0.8978045395698304)) assert_allclose(outTwo[0, 0], complex(-0.52896401032626006, -0.84864425749518878)) class TestExpmFrechet(object): def test_expm_frechet(self): # a test of the basic functionality M = np.array([ [1, 2, 3, 4], [5, 6, 7, 8], [0, 0, 1, 2], [0, 0, 5, 6], ], dtype=float) A = np.array([ [1, 2], [5, 6], ], dtype=float) E = np.array([ [3, 4], [7, 8], ], dtype=float) expected_expm = scipy.linalg.expm(A) expected_frechet = scipy.linalg.expm(M)[:2, 2:] for kwargs in ({}, {'method':'SPS'}, {'method':'blockEnlarge'}): observed_expm, observed_frechet = expm_frechet(A, E, **kwargs) assert_allclose(expected_expm, observed_expm) assert_allclose(expected_frechet, observed_frechet) def test_small_norm_expm_frechet(self): # methodically test matrices with a range of norms, for better coverage M_original = np.array([ [1, 2, 3, 4], [5, 6, 7, 8], [0, 0, 1, 2], [0, 0, 5, 6], ], dtype=float) A_original = np.array([ [1, 2], [5, 6], ], 
dtype=float) E_original = np.array([ [3, 4], [7, 8], ], dtype=float) A_original_norm_1 = scipy.linalg.norm(A_original, 1) selected_m_list = [1, 3, 5, 7, 9, 11, 13, 15] m_neighbor_pairs = zip(selected_m_list[:-1], selected_m_list[1:]) for ma, mb in m_neighbor_pairs: ell_a = scipy.linalg._expm_frechet.ell_table_61[ma] ell_b = scipy.linalg._expm_frechet.ell_table_61[mb] target_norm_1 = 0.5 * (ell_a + ell_b) scale = target_norm_1 / A_original_norm_1 M = scale * M_original A = scale * A_original E = scale * E_original expected_expm = scipy.linalg.expm(A) expected_frechet = scipy.linalg.expm(M)[:2, 2:] observed_expm, observed_frechet = expm_frechet(A, E) assert_allclose(expected_expm, observed_expm) assert_allclose(expected_frechet, observed_frechet) def test_fuzz(self): # try a bunch of crazy inputs rfuncs = ( np.random.uniform, np.random.normal, np.random.standard_cauchy, np.random.exponential) ntests = 100 for i in range(ntests): rfunc = random.choice(rfuncs) target_norm_1 = random.expovariate(1.0) n = random.randrange(2, 16) A_original = rfunc(size=(n,n)) E_original = rfunc(size=(n,n)) A_original_norm_1 = scipy.linalg.norm(A_original, 1) scale = target_norm_1 / A_original_norm_1 A = scale * A_original E = scale * E_original M = np.vstack([ np.hstack([A, E]), np.hstack([np.zeros_like(A), A])]) expected_expm = scipy.linalg.expm(A) expected_frechet = scipy.linalg.expm(M)[:n, n:] observed_expm, observed_frechet = expm_frechet(A, E) assert_allclose(expected_expm, observed_expm) assert_allclose(expected_frechet, observed_frechet) def test_problematic_matrix(self): # this test case uncovered a bug which has since been fixed A = np.array([ [1.50591997, 1.93537998], [0.41203263, 0.23443516], ], dtype=float) E = np.array([ [1.87864034, 2.07055038], [1.34102727, 0.67341123], ], dtype=float) A_norm_1 = scipy.linalg.norm(A, 1) sps_expm, sps_frechet = expm_frechet( A, E, method='SPS') blockEnlarge_expm, blockEnlarge_frechet = expm_frechet( A, E, method='blockEnlarge') 
assert_allclose(sps_expm, blockEnlarge_expm) assert_allclose(sps_frechet, blockEnlarge_frechet) @pytest.mark.slow @pytest.mark.skip(reason='this test is deliberately slow') def test_medium_matrix(self): # profile this to see the speed difference n = 1000 A = np.random.exponential(size=(n, n)) E = np.random.exponential(size=(n, n)) sps_expm, sps_frechet = expm_frechet( A, E, method='SPS') blockEnlarge_expm, blockEnlarge_frechet = expm_frechet( A, E, method='blockEnlarge') assert_allclose(sps_expm, blockEnlarge_expm) assert_allclose(sps_frechet, blockEnlarge_frechet) def _help_expm_cond_search(A, A_norm, X, X_norm, eps, p): p = np.reshape(p, A.shape) p_norm = norm(p) perturbation = eps * p * (A_norm / p_norm) X_prime = expm(A + perturbation) scaled_relative_error = norm(X_prime - X) / (X_norm * eps) return -scaled_relative_error def _normalized_like(A, B): return A * (scipy.linalg.norm(B) / scipy.linalg.norm(A)) def _relative_error(f, A, perturbation): X = f(A) X_prime = f(A + perturbation) return norm(X_prime - X) / norm(X) class TestExpmConditionNumber(object): def test_expm_cond_smoke(self): np.random.seed(1234) for n in range(1, 4): A = np.random.randn(n, n) kappa = expm_cond(A) assert_array_less(0, kappa) def test_expm_bad_condition_number(self): A = np.array([ [-1.128679820, 9.614183771e4, -4.524855739e9, 2.924969411e14], [0, -1.201010529, 9.634696872e4, -4.681048289e9], [0, 0, -1.132893222, 9.532491830e4], [0, 0, 0, -1.179475332], ]) kappa = expm_cond(A) assert_array_less(1e36, kappa) def test_univariate(self): np.random.seed(12345) for x in np.linspace(-5, 5, num=11): A = np.array([[x]]) assert_allclose(expm_cond(A), abs(x)) for x in np.logspace(-2, 2, num=11): A = np.array([[x]]) assert_allclose(expm_cond(A), abs(x)) for i in range(10): A = np.random.randn(1, 1) assert_allclose(expm_cond(A), np.absolute(A)[0, 0]) @pytest.mark.slow def test_expm_cond_fuzz(self): np.random.seed(12345) eps = 1e-5 nsamples = 10 for i in range(nsamples): n = np.random.randint(2, 
5) A = np.random.randn(n, n) A_norm = scipy.linalg.norm(A) X = expm(A) X_norm = scipy.linalg.norm(X) kappa = expm_cond(A) # Look for the small perturbation that gives the greatest # relative error. f = functools.partial(_help_expm_cond_search, A, A_norm, X, X_norm, eps) guess = np.ones(n*n) out = minimize(f, guess, method='L-BFGS-B') xopt = out.x yopt = f(xopt) p_best = eps * _normalized_like(np.reshape(xopt, A.shape), A) p_best_relerr = _relative_error(expm, A, p_best) assert_allclose(p_best_relerr, -yopt * eps) # Check that the identified perturbation indeed gives greater # relative error than random perturbations with similar norms. for j in range(5): p_rand = eps * _normalized_like(np.random.randn(*A.shape), A) assert_allclose(norm(p_best), norm(p_rand)) p_rand_relerr = _relative_error(expm, A, p_rand) assert_array_less(p_rand_relerr, p_best_relerr) # The greatest relative error should not be much greater than # eps times the condition number kappa. # In the limit as eps approaches zero it should never be greater. assert_array_less(p_best_relerr, (1 + 2*eps) * eps * kappa)
33,080
38.523297
82
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/tests/test_decomp.py
""" Test functions for linalg.decomp module """ from __future__ import division, print_function, absolute_import __usage__ = """ Build linalg: python setup_linalg.py build Run tests if scipy is installed: python -c 'import scipy;scipy.linalg.test()' """ import itertools import numpy as np from numpy.testing import (assert_equal, assert_almost_equal, assert_array_almost_equal, assert_array_equal, assert_, assert_allclose) import pytest from pytest import raises as assert_raises from scipy._lib.six import xrange from scipy.linalg import (eig, eigvals, lu, svd, svdvals, cholesky, qr, schur, rsf2csf, lu_solve, lu_factor, solve, diagsvd, hessenberg, rq, eig_banded, eigvals_banded, eigh, eigvalsh, qr_multiply, qz, orth, ordqz, subspace_angles, hadamard, eigvalsh_tridiagonal, eigh_tridiagonal, null_space, cdf2rdf) from scipy.linalg.lapack import dgbtrf, dgbtrs, zgbtrf, zgbtrs, \ dsbev, dsbevd, dsbevx, zhbevd, zhbevx from scipy.linalg.misc import norm from scipy.linalg._decomp_qz import _select_function from numpy import array, transpose, sometrue, diag, ones, linalg, \ argsort, zeros, arange, float32, complex64, dot, conj, identity, \ ravel, sqrt, iscomplex, shape, sort, conjugate, bmat, sign, \ asarray, matrix, isfinite, all, ndarray, outer, eye, dtype, empty,\ triu, tril from numpy.random import normal, seed, random from scipy.linalg._testutils import assert_no_overwrite # digit precision to use in asserts for different types DIGITS = {'d':11, 'D':11, 'f':4, 'F':4} def clear_fuss(ar, fuss_binary_bits=7): """Clears trailing `fuss_binary_bits` of mantissa of a floating number""" x = np.asanyarray(ar) if np.iscomplexobj(x): return clear_fuss(x.real) + 1j * clear_fuss(x.imag) significant_binary_bits = np.finfo(x.dtype).nmant x_mant, x_exp = np.frexp(x) f = 2.0**(significant_binary_bits - fuss_binary_bits) x_mant *= f np.rint(x_mant, out=x_mant) x_mant /= f return np.ldexp(x_mant, x_exp) # XXX: This function should be available through numpy.testing def 
assert_dtype_equal(act, des): if isinstance(act, ndarray): act = act.dtype else: act = dtype(act) if isinstance(des, ndarray): des = des.dtype else: des = dtype(des) assert_(act == des, 'dtype mismatch: "%s" (should be "%s") ' % (act, des)) # XXX: This function should not be defined here, but somewhere in # scipy.linalg namespace def symrand(dim_or_eigv): """Return a random symmetric (Hermitian) matrix. If 'dim_or_eigv' is an integer N, return a NxN matrix, with eigenvalues uniformly distributed on (-1,1). If 'dim_or_eigv' is 1-D real array 'a', return a matrix whose eigenvalues are 'a'. """ if isinstance(dim_or_eigv, int): dim = dim_or_eigv d = random(dim)*2 - 1 elif (isinstance(dim_or_eigv, ndarray) and len(dim_or_eigv.shape) == 1): dim = dim_or_eigv.shape[0] d = dim_or_eigv else: raise TypeError("input type not supported.") v = random_rot(dim) h = dot(dot(v.T.conj(), diag(d)), v) # to avoid roundoff errors, symmetrize the matrix (again) h = 0.5*(h.T+h) return h # XXX: This function should not be defined here, but somewhere in # scipy.linalg namespace def random_rot(dim): """Return a random rotation matrix, drawn from the Haar distribution (the only uniform distribution on SO(n)). The algorithm is described in the paper Stewart, G.W., 'The efficient generation of random orthogonal matrices with an application to condition estimators', SIAM Journal on Numerical Analysis, 17(3), pp. 403-409, 1980. 
For more information see http://en.wikipedia.org/wiki/Orthogonal_matrix#Randomization""" H = eye(dim) D = ones((dim,)) for n in range(1, dim): x = normal(size=(dim-n+1,)) D[n-1] = sign(x[0]) x[0] -= D[n-1]*sqrt((x*x).sum()) # Householder transformation Hx = eye(dim-n+1) - 2.*outer(x, x)/(x*x).sum() mat = eye(dim) mat[n-1:,n-1:] = Hx H = dot(H, mat) # Fix the last sign such that the determinant is 1 D[-1] = -D.prod() H = (D*H.T).T return H class TestEigVals(object): def test_simple(self): a = [[1,2,3],[1,2,3],[2,5,6]] w = eigvals(a) exact_w = [(9+sqrt(93))/2,0,(9-sqrt(93))/2] assert_array_almost_equal(w,exact_w) def test_simple_tr(self): a = array([[1,2,3],[1,2,3],[2,5,6]],'d') a = transpose(a).copy() a = transpose(a) w = eigvals(a) exact_w = [(9+sqrt(93))/2,0,(9-sqrt(93))/2] assert_array_almost_equal(w,exact_w) def test_simple_complex(self): a = [[1,2,3],[1,2,3],[2,5,6+1j]] w = eigvals(a) exact_w = [(9+1j+sqrt(92+6j))/2, 0, (9+1j-sqrt(92+6j))/2] assert_array_almost_equal(w,exact_w) def test_finite(self): a = [[1,2,3],[1,2,3],[2,5,6]] w = eigvals(a, check_finite=False) exact_w = [(9+sqrt(93))/2,0,(9-sqrt(93))/2] assert_array_almost_equal(w,exact_w) class TestEig(object): def test_simple(self): a = [[1,2,3],[1,2,3],[2,5,6]] w,v = eig(a) exact_w = [(9+sqrt(93))/2,0,(9-sqrt(93))/2] v0 = array([1,1,(1+sqrt(93)/3)/2]) v1 = array([3.,0,-1]) v2 = array([1,1,(1-sqrt(93)/3)/2]) v0 = v0 / sqrt(dot(v0,transpose(v0))) v1 = v1 / sqrt(dot(v1,transpose(v1))) v2 = v2 / sqrt(dot(v2,transpose(v2))) assert_array_almost_equal(w,exact_w) assert_array_almost_equal(v0,v[:,0]*sign(v[0,0])) assert_array_almost_equal(v1,v[:,1]*sign(v[0,1])) assert_array_almost_equal(v2,v[:,2]*sign(v[0,2])) for i in range(3): assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i]) w,v = eig(a,left=1,right=0) for i in range(3): assert_array_almost_equal(dot(transpose(a),v[:,i]),w[i]*v[:,i]) def test_simple_complex_eig(self): a = [[1,2],[-2,1]] w,vl,vr = eig(a,left=1,right=1) assert_array_almost_equal(w, 
array([1+2j, 1-2j])) for i in range(2): assert_array_almost_equal(dot(a,vr[:,i]),w[i]*vr[:,i]) for i in range(2): assert_array_almost_equal(dot(conjugate(transpose(a)),vl[:,i]), conjugate(w[i])*vl[:,i]) def test_simple_complex(self): a = [[1,2,3],[1,2,3],[2,5,6+1j]] w,vl,vr = eig(a,left=1,right=1) for i in range(3): assert_array_almost_equal(dot(a,vr[:,i]),w[i]*vr[:,i]) for i in range(3): assert_array_almost_equal(dot(conjugate(transpose(a)),vl[:,i]), conjugate(w[i])*vl[:,i]) def test_gh_3054(self): a = [[1]] b = [[0]] w, vr = eig(a, b, homogeneous_eigvals=True) assert_allclose(w[1,0], 0) assert_(w[0,0] != 0) assert_allclose(vr, 1) w, vr = eig(a, b) assert_equal(w, np.inf) assert_allclose(vr, 1) def _check_gen_eig(self, A, B): if B is not None: A, B = asarray(A), asarray(B) B0 = B else: A = asarray(A) B0 = B B = np.eye(*A.shape) msg = "\n%r\n%r" % (A, B) # Eigenvalues in homogeneous coordinates w, vr = eig(A, B0, homogeneous_eigvals=True) wt = eigvals(A, B0, homogeneous_eigvals=True) val1 = dot(A, vr) * w[1,:] val2 = dot(B, vr) * w[0,:] for i in range(val1.shape[1]): assert_allclose(val1[:,i], val2[:,i], rtol=1e-13, atol=1e-13, err_msg=msg) if B0 is None: assert_allclose(w[1,:], 1) assert_allclose(wt[1,:], 1) perm = np.lexsort(w) permt = np.lexsort(wt) assert_allclose(w[:,perm], wt[:,permt], atol=1e-7, rtol=1e-7, err_msg=msg) length = np.empty(len(vr)) for i in xrange(len(vr)): length[i] = norm(vr[:,i]) assert_allclose(length, np.ones(length.size), err_msg=msg, atol=1e-7, rtol=1e-7) # Convert homogeneous coordinates beta_nonzero = (w[1,:] != 0) wh = w[0,beta_nonzero] / w[1,beta_nonzero] # Eigenvalues in standard coordinates w, vr = eig(A, B0) wt = eigvals(A, B0) val1 = dot(A, vr) val2 = dot(B, vr) * w res = val1 - val2 for i in range(res.shape[1]): if all(isfinite(res[:,i])): assert_allclose(res[:,i], 0, rtol=1e-13, atol=1e-13, err_msg=msg) w_fin = w[isfinite(w)] wt_fin = wt[isfinite(wt)] perm = argsort(clear_fuss(w_fin)) permt = argsort(clear_fuss(wt_fin)) 
assert_allclose(w[perm], wt[permt], atol=1e-7, rtol=1e-7, err_msg=msg) length = np.empty(len(vr)) for i in xrange(len(vr)): length[i] = norm(vr[:,i]) assert_allclose(length, np.ones(length.size), err_msg=msg) # Compare homogeneous and nonhomogeneous versions assert_allclose(sort(wh), sort(w[np.isfinite(w)])) @pytest.mark.xfail(reason="See gh-2254.") def test_singular(self): # Example taken from # http://www.cs.umu.se/research/nla/singular_pairs/guptri/matlab.html A = array(([22,34,31,31,17], [45,45,42,19,29], [39,47,49,26,34], [27,31,26,21,15], [38,44,44,24,30])) B = array(([13,26,25,17,24], [31,46,40,26,37], [26,40,19,25,25], [16,25,27,14,23], [24,35,18,21,22])) olderr = np.seterr(all='ignore') try: self._check_gen_eig(A, B) finally: np.seterr(**olderr) def test_falker(self): # Test matrices giving some Nan generalized eigenvalues. M = diag(array(([1,0,3]))) K = array(([2,-1,-1],[-1,2,-1],[-1,-1,2])) D = array(([1,-1,0],[-1,1,0],[0,0,0])) Z = zeros((3,3)) I3 = identity(3) A = bmat([[I3, Z], [Z, -K]]) B = bmat([[Z, I3], [M, D]]) olderr = np.seterr(all='ignore') try: self._check_gen_eig(A, B) finally: np.seterr(**olderr) def test_bad_geneig(self): # Ticket #709 (strange return values from DGGEV) def matrices(omega): c1 = -9 + omega**2 c2 = 2*omega A = [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, c1, 0], [0, 0, 0, c1]] B = [[0, 0, 1, 0], [0, 0, 0, 1], [1, 0, 0, -c2], [0, 1, c2, 0]] return A, B # With a buggy LAPACK, this can fail for different omega on different # machines -- so we need to test several values olderr = np.seterr(all='ignore') try: for k in xrange(100): A, B = matrices(omega=k*5./100) self._check_gen_eig(A, B) finally: np.seterr(**olderr) def test_make_eigvals(self): # Step through all paths in _make_eigvals seed(1234) # Real eigenvalues A = symrand(3) self._check_gen_eig(A, None) B = symrand(3) self._check_gen_eig(A, B) # Complex eigenvalues A = random((3, 3)) + 1j*random((3, 3)) self._check_gen_eig(A, None) B = random((3, 3)) + 1j*random((3, 3)) 
self._check_gen_eig(A, B) def test_check_finite(self): a = [[1,2,3],[1,2,3],[2,5,6]] w,v = eig(a, check_finite=False) exact_w = [(9+sqrt(93))/2,0,(9-sqrt(93))/2] v0 = array([1,1,(1+sqrt(93)/3)/2]) v1 = array([3.,0,-1]) v2 = array([1,1,(1-sqrt(93)/3)/2]) v0 = v0 / sqrt(dot(v0,transpose(v0))) v1 = v1 / sqrt(dot(v1,transpose(v1))) v2 = v2 / sqrt(dot(v2,transpose(v2))) assert_array_almost_equal(w,exact_w) assert_array_almost_equal(v0,v[:,0]*sign(v[0,0])) assert_array_almost_equal(v1,v[:,1]*sign(v[0,1])) assert_array_almost_equal(v2,v[:,2]*sign(v[0,2])) for i in range(3): assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i]) def test_not_square_error(self): """Check that passing a non-square array raises a ValueError.""" A = np.arange(6).reshape(3,2) assert_raises(ValueError, eig, A) def test_shape_mismatch(self): """Check that passing arrays of with different shapes raises a ValueError.""" A = identity(2) B = np.arange(9.0).reshape(3,3) assert_raises(ValueError, eig, A, B) assert_raises(ValueError, eig, B, A) class TestEigBanded(object): def setup_method(self): self.create_bandmat() def create_bandmat(self): """Create the full matrix `self.fullmat` and the corresponding band matrix `self.bandmat`.""" N = 10 self.KL = 2 # number of subdiagonals (below the diagonal) self.KU = 2 # number of superdiagonals (above the diagonal) # symmetric band matrix self.sym_mat = (diag(1.0*ones(N)) + diag(-1.0*ones(N-1), -1) + diag(-1.0*ones(N-1), 1) + diag(-2.0*ones(N-2), -2) + diag(-2.0*ones(N-2), 2)) # hermitian band matrix self.herm_mat = (diag(-1.0*ones(N)) + 1j*diag(1.0*ones(N-1), -1) - 1j*diag(1.0*ones(N-1), 1) + diag(-2.0*ones(N-2), -2) + diag(-2.0*ones(N-2), 2)) # general real band matrix self.real_mat = (diag(1.0*ones(N)) + diag(-1.0*ones(N-1), -1) + diag(-3.0*ones(N-1), 1) + diag(2.0*ones(N-2), -2) + diag(-2.0*ones(N-2), 2)) # general complex band matrix self.comp_mat = (1j*diag(1.0*ones(N)) + diag(-1.0*ones(N-1), -1) + 1j*diag(-3.0*ones(N-1), 1) + diag(2.0*ones(N-2), -2) + 
diag(-2.0*ones(N-2), 2)) # Eigenvalues and -vectors from linalg.eig ew, ev = linalg.eig(self.sym_mat) ew = ew.real args = argsort(ew) self.w_sym_lin = ew[args] self.evec_sym_lin = ev[:,args] ew, ev = linalg.eig(self.herm_mat) ew = ew.real args = argsort(ew) self.w_herm_lin = ew[args] self.evec_herm_lin = ev[:,args] # Extract upper bands from symmetric and hermitian band matrices # (for use in dsbevd, dsbevx, zhbevd, zhbevx # and their single precision versions) LDAB = self.KU + 1 self.bandmat_sym = zeros((LDAB, N), dtype=float) self.bandmat_herm = zeros((LDAB, N), dtype=complex) for i in xrange(LDAB): self.bandmat_sym[LDAB-i-1,i:N] = diag(self.sym_mat, i) self.bandmat_herm[LDAB-i-1,i:N] = diag(self.herm_mat, i) # Extract bands from general real and complex band matrix # (for use in dgbtrf, dgbtrs and their single precision versions) LDAB = 2*self.KL + self.KU + 1 self.bandmat_real = zeros((LDAB, N), dtype=float) self.bandmat_real[2*self.KL,:] = diag(self.real_mat) # diagonal for i in xrange(self.KL): # superdiagonals self.bandmat_real[2*self.KL-1-i,i+1:N] = diag(self.real_mat, i+1) # subdiagonals self.bandmat_real[2*self.KL+1+i,0:N-1-i] = diag(self.real_mat,-i-1) self.bandmat_comp = zeros((LDAB, N), dtype=complex) self.bandmat_comp[2*self.KL,:] = diag(self.comp_mat) # diagonal for i in xrange(self.KL): # superdiagonals self.bandmat_comp[2*self.KL-1-i,i+1:N] = diag(self.comp_mat, i+1) # subdiagonals self.bandmat_comp[2*self.KL+1+i,0:N-1-i] = diag(self.comp_mat,-i-1) # absolute value for linear equation system A*x = b self.b = 1.0*arange(N) self.bc = self.b * (1 + 1j) ##################################################################### def test_dsbev(self): """Compare dsbev eigenvalues and eigenvectors with the result of linalg.eig.""" w, evec, info = dsbev(self.bandmat_sym, compute_v=1) evec_ = evec[:,argsort(w)] assert_array_almost_equal(sort(w), self.w_sym_lin) assert_array_almost_equal(abs(evec_), abs(self.evec_sym_lin)) def test_dsbevd(self): """Compare dsbevd 
eigenvalues and eigenvectors with the result of linalg.eig.""" w, evec, info = dsbevd(self.bandmat_sym, compute_v=1) evec_ = evec[:,argsort(w)] assert_array_almost_equal(sort(w), self.w_sym_lin) assert_array_almost_equal(abs(evec_), abs(self.evec_sym_lin)) def test_dsbevx(self): """Compare dsbevx eigenvalues and eigenvectors with the result of linalg.eig.""" N,N = shape(self.sym_mat) ## Achtung: Argumente 0.0,0.0,range? w, evec, num, ifail, info = dsbevx(self.bandmat_sym, 0.0, 0.0, 1, N, compute_v=1, range=2) evec_ = evec[:,argsort(w)] assert_array_almost_equal(sort(w), self.w_sym_lin) assert_array_almost_equal(abs(evec_), abs(self.evec_sym_lin)) def test_zhbevd(self): """Compare zhbevd eigenvalues and eigenvectors with the result of linalg.eig.""" w, evec, info = zhbevd(self.bandmat_herm, compute_v=1) evec_ = evec[:,argsort(w)] assert_array_almost_equal(sort(w), self.w_herm_lin) assert_array_almost_equal(abs(evec_), abs(self.evec_herm_lin)) def test_zhbevx(self): """Compare zhbevx eigenvalues and eigenvectors with the result of linalg.eig.""" N,N = shape(self.herm_mat) ## Achtung: Argumente 0.0,0.0,range? 
        # Tail of test_zhbevx: select eigenvalues 1..N by index (range=2)
        # and compare against the dense linalg.eig reference.
        w, evec, num, ifail, info = zhbevx(self.bandmat_herm, 0.0, 0.0, 1, N,
                                           compute_v=1, range=2)
        evec_ = evec[:,argsort(w)]
        assert_array_almost_equal(sort(w), self.w_herm_lin)
        # Compare moduli -- eigenvectors are only defined up to a phase.
        assert_array_almost_equal(abs(evec_), abs(self.evec_herm_lin))

    def test_eigvals_banded(self):
        """Compare eigenvalues of eigvals_banded with those of linalg.eig."""
        w_sym = eigvals_banded(self.bandmat_sym)
        w_sym = w_sym.real
        assert_array_almost_equal(sort(w_sym), self.w_sym_lin)

        w_herm = eigvals_banded(self.bandmat_herm)
        w_herm = w_herm.real
        assert_array_almost_equal(sort(w_herm), self.w_herm_lin)

        # extracting eigenvalues with respect to an index range
        ind1 = 2
        # np.longlong exercises a non-builtin integer type as an index bound.
        ind2 = np.longlong(6)
        w_sym_ind = eigvals_banded(self.bandmat_sym,
                                   select='i', select_range=(ind1, ind2))
        assert_array_almost_equal(sort(w_sym_ind),
                                  self.w_sym_lin[ind1:ind2+1])
        w_herm_ind = eigvals_banded(self.bandmat_herm,
                                    select='i', select_range=(ind1, ind2))
        assert_array_almost_equal(sort(w_herm_ind),
                                  self.w_herm_lin[ind1:ind2+1])

        # extracting eigenvalues with respect to a value range; the small
        # epsilons widen the interval so the boundary eigenvalues are included.
        v_lower = self.w_sym_lin[ind1] - 1.0e-5
        v_upper = self.w_sym_lin[ind2] + 1.0e-5
        w_sym_val = eigvals_banded(self.bandmat_sym,
                                   select='v', select_range=(v_lower, v_upper))
        assert_array_almost_equal(sort(w_sym_val),
                                  self.w_sym_lin[ind1:ind2+1])

        v_lower = self.w_herm_lin[ind1] - 1.0e-5
        v_upper = self.w_herm_lin[ind2] + 1.0e-5
        w_herm_val = eigvals_banded(self.bandmat_herm,
                                    select='v', select_range=(v_lower, v_upper))
        assert_array_almost_equal(sort(w_herm_val),
                                  self.w_herm_lin[ind1:ind2+1])

        # check_finite=False path should give identical results.
        w_sym = eigvals_banded(self.bandmat_sym, check_finite=False)
        w_sym = w_sym.real
        assert_array_almost_equal(sort(w_sym), self.w_sym_lin)

    def test_eig_banded(self):
        """Compare eigenvalues and eigenvectors of eig_banded
           with those of linalg.eig.
""" w_sym, evec_sym = eig_banded(self.bandmat_sym) evec_sym_ = evec_sym[:,argsort(w_sym.real)] assert_array_almost_equal(sort(w_sym), self.w_sym_lin) assert_array_almost_equal(abs(evec_sym_), abs(self.evec_sym_lin)) w_herm, evec_herm = eig_banded(self.bandmat_herm) evec_herm_ = evec_herm[:,argsort(w_herm.real)] assert_array_almost_equal(sort(w_herm), self.w_herm_lin) assert_array_almost_equal(abs(evec_herm_), abs(self.evec_herm_lin)) # extracting eigenvalues with respect to an index range ind1 = 2 ind2 = 6 w_sym_ind, evec_sym_ind = eig_banded(self.bandmat_sym, select='i', select_range=(ind1, ind2)) assert_array_almost_equal(sort(w_sym_ind), self.w_sym_lin[ind1:ind2+1]) assert_array_almost_equal(abs(evec_sym_ind), abs(self.evec_sym_lin[:,ind1:ind2+1])) w_herm_ind, evec_herm_ind = eig_banded(self.bandmat_herm, select='i', select_range=(ind1, ind2)) assert_array_almost_equal(sort(w_herm_ind), self.w_herm_lin[ind1:ind2+1]) assert_array_almost_equal(abs(evec_herm_ind), abs(self.evec_herm_lin[:,ind1:ind2+1])) # extracting eigenvalues with respect to a value range v_lower = self.w_sym_lin[ind1] - 1.0e-5 v_upper = self.w_sym_lin[ind2] + 1.0e-5 w_sym_val, evec_sym_val = eig_banded(self.bandmat_sym, select='v', select_range=(v_lower, v_upper)) assert_array_almost_equal(sort(w_sym_val), self.w_sym_lin[ind1:ind2+1]) assert_array_almost_equal(abs(evec_sym_val), abs(self.evec_sym_lin[:,ind1:ind2+1])) v_lower = self.w_herm_lin[ind1] - 1.0e-5 v_upper = self.w_herm_lin[ind2] + 1.0e-5 w_herm_val, evec_herm_val = eig_banded(self.bandmat_herm, select='v', select_range=(v_lower, v_upper)) assert_array_almost_equal(sort(w_herm_val), self.w_herm_lin[ind1:ind2+1]) assert_array_almost_equal(abs(evec_herm_val), abs(self.evec_herm_lin[:,ind1:ind2+1])) w_sym, evec_sym = eig_banded(self.bandmat_sym, check_finite=False) evec_sym_ = evec_sym[:,argsort(w_sym.real)] assert_array_almost_equal(sort(w_sym), self.w_sym_lin) assert_array_almost_equal(abs(evec_sym_), abs(self.evec_sym_lin)) def 
test_dgbtrf(self): """Compare dgbtrf LU factorisation with the LU factorisation result of linalg.lu.""" M,N = shape(self.real_mat) lu_symm_band, ipiv, info = dgbtrf(self.bandmat_real, self.KL, self.KU) # extract matrix u from lu_symm_band u = diag(lu_symm_band[2*self.KL,:]) for i in xrange(self.KL + self.KU): u += diag(lu_symm_band[2*self.KL-1-i,i+1:N], i+1) p_lin, l_lin, u_lin = lu(self.real_mat, permute_l=0) assert_array_almost_equal(u, u_lin) def test_zgbtrf(self): """Compare zgbtrf LU factorisation with the LU factorisation result of linalg.lu.""" M,N = shape(self.comp_mat) lu_symm_band, ipiv, info = zgbtrf(self.bandmat_comp, self.KL, self.KU) # extract matrix u from lu_symm_band u = diag(lu_symm_band[2*self.KL,:]) for i in xrange(self.KL + self.KU): u += diag(lu_symm_band[2*self.KL-1-i,i+1:N], i+1) p_lin, l_lin, u_lin = lu(self.comp_mat, permute_l=0) assert_array_almost_equal(u, u_lin) def test_dgbtrs(self): """Compare dgbtrs solutions for linear equation system A*x = b with solutions of linalg.solve.""" lu_symm_band, ipiv, info = dgbtrf(self.bandmat_real, self.KL, self.KU) y, info = dgbtrs(lu_symm_band, self.KL, self.KU, self.b, ipiv) y_lin = linalg.solve(self.real_mat, self.b) assert_array_almost_equal(y, y_lin) def test_zgbtrs(self): """Compare zgbtrs solutions for linear equation system A*x = b with solutions of linalg.solve.""" lu_symm_band, ipiv, info = zgbtrf(self.bandmat_comp, self.KL, self.KU) y, info = zgbtrs(lu_symm_band, self.KL, self.KU, self.bc, ipiv) y_lin = linalg.solve(self.comp_mat, self.bc) assert_array_almost_equal(y, y_lin) class TestEigTridiagonal(object): def setup_method(self): self.create_trimat() def create_trimat(self): """Create the full matrix `self.fullmat`, `self.d`, and `self.e`.""" N = 10 # symmetric band matrix self.d = 1.0*ones(N) self.e = -1.0*ones(N-1) self.full_mat = (diag(self.d) + diag(self.e, -1) + diag(self.e, 1)) ew, ev = linalg.eig(self.full_mat) ew = ew.real args = argsort(ew) self.w = ew[args] self.evec = ev[:, 
args] def test_degenerate(self): """Test error conditions.""" # Wrong sizes assert_raises(ValueError, eigvalsh_tridiagonal, self.d, self.e[:-1]) # Must be real assert_raises(TypeError, eigvalsh_tridiagonal, self.d, self.e * 1j) # Bad driver assert_raises(TypeError, eigvalsh_tridiagonal, self.d, self.e, lapack_driver=1.) assert_raises(ValueError, eigvalsh_tridiagonal, self.d, self.e, lapack_driver='foo') # Bad bounds assert_raises(ValueError, eigvalsh_tridiagonal, self.d, self.e, select='i', select_range=(0, -1)) def test_eigvalsh_tridiagonal(self): """Compare eigenvalues of eigvalsh_tridiagonal with those of eig.""" # can't use ?STERF with subselection for driver in ('sterf', 'stev', 'stebz', 'stemr', 'auto'): w = eigvalsh_tridiagonal(self.d, self.e, lapack_driver=driver) assert_array_almost_equal(sort(w), self.w) for driver in ('sterf', 'stev'): assert_raises(ValueError, eigvalsh_tridiagonal, self.d, self.e, lapack_driver='stev', select='i', select_range=(0, 1)) for driver in ('stebz', 'stemr', 'auto'): # extracting eigenvalues with respect to the full index range w_ind = eigvalsh_tridiagonal( self.d, self.e, select='i', select_range=(0, len(self.d)-1), lapack_driver=driver) assert_array_almost_equal(sort(w_ind), self.w) # extracting eigenvalues with respect to an index range ind1 = 2 ind2 = 6 w_ind = eigvalsh_tridiagonal( self.d, self.e, select='i', select_range=(ind1, ind2), lapack_driver=driver) assert_array_almost_equal(sort(w_ind), self.w[ind1:ind2+1]) # extracting eigenvalues with respect to a value range v_lower = self.w[ind1] - 1.0e-5 v_upper = self.w[ind2] + 1.0e-5 w_val = eigvalsh_tridiagonal( self.d, self.e, select='v', select_range=(v_lower, v_upper), lapack_driver=driver) assert_array_almost_equal(sort(w_val), self.w[ind1:ind2+1]) def test_eigh_tridiagonal(self): """Compare eigenvalues and eigenvectors of eigh_tridiagonal with those of eig. 
""" # can't use ?STERF when eigenvectors are requested assert_raises(ValueError, eigh_tridiagonal, self.d, self.e, lapack_driver='sterf') for driver in ('stebz', 'stev', 'stemr', 'auto'): w, evec = eigh_tridiagonal(self.d, self.e, lapack_driver=driver) evec_ = evec[:, argsort(w)] assert_array_almost_equal(sort(w), self.w) assert_array_almost_equal(abs(evec_), abs(self.evec)) assert_raises(ValueError, eigh_tridiagonal, self.d, self.e, lapack_driver='stev', select='i', select_range=(0, 1)) for driver in ('stebz', 'stemr', 'auto'): # extracting eigenvalues with respect to an index range ind1 = 0 ind2 = len(self.d)-1 w, evec = eigh_tridiagonal( self.d, self.e, select='i', select_range=(ind1, ind2), lapack_driver=driver) assert_array_almost_equal(sort(w), self.w) assert_array_almost_equal(abs(evec), abs(self.evec)) ind1 = 2 ind2 = 6 w, evec = eigh_tridiagonal( self.d, self.e, select='i', select_range=(ind1, ind2), lapack_driver=driver) assert_array_almost_equal(sort(w), self.w[ind1:ind2+1]) assert_array_almost_equal(abs(evec), abs(self.evec[:, ind1:ind2+1])) # extracting eigenvalues with respect to a value range v_lower = self.w[ind1] - 1.0e-5 v_upper = self.w[ind2] + 1.0e-5 w, evec = eigh_tridiagonal( self.d, self.e, select='v', select_range=(v_lower, v_upper), lapack_driver=driver) assert_array_almost_equal(sort(w), self.w[ind1:ind2+1]) assert_array_almost_equal(abs(evec), abs(self.evec[:, ind1:ind2+1])) def test_eigh(): DIM = 6 v = {'dim': (DIM,), 'dtype': ('f','d','F','D'), 'overwrite': (True, False), 'lower': (True, False), 'turbo': (True, False), 'eigvals': (None, (2, DIM-2))} for dim in v['dim']: for typ in v['dtype']: for overwrite in v['overwrite']: for turbo in v['turbo']: for eigenvalues in v['eigvals']: for lower in v['lower']: eigenhproblem_standard( 'ordinary', dim, typ, overwrite, lower, turbo, eigenvalues) eigenhproblem_general( 'general ', dim, typ, overwrite, lower, turbo, eigenvalues) def test_eigh_of_sparse(): # This tests the rejection of inputs 
that eigh cannot currently handle. import scipy.sparse a = scipy.sparse.identity(2).tocsc() b = np.atleast_2d(a) assert_raises(ValueError, eigh, a) assert_raises(ValueError, eigh, b) def _complex_symrand(dim, dtype): a1, a2 = symrand(dim), symrand(dim) # add antisymmetric matrix as imag part a = a1 + 1j*(triu(a2)-tril(a2)) return a.astype(dtype) def eigenhproblem_standard(desc, dim, dtype, overwrite, lower, turbo, eigenvalues): """Solve a standard eigenvalue problem.""" if iscomplex(empty(1, dtype=dtype)): a = _complex_symrand(dim, dtype) else: a = symrand(dim).astype(dtype) if overwrite: a_c = a.copy() else: a_c = a w, z = eigh(a, overwrite_a=overwrite, lower=lower, eigvals=eigenvalues) assert_dtype_equal(z.dtype, dtype) w = w.astype(dtype) diag_ = diag(dot(z.T.conj(), dot(a_c, z))).real assert_array_almost_equal(diag_, w, DIGITS[dtype]) def eigenhproblem_general(desc, dim, dtype, overwrite, lower, turbo, eigenvalues): """Solve a generalized eigenvalue problem.""" if iscomplex(empty(1, dtype=dtype)): a = _complex_symrand(dim, dtype) b = _complex_symrand(dim, dtype)+diag([2.1]*dim).astype(dtype) else: a = symrand(dim).astype(dtype) b = symrand(dim).astype(dtype)+diag([2.1]*dim).astype(dtype) if overwrite: a_c, b_c = a.copy(), b.copy() else: a_c, b_c = a, b w, z = eigh(a, b, overwrite_a=overwrite, lower=lower, overwrite_b=overwrite, turbo=turbo, eigvals=eigenvalues) assert_dtype_equal(z.dtype, dtype) w = w.astype(dtype) diag1_ = diag(dot(z.T.conj(), dot(a_c, z))).real assert_array_almost_equal(diag1_, w, DIGITS[dtype]) diag2_ = diag(dot(z.T.conj(), dot(b_c, z))).real assert_array_almost_equal(diag2_, ones(diag2_.shape[0]), DIGITS[dtype]) def test_eigh_integer(): a = array([[1,2],[2,7]]) b = array([[3,1],[1,5]]) w,z = eigh(a) w,z = eigh(a,b) class TestLU(object): def setup_method(self): self.a = array([[1,2,3],[1,2,3],[2,5,6]]) self.ca = array([[1,2,3],[1,2,3],[2,5j,6]]) # Those matrices are more robust to detect problems in permutation # matrices than the ones above 
        # These fixtures (continued from setup_method) exercise LU on
        # matrices whose pivoting is non-trivial.
        self.b = array([[1,2,3],[4,5,6],[7,8,9]])
        self.cb = array([[1j,2j,3j],[4j,5j,6j],[7j,8j,9j]])

        # Rectangular matrices
        self.hrect = array([[1, 2, 3, 4],
                            [5, 6, 7, 8],
                            [9, 10, 12, 12]])
        self.chrect = 1.j * array([[1, 2, 3, 4],
                                   [5, 6, 7, 8],
                                   [9, 10, 12, 12]])

        self.vrect = array([[1, 2, 3],
                            [4, 5, 6],
                            [7, 8, 9],
                            [10, 12, 12]])
        self.cvrect = 1.j * array([[1, 2, 3],
                                   [4, 5, 6],
                                   [7, 8, 9],
                                   [10, 12, 12]])

        # Medium sizes matrices
        self.med = random((30, 40))
        self.cmed = random((30, 40)) + 1.j * random((30, 40))

    def _test_common(self, data):
        """Check both lu() output conventions reconstruct `data`:
        P*L*U == data and (PL)*U == data."""
        p,l,u = lu(data)
        assert_array_almost_equal(dot(dot(p,l),u),data)
        pl,u = lu(data,permute_l=1)
        assert_array_almost_equal(dot(pl,u),data)

    # Simple tests
    def test_simple(self):
        self._test_common(self.a)

    def test_simple_complex(self):
        self._test_common(self.ca)

    def test_simple2(self):
        self._test_common(self.b)

    def test_simple2_complex(self):
        self._test_common(self.cb)

    # rectangular matrices tests
    def test_hrectangular(self):
        self._test_common(self.hrect)

    def test_vrectangular(self):
        self._test_common(self.vrect)

    def test_hrectangular_complex(self):
        self._test_common(self.chrect)

    def test_vrectangular_complex(self):
        self._test_common(self.cvrect)

    # Bigger matrices
    def test_medium1(self):
        """Check lu decomposition on medium size, rectangular matrix."""
        self._test_common(self.med)

    def test_medium1_complex(self):
        """Check lu decomposition on medium size, rectangular matrix."""
        self._test_common(self.cmed)

    def test_check_finite(self):
        # check_finite=False must not change the factorization.
        p, l, u = lu(self.a, check_finite=False)
        assert_array_almost_equal(dot(dot(p,l),u), self.a)

    def test_simple_known(self):
        # Ticket #1458: lu_factor must give the same (known) result for
        # C- and Fortran-ordered input.
        for order in ['C', 'F']:
            A = np.array([[2, 1],[0, 1.]], order=order)
            LU, P = lu_factor(A)
            assert_array_almost_equal(LU, np.array([[2, 1], [0, 1]]))
            assert_array_equal(P, np.array([0, 1]))


class TestLUSingle(TestLU):
    """LU testers for single precision, real and double"""
    def setup_method(self):
        # Reuse the double-precision fixtures, downcast to single precision.
        TestLU.setup_method(self)
        self.a = self.a.astype(float32)
        self.ca = self.ca.astype(complex64)
self.b = self.b.astype(float32) self.cb = self.cb.astype(complex64) self.hrect = self.hrect.astype(float32) self.chrect = self.hrect.astype(complex64) self.vrect = self.vrect.astype(float32) self.cvrect = self.vrect.astype(complex64) self.med = self.vrect.astype(float32) self.cmed = self.vrect.astype(complex64) class TestLUSolve(object): def setup_method(self): seed(1234) def test_lu(self): a0 = random((10,10)) b = random((10,)) for order in ['C', 'F']: a = np.array(a0, order=order) x1 = solve(a,b) lu_a = lu_factor(a) x2 = lu_solve(lu_a,b) assert_array_almost_equal(x1,x2) def test_check_finite(self): a = random((10,10)) b = random((10,)) x1 = solve(a,b) lu_a = lu_factor(a, check_finite=False) x2 = lu_solve(lu_a,b, check_finite=False) assert_array_almost_equal(x1,x2) class TestSVD_GESDD(object): def setup_method(self): self.lapack_driver = 'gesdd' seed(1234) def test_degenerate(self): assert_raises(TypeError, svd, [[1.]], lapack_driver=1.) assert_raises(ValueError, svd, [[1.]], lapack_driver='foo') def test_simple(self): a = [[1,2,3],[1,20,3],[2,5,6]] for full_matrices in (True, False): u,s,vh = svd(a, full_matrices=full_matrices, lapack_driver=self.lapack_driver) assert_array_almost_equal(dot(transpose(u),u),identity(3)) assert_array_almost_equal(dot(transpose(vh),vh),identity(3)) sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char) for i in range(len(s)): sigma[i,i] = s[i] assert_array_almost_equal(dot(dot(u,sigma),vh),a) def test_simple_singular(self): a = [[1,2,3],[1,2,3],[2,5,6]] for full_matrices in (True, False): u,s,vh = svd(a, full_matrices=full_matrices, lapack_driver=self.lapack_driver) assert_array_almost_equal(dot(transpose(u),u),identity(3)) assert_array_almost_equal(dot(transpose(vh),vh),identity(3)) sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char) for i in range(len(s)): sigma[i,i] = s[i] assert_array_almost_equal(dot(dot(u,sigma),vh),a) def test_simple_underdet(self): a = [[1,2,3],[4,5,6]] for full_matrices in (True, False): u,s,vh = svd(a, 
full_matrices=full_matrices, lapack_driver=self.lapack_driver) assert_array_almost_equal(dot(transpose(u),u),identity(u.shape[0])) sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char) for i in range(len(s)): sigma[i,i] = s[i] assert_array_almost_equal(dot(dot(u,sigma),vh),a) def test_simple_overdet(self): a = [[1,2],[4,5],[3,4]] for full_matrices in (True, False): u,s,vh = svd(a, full_matrices=full_matrices, lapack_driver=self.lapack_driver) assert_array_almost_equal(dot(transpose(u),u), identity(u.shape[1])) assert_array_almost_equal(dot(transpose(vh),vh),identity(2)) sigma = zeros((u.shape[1],vh.shape[0]),s.dtype.char) for i in range(len(s)): sigma[i,i] = s[i] assert_array_almost_equal(dot(dot(u,sigma),vh),a) def test_random(self): n = 20 m = 15 for i in range(3): for a in [random([n,m]),random([m,n])]: for full_matrices in (True, False): u,s,vh = svd(a, full_matrices=full_matrices, lapack_driver=self.lapack_driver) assert_array_almost_equal(dot(transpose(u),u),identity(u.shape[1])) assert_array_almost_equal(dot(vh, transpose(vh)),identity(vh.shape[0])) sigma = zeros((u.shape[1],vh.shape[0]),s.dtype.char) for i in range(len(s)): sigma[i,i] = s[i] assert_array_almost_equal(dot(dot(u,sigma),vh),a) def test_simple_complex(self): a = [[1,2,3],[1,2j,3],[2,5,6]] for full_matrices in (True, False): u,s,vh = svd(a, full_matrices=full_matrices, lapack_driver=self.lapack_driver) assert_array_almost_equal(dot(conj(transpose(u)),u),identity(u.shape[1])) assert_array_almost_equal(dot(conj(transpose(vh)),vh),identity(vh.shape[0])) sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char) for i in range(len(s)): sigma[i,i] = s[i] assert_array_almost_equal(dot(dot(u,sigma),vh),a) def test_random_complex(self): n = 20 m = 15 for i in range(3): for full_matrices in (True, False): for a in [random([n,m]),random([m,n])]: a = a + 1j*random(list(a.shape)) u,s,vh = svd(a, full_matrices=full_matrices, lapack_driver=self.lapack_driver) 
assert_array_almost_equal(dot(conj(transpose(u)),u),identity(u.shape[1])) # This fails when [m,n] # assert_array_almost_equal(dot(conj(transpose(vh)),vh),identity(len(vh),dtype=vh.dtype.char)) sigma = zeros((u.shape[1],vh.shape[0]),s.dtype.char) for i in range(len(s)): sigma[i,i] = s[i] assert_array_almost_equal(dot(dot(u,sigma),vh),a) def test_crash_1580(self): sizes = [(13, 23), (30, 50), (60, 100)] np.random.seed(1234) for sz in sizes: for dt in [np.float32, np.float64, np.complex64, np.complex128]: a = np.random.rand(*sz).astype(dt) # should not crash svd(a, lapack_driver=self.lapack_driver) def test_check_finite(self): a = [[1,2,3],[1,20,3],[2,5,6]] u,s,vh = svd(a, check_finite=False, lapack_driver=self.lapack_driver) assert_array_almost_equal(dot(transpose(u),u),identity(3)) assert_array_almost_equal(dot(transpose(vh),vh),identity(3)) sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char) for i in range(len(s)): sigma[i,i] = s[i] assert_array_almost_equal(dot(dot(u,sigma),vh),a) def test_gh_5039(self): # This is a smoke test for https://github.com/scipy/scipy/issues/5039 # # The following is reported to raise "ValueError: On entry to DGESDD # parameter number 12 had an illegal value". # `interp1d([1,2,3,4], [1,2,3,4], kind='cubic')` # This is reported to only show up on LAPACK 3.0.3. 
# # The matrix below is taken from the call to # `B = _fitpack._bsplmat(order, xk)` in interpolate._find_smoothest b = np.array( [[0.16666667, 0.66666667, 0.16666667, 0., 0., 0.], [0., 0.16666667, 0.66666667, 0.16666667, 0., 0.], [0., 0., 0.16666667, 0.66666667, 0.16666667, 0.], [0., 0., 0., 0.16666667, 0.66666667, 0.16666667]]) svd(b, lapack_driver=self.lapack_driver) class TestSVD_GESVD(TestSVD_GESDD): def setup_method(self): self.lapack_driver = 'gesvd' seed(1234) class TestSVDVals(object): def test_empty(self): for a in [[]], np.empty((2, 0)), np.ones((0, 3)): s = svdvals(a) assert_equal(s, np.empty(0)) def test_simple(self): a = [[1,2,3],[1,2,3],[2,5,6]] s = svdvals(a) assert_(len(s) == 3) assert_(s[0] >= s[1] >= s[2]) def test_simple_underdet(self): a = [[1,2,3],[4,5,6]] s = svdvals(a) assert_(len(s) == 2) assert_(s[0] >= s[1]) def test_simple_overdet(self): a = [[1,2],[4,5],[3,4]] s = svdvals(a) assert_(len(s) == 2) assert_(s[0] >= s[1]) def test_simple_complex(self): a = [[1,2,3],[1,20,3j],[2,5,6]] s = svdvals(a) assert_(len(s) == 3) assert_(s[0] >= s[1] >= s[2]) def test_simple_underdet_complex(self): a = [[1,2,3],[4,5j,6]] s = svdvals(a) assert_(len(s) == 2) assert_(s[0] >= s[1]) def test_simple_overdet_complex(self): a = [[1,2],[4,5],[3j,4]] s = svdvals(a) assert_(len(s) == 2) assert_(s[0] >= s[1]) def test_check_finite(self): a = [[1,2,3],[1,2,3],[2,5,6]] s = svdvals(a, check_finite=False) assert_(len(s) == 3) assert_(s[0] >= s[1] >= s[2]) @pytest.mark.slow def test_crash_2609(self): np.random.seed(1234) a = np.random.rand(1500, 2800) # Shouldn't crash: svdvals(a) class TestDiagSVD(object): def test_simple(self): assert_array_almost_equal(diagsvd([1,0,0],3,3),[[1,0,0],[0,0,0],[0,0,0]]) class TestQR(object): def setup_method(self): seed(1234) def test_simple(self): a = [[8,2,3],[2,9,3],[5,3,6]] q,r = qr(a) assert_array_almost_equal(dot(transpose(q),q),identity(3)) assert_array_almost_equal(dot(q,r),a) def test_simple_left(self): a = 
[[8,2,3],[2,9,3],[5,3,6]] q,r = qr(a) c = [1, 2, 3] qc,r2 = qr_multiply(a, c, "left") assert_array_almost_equal(dot(q, c), qc) assert_array_almost_equal(r, r2) qc,r2 = qr_multiply(a, identity(3), "left") assert_array_almost_equal(q, qc) def test_simple_right(self): a = [[8,2,3],[2,9,3],[5,3,6]] q,r = qr(a) c = [1, 2, 3] qc,r2 = qr_multiply(a, c) assert_array_almost_equal(dot(c, q), qc) assert_array_almost_equal(r, r2) qc,r = qr_multiply(a, identity(3)) assert_array_almost_equal(q, qc) def test_simple_pivoting(self): a = np.asarray([[8,2,3],[2,9,3],[5,3,6]]) q,r,p = qr(a, pivoting=True) d = abs(diag(r)) assert_(all(d[1:] <= d[:-1])) assert_array_almost_equal(dot(transpose(q),q),identity(3)) assert_array_almost_equal(dot(q,r),a[:,p]) q2,r2 = qr(a[:,p]) assert_array_almost_equal(q,q2) assert_array_almost_equal(r,r2) def test_simple_left_pivoting(self): a = [[8,2,3],[2,9,3],[5,3,6]] q,r,jpvt = qr(a, pivoting=True) c = [1, 2, 3] qc,r,jpvt = qr_multiply(a, c, "left", True) assert_array_almost_equal(dot(q, c), qc) def test_simple_right_pivoting(self): a = [[8,2,3],[2,9,3],[5,3,6]] q,r,jpvt = qr(a, pivoting=True) c = [1, 2, 3] qc,r,jpvt = qr_multiply(a, c, pivoting=True) assert_array_almost_equal(dot(c, q), qc) def test_simple_trap(self): a = [[8,2,3],[2,9,3]] q,r = qr(a) assert_array_almost_equal(dot(transpose(q),q),identity(2)) assert_array_almost_equal(dot(q,r),a) def test_simple_trap_pivoting(self): a = np.asarray([[8,2,3],[2,9,3]]) q,r,p = qr(a, pivoting=True) d = abs(diag(r)) assert_(all(d[1:] <= d[:-1])) assert_array_almost_equal(dot(transpose(q),q),identity(2)) assert_array_almost_equal(dot(q,r),a[:,p]) q2,r2 = qr(a[:,p]) assert_array_almost_equal(q,q2) assert_array_almost_equal(r,r2) def test_simple_tall(self): # full version a = [[8,2],[2,9],[5,3]] q,r = qr(a) assert_array_almost_equal(dot(transpose(q),q),identity(3)) assert_array_almost_equal(dot(q,r),a) def test_simple_tall_pivoting(self): # full version pivoting a = np.asarray([[8,2],[2,9],[5,3]]) q,r,p = 
qr(a, pivoting=True) d = abs(diag(r)) assert_(all(d[1:] <= d[:-1])) assert_array_almost_equal(dot(transpose(q),q),identity(3)) assert_array_almost_equal(dot(q,r),a[:,p]) q2,r2 = qr(a[:,p]) assert_array_almost_equal(q,q2) assert_array_almost_equal(r,r2) def test_simple_tall_e(self): # economy version a = [[8,2],[2,9],[5,3]] q,r = qr(a, mode='economic') assert_array_almost_equal(dot(transpose(q),q),identity(2)) assert_array_almost_equal(dot(q,r),a) assert_equal(q.shape, (3,2)) assert_equal(r.shape, (2,2)) def test_simple_tall_e_pivoting(self): # economy version pivoting a = np.asarray([[8,2],[2,9],[5,3]]) q,r,p = qr(a, pivoting=True, mode='economic') d = abs(diag(r)) assert_(all(d[1:] <= d[:-1])) assert_array_almost_equal(dot(transpose(q),q),identity(2)) assert_array_almost_equal(dot(q,r),a[:,p]) q2,r2 = qr(a[:,p], mode='economic') assert_array_almost_equal(q,q2) assert_array_almost_equal(r,r2) def test_simple_tall_left(self): a = [[8,2],[2,9],[5,3]] q,r = qr(a, mode="economic") c = [1, 2] qc,r2 = qr_multiply(a, c, "left") assert_array_almost_equal(dot(q, c), qc) assert_array_almost_equal(r, r2) c = array([1,2,0]) qc,r2 = qr_multiply(a, c, "left", overwrite_c=True) assert_array_almost_equal(dot(q, c[:2]), qc) qc,r = qr_multiply(a, identity(2), "left") assert_array_almost_equal(qc, q) def test_simple_tall_left_pivoting(self): a = [[8,2],[2,9],[5,3]] q,r,jpvt = qr(a, mode="economic", pivoting=True) c = [1, 2] qc,r,kpvt = qr_multiply(a, c, "left", True) assert_array_equal(jpvt, kpvt) assert_array_almost_equal(dot(q, c), qc) qc,r,jpvt = qr_multiply(a, identity(2), "left", True) assert_array_almost_equal(qc, q) def test_simple_tall_right(self): a = [[8,2],[2,9],[5,3]] q,r = qr(a, mode="economic") c = [1, 2, 3] cq,r2 = qr_multiply(a, c) assert_array_almost_equal(dot(c, q), cq) assert_array_almost_equal(r, r2) cq,r = qr_multiply(a, identity(3)) assert_array_almost_equal(cq, q) def test_simple_tall_right_pivoting(self): a = [[8,2],[2,9],[5,3]] q,r,jpvt = qr(a, pivoting=True, 
mode="economic") c = [1, 2, 3] cq,r,jpvt = qr_multiply(a, c, pivoting=True) assert_array_almost_equal(dot(c, q), cq) cq,r,jpvt = qr_multiply(a, identity(3), pivoting=True) assert_array_almost_equal(cq, q) def test_simple_fat(self): # full version a = [[8,2,5],[2,9,3]] q,r = qr(a) assert_array_almost_equal(dot(transpose(q),q),identity(2)) assert_array_almost_equal(dot(q,r),a) assert_equal(q.shape, (2,2)) assert_equal(r.shape, (2,3)) def test_simple_fat_pivoting(self): # full version pivoting a = np.asarray([[8,2,5],[2,9,3]]) q,r,p = qr(a, pivoting=True) d = abs(diag(r)) assert_(all(d[1:] <= d[:-1])) assert_array_almost_equal(dot(transpose(q),q),identity(2)) assert_array_almost_equal(dot(q,r),a[:,p]) assert_equal(q.shape, (2,2)) assert_equal(r.shape, (2,3)) q2,r2 = qr(a[:,p]) assert_array_almost_equal(q,q2) assert_array_almost_equal(r,r2) def test_simple_fat_e(self): # economy version a = [[8,2,3],[2,9,5]] q,r = qr(a, mode='economic') assert_array_almost_equal(dot(transpose(q),q),identity(2)) assert_array_almost_equal(dot(q,r),a) assert_equal(q.shape, (2,2)) assert_equal(r.shape, (2,3)) def test_simple_fat_e_pivoting(self): # economy version pivoting a = np.asarray([[8,2,3],[2,9,5]]) q,r,p = qr(a, pivoting=True, mode='economic') d = abs(diag(r)) assert_(all(d[1:] <= d[:-1])) assert_array_almost_equal(dot(transpose(q),q),identity(2)) assert_array_almost_equal(dot(q,r),a[:,p]) assert_equal(q.shape, (2,2)) assert_equal(r.shape, (2,3)) q2,r2 = qr(a[:,p], mode='economic') assert_array_almost_equal(q,q2) assert_array_almost_equal(r,r2) def test_simple_fat_left(self): a = [[8,2,3],[2,9,5]] q,r = qr(a, mode="economic") c = [1, 2] qc,r2 = qr_multiply(a, c, "left") assert_array_almost_equal(dot(q, c), qc) assert_array_almost_equal(r, r2) qc,r = qr_multiply(a, identity(2), "left") assert_array_almost_equal(qc, q) def test_simple_fat_left_pivoting(self): a = [[8,2,3],[2,9,5]] q,r,jpvt = qr(a, mode="economic", pivoting=True) c = [1, 2] qc,r,jpvt = qr_multiply(a, c, "left", True) 
assert_array_almost_equal(dot(q, c), qc) qc,r,jpvt = qr_multiply(a, identity(2), "left", True) assert_array_almost_equal(qc, q) def test_simple_fat_right(self): a = [[8,2,3],[2,9,5]] q,r = qr(a, mode="economic") c = [1, 2] cq,r2 = qr_multiply(a, c) assert_array_almost_equal(dot(c, q), cq) assert_array_almost_equal(r, r2) cq,r = qr_multiply(a, identity(2)) assert_array_almost_equal(cq, q) def test_simple_fat_right_pivoting(self): a = [[8,2,3],[2,9,5]] q,r,jpvt = qr(a, pivoting=True, mode="economic") c = [1, 2] cq,r,jpvt = qr_multiply(a, c, pivoting=True) assert_array_almost_equal(dot(c, q), cq) cq,r,jpvt = qr_multiply(a, identity(2), pivoting=True) assert_array_almost_equal(cq, q) def test_simple_complex(self): a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]] q,r = qr(a) assert_array_almost_equal(dot(conj(transpose(q)),q),identity(3)) assert_array_almost_equal(dot(q,r),a) def test_simple_complex_left(self): a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]] q,r = qr(a) c = [1, 2, 3+4j] qc,r = qr_multiply(a, c, "left") assert_array_almost_equal(dot(q, c), qc) qc,r = qr_multiply(a, identity(3), "left") assert_array_almost_equal(q, qc) def test_simple_complex_right(self): a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]] q,r = qr(a) c = [1, 2, 3+4j] qc,r = qr_multiply(a, c) assert_array_almost_equal(dot(c, q), qc) qc,r = qr_multiply(a, identity(3)) assert_array_almost_equal(q, qc) def test_simple_tall_complex_left(self): a = [[8,2+3j],[2,9],[5+7j,3]] q,r = qr(a, mode="economic") c = [1, 2+2j] qc,r2 = qr_multiply(a, c, "left") assert_array_almost_equal(dot(q, c), qc) assert_array_almost_equal(r, r2) c = array([1,2,0]) qc,r2 = qr_multiply(a, c, "left", overwrite_c=True) assert_array_almost_equal(dot(q, c[:2]), qc) qc,r = qr_multiply(a, identity(2), "left") assert_array_almost_equal(qc, q) def test_simple_complex_left_conjugate(self): a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]] q,r = qr(a) c = [1, 2, 3+4j] qc,r = qr_multiply(a, c, "left", conjugate=True) assert_array_almost_equal(dot(q.conjugate(), c), qc) def 
test_simple_complex_tall_left_conjugate(self): a = [[3,3+4j],[5,2+2j],[3,2]] q,r = qr(a, mode='economic') c = [1, 3+4j] qc,r = qr_multiply(a, c, "left", conjugate=True) assert_array_almost_equal(dot(q.conjugate(), c), qc) def test_simple_complex_right_conjugate(self): a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]] q,r = qr(a) c = [1, 2, 3+4j] qc,r = qr_multiply(a, c, conjugate=True) assert_array_almost_equal(dot(c, q.conjugate()), qc) def test_simple_complex_pivoting(self): a = np.asarray([[3,3+4j,5],[5,2,2+7j],[3,2,7]]) q,r,p = qr(a, pivoting=True) d = abs(diag(r)) assert_(all(d[1:] <= d[:-1])) assert_array_almost_equal(dot(conj(transpose(q)),q),identity(3)) assert_array_almost_equal(dot(q,r),a[:,p]) q2,r2 = qr(a[:,p]) assert_array_almost_equal(q,q2) assert_array_almost_equal(r,r2) def test_simple_complex_left_pivoting(self): a = np.asarray([[3,3+4j,5],[5,2,2+7j],[3,2,7]]) q,r,jpvt = qr(a, pivoting=True) c = [1, 2, 3+4j] qc,r,jpvt = qr_multiply(a, c, "left", True) assert_array_almost_equal(dot(q, c), qc) def test_simple_complex_right_pivoting(self): a = np.asarray([[3,3+4j,5],[5,2,2+7j],[3,2,7]]) q,r,jpvt = qr(a, pivoting=True) c = [1, 2, 3+4j] qc,r,jpvt = qr_multiply(a, c, pivoting=True) assert_array_almost_equal(dot(c, q), qc) def test_random(self): n = 20 for k in range(2): a = random([n,n]) q,r = qr(a) assert_array_almost_equal(dot(transpose(q),q),identity(n)) assert_array_almost_equal(dot(q,r),a) def test_random_left(self): n = 20 for k in range(2): a = random([n,n]) q,r = qr(a) c = random([n]) qc,r = qr_multiply(a, c, "left") assert_array_almost_equal(dot(q, c), qc) qc,r = qr_multiply(a, identity(n), "left") assert_array_almost_equal(q, qc) def test_random_right(self): n = 20 for k in range(2): a = random([n,n]) q,r = qr(a) c = random([n]) cq,r = qr_multiply(a, c) assert_array_almost_equal(dot(c, q), cq) cq,r = qr_multiply(a, identity(n)) assert_array_almost_equal(q, cq) def test_random_pivoting(self): n = 20 for k in range(2): a = random([n,n]) q,r,p = qr(a, 
pivoting=True) d = abs(diag(r)) assert_(all(d[1:] <= d[:-1])) assert_array_almost_equal(dot(transpose(q),q),identity(n)) assert_array_almost_equal(dot(q,r),a[:,p]) q2,r2 = qr(a[:,p]) assert_array_almost_equal(q,q2) assert_array_almost_equal(r,r2) def test_random_tall(self): # full version m = 200 n = 100 for k in range(2): a = random([m,n]) q,r = qr(a) assert_array_almost_equal(dot(transpose(q),q),identity(m)) assert_array_almost_equal(dot(q,r),a) def test_random_tall_left(self): # full version m = 200 n = 100 for k in range(2): a = random([m,n]) q,r = qr(a, mode="economic") c = random([n]) qc,r = qr_multiply(a, c, "left") assert_array_almost_equal(dot(q, c), qc) qc,r = qr_multiply(a, identity(n), "left") assert_array_almost_equal(qc, q) def test_random_tall_right(self): # full version m = 200 n = 100 for k in range(2): a = random([m,n]) q,r = qr(a, mode="economic") c = random([m]) cq,r = qr_multiply(a, c) assert_array_almost_equal(dot(c, q), cq) cq,r = qr_multiply(a, identity(m)) assert_array_almost_equal(cq, q) def test_random_tall_pivoting(self): # full version pivoting m = 200 n = 100 for k in range(2): a = random([m,n]) q,r,p = qr(a, pivoting=True) d = abs(diag(r)) assert_(all(d[1:] <= d[:-1])) assert_array_almost_equal(dot(transpose(q),q),identity(m)) assert_array_almost_equal(dot(q,r),a[:,p]) q2,r2 = qr(a[:,p]) assert_array_almost_equal(q,q2) assert_array_almost_equal(r,r2) def test_random_tall_e(self): # economy version m = 200 n = 100 for k in range(2): a = random([m,n]) q,r = qr(a, mode='economic') assert_array_almost_equal(dot(transpose(q),q),identity(n)) assert_array_almost_equal(dot(q,r),a) assert_equal(q.shape, (m,n)) assert_equal(r.shape, (n,n)) def test_random_tall_e_pivoting(self): # economy version pivoting m = 200 n = 100 for k in range(2): a = random([m,n]) q,r,p = qr(a, pivoting=True, mode='economic') d = abs(diag(r)) assert_(all(d[1:] <= d[:-1])) assert_array_almost_equal(dot(transpose(q),q),identity(n)) 
assert_array_almost_equal(dot(q,r),a[:,p]) assert_equal(q.shape, (m,n)) assert_equal(r.shape, (n,n)) q2,r2 = qr(a[:,p], mode='economic') assert_array_almost_equal(q,q2) assert_array_almost_equal(r,r2) def test_random_trap(self): m = 100 n = 200 for k in range(2): a = random([m,n]) q,r = qr(a) assert_array_almost_equal(dot(transpose(q),q),identity(m)) assert_array_almost_equal(dot(q,r),a) def test_random_trap_pivoting(self): m = 100 n = 200 for k in range(2): a = random([m,n]) q,r,p = qr(a, pivoting=True) d = abs(diag(r)) assert_(all(d[1:] <= d[:-1])) assert_array_almost_equal(dot(transpose(q),q),identity(m)) assert_array_almost_equal(dot(q,r),a[:,p]) q2,r2 = qr(a[:,p]) assert_array_almost_equal(q,q2) assert_array_almost_equal(r,r2) def test_random_complex(self): n = 20 for k in range(2): a = random([n,n])+1j*random([n,n]) q,r = qr(a) assert_array_almost_equal(dot(conj(transpose(q)),q),identity(n)) assert_array_almost_equal(dot(q,r),a) def test_random_complex_left(self): n = 20 for k in range(2): a = random([n,n])+1j*random([n,n]) q,r = qr(a) c = random([n])+1j*random([n]) qc,r = qr_multiply(a, c, "left") assert_array_almost_equal(dot(q, c), qc) qc,r = qr_multiply(a, identity(n), "left") assert_array_almost_equal(q, qc) def test_random_complex_right(self): n = 20 for k in range(2): a = random([n,n])+1j*random([n,n]) q,r = qr(a) c = random([n])+1j*random([n]) cq,r = qr_multiply(a, c) assert_array_almost_equal(dot(c, q), cq) cq,r = qr_multiply(a, identity(n)) assert_array_almost_equal(q, cq) def test_random_complex_pivoting(self): n = 20 for k in range(2): a = random([n,n])+1j*random([n,n]) q,r,p = qr(a, pivoting=True) d = abs(diag(r)) assert_(all(d[1:] <= d[:-1])) assert_array_almost_equal(dot(conj(transpose(q)),q),identity(n)) assert_array_almost_equal(dot(q,r),a[:,p]) q2,r2 = qr(a[:,p]) assert_array_almost_equal(q,q2) assert_array_almost_equal(r,r2) def test_check_finite(self): a = [[8,2,3],[2,9,3],[5,3,6]] q,r = qr(a, check_finite=False) 
assert_array_almost_equal(dot(transpose(q),q),identity(3)) assert_array_almost_equal(dot(q,r),a) def test_lwork(self): a = [[8,2,3],[2,9,3],[5,3,6]] # Get comparison values q,r = qr(a, lwork=None) # Test against minimum valid lwork q2,r2 = qr(a, lwork=3) assert_array_almost_equal(q2,q) assert_array_almost_equal(r2,r) # Test against larger lwork q3,r3 = qr(a, lwork=10) assert_array_almost_equal(q3,q) assert_array_almost_equal(r3,r) # Test against explicit lwork=-1 q4,r4 = qr(a, lwork=-1) assert_array_almost_equal(q4,q) assert_array_almost_equal(r4,r) # Test against invalid lwork assert_raises(Exception, qr, (a,), {'lwork':0}) assert_raises(Exception, qr, (a,), {'lwork':2}) class TestRQ(object): def setup_method(self): seed(1234) def test_simple(self): a = [[8,2,3],[2,9,3],[5,3,6]] r,q = rq(a) assert_array_almost_equal(dot(q, transpose(q)),identity(3)) assert_array_almost_equal(dot(r,q),a) def test_r(self): a = [[8,2,3],[2,9,3],[5,3,6]] r,q = rq(a) r2 = rq(a, mode='r') assert_array_almost_equal(r, r2) def test_random(self): n = 20 for k in range(2): a = random([n,n]) r,q = rq(a) assert_array_almost_equal(dot(q, transpose(q)),identity(n)) assert_array_almost_equal(dot(r,q),a) def test_simple_trap(self): a = [[8,2,3],[2,9,3]] r,q = rq(a) assert_array_almost_equal(dot(transpose(q),q),identity(3)) assert_array_almost_equal(dot(r,q),a) def test_simple_tall(self): a = [[8,2],[2,9],[5,3]] r,q = rq(a) assert_array_almost_equal(dot(transpose(q),q),identity(2)) assert_array_almost_equal(dot(r,q),a) def test_simple_fat(self): a = [[8,2,5],[2,9,3]] r,q = rq(a) assert_array_almost_equal(dot(transpose(q),q),identity(3)) assert_array_almost_equal(dot(r,q),a) def test_simple_complex(self): a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]] r,q = rq(a) assert_array_almost_equal(dot(q, conj(transpose(q))),identity(3)) assert_array_almost_equal(dot(r,q),a) def test_random_tall(self): m = 200 n = 100 for k in range(2): a = random([m,n]) r,q = rq(a) assert_array_almost_equal(dot(q, 
transpose(q)),identity(n)) assert_array_almost_equal(dot(r,q),a) def test_random_trap(self): m = 100 n = 200 for k in range(2): a = random([m,n]) r,q = rq(a) assert_array_almost_equal(dot(q, transpose(q)),identity(n)) assert_array_almost_equal(dot(r,q),a) def test_random_trap_economic(self): m = 100 n = 200 for k in range(2): a = random([m,n]) r,q = rq(a, mode='economic') assert_array_almost_equal(dot(q,transpose(q)),identity(m)) assert_array_almost_equal(dot(r,q),a) assert_equal(q.shape, (m, n)) assert_equal(r.shape, (m, m)) def test_random_complex(self): n = 20 for k in range(2): a = random([n,n])+1j*random([n,n]) r,q = rq(a) assert_array_almost_equal(dot(q, conj(transpose(q))),identity(n)) assert_array_almost_equal(dot(r,q),a) def test_random_complex_economic(self): m = 100 n = 200 for k in range(2): a = random([m,n])+1j*random([m,n]) r,q = rq(a, mode='economic') assert_array_almost_equal(dot(q,conj(transpose(q))),identity(m)) assert_array_almost_equal(dot(r,q),a) assert_equal(q.shape, (m, n)) assert_equal(r.shape, (m, m)) def test_check_finite(self): a = [[8,2,3],[2,9,3],[5,3,6]] r,q = rq(a, check_finite=False) assert_array_almost_equal(dot(q, transpose(q)),identity(3)) assert_array_almost_equal(dot(r,q),a) transp = transpose any = sometrue class TestSchur(object): def test_simple(self): a = [[8,12,3],[2,9,3],[10,3,6]] t,z = schur(a) assert_array_almost_equal(dot(dot(z,t),transp(conj(z))),a) tc,zc = schur(a,'complex') assert_(any(ravel(iscomplex(zc))) and any(ravel(iscomplex(tc)))) assert_array_almost_equal(dot(dot(zc,tc),transp(conj(zc))),a) tc2,zc2 = rsf2csf(tc,zc) assert_array_almost_equal(dot(dot(zc2,tc2),transp(conj(zc2))),a) def test_sort(self): a = [[4.,3.,1.,-1.],[-4.5,-3.5,-1.,1.],[9.,6.,-4.,4.5],[6.,4.,-3.,3.5]] s,u,sdim = schur(a,sort='lhp') assert_array_almost_equal([[0.1134,0.5436,0.8316,0.], [-0.1134,-0.8245,0.5544,0.], [-0.8213,0.1308,0.0265,-0.5547], [-0.5475,0.0872,0.0177,0.8321]], u,3) 
assert_array_almost_equal([[-1.4142,0.1456,-11.5816,-7.7174], [0.,-0.5000,9.4472,-0.7184], [0.,0.,1.4142,-0.1456], [0.,0.,0.,0.5]], s,3) assert_equal(2,sdim) s,u,sdim = schur(a,sort='rhp') assert_array_almost_equal([[0.4862,-0.4930,0.1434,-0.7071], [-0.4862,0.4930,-0.1434,-0.7071], [0.6042,0.3944,-0.6924,0.], [0.4028,0.5986,0.6924,0.]], u,3) assert_array_almost_equal([[1.4142,-0.9270,4.5368,-14.4130], [0.,0.5,6.5809,-3.1870], [0.,0.,-1.4142,0.9270], [0.,0.,0.,-0.5]], s,3) assert_equal(2,sdim) s,u,sdim = schur(a,sort='iuc') assert_array_almost_equal([[0.5547,0.,-0.5721,-0.6042], [-0.8321,0.,-0.3814,-0.4028], [0.,0.7071,-0.5134,0.4862], [0.,0.7071,0.5134,-0.4862]], u,3) assert_array_almost_equal([[-0.5000,0.0000,-6.5809,-4.0974], [0.,0.5000,-3.3191,-14.4130], [0.,0.,1.4142,2.1573], [0.,0.,0.,-1.4142]], s,3) assert_equal(2,sdim) s,u,sdim = schur(a,sort='ouc') assert_array_almost_equal([[0.4862,-0.5134,0.7071,0.], [-0.4862,0.5134,0.7071,0.], [0.6042,0.5721,0.,-0.5547], [0.4028,0.3814,0.,0.8321]], u,3) assert_array_almost_equal([[1.4142,-2.1573,14.4130,4.0974], [0.,-1.4142,3.3191,6.5809], [0.,0.,-0.5000,0.], [0.,0.,0.,0.5000]], s,3) assert_equal(2,sdim) rhp_function = lambda x: x >= 0.0 s,u,sdim = schur(a,sort=rhp_function) assert_array_almost_equal([[0.4862,-0.4930,0.1434,-0.7071], [-0.4862,0.4930,-0.1434,-0.7071], [0.6042,0.3944,-0.6924,0.], [0.4028,0.5986,0.6924,0.]], u,3) assert_array_almost_equal([[1.4142,-0.9270,4.5368,-14.4130], [0.,0.5,6.5809,-3.1870], [0.,0.,-1.4142,0.9270], [0.,0.,0.,-0.5]], s,3) assert_equal(2,sdim) def test_sort_errors(self): a = [[4.,3.,1.,-1.],[-4.5,-3.5,-1.,1.],[9.,6.,-4.,4.5],[6.,4.,-3.,3.5]] assert_raises(ValueError, schur, a, sort='unsupported') assert_raises(ValueError, schur, a, sort=1) def test_check_finite(self): a = [[8,12,3],[2,9,3],[10,3,6]] t,z = schur(a, check_finite=False) assert_array_almost_equal(dot(dot(z,t),transp(conj(z))),a) class TestHessenberg(object): def test_simple(self): a = [[-149, -50,-154], [537, 180, 546], 
[-27, -9, -25]] h1 = [[-149.0000,42.2037,-156.3165], [-537.6783,152.5511,-554.9272], [0,0.0728, 2.4489]] h,q = hessenberg(a,calc_q=1) assert_array_almost_equal(dot(transp(q),dot(a,q)),h) assert_array_almost_equal(h,h1,decimal=4) def test_simple_complex(self): a = [[-149, -50,-154], [537, 180j, 546], [-27j, -9, -25]] h,q = hessenberg(a,calc_q=1) h1 = dot(transp(conj(q)),dot(a,q)) assert_array_almost_equal(h1,h) def test_simple2(self): a = [[1,2,3,4,5,6,7], [0,2,3,4,6,7,2], [0,2,2,3,0,3,2], [0,0,2,8,0,0,2], [0,3,1,2,0,1,2], [0,1,2,3,0,1,0], [0,0,0,0,0,1,2]] h,q = hessenberg(a,calc_q=1) assert_array_almost_equal(dot(transp(q),dot(a,q)),h) def test_simple3(self): a = np.eye(3) a[-1, 0] = 2 h, q = hessenberg(a, calc_q=1) assert_array_almost_equal(dot(transp(q), dot(a, q)), h) def test_random(self): n = 20 for k in range(2): a = random([n,n]) h,q = hessenberg(a,calc_q=1) assert_array_almost_equal(dot(transp(q),dot(a,q)),h) def test_random_complex(self): n = 20 for k in range(2): a = random([n,n])+1j*random([n,n]) h,q = hessenberg(a,calc_q=1) h1 = dot(transp(conj(q)),dot(a,q)) assert_array_almost_equal(h1,h) def test_check_finite(self): a = [[-149, -50,-154], [537, 180, 546], [-27, -9, -25]] h1 = [[-149.0000,42.2037,-156.3165], [-537.6783,152.5511,-554.9272], [0,0.0728, 2.4489]] h,q = hessenberg(a,calc_q=1, check_finite=False) assert_array_almost_equal(dot(transp(q),dot(a,q)),h) assert_array_almost_equal(h,h1,decimal=4) def test_2x2(self): a = [[2, 1], [7, 12]] h, q = hessenberg(a, calc_q=1) assert_array_almost_equal(q, np.eye(2)) assert_array_almost_equal(h, a) b = [[2-7j, 1+2j], [7+3j, 12-2j]] h2, q2 = hessenberg(b, calc_q=1) assert_array_almost_equal(q2, np.eye(2)) assert_array_almost_equal(h2, b) class TestQZ(object): def setup_method(self): seed(12345) def test_qz_single(self): n = 5 A = random([n,n]).astype(float32) B = random([n,n]).astype(float32) AA,BB,Q,Z = qz(A,B) assert_array_almost_equal(dot(dot(Q,AA),Z.T), A, decimal=5) 
assert_array_almost_equal(dot(dot(Q,BB),Z.T), B, decimal=5) assert_array_almost_equal(dot(Q,Q.T), eye(n), decimal=5) assert_array_almost_equal(dot(Z,Z.T), eye(n), decimal=5) assert_(all(diag(BB) >= 0)) def test_qz_double(self): n = 5 A = random([n,n]) B = random([n,n]) AA,BB,Q,Z = qz(A,B) assert_array_almost_equal(dot(dot(Q,AA),Z.T), A) assert_array_almost_equal(dot(dot(Q,BB),Z.T), B) assert_array_almost_equal(dot(Q,Q.T), eye(n)) assert_array_almost_equal(dot(Z,Z.T), eye(n)) assert_(all(diag(BB) >= 0)) def test_qz_complex(self): n = 5 A = random([n,n]) + 1j*random([n,n]) B = random([n,n]) + 1j*random([n,n]) AA,BB,Q,Z = qz(A,B) assert_array_almost_equal(dot(dot(Q,AA),Z.conjugate().T), A) assert_array_almost_equal(dot(dot(Q,BB),Z.conjugate().T), B) assert_array_almost_equal(dot(Q,Q.conjugate().T), eye(n)) assert_array_almost_equal(dot(Z,Z.conjugate().T), eye(n)) assert_(all(diag(BB) >= 0)) assert_(all(diag(BB).imag == 0)) def test_qz_complex64(self): n = 5 A = (random([n,n]) + 1j*random([n,n])).astype(complex64) B = (random([n,n]) + 1j*random([n,n])).astype(complex64) AA,BB,Q,Z = qz(A,B) assert_array_almost_equal(dot(dot(Q,AA),Z.conjugate().T), A, decimal=5) assert_array_almost_equal(dot(dot(Q,BB),Z.conjugate().T), B, decimal=5) assert_array_almost_equal(dot(Q,Q.conjugate().T), eye(n), decimal=5) assert_array_almost_equal(dot(Z,Z.conjugate().T), eye(n), decimal=5) assert_(all(diag(BB) >= 0)) assert_(all(diag(BB).imag == 0)) def test_qz_double_complex(self): n = 5 A = random([n,n]) B = random([n,n]) AA,BB,Q,Z = qz(A,B, output='complex') aa = dot(dot(Q,AA),Z.conjugate().T) assert_array_almost_equal(aa.real, A) assert_array_almost_equal(aa.imag, 0) bb = dot(dot(Q,BB),Z.conjugate().T) assert_array_almost_equal(bb.real, B) assert_array_almost_equal(bb.imag, 0) assert_array_almost_equal(dot(Q,Q.conjugate().T), eye(n)) assert_array_almost_equal(dot(Z,Z.conjugate().T), eye(n)) assert_(all(diag(BB) >= 0)) def test_qz_double_sort(self): # from 
http://www.nag.com/lapack-ex/node119.html # NOTE: These matrices may be ill-conditioned and lead to a # seg fault on certain python versions when compiled with # sse2 or sse3 older ATLAS/LAPACK binaries for windows # A = np.array([[3.9, 12.5, -34.5, -0.5], # [ 4.3, 21.5, -47.5, 7.5], # [ 4.3, 21.5, -43.5, 3.5], # [ 4.4, 26.0, -46.0, 6.0 ]]) # B = np.array([[ 1.0, 2.0, -3.0, 1.0], # [1.0, 3.0, -5.0, 4.0], # [1.0, 3.0, -4.0, 3.0], # [1.0, 3.0, -4.0, 4.0]]) A = np.array([[3.9, 12.5, -34.5, 2.5], [4.3, 21.5, -47.5, 7.5], [4.3, 1.5, -43.5, 3.5], [4.4, 6.0, -46.0, 6.0]]) B = np.array([[1.0, 1.0, -3.0, 1.0], [1.0, 3.0, -5.0, 4.4], [1.0, 2.0, -4.0, 1.0], [1.2, 3.0, -4.0, 4.0]]) sort = lambda ar,ai,beta: ai == 0 assert_raises(ValueError, qz, A, B, sort=sort) if False: AA,BB,Q,Z,sdim = qz(A,B,sort=sort) # assert_(sdim == 2) assert_(sdim == 4) assert_array_almost_equal(dot(dot(Q,AA),Z.T), A) assert_array_almost_equal(dot(dot(Q,BB),Z.T), B) # test absolute values bc the sign is ambiguous and might be platform # dependent assert_array_almost_equal(np.abs(AA), np.abs(np.array( [[35.7864, -80.9061, -12.0629, -9.498], [0., 2.7638, -2.3505, 7.3256], [0., 0., 0.6258, -0.0398], [0., 0., 0., -12.8217]])), 4) assert_array_almost_equal(np.abs(BB), np.abs(np.array( [[4.5324, -8.7878, 3.2357, -3.5526], [0., 1.4314, -2.1894, 0.9709], [0., 0., 1.3126, -0.3468], [0., 0., 0., 0.559]])), 4) assert_array_almost_equal(np.abs(Q), np.abs(np.array( [[-0.4193, -0.605, -0.1894, -0.6498], [-0.5495, 0.6987, 0.2654, -0.3734], [-0.4973, -0.3682, 0.6194, 0.4832], [-0.5243, 0.1008, -0.7142, 0.4526]])), 4) assert_array_almost_equal(np.abs(Z), np.abs(np.array( [[-0.9471, -0.2971, -0.1217, 0.0055], [-0.0367, 0.1209, 0.0358, 0.9913], [0.3171, -0.9041, -0.2547, 0.1312], [0.0346, 0.2824, -0.9587, 0.0014]])), 4) # test absolute values bc the sign is ambiguous and might be platform # dependent # assert_array_almost_equal(abs(AA), abs(np.array([ # [3.8009, -69.4505, 50.3135, -43.2884], # [0.0000, 9.2033, -0.2001, 
5.9881], # [0.0000, 0.0000, 1.4279, 4.4453], # [0.0000, 0.0000, 0.9019, -1.1962]])), 4) # assert_array_almost_equal(abs(BB), abs(np.array([ # [1.9005, -10.2285, 0.8658, -5.2134], # [0.0000, 2.3008, 0.7915, 0.4262], # [0.0000, 0.0000, 0.8101, 0.0000], # [0.0000, 0.0000, 0.0000, -0.2823]])), 4) # assert_array_almost_equal(abs(Q), abs(np.array([ # [0.4642, 0.7886, 0.2915, -0.2786], # [0.5002, -0.5986, 0.5638, -0.2713], # [0.5002, 0.0154, -0.0107, 0.8657], # [0.5331, -0.1395, -0.7727, -0.3151]])), 4) # assert_array_almost_equal(dot(Q,Q.T), eye(4)) # assert_array_almost_equal(abs(Z), abs(np.array([ # [0.9961, -0.0014, 0.0887, -0.0026], # [0.0057, -0.0404, -0.0938, -0.9948], # [0.0626, 0.7194, -0.6908, 0.0363], # [0.0626, -0.6934, -0.7114, 0.0956]])), 4) # assert_array_almost_equal(dot(Z,Z.T), eye(4)) # def test_qz_complex_sort(self): # cA = np.array([ # [-21.10+22.50*1j, 53.50+-50.50*1j, -34.50+127.50*1j, 7.50+ 0.50*1j], # [-0.46+ -7.78*1j, -3.50+-37.50*1j, -15.50+ 58.50*1j,-10.50+ -1.50*1j], # [ 4.30+ -5.50*1j, 39.70+-17.10*1j, -68.50+ 12.50*1j, -7.50+ -3.50*1j], # [ 5.50+ 4.40*1j, 14.40+ 43.30*1j, -32.50+-46.00*1j,-19.00+-32.50*1j]]) # cB = np.array([ # [1.00+ -5.00*1j, 1.60+ 1.20*1j,-3.00+ 0.00*1j, 0.00+ -1.00*1j], # [0.80+ -0.60*1j, 3.00+ -5.00*1j,-4.00+ 3.00*1j,-2.40+ -3.20*1j], # [1.00+ 0.00*1j, 2.40+ 1.80*1j,-4.00+ -5.00*1j, 0.00+ -3.00*1j], # [0.00+ 1.00*1j,-1.80+ 2.40*1j, 0.00+ -4.00*1j, 4.00+ -5.00*1j]]) # AAS,BBS,QS,ZS,sdim = qz(cA,cB,sort='lhp') # eigenvalues = diag(AAS)/diag(BBS) # assert_(all(np.real(eigenvalues[:sdim] < 0))) # assert_(all(np.real(eigenvalues[sdim:] > 0))) def test_check_finite(self): n = 5 A = random([n,n]) B = random([n,n]) AA,BB,Q,Z = qz(A,B,check_finite=False) assert_array_almost_equal(dot(dot(Q,AA),Z.T), A) assert_array_almost_equal(dot(dot(Q,BB),Z.T), B) assert_array_almost_equal(dot(Q,Q.T), eye(n)) assert_array_almost_equal(dot(Z,Z.T), eye(n)) assert_(all(diag(BB) >= 0)) def _make_pos(X): # the decompositions can have different 
signs than verified results return np.sign(X)*X class TestOrdQZ(object): @classmethod def setup_class(cls): # http://www.nag.com/lapack-ex/node119.html A1 = np.array([[-21.10 - 22.50j, 53.5 - 50.5j, -34.5 + 127.5j, 7.5 + 0.5j], [-0.46 - 7.78j, -3.5 - 37.5j, -15.5 + 58.5j, -10.5 - 1.5j], [4.30 - 5.50j, 39.7 - 17.1j, -68.5 + 12.5j, -7.5 - 3.5j], [5.50 + 4.40j, 14.4 + 43.3j, -32.5 - 46.0j, -19.0 - 32.5j]]) B1 = np.array([[1.0 - 5.0j, 1.6 + 1.2j, -3 + 0j, 0.0 - 1.0j], [0.8 - 0.6j, .0 - 5.0j, -4 + 3j, -2.4 - 3.2j], [1.0 + 0.0j, 2.4 + 1.8j, -4 - 5j, 0.0 - 3.0j], [0.0 + 1.0j, -1.8 + 2.4j, 0 - 4j, 4.0 - 5.0j]]) # http://www.nag.com/numeric/fl/nagdoc_fl23/xhtml/F08/f08yuf.xml A2 = np.array([[3.9, 12.5, -34.5, -0.5], [4.3, 21.5, -47.5, 7.5], [4.3, 21.5, -43.5, 3.5], [4.4, 26.0, -46.0, 6.0]]) B2 = np.array([[1, 2, -3, 1], [1, 3, -5, 4], [1, 3, -4, 3], [1, 3, -4, 4]]) # example with the eigenvalues # -0.33891648, 1.61217396+0.74013521j, 1.61217396-0.74013521j, # 0.61244091 # thus featuring: # * one complex conjugate eigenvalue pair, # * one eigenvalue in the lhp # * 2 eigenvalues in the unit circle # * 2 non-real eigenvalues A3 = np.array([[5., 1., 3., 3.], [4., 4., 2., 7.], [7., 4., 1., 3.], [0., 4., 8., 7.]]) B3 = np.array([[8., 10., 6., 10.], [7., 7., 2., 9.], [9., 1., 6., 6.], [5., 1., 4., 7.]]) # example with infinite eigenvalues A4 = np.eye(2) B4 = np.diag([0, 1]) # example with (alpha, beta) = (0, 0) A5 = np.diag([1, 0]) B5 = np.diag([1, 0]) cls.A = [A1, A2, A3, A4, A5] cls.B = [B1, B2, B3, B4, A5] def qz_decomp(self, sort): try: olderr = np.seterr('raise') ret = [ordqz(Ai, Bi, sort=sort) for Ai, Bi in zip(self.A, self.B)] finally: np.seterr(**olderr) return tuple(ret) def check(self, A, B, sort, AA, BB, alpha, beta, Q, Z): Id = np.eye(*A.shape) # make sure Q and Z are orthogonal assert_array_almost_equal(Q.dot(Q.T.conj()), Id) assert_array_almost_equal(Z.dot(Z.T.conj()), Id) # check factorization assert_array_almost_equal(Q.dot(AA), A.dot(Z)) 
assert_array_almost_equal(Q.dot(BB), B.dot(Z)) # check shape of AA and BB assert_array_equal(np.tril(AA, -2), np.zeros(AA.shape)) assert_array_equal(np.tril(BB, -1), np.zeros(BB.shape)) # check eigenvalues for i in range(A.shape[0]): # does the current diagonal element belong to a 2-by-2 block # that was already checked? if i > 0 and A[i, i - 1] != 0: continue # take care of 2-by-2 blocks if i < AA.shape[0] - 1 and AA[i + 1, i] != 0: evals, _ = eig(AA[i:i + 2, i:i + 2], BB[i:i + 2, i:i + 2]) # make sure the pair of complex conjugate eigenvalues # is ordered consistently (positive imaginary part first) if evals[0].imag < 0: evals = evals[[1, 0]] tmp = alpha[i:i + 2]/beta[i:i + 2] if tmp[0].imag < 0: tmp = tmp[[1, 0]] assert_array_almost_equal(evals, tmp) else: if alpha[i] == 0 and beta[i] == 0: assert_equal(AA[i, i], 0) assert_equal(BB[i, i], 0) elif beta[i] == 0: assert_equal(BB[i, i], 0) else: assert_almost_equal(AA[i, i]/BB[i, i], alpha[i]/beta[i]) sortfun = _select_function(sort) lastsort = True for i in range(A.shape[0]): cursort = sortfun(np.array([alpha[i]]), np.array([beta[i]])) # once the sorting criterion was not matched all subsequent # eigenvalues also shouldn't match if not lastsort: assert(not cursort) lastsort = cursort def check_all(self, sort): ret = self.qz_decomp(sort) for reti, Ai, Bi in zip(ret, self.A, self.B): self.check(Ai, Bi, sort, *reti) def test_lhp(self): self.check_all('lhp') def test_rhp(self): self.check_all('rhp') def test_iuc(self): self.check_all('iuc') def test_ouc(self): self.check_all('ouc') def test_ref(self): # real eigenvalues first (top-left corner) def sort(x, y): out = np.empty_like(x, dtype=bool) nonzero = (y != 0) out[~nonzero] = False out[nonzero] = (x[nonzero]/y[nonzero]).imag == 0 return out self.check_all(sort) def test_cef(self): # complex eigenvalues first (top-left corner) def sort(x, y): out = np.empty_like(x, dtype=bool) nonzero = (y != 0) out[~nonzero] = False out[nonzero] = (x[nonzero]/y[nonzero]).imag != 0 
return out self.check_all(sort) def test_diff_input_types(self): ret = ordqz(self.A[1], self.B[2], sort='lhp') self.check(self.A[1], self.B[2], 'lhp', *ret) ret = ordqz(self.B[2], self.A[1], sort='lhp') self.check(self.B[2], self.A[1], 'lhp', *ret) def test_sort_explicit(self): # Test order of the eigenvalues in the 2 x 2 case where we can # explicitly compute the solution A1 = np.eye(2) B1 = np.diag([-2, 0.5]) expected1 = [('lhp', [-0.5, 2]), ('rhp', [2, -0.5]), ('iuc', [-0.5, 2]), ('ouc', [2, -0.5])] A2 = np.eye(2) B2 = np.diag([-2 + 1j, 0.5 + 0.5j]) expected2 = [('lhp', [1/(-2 + 1j), 1/(0.5 + 0.5j)]), ('rhp', [1/(0.5 + 0.5j), 1/(-2 + 1j)]), ('iuc', [1/(-2 + 1j), 1/(0.5 + 0.5j)]), ('ouc', [1/(0.5 + 0.5j), 1/(-2 + 1j)])] # 'lhp' is ambiguous so don't test it A3 = np.eye(2) B3 = np.diag([2, 0]) expected3 = [('rhp', [0.5, np.inf]), ('iuc', [0.5, np.inf]), ('ouc', [np.inf, 0.5])] # 'rhp' is ambiguous so don't test it A4 = np.eye(2) B4 = np.diag([-2, 0]) expected4 = [('lhp', [-0.5, np.inf]), ('iuc', [-0.5, np.inf]), ('ouc', [np.inf, -0.5])] A5 = np.diag([0, 1]) B5 = np.diag([0, 0.5]) # 'lhp' and 'iuc' are ambiguous so don't test them expected5 = [('rhp', [2, np.nan]), ('ouc', [2, np.nan])] A = [A1, A2, A3, A4, A5] B = [B1, B2, B3, B4, B5] expected = [expected1, expected2, expected3, expected4, expected5] for Ai, Bi, expectedi in zip(A, B, expected): for sortstr, expected_eigvals in expectedi: _, _, alpha, beta, _, _ = ordqz(Ai, Bi, sort=sortstr) azero = (alpha == 0) bzero = (beta == 0) x = np.empty_like(alpha) x[azero & bzero] = np.nan x[~azero & bzero] = np.inf x[~bzero] = alpha[~bzero]/beta[~bzero] assert_allclose(expected_eigvals, x) class TestOrdQZWorkspaceSize(object): def setup_method(self): seed(12345) def test_decompose(self): N = 202 # raises error if lwork parameter to dtrsen is too small for ddtype in [np.float32, np.float64]: A = random((N,N)).astype(ddtype) B = random((N,N)).astype(ddtype) # sort = lambda alphar, alphai, beta: alphar**2 + alphai**2< 
beta**2 sort = lambda alpha, beta: alpha < beta [S,T,alpha,beta,U,V] = ordqz(A,B,sort=sort, output='real') for ddtype in [np.complex, np.complex64]: A = random((N,N)).astype(ddtype) B = random((N,N)).astype(ddtype) sort = lambda alpha, beta: alpha < beta [S,T,alpha,beta,U,V] = ordqz(A,B,sort=sort, output='complex') @pytest.mark.slow def test_decompose_ouc(self): N = 202 # segfaults if lwork parameter to dtrsen is too small for ddtype in [np.float32, np.float64, np.complex, np.complex64]: A = random((N,N)).astype(ddtype) B = random((N,N)).astype(ddtype) [S,T,alpha,beta,U,V] = ordqz(A,B,sort='ouc') class TestDatacopied(object): def test_datacopied(self): from scipy.linalg.decomp import _datacopied M = matrix([[0,1],[2,3]]) A = asarray(M) L = M.tolist() M2 = M.copy() class Fake1: def __array__(self): return A class Fake2: __array_interface__ = A.__array_interface__ F1 = Fake1() F2 = Fake2() for item, status in [(M, False), (A, False), (L, True), (M2, False), (F1, False), (F2, False)]: arr = asarray(item) assert_equal(_datacopied(arr, item), status, err_msg=repr(item)) def test_aligned_mem_float(): """Check linalg works with non-aligned memory""" # Allocate 402 bytes of memory (allocated on boundary) a = arange(402, dtype=np.uint8) # Create an array with boundary offset 4 z = np.frombuffer(a.data, offset=2, count=100, dtype=float32) z.shape = 10, 10 eig(z, overwrite_a=True) eig(z.T, overwrite_a=True) def test_aligned_mem(): """Check linalg works with non-aligned memory""" # Allocate 804 bytes of memory (allocated on boundary) a = arange(804, dtype=np.uint8) # Create an array with boundary offset 4 z = np.frombuffer(a.data, offset=4, count=100, dtype=float) z.shape = 10, 10 eig(z, overwrite_a=True) eig(z.T, overwrite_a=True) def test_aligned_mem_complex(): """Check that complex objects don't need to be completely aligned""" # Allocate 1608 bytes of memory (allocated on boundary) a = zeros(1608, dtype=np.uint8) # Create an array with boundary offset 8 z = 
np.frombuffer(a.data, offset=8, count=100, dtype=complex) z.shape = 10, 10 eig(z, overwrite_a=True) # This does not need special handling eig(z.T, overwrite_a=True) def check_lapack_misaligned(func, args, kwargs): args = list(args) for i in range(len(args)): a = args[:] if isinstance(a[i],np.ndarray): # Try misaligning a[i] aa = np.zeros(a[i].size*a[i].dtype.itemsize+8, dtype=np.uint8) aa = np.frombuffer(aa.data, offset=4, count=a[i].size, dtype=a[i].dtype) aa.shape = a[i].shape aa[...] = a[i] a[i] = aa func(*a,**kwargs) if len(a[i].shape) > 1: a[i] = a[i].T func(*a,**kwargs) @pytest.mark.xfail(run=False, reason="Ticket #1152, triggers a segfault in rare cases.") def test_lapack_misaligned(): M = np.eye(10,dtype=float) R = np.arange(100) R.shape = 10,10 S = np.arange(20000,dtype=np.uint8) S = np.frombuffer(S.data, offset=4, count=100, dtype=float) S.shape = 10, 10 b = np.ones(10) LU, piv = lu_factor(S) for (func, args, kwargs) in [ (eig,(S,),dict(overwrite_a=True)), # crash (eigvals,(S,),dict(overwrite_a=True)), # no crash (lu,(S,),dict(overwrite_a=True)), # no crash (lu_factor,(S,),dict(overwrite_a=True)), # no crash (lu_solve,((LU,piv),b),dict(overwrite_b=True)), (solve,(S,b),dict(overwrite_a=True,overwrite_b=True)), (svd,(M,),dict(overwrite_a=True)), # no crash (svd,(R,),dict(overwrite_a=True)), # no crash (svd,(S,),dict(overwrite_a=True)), # crash (svdvals,(S,),dict()), # no crash (svdvals,(S,),dict(overwrite_a=True)), # crash (cholesky,(M,),dict(overwrite_a=True)), # no crash (qr,(S,),dict(overwrite_a=True)), # crash (rq,(S,),dict(overwrite_a=True)), # crash (hessenberg,(S,),dict(overwrite_a=True)), # crash (schur,(S,),dict(overwrite_a=True)), # crash ]: check_lapack_misaligned(func, args, kwargs) # not properly tested # cholesky, rsf2csf, lu_solve, solve, eig_banded, eigvals_banded, eigh, diagsvd class TestOverwrite(object): def test_eig(self): assert_no_overwrite(eig, [(3,3)]) assert_no_overwrite(eig, [(3,3), (3,3)]) def test_eigh(self): 
assert_no_overwrite(eigh, [(3,3)]) assert_no_overwrite(eigh, [(3,3), (3,3)]) def test_eig_banded(self): assert_no_overwrite(eig_banded, [(3,2)]) def test_eigvals(self): assert_no_overwrite(eigvals, [(3,3)]) def test_eigvalsh(self): assert_no_overwrite(eigvalsh, [(3,3)]) def test_eigvals_banded(self): assert_no_overwrite(eigvals_banded, [(3,2)]) def test_hessenberg(self): assert_no_overwrite(hessenberg, [(3,3)]) def test_lu_factor(self): assert_no_overwrite(lu_factor, [(3,3)]) def test_lu_solve(self): x = np.array([[1,2,3], [4,5,6], [7,8,8]]) xlu = lu_factor(x) assert_no_overwrite(lambda b: lu_solve(xlu, b), [(3,)]) def test_lu(self): assert_no_overwrite(lu, [(3,3)]) def test_qr(self): assert_no_overwrite(qr, [(3,3)]) def test_rq(self): assert_no_overwrite(rq, [(3,3)]) def test_schur(self): assert_no_overwrite(schur, [(3,3)]) def test_schur_complex(self): assert_no_overwrite(lambda a: schur(a, 'complex'), [(3,3)], dtypes=[np.float32, np.float64]) def test_svd(self): assert_no_overwrite(svd, [(3,3)]) assert_no_overwrite(lambda a: svd(a, lapack_driver='gesvd'), [(3,3)]) def test_svdvals(self): assert_no_overwrite(svdvals, [(3,3)]) def _check_orth(n, dtype, skip_big=False): X = np.ones((n, 2), dtype=float).astype(dtype) eps = np.finfo(dtype).eps tol = 1000 * eps Y = orth(X) assert_equal(Y.shape, (n, 1)) assert_allclose(Y, Y.mean(), atol=tol) Y = orth(X.T) assert_equal(Y.shape, (2, 1)) assert_allclose(Y, Y.mean(), atol=tol) if n > 5 and not skip_big: np.random.seed(1) X = np.random.rand(n, 5).dot(np.random.rand(5, n)) X = X + 1e-4 * np.random.rand(n, 1).dot(np.random.rand(1, n)) X = X.astype(dtype) Y = orth(X, rcond=1e-3) assert_equal(Y.shape, (n, 5)) Y = orth(X, rcond=1e-6) assert_equal(Y.shape, (n, 5 + 1)) @pytest.mark.slow @pytest.mark.skipif(np.dtype(np.intp).itemsize < 8, reason="test only on 64-bit, else too slow") def test_orth_memory_efficiency(): # Pick n so that 16*n bytes is reasonable but 8*n*n bytes is unreasonable. 
# Keep in mind that @pytest.mark.slow tests are likely to be running # under configurations that support 4Gb+ memory for tests related to # 32 bit overflow. n = 10*1000*1000 try: _check_orth(n, np.float64, skip_big=True) except MemoryError: raise AssertionError('memory error perhaps caused by orth regression') def test_orth(): dtypes = [np.float32, np.float64, np.complex64, np.complex128] sizes = [1, 2, 3, 10, 100] for dt, n in itertools.product(dtypes, sizes): _check_orth(n, dt) def test_null_space(): np.random.seed(1) dtypes = [np.float32, np.float64, np.complex64, np.complex128] sizes = [1, 2, 3, 10, 100] for dt, n in itertools.product(dtypes, sizes): X = np.ones((2, n), dtype=dt) eps = np.finfo(dt).eps tol = 1000 * eps Y = null_space(X) assert_equal(Y.shape, (n, n-1)) assert_allclose(X.dot(Y), 0, atol=tol) Y = null_space(X.T) assert_equal(Y.shape, (2, 1)) assert_allclose(X.T.dot(Y), 0, atol=tol) X = np.random.randn(1 + n//2, n) Y = null_space(X) assert_equal(Y.shape, (n, n - 1 - n//2)) assert_allclose(X.dot(Y), 0, atol=tol) if n > 5: np.random.seed(1) X = np.random.rand(n, 5).dot(np.random.rand(5, n)) X = X + 1e-4 * np.random.rand(n, 1).dot(np.random.rand(1, n)) X = X.astype(dt) Y = null_space(X, rcond=1e-3) assert_equal(Y.shape, (n, n - 5)) Y = null_space(X, rcond=1e-6) assert_equal(Y.shape, (n, n - 6)) def test_subspace_angles(): H = hadamard(8, float) A = H[:, :3] B = H[:, 3:] assert_allclose(subspace_angles(A, B), [np.pi / 2.] * 3, atol=1e-14) assert_allclose(subspace_angles(B, A), [np.pi / 2.] 
* 3, atol=1e-14) for x in (A, B): assert_allclose(subspace_angles(x, x), np.zeros(x.shape[1]), atol=1e-14) # From MATLAB function "subspace", which effectively only returns the # last value that we calculate x = np.array( [[0.537667139546100, 0.318765239858981, 3.578396939725760, 0.725404224946106], # noqa: E501 [1.833885014595086, -1.307688296305273, 2.769437029884877, -0.063054873189656], # noqa: E501 [-2.258846861003648, -0.433592022305684, -1.349886940156521, 0.714742903826096], # noqa: E501 [0.862173320368121, 0.342624466538650, 3.034923466331855, -0.204966058299775]]) # noqa: E501 expected = 1.481454682101605 assert_allclose(subspace_angles(x[:, :2], x[:, 2:])[0], expected, rtol=1e-12) assert_allclose(subspace_angles(x[:, 2:], x[:, :2])[0], expected, rtol=1e-12) expected = 0.746361174247302 assert_allclose(subspace_angles(x[:, :2], x[:, [2]]), expected, rtol=1e-12) assert_allclose(subspace_angles(x[:, [2]], x[:, :2]), expected, rtol=1e-12) expected = 0.487163718534313 assert_allclose(subspace_angles(x[:, :3], x[:, [3]]), expected, rtol=1e-12) assert_allclose(subspace_angles(x[:, [3]], x[:, :3]), expected, rtol=1e-12) expected = 0.328950515907756 assert_allclose(subspace_angles(x[:, :2], x[:, 1:]), [expected, 0], atol=1e-12) # Degenerate conditions assert_raises(ValueError, subspace_angles, x[0], x) assert_raises(ValueError, subspace_angles, x, x[0]) assert_raises(ValueError, subspace_angles, x[:-1], x) class TestCDF2RDF(object): def matmul(self, a, b): return np.einsum('...ij,...jk->...ik', a, b) def assert_eig_valid(self, w, v, x): assert_array_almost_equal( self.matmul(v, w), self.matmul(x, v) ) def test_single_array0x0real(self): # eig doesn't support 0x0 in old versions of numpy X = np.empty((0, 0)) w, v = np.empty(0), np.empty((0, 0)) wr, vr = cdf2rdf(w, v) self.assert_eig_valid(wr, vr, X) def test_single_array2x2_real(self): X = np.array([[1, 2], [3, -1]]) w, v = np.linalg.eig(X) wr, vr = cdf2rdf(w, v) self.assert_eig_valid(wr, vr, X) def 
test_single_array2x2_complex(self): X = np.array([[1, 2], [-2, 1]]) w, v = np.linalg.eig(X) wr, vr = cdf2rdf(w, v) self.assert_eig_valid(wr, vr, X) def test_single_array3x3_real(self): X = np.array([[1, 2, 3], [1, 2, 3], [2, 5, 6]]) w, v = np.linalg.eig(X) wr, vr = cdf2rdf(w, v) self.assert_eig_valid(wr, vr, X) def test_single_array3x3_complex(self): X = np.array([[1, 2, 3], [0, 4, 5], [0, -5, 4]]) w, v = np.linalg.eig(X) wr, vr = cdf2rdf(w, v) self.assert_eig_valid(wr, vr, X) def test_random_1d_stacked_arrays(self): # cannot test M == 0 due to bug in old numpy for M in range(1, 7): X = np.random.rand(100, M, M) w, v = np.linalg.eig(X) wr, vr = cdf2rdf(w, v) self.assert_eig_valid(wr, vr, X) def test_random_2d_stacked_arrays(self): # cannot test M == 0 due to bug in old numpy for M in range(1, 7): X = np.random.rand(10, 10, M, M) w, v = np.linalg.eig(X) wr, vr = cdf2rdf(w, v) self.assert_eig_valid(wr, vr, X) def test_low_dimensionality_error(self): w, v = np.empty(()), np.array((2,)) assert_raises(ValueError, cdf2rdf, w, v) def test_not_square_error(self): # Check that passing a non-square array raises a ValueError. w, v = np.arange(3), np.arange(6).reshape(3,2) assert_raises(ValueError, cdf2rdf, w, v) def test_swapped_v_w_error(self): # Check that exchanging places of w and v raises ValueError. X = np.array([[1, 2, 3], [0, 4, 5], [0, -5, 4]]) w, v = np.linalg.eig(X) assert_raises(ValueError, cdf2rdf, v, w) def test_non_associated_error(self): # Check that passing non-associated eigenvectors raises a ValueError. w, v = np.arange(3), np.arange(16).reshape(4,4) assert_raises(ValueError, cdf2rdf, w, v) def test_not_conjugate_pairs(self): # Check that passing non-conjugate pairs raises a ValueError. 
X = np.array([[1, 2, 3], [1, 2, 3], [2, 5, 6+1j]]) w, v = np.linalg.eig(X) assert_raises(ValueError, cdf2rdf, w, v) # different arrays in the stack, so not conjugate X = np.array([ [[1, 2, 3], [1, 2, 3], [2, 5, 6+1j]], [[1, 2, 3], [1, 2, 3], [2, 5, 6-1j]], ]) w, v = np.linalg.eig(X) assert_raises(ValueError, cdf2rdf, w, v)
104,105
35.956337
114
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/tests/test_solvers.py
from __future__ import division, print_function, absolute_import import os import numpy as np from numpy.testing import assert_array_almost_equal import pytest from pytest import raises as assert_raises from scipy.linalg import solve_sylvester from scipy.linalg import solve_continuous_lyapunov, solve_discrete_lyapunov from scipy.linalg import solve_continuous_are, solve_discrete_are from scipy.linalg import block_diag, solve, LinAlgError def _load_data(name): """ Load npz data file under data/ Returns a copy of the data, rather than keeping the npz file open. """ filename = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'data', name) with np.load(filename) as f: return dict(f.items()) class TestSolveLyapunov(object): cases = [ (np.array([[1, 2], [3, 4]]), np.array([[9, 10], [11, 12]])), # a, q all complex. (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]), np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])), # a real; q complex. (np.array([[1.0, 2.0], [3.0, 5.0]]), np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])), # a complex; q real. (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]), np.array([[2.0, 2.0], [-1.0, 2.0]])), # An example from Kitagawa, 1977 (np.array([[3, 9, 5, 1, 4], [1, 2, 3, 8, 4], [4, 6, 6, 6, 3], [1, 5, 2, 0, 7], [5, 3, 3, 1, 5]]), np.array([[2, 4, 1, 0, 1], [4, 1, 0, 2, 0], [1, 0, 3, 0, 3], [0, 2, 0, 1, 0], [1, 0, 3, 0, 4]])), # Companion matrix example. 
a complex; q real; a.shape[0] = 11 (np.array([[0.100+0.j, 0.091+0.j, 0.082+0.j, 0.073+0.j, 0.064+0.j, 0.055+0.j, 0.046+0.j, 0.037+0.j, 0.028+0.j, 0.019+0.j, 0.010+0.j], [1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j], [0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j], [0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j], [0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j], [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j], [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j], [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j], [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j], [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j], [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j]]), np.eye(11)), # https://github.com/scipy/scipy/issues/4176 (np.matrix([[0, 1], [-1/2, -1]]), (np.matrix([0, 3]).T * np.matrix([0, 3]).T.T)), # https://github.com/scipy/scipy/issues/4176 (np.matrix([[0, 1], [-1/2, -1]]), (np.array(np.matrix([0, 3]).T * np.matrix([0, 3]).T.T))), ] def test_continuous_squareness_and_shape(self): nsq = np.ones((3, 2)) sq = np.eye(3) assert_raises(ValueError, solve_continuous_lyapunov, nsq, sq) assert_raises(ValueError, solve_continuous_lyapunov, sq, nsq) assert_raises(ValueError, solve_continuous_lyapunov, sq, np.eye(2)) def check_continuous_case(self, a, q): x = 
solve_continuous_lyapunov(a, q) assert_array_almost_equal( np.dot(a, x) + np.dot(x, a.conj().transpose()), q) def check_discrete_case(self, a, q, method=None): x = solve_discrete_lyapunov(a, q, method=method) assert_array_almost_equal( np.dot(np.dot(a, x), a.conj().transpose()) - x, -1.0*q) def test_cases(self): for case in self.cases: self.check_continuous_case(case[0], case[1]) self.check_discrete_case(case[0], case[1]) self.check_discrete_case(case[0], case[1], method='direct') self.check_discrete_case(case[0], case[1], method='bilinear') def test_solve_continuous_are(): mat6 = _load_data('carex_6_data.npz') mat15 = _load_data('carex_15_data.npz') mat18 = _load_data('carex_18_data.npz') mat19 = _load_data('carex_19_data.npz') mat20 = _load_data('carex_20_data.npz') cases = [ # Carex examples taken from (with default parameters): # [1] P.BENNER, A.J. LAUB, V. MEHRMANN: 'A Collection of Benchmark # Examples for the Numerical Solution of Algebraic Riccati # Equations II: Continuous-Time Case', Tech. Report SPC 95_23, # Fak. f. Mathematik, TU Chemnitz-Zwickau (Germany), 1995. # # The format of the data is (a, b, q, r, knownfailure), where # knownfailure is None if the test passes or a string # indicating the reason for failure. 
# # Test Case 0: carex #1 (np.diag([1.], 1), np.array([[0], [1]]), block_diag(1., 2.), 1, None), # Test Case 1: carex #2 (np.array([[4, 3], [-4.5, -3.5]]), np.array([[1], [-1]]), np.array([[9, 6], [6, 4.]]), 1, None), # Test Case 2: carex #3 (np.array([[0, 1, 0, 0], [0, -1.89, 0.39, -5.53], [0, -0.034, -2.98, 2.43], [0.034, -0.0011, -0.99, -0.21]]), np.array([[0, 0], [0.36, -1.6], [-0.95, -0.032], [0.03, 0]]), np.array([[2.313, 2.727, 0.688, 0.023], [2.727, 4.271, 1.148, 0.323], [0.688, 1.148, 0.313, 0.102], [0.023, 0.323, 0.102, 0.083]]), np.eye(2), None), # Test Case 3: carex #4 (np.array([[-0.991, 0.529, 0, 0, 0, 0, 0, 0], [0.522, -1.051, 0.596, 0, 0, 0, 0, 0], [0, 0.522, -1.118, 0.596, 0, 0, 0, 0], [0, 0, 0.522, -1.548, 0.718, 0, 0, 0], [0, 0, 0, 0.922, -1.64, 0.799, 0, 0], [0, 0, 0, 0, 0.922, -1.721, 0.901, 0], [0, 0, 0, 0, 0, 0.922, -1.823, 1.021], [0, 0, 0, 0, 0, 0, 0.922, -1.943]]), np.array([[3.84, 4.00, 37.60, 3.08, 2.36, 2.88, 3.08, 3.00], [-2.88, -3.04, -2.80, -2.32, -3.32, -3.82, -4.12, -3.96]] ).T * 0.001, np.array([[1.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.1], [0.0, 1.0, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0, 0.0, 0.5, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0], [0.5, 0.1, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0], [0.0, 0.0, 0.5, 0.0, 0.0, 0.1, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1, 0.0], [0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1]]), np.eye(2), None), # Test Case 4: carex #5 (np.array( [[-4.019, 5.120, 0., 0., -2.082, 0., 0., 0., 0.870], [-0.346, 0.986, 0., 0., -2.340, 0., 0., 0., 0.970], [-7.909, 15.407, -4.069, 0., -6.450, 0., 0., 0., 2.680], [-21.816, 35.606, -0.339, -3.870, -17.800, 0., 0., 0., 7.390], [-60.196, 98.188, -7.907, 0.340, -53.008, 0., 0., 0., 20.400], [0, 0, 0, 0, 94.000, -147.200, 0., 53.200, 0.], [0, 0, 0, 0, 0, 94.000, -147.200, 0, 0], [0, 0, 0, 0, 0, 12.800, 0.000, -31.600, 0], [0, 0, 0, 0, 12.800, 0.000, 0.000, 18.800, -31.600]]), np.array([[0.010, -0.011, -0.151], [0.003, -0.021, 0.000], [0.009, -0.059, 0.000], 
[0.024, -0.162, 0.000], [0.068, -0.445, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000]]), np.eye(9), np.eye(3), None), # Test Case 5: carex #6 (mat6['A'], mat6['B'], mat6['Q'], mat6['R'], None), # Test Case 6: carex #7 (np.array([[1, 0], [0, -2.]]), np.array([[1e-6], [0]]), np.ones((2, 2)), 1., 'Bad residual accuracy'), # Test Case 7: carex #8 (block_diag(-0.1, -0.02), np.array([[0.100, 0.000], [0.001, 0.010]]), np.array([[100, 1000], [1000, 10000]]), np.ones((2, 2)) + block_diag(1e-6, 0), None), # Test Case 8: carex #9 (np.array([[0, 1e6], [0, 0]]), np.array([[0], [1.]]), np.eye(2), 1., None), # Test Case 9: carex #10 (np.array([[1.0000001, 1], [1., 1.0000001]]), np.eye(2), np.eye(2), np.eye(2), None), # Test Case 10: carex #11 (np.array([[3, 1.], [4, 2]]), np.array([[1], [1]]), np.array([[-11, -5], [-5, -2.]]), 1., None), # Test Case 11: carex #12 (np.array([[7000000., 2000000., -0.], [2000000., 6000000., -2000000.], [0., -2000000., 5000000.]]) / 3, np.eye(3), np.array([[1., -2., -2.], [-2., 1., -2.], [-2., -2., 1.]]).dot( np.diag([1e-6, 1, 1e6])).dot( np.array([[1., -2., -2.], [-2., 1., -2.], [-2., -2., 1.]])) / 9, np.eye(3) * 1e6, 'Bad Residual Accuracy'), # Test Case 12: carex #13 (np.array([[0, 0.4, 0, 0], [0, 0, 0.345, 0], [0, -0.524e6, -0.465e6, 0.262e6], [0, 0, 0, -1e6]]), np.array([[0, 0, 0, 1e6]]).T, np.diag([1, 0, 1, 0]), 1., None), # Test Case 13: carex #14 (np.array([[-1e-6, 1, 0, 0], [-1, -1e-6, 0, 0], [0, 0, 1e-6, 1], [0, 0, -1, 1e-6]]), np.ones((4, 1)), np.ones((4, 4)), 1., None), # Test Case 14: carex #15 (mat15['A'], mat15['B'], mat15['Q'], mat15['R'], None), # Test Case 15: carex #16 (np.eye(64, 64, k=-1) + np.eye(64, 64)*(-2.) 
+ np.rot90( block_diag(1, np.zeros((62, 62)), 1)) + np.eye(64, 64, k=1), np.eye(64), np.eye(64), np.eye(64), None), # Test Case 16: carex #17 (np.diag(np.ones((20, )), 1), np.flipud(np.eye(21, 1)), np.eye(21, 1) * np.eye(21, 1).T, 1, 'Bad Residual Accuracy'), # Test Case 17: carex #18 (mat18['A'], mat18['B'], mat18['Q'], mat18['R'], None), # Test Case 18: carex #19 (mat19['A'], mat19['B'], mat19['Q'], mat19['R'], 'Bad Residual Accuracy'), # Test Case 19: carex #20 (mat20['A'], mat20['B'], mat20['Q'], mat20['R'], 'Bad Residual Accuracy') ] # Makes the minimum precision requirements customized to the test. # Here numbers represent the number of decimals that agrees with zero # matrix when the solution x is plugged in to the equation. # # res = array([[8e-3,1e-16],[1e-16,1e-20]]) --> min_decimal[k] = 2 # # If the test is failing use "None" for that entry. # min_decimal = (14, 12, 13, 14, 11, 6, None, 5, 7, 14, 14, None, 9, 14, 13, 14, None, 12, None, None) def _test_factory(case, dec): """Checks if 0 = XA + A'X - XB(R)^{-1} B'X + Q is true""" a, b, q, r, knownfailure = case if knownfailure: pytest.xfail(reason=knownfailure) x = solve_continuous_are(a, b, q, r) res = x.dot(a) + a.conj().T.dot(x) + q out_fact = x.dot(b) res -= out_fact.dot(solve(np.atleast_2d(r), out_fact.conj().T)) assert_array_almost_equal(res, np.zeros_like(res), decimal=dec) for ind, case in enumerate(cases): _test_factory(case, min_decimal[ind]) def test_solve_discrete_are(): cases = [ # Darex examples taken from (with default parameters): # [1] P.BENNER, A.J. LAUB, V. MEHRMANN: 'A Collection of Benchmark # Examples for the Numerical Solution of Algebraic Riccati # Equations II: Discrete-Time Case', Tech. Report SPC 95_23, # Fak. f. Mathematik, TU Chemnitz-Zwickau (Germany), 1995. # [2] T. GUDMUNDSSON, C. KENNEY, A.J. 
LAUB: 'Scaling of the # Discrete-Time Algebraic Riccati Equation to Enhance Stability # of the Schur Solution Method', IEEE Trans.Aut.Cont., vol.37(4) # # The format of the data is (a, b, q, r, knownfailure), where # knownfailure is None if the test passes or a string # indicating the reason for failure. # # TEST CASE 0 : Complex a; real b, q, r (np.array([[2, 1-2j], [0, -3j]]), np.array([[0], [1]]), np.array([[1, 0], [0, 2]]), np.array([[1]]), None), # TEST CASE 1 :Real a, q, r; complex b (np.array([[2, 1], [0, -1]]), np.array([[-2j], [1j]]), np.array([[1, 0], [0, 2]]), np.array([[1]]), None), # TEST CASE 2 : Real a, b; complex q, r (np.array([[3, 1], [0, -1]]), np.array([[1, 2], [1, 3]]), np.array([[1, 1+1j], [1-1j, 2]]), np.array([[2, -2j], [2j, 3]]), None), # TEST CASE 3 : User-reported gh-2251 (Trac #1732) (np.array([[0.63399379, 0.54906824, 0.76253406], [0.5404729, 0.53745766, 0.08731853], [0.27524045, 0.84922129, 0.4681622]]), np.array([[0.96861695], [0.05532739], [0.78934047]]), np.eye(3), np.eye(1), None), # TEST CASE 4 : darex #1 (np.array([[4, 3], [-4.5, -3.5]]), np.array([[1], [-1]]), np.array([[9, 6], [6, 4]]), np.array([[1]]), None), # TEST CASE 5 : darex #2 (np.array([[0.9512, 0], [0, 0.9048]]), np.array([[4.877, 4.877], [-1.1895, 3.569]]), np.array([[0.005, 0], [0, 0.02]]), np.array([[1/3, 0], [0, 3]]), None), # TEST CASE 6 : darex #3 (np.array([[2, -1], [1, 0]]), np.array([[1], [0]]), np.array([[0, 0], [0, 1]]), np.array([[0]]), None), # TEST CASE 7 : darex #4 (skipped the gen. Ric. 
term S) (np.array([[0, 1], [0, -1]]), np.array([[1, 0], [2, 1]]), np.array([[-4, -4], [-4, 7]]) * (1/11), np.array([[9, 3], [3, 1]]), None), # TEST CASE 8 : darex #5 (np.array([[0, 1], [0, 0]]), np.array([[0], [1]]), np.array([[1, 2], [2, 4]]), np.array([[1]]), None), # TEST CASE 9 : darex #6 (np.array([[0.998, 0.067, 0, 0], [-.067, 0.998, 0, 0], [0, 0, 0.998, 0.153], [0, 0, -.153, 0.998]]), np.array([[0.0033, 0.0200], [0.1000, -.0007], [0.0400, 0.0073], [-.0028, 0.1000]]), np.array([[1.87, 0, 0, -0.244], [0, 0.744, 0.205, 0], [0, 0.205, 0.589, 0], [-0.244, 0, 0, 1.048]]), np.eye(2), None), # TEST CASE 10 : darex #7 (np.array([[0.984750, -.079903, 0.0009054, -.0010765], [0.041588, 0.998990, -.0358550, 0.0126840], [-.546620, 0.044916, -.3299100, 0.1931800], [2.662400, -.100450, -.9245500, -.2632500]]), np.array([[0.0037112, 0.0007361], [-.0870510, 9.3411e-6], [-1.198440, -4.1378e-4], [-3.192700, 9.2535e-4]]), np.eye(4)*1e-2, np.eye(2), None), # TEST CASE 11 : darex #8 (np.array([[-0.6000000, -2.2000000, -3.6000000, -5.4000180], [1.0000000, 0.6000000, 0.8000000, 3.3999820], [0.0000000, 1.0000000, 1.8000000, 3.7999820], [0.0000000, 0.0000000, 0.0000000, -0.9999820]]), np.array([[1.0, -1.0, -1.0, -1.0], [0.0, 1.0, -1.0, -1.0], [0.0, 0.0, 1.0, -1.0], [0.0, 0.0, 0.0, 1.0]]), np.array([[2, 1, 3, 6], [1, 2, 2, 5], [3, 2, 6, 11], [6, 5, 11, 22]]), np.eye(4), None), # TEST CASE 12 : darex #9 (np.array([[95.4070, 1.9643, 0.3597, 0.0673, 0.0190], [40.8490, 41.3170, 16.0840, 4.4679, 1.1971], [12.2170, 26.3260, 36.1490, 15.9300, 12.3830], [4.1118, 12.8580, 27.2090, 21.4420, 40.9760], [0.1305, 0.5808, 1.8750, 3.6162, 94.2800]]) * 0.01, np.array([[0.0434, -0.0122], [2.6606, -1.0453], [3.7530, -5.5100], [3.6076, -6.6000], [0.4617, -0.9148]]) * 0.01, np.eye(5), np.eye(2), None), # TEST CASE 13 : darex #10 (np.kron(np.eye(2), np.diag([1, 1], k=1)), np.kron(np.eye(2), np.array([[0], [0], [1]])), np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 1, -1, 0], 
[0, 0, 0, -1, 1, 0], [0, 0, 0, 0, 0, 0]]), np.array([[3, 0], [0, 1]]), None), # TEST CASE 14 : darex #11 (0.001 * np.array( [[870.1, 135.0, 11.59, .5014, -37.22, .3484, 0, 4.242, 7.249], [76.55, 897.4, 12.72, 0.5504, -40.16, .3743, 0, 4.53, 7.499], [-127.2, 357.5, 817, 1.455, -102.8, .987, 0, 11.85, 18.72], [-363.5, 633.9, 74.91, 796.6, -273.5, 2.653, 0, 31.72, 48.82], [-960, 1645.9, -128.9, -5.597, 71.42, 7.108, 0, 84.52, 125.9], [-664.4, 112.96, -88.89, -3.854, 84.47, 13.6, 0, 144.3, 101.6], [-410.2, 693, -54.71, -2.371, 66.49, 12.49, .1063, 99.97, 69.67], [-179.9, 301.7, -23.93, -1.035, 60.59, 22.16, 0, 213.9, 35.54], [-345.1, 580.4, -45.96, -1.989, 105.6, 19.86, 0, 219.1, 215.2]]), np.array([[4.7600, -0.5701, -83.6800], [0.8790, -4.7730, -2.7300], [1.4820, -13.1200, 8.8760], [3.8920, -35.1300, 24.8000], [10.3400, -92.7500, 66.8000], [7.2030, -61.5900, 38.3400], [4.4540, -36.8300, 20.2900], [1.9710, -15.5400, 6.9370], [3.7730, -30.2800, 14.6900]]) * 0.001, np.diag([50, 0, 0, 0, 50, 0, 0, 0, 0]), np.eye(3), None), # TEST CASE 15 : darex #12 - numerically least accurate example (np.array([[0, 1e6], [0, 0]]), np.array([[0], [1]]), np.eye(2), np.array([[1]]), None), # TEST CASE 16 : darex #13 (np.array([[16, 10, -2], [10, 13, -8], [-2, -8, 7]]) * (1/9), np.eye(3), 1e6 * np.eye(3), 1e6 * np.eye(3), None), # TEST CASE 17 : darex #14 (np.array([[1 - 1/1e8, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]]), np.array([[1e-08], [0], [0], [0]]), np.diag([0, 0, 0, 1]), np.array([[0.25]]), None), # TEST CASE 18 : darex #15 (np.eye(100, k=1), np.flipud(np.eye(100, 1)), np.eye(100), np.array([[1]]), None) ] # Makes the minimum precision requirements customized to the test. # Here numbers represent the number of decimals that agrees with zero # matrix when the solution x is plugged in to the equation. # # res = array([[8e-3,1e-16],[1e-16,1e-20]]) --> min_decimal[k] = 2 # # If the test is failing use "None" for that entry. 
# min_decimal = (12, 14, 13, 14, 13, 16, 18, 14, 15, 13, 14, 13, 13, 14, 12, 2, 5, 6, 10) def _test_factory(case, dec): """Checks if X = A'XA-(A'XB)(R+B'XB)^-1(B'XA)+Q) is true""" a, b, q, r, knownfailure = case if knownfailure: pytest.xfail(reason=knownfailure) x = solve_discrete_are(a, b, q, r) res = a.conj().T.dot(x.dot(a)) - x + q res -= a.conj().T.dot(x.dot(b)).dot( solve(r+b.conj().T.dot(x.dot(b)), b.conj().T).dot(x.dot(a)) ) assert_array_almost_equal(res, np.zeros_like(res), decimal=dec) for ind, case in enumerate(cases): _test_factory(case, min_decimal[ind]) # An infeasible example taken from https://arxiv.org/abs/1505.04861v1 A = np.triu(np.ones((3, 3))) A[0, 1] = -1 B = np.array([[1, 1, 0], [0, 0, 1]]).T Q = -2*np.ones_like(A) + np.diag([8, -1, -1.9]) R = np.diag([-10, 0.1]) assert_raises(LinAlgError, solve_continuous_are, A, B, Q, R) def test_solve_generalized_continuous_are(): cases = [ # Two random examples differ by s term # in the absence of any literature for demanding examples. 
(np.array([[2.769230e-01, 8.234578e-01, 9.502220e-01], [4.617139e-02, 6.948286e-01, 3.444608e-02], [9.713178e-02, 3.170995e-01, 4.387444e-01]]), np.array([[3.815585e-01, 1.868726e-01], [7.655168e-01, 4.897644e-01], [7.951999e-01, 4.455862e-01]]), np.eye(3), np.eye(2), np.array([[6.463130e-01, 2.760251e-01, 1.626117e-01], [7.093648e-01, 6.797027e-01, 1.189977e-01], [7.546867e-01, 6.550980e-01, 4.983641e-01]]), np.zeros((3, 2)), None), (np.array([[2.769230e-01, 8.234578e-01, 9.502220e-01], [4.617139e-02, 6.948286e-01, 3.444608e-02], [9.713178e-02, 3.170995e-01, 4.387444e-01]]), np.array([[3.815585e-01, 1.868726e-01], [7.655168e-01, 4.897644e-01], [7.951999e-01, 4.455862e-01]]), np.eye(3), np.eye(2), np.array([[6.463130e-01, 2.760251e-01, 1.626117e-01], [7.093648e-01, 6.797027e-01, 1.189977e-01], [7.546867e-01, 6.550980e-01, 4.983641e-01]]), np.ones((3, 2)), None) ] min_decimal = (10, 10) def _test_factory(case, dec): """Checks if X = A'XA-(A'XB)(R+B'XB)^-1(B'XA)+Q) is true""" a, b, q, r, e, s, knownfailure = case if knownfailure: pytest.xfail(reason=knownfailure) x = solve_continuous_are(a, b, q, r, e, s) res = a.conj().T.dot(x.dot(e)) + e.conj().T.dot(x.dot(a)) + q out_fact = e.conj().T.dot(x).dot(b) + s res -= out_fact.dot(solve(np.atleast_2d(r), out_fact.conj().T)) assert_array_almost_equal(res, np.zeros_like(res), decimal=dec) for ind, case in enumerate(cases): _test_factory(case, min_decimal[ind]) def test_solve_generalized_discrete_are(): mat20170120 = _load_data('gendare_20170120_data.npz') cases = [ # Two random examples differ by s term # in the absence of any literature for demanding examples. 
(np.array([[2.769230e-01, 8.234578e-01, 9.502220e-01], [4.617139e-02, 6.948286e-01, 3.444608e-02], [9.713178e-02, 3.170995e-01, 4.387444e-01]]), np.array([[3.815585e-01, 1.868726e-01], [7.655168e-01, 4.897644e-01], [7.951999e-01, 4.455862e-01]]), np.eye(3), np.eye(2), np.array([[6.463130e-01, 2.760251e-01, 1.626117e-01], [7.093648e-01, 6.797027e-01, 1.189977e-01], [7.546867e-01, 6.550980e-01, 4.983641e-01]]), np.zeros((3, 2)), None), (np.array([[2.769230e-01, 8.234578e-01, 9.502220e-01], [4.617139e-02, 6.948286e-01, 3.444608e-02], [9.713178e-02, 3.170995e-01, 4.387444e-01]]), np.array([[3.815585e-01, 1.868726e-01], [7.655168e-01, 4.897644e-01], [7.951999e-01, 4.455862e-01]]), np.eye(3), np.eye(2), np.array([[6.463130e-01, 2.760251e-01, 1.626117e-01], [7.093648e-01, 6.797027e-01, 1.189977e-01], [7.546867e-01, 6.550980e-01, 4.983641e-01]]), np.ones((3, 2)), None), # user-reported (under PR-6616) 20-Jan-2017 # tests against the case where E is None but S is provided (mat20170120['A'], mat20170120['B'], mat20170120['Q'], mat20170120['R'], None, mat20170120['S'], None), ] min_decimal = (11, 11, 16) def _test_factory(case, dec): """Checks if X = A'XA-(A'XB)(R+B'XB)^-1(B'XA)+Q) is true""" a, b, q, r, e, s, knownfailure = case if knownfailure: pytest.xfail(reason=knownfailure) x = solve_discrete_are(a, b, q, r, e, s) if e is None: e = np.eye(a.shape[0]) if s is None: s = np.zeros_like(b) res = a.conj().T.dot(x.dot(a)) - e.conj().T.dot(x.dot(e)) + q res -= (a.conj().T.dot(x.dot(b)) + s).dot( solve(r+b.conj().T.dot(x.dot(b)), (b.conj().T.dot(x.dot(a)) + s.conj().T) ) ) assert_array_almost_equal(res, np.zeros_like(res), decimal=dec) for ind, case in enumerate(cases): _test_factory(case, min_decimal[ind]) def test_are_validate_args(): def test_square_shape(): nsq = np.ones((3, 2)) sq = np.eye(3) for x in (solve_continuous_are, solve_discrete_are): assert_raises(ValueError, x, nsq, 1, 1, 1) assert_raises(ValueError, x, sq, sq, nsq, 1) assert_raises(ValueError, x, sq, sq, sq, 
nsq) assert_raises(ValueError, x, sq, sq, sq, sq, nsq) def test_compatible_sizes(): nsq = np.ones((3, 2)) sq = np.eye(4) for x in (solve_continuous_are, solve_discrete_are): assert_raises(ValueError, x, sq, nsq, 1, 1) assert_raises(ValueError, x, sq, sq, sq, sq, sq, nsq) assert_raises(ValueError, x, sq, sq, np.eye(3), sq) assert_raises(ValueError, x, sq, sq, sq, np.eye(3)) assert_raises(ValueError, x, sq, sq, sq, sq, np.eye(3)) def test_symmetry(): nsym = np.arange(9).reshape(3, 3) sym = np.eye(3) for x in (solve_continuous_are, solve_discrete_are): assert_raises(ValueError, x, sym, sym, nsym, sym) assert_raises(ValueError, x, sym, sym, sym, nsym) def test_singularity(): sing = 1e12 * np.ones((3, 3)) sing[2, 2] -= 1 sq = np.eye(3) for x in (solve_continuous_are, solve_discrete_are): assert_raises(ValueError, x, sq, sq, sq, sq, sing) assert_raises(ValueError, solve_continuous_are, sq, sq, sq, sing) def test_finiteness(): nm = np.ones((2, 2)) * np.nan sq = np.eye(2) for x in (solve_continuous_are, solve_discrete_are): assert_raises(ValueError, x, nm, sq, sq, sq) assert_raises(ValueError, x, sq, nm, sq, sq) assert_raises(ValueError, x, sq, sq, nm, sq) assert_raises(ValueError, x, sq, sq, sq, nm) assert_raises(ValueError, x, sq, sq, sq, sq, nm) assert_raises(ValueError, x, sq, sq, sq, sq, sq, nm) class TestSolveSylvester(object): cases = [ # a, b, c all real. (np.array([[1, 2], [0, 4]]), np.array([[5, 6], [0, 8]]), np.array([[9, 10], [11, 12]])), # a, b, c all real, 4x4. a and b have non-trival 2x2 blocks in their # quasi-triangular form. (np.array([[1.0, 0, 0, 0], [0, 1.0, 2.0, 0.0], [0, 0, 3.0, -4], [0, 0, 2, 5]]), np.array([[2.0, 0, 0, 1.0], [0, 1.0, 0.0, 0.0], [0, 0, 1.0, -1], [0, 0, 1, 1]]), np.array([[1.0, 0, 0, 0], [0, 1.0, 0, 0], [0, 0, 1.0, 0], [0, 0, 0, 1.0]])), # a, b, c all complex. (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]), np.array([[-1.0, 2j], [3.0, 4.0]]), np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])), # a and b real; c complex. 
(np.array([[1.0, 2.0], [3.0, 5.0]]), np.array([[-1.0, 0], [3.0, 4.0]]), np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])), # a and c complex; b real. (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]), np.array([[-1.0, 0], [3.0, 4.0]]), np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])), # a complex; b and c real. (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]), np.array([[-1.0, 0], [3.0, 4.0]]), np.array([[2.0, 2.0], [-1.0, 2.0]])), # not square matrices, real (np.array([[8, 1, 6], [3, 5, 7], [4, 9, 2]]), np.array([[2, 3], [4, 5]]), np.array([[1, 2], [3, 4], [5, 6]])), # not square matrices, complex (np.array([[8, 1j, 6+2j], [3, 5, 7], [4, 9, 2]]), np.array([[2, 3], [4, 5-1j]]), np.array([[1, 2j], [3, 4j], [5j, 6+7j]])), ] def check_case(self, a, b, c): x = solve_sylvester(a, b, c) assert_array_almost_equal(np.dot(a, x) + np.dot(x, b), c) def test_cases(self): for case in self.cases: self.check_case(case[0], case[1], case[2]) def test_trivial(self): a = np.array([[1.0, 0.0], [0.0, 1.0]]) b = np.array([[1.0]]) c = np.array([2.0, 2.0]).reshape(-1, 1) x = solve_sylvester(a, b, c) assert_array_almost_equal(x, np.array([1.0, 1.0]).reshape(-1, 1))
31,063
39.447917
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/tests/test_procrustes.py
from itertools import product, permutations import numpy as np from numpy.testing import assert_array_less, assert_allclose from pytest import raises as assert_raises from scipy.linalg import inv, eigh, norm from scipy.linalg import orthogonal_procrustes def test_orthogonal_procrustes_ndim_too_large(): np.random.seed(1234) A = np.random.randn(3, 4, 5) B = np.random.randn(3, 4, 5) assert_raises(ValueError, orthogonal_procrustes, A, B) def test_orthogonal_procrustes_ndim_too_small(): np.random.seed(1234) A = np.random.randn(3) B = np.random.randn(3) assert_raises(ValueError, orthogonal_procrustes, A, B) def test_orthogonal_procrustes_shape_mismatch(): np.random.seed(1234) shapes = ((3, 3), (3, 4), (4, 3), (4, 4)) for a, b in permutations(shapes, 2): A = np.random.randn(*a) B = np.random.randn(*b) assert_raises(ValueError, orthogonal_procrustes, A, B) def test_orthogonal_procrustes_checkfinite_exception(): np.random.seed(1234) m, n = 2, 3 A_good = np.random.randn(m, n) B_good = np.random.randn(m, n) for bad_value in np.inf, -np.inf, np.nan: A_bad = A_good.copy() A_bad[1, 2] = bad_value B_bad = B_good.copy() B_bad[1, 2] = bad_value for A, B in ((A_good, B_bad), (A_bad, B_good), (A_bad, B_bad)): assert_raises(ValueError, orthogonal_procrustes, A, B) def test_orthogonal_procrustes_scale_invariance(): np.random.seed(1234) m, n = 4, 3 for i in range(3): A_orig = np.random.randn(m, n) B_orig = np.random.randn(m, n) R_orig, s = orthogonal_procrustes(A_orig, B_orig) for A_scale in np.square(np.random.randn(3)): for B_scale in np.square(np.random.randn(3)): R, s = orthogonal_procrustes(A_orig * A_scale, B_orig * B_scale) assert_allclose(R, R_orig) def test_orthogonal_procrustes_array_conversion(): np.random.seed(1234) for m, n in ((6, 4), (4, 4), (4, 6)): A_arr = np.random.randn(m, n) B_arr = np.random.randn(m, n) As = (A_arr, A_arr.tolist(), np.matrix(A_arr)) Bs = (B_arr, B_arr.tolist(), np.matrix(B_arr)) R_arr, s = orthogonal_procrustes(A_arr, B_arr) AR_arr = 
A_arr.dot(R_arr) for A, B in product(As, Bs): R, s = orthogonal_procrustes(A, B) AR = A_arr.dot(R) assert_allclose(AR, AR_arr) def test_orthogonal_procrustes(): np.random.seed(1234) for m, n in ((6, 4), (4, 4), (4, 6)): # Sample a random target matrix. B = np.random.randn(m, n) # Sample a random orthogonal matrix # by computing eigh of a sampled symmetric matrix. X = np.random.randn(n, n) w, V = eigh(X.T + X) assert_allclose(inv(V), V.T) # Compute a matrix with a known orthogonal transformation that gives B. A = np.dot(B, V.T) # Check that an orthogonal transformation from A to B can be recovered. R, s = orthogonal_procrustes(A, B) assert_allclose(inv(R), R.T) assert_allclose(A.dot(R), B) # Create a perturbed input matrix. A_perturbed = A + 1e-2 * np.random.randn(m, n) # Check that the orthogonal procrustes function can find an orthogonal # transformation that is better than the orthogonal transformation # computed from the original input matrix. R_prime, s = orthogonal_procrustes(A_perturbed, B) assert_allclose(inv(R_prime), R_prime.T) # Compute the naive and optimal transformations of the perturbed input. naive_approx = A_perturbed.dot(R) optim_approx = A_perturbed.dot(R_prime) # Compute the Frobenius norm errors of the matrix approximations. naive_approx_error = norm(naive_approx - B, ord='fro') optim_approx_error = norm(optim_approx - B, ord='fro') # Check that the orthogonal Procrustes approximation is better. assert_array_less(optim_approx_error, naive_approx_error) def _centered(A): mu = A.mean(axis=0) return A - mu, mu def test_orthogonal_procrustes_exact_example(): # Check a small application. # It uses translation, scaling, reflection, and rotation. 
# # | # a b | # | # d c | w # | # --------+--- x ----- z --- # | # | y # | # A_orig = np.array([[-3, 3], [-2, 3], [-2, 2], [-3, 2]], dtype=float) B_orig = np.array([[3, 2], [1, 0], [3, -2], [5, 0]], dtype=float) A, A_mu = _centered(A_orig) B, B_mu = _centered(B_orig) R, s = orthogonal_procrustes(A, B) scale = s / np.square(norm(A)) B_approx = scale * np.dot(A, R) + B_mu assert_allclose(B_approx, B_orig, atol=1e-8) def test_orthogonal_procrustes_stretched_example(): # Try again with a target with a stretched y axis. A_orig = np.array([[-3, 3], [-2, 3], [-2, 2], [-3, 2]], dtype=float) B_orig = np.array([[3, 40], [1, 0], [3, -40], [5, 0]], dtype=float) A, A_mu = _centered(A_orig) B, B_mu = _centered(B_orig) R, s = orthogonal_procrustes(A, B) scale = s / np.square(norm(A)) B_approx = scale * np.dot(A, R) + B_mu expected = np.array([[3, 21], [-18, 0], [3, -21], [24, 0]], dtype=float) assert_allclose(B_approx, expected, atol=1e-8) # Check disparity symmetry. expected_disparity = 0.4501246882793018 AB_disparity = np.square(norm(B_approx - B_orig) / norm(B)) assert_allclose(AB_disparity, expected_disparity) R, s = orthogonal_procrustes(B, A) scale = s / np.square(norm(B)) A_approx = scale * np.dot(B, R) + A_mu BA_disparity = np.square(norm(A_approx - A_orig) / norm(A)) assert_allclose(BA_disparity, expected_disparity) def test_orthogonal_procrustes_skbio_example(): # This transformation is also exact. # It uses translation, scaling, and reflection. 
# # | # | a # | b # | c d # --+--------- # | # | w # | # | x # | # | z y # | # A_orig = np.array([[4, -2], [4, -4], [4, -6], [2, -6]], dtype=float) B_orig = np.array([[1, 3], [1, 2], [1, 1], [2, 1]], dtype=float) B_standardized = np.array([ [-0.13363062, 0.6681531], [-0.13363062, 0.13363062], [-0.13363062, -0.40089186], [0.40089186, -0.40089186]]) A, A_mu = _centered(A_orig) B, B_mu = _centered(B_orig) R, s = orthogonal_procrustes(A, B) scale = s / np.square(norm(A)) B_approx = scale * np.dot(A, R) + B_mu assert_allclose(B_approx, B_orig) assert_allclose(B / norm(B), B_standardized)
6,723
34.204188
80
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/tests/test_special_matrices.py
"""Tests for functions in special_matrices.py.""" from __future__ import division, print_function, absolute_import import numpy as np from numpy import arange, add, array, eye, copy, sqrt from numpy.testing import (assert_equal, assert_array_equal, assert_array_almost_equal, assert_allclose) from pytest import raises as assert_raises from scipy._lib.six import xrange from scipy import fftpack from scipy.special import comb from scipy.linalg import (toeplitz, hankel, circulant, hadamard, leslie, companion, tri, triu, tril, kron, block_diag, helmert, hilbert, invhilbert, pascal, invpascal, dft) from numpy.linalg import cond def get_mat(n): data = arange(n) data = add.outer(data,data) return data class TestTri(object): def test_basic(self): assert_equal(tri(4),array([[1,0,0,0], [1,1,0,0], [1,1,1,0], [1,1,1,1]])) assert_equal(tri(4,dtype='f'),array([[1,0,0,0], [1,1,0,0], [1,1,1,0], [1,1,1,1]],'f')) def test_diag(self): assert_equal(tri(4,k=1),array([[1,1,0,0], [1,1,1,0], [1,1,1,1], [1,1,1,1]])) assert_equal(tri(4,k=-1),array([[0,0,0,0], [1,0,0,0], [1,1,0,0], [1,1,1,0]])) def test_2d(self): assert_equal(tri(4,3),array([[1,0,0], [1,1,0], [1,1,1], [1,1,1]])) assert_equal(tri(3,4),array([[1,0,0,0], [1,1,0,0], [1,1,1,0]])) def test_diag2d(self): assert_equal(tri(3,4,k=2),array([[1,1,1,0], [1,1,1,1], [1,1,1,1]])) assert_equal(tri(4,3,k=-2),array([[0,0,0], [0,0,0], [1,0,0], [1,1,0]])) class TestTril(object): def test_basic(self): a = (100*get_mat(5)).astype('l') b = a.copy() for k in range(5): for l in range(k+1,5): b[k,l] = 0 assert_equal(tril(a),b) def test_diag(self): a = (100*get_mat(5)).astype('f') b = a.copy() for k in range(5): for l in range(k+3,5): b[k,l] = 0 assert_equal(tril(a,k=2),b) b = a.copy() for k in range(5): for l in range(max((k-1,0)),5): b[k,l] = 0 assert_equal(tril(a,k=-2),b) class TestTriu(object): def test_basic(self): a = (100*get_mat(5)).astype('l') b = a.copy() for k in range(5): for l in range(k+1,5): b[l,k] = 0 assert_equal(triu(a),b) def 
test_diag(self): a = (100*get_mat(5)).astype('f') b = a.copy() for k in range(5): for l in range(max((k-1,0)),5): b[l,k] = 0 assert_equal(triu(a,k=2),b) b = a.copy() for k in range(5): for l in range(k+3,5): b[l,k] = 0 assert_equal(triu(a,k=-2),b) class TestToeplitz(object): def test_basic(self): y = toeplitz([1,2,3]) assert_array_equal(y,[[1,2,3],[2,1,2],[3,2,1]]) y = toeplitz([1,2,3],[1,4,5]) assert_array_equal(y,[[1,4,5],[2,1,4],[3,2,1]]) def test_complex_01(self): data = (1.0 + arange(3.0)) * (1.0 + 1.0j) x = copy(data) t = toeplitz(x) # Calling toeplitz should not change x. assert_array_equal(x, data) # According to the docstring, x should be the first column of t. col0 = t[:,0] assert_array_equal(col0, data) assert_array_equal(t[0,1:], data[1:].conj()) def test_scalar_00(self): """Scalar arguments still produce a 2D array.""" t = toeplitz(10) assert_array_equal(t, [[10]]) t = toeplitz(10, 20) assert_array_equal(t, [[10]]) def test_scalar_01(self): c = array([1,2,3]) t = toeplitz(c, 1) assert_array_equal(t, [[1],[2],[3]]) def test_scalar_02(self): c = array([1,2,3]) t = toeplitz(c, array(1)) assert_array_equal(t, [[1],[2],[3]]) def test_scalar_03(self): c = array([1,2,3]) t = toeplitz(c, array([1])) assert_array_equal(t, [[1],[2],[3]]) def test_scalar_04(self): r = array([10,2,3]) t = toeplitz(1, r) assert_array_equal(t, [[1,2,3]]) class TestHankel(object): def test_basic(self): y = hankel([1,2,3]) assert_array_equal(y, [[1,2,3], [2,3,0], [3,0,0]]) y = hankel([1,2,3], [3,4,5]) assert_array_equal(y, [[1,2,3], [2,3,4], [3,4,5]]) class TestCirculant(object): def test_basic(self): y = circulant([1,2,3]) assert_array_equal(y, [[1,3,2], [2,1,3], [3,2,1]]) class TestHadamard(object): def test_basic(self): y = hadamard(1) assert_array_equal(y, [[1]]) y = hadamard(2, dtype=float) assert_array_equal(y, [[1.0, 1.0], [1.0, -1.0]]) y = hadamard(4) assert_array_equal(y, [[1,1,1,1], [1,-1,1,-1], [1,1,-1,-1], [1,-1,-1,1]]) assert_raises(ValueError, hadamard, 0) 
assert_raises(ValueError, hadamard, 5) class TestLeslie(object): def test_bad_shapes(self): assert_raises(ValueError, leslie, [[1,1],[2,2]], [3,4,5]) assert_raises(ValueError, leslie, [3,4,5], [[1,1],[2,2]]) assert_raises(ValueError, leslie, [1,2], [1,2]) assert_raises(ValueError, leslie, [1], []) def test_basic(self): a = leslie([1, 2, 3], [0.25, 0.5]) expected = array([ [1.0, 2.0, 3.0], [0.25, 0.0, 0.0], [0.0, 0.5, 0.0]]) assert_array_equal(a, expected) class TestCompanion(object): def test_bad_shapes(self): assert_raises(ValueError, companion, [[1,1],[2,2]]) assert_raises(ValueError, companion, [0,4,5]) assert_raises(ValueError, companion, [1]) assert_raises(ValueError, companion, []) def test_basic(self): c = companion([1, 2, 3]) expected = array([ [-2.0, -3.0], [1.0, 0.0]]) assert_array_equal(c, expected) c = companion([2.0, 5.0, -10.0]) expected = array([ [-2.5, 5.0], [1.0, 0.0]]) assert_array_equal(c, expected) class TestBlockDiag: def test_basic(self): x = block_diag(eye(2), [[1,2], [3,4], [5,6]], [[1, 2, 3]]) assert_array_equal(x, [[1, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], [0, 0, 1, 2, 0, 0, 0], [0, 0, 3, 4, 0, 0, 0], [0, 0, 5, 6, 0, 0, 0], [0, 0, 0, 0, 1, 2, 3]]) def test_dtype(self): x = block_diag([[1.5]]) assert_equal(x.dtype, float) x = block_diag([[True]]) assert_equal(x.dtype, bool) def test_mixed_dtypes(self): actual = block_diag([[1]], [[1j]]) desired = np.array([[1, 0], [0, 1j]]) assert_array_equal(actual, desired) def test_scalar_and_1d_args(self): a = block_diag(1) assert_equal(a.shape, (1,1)) assert_array_equal(a, [[1]]) a = block_diag([2,3], 4) assert_array_equal(a, [[2, 3, 0], [0, 0, 4]]) def test_bad_arg(self): assert_raises(ValueError, block_diag, [[[1]]]) def test_no_args(self): a = block_diag() assert_equal(a.ndim, 2) assert_equal(a.nbytes, 0) def test_empty_matrix_arg(self): # regression test for gh-4596: check the shape of the result # for empty matrix inputs. 
Empty matrices are no longer ignored # (gh-4908) it is viewed as a shape (1, 0) matrix. a = block_diag([[1, 0], [0, 1]], [], [[2, 3], [4, 5], [6, 7]]) assert_array_equal(a, [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0], [0, 0, 2, 3], [0, 0, 4, 5], [0, 0, 6, 7]]) def test_zerosized_matrix_arg(self): # test for gh-4908: check the shape of the result for # zero-sized matrix inputs, i.e. matrices with shape (0,n) or (n,0). # note that [[]] takes shape (1,0) a = block_diag([[1, 0], [0, 1]], [[]], [[2, 3], [4, 5], [6, 7]], np.zeros([0,2],dtype='int32')) assert_array_equal(a, [[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 2, 3, 0, 0], [0, 0, 4, 5, 0, 0], [0, 0, 6, 7, 0, 0]]) class TestKron: def test_basic(self): a = kron(array([[1, 2], [3, 4]]), array([[1, 1, 1]])) assert_array_equal(a, array([[1, 1, 1, 2, 2, 2], [3, 3, 3, 4, 4, 4]])) m1 = array([[1, 2], [3, 4]]) m2 = array([[10], [11]]) a = kron(m1, m2) expected = array([[10, 20], [11, 22], [30, 40], [33, 44]]) assert_array_equal(a, expected) class TestHelmert(object): def test_orthogonality(self): for n in range(1, 7): H = helmert(n, full=True) Id = np.eye(n) assert_allclose(H.dot(H.T), Id, atol=1e-12) assert_allclose(H.T.dot(H), Id, atol=1e-12) def test_subspace(self): for n in range(2, 7): H_full = helmert(n, full=True) H_partial = helmert(n) for U in H_full[1:, :].T, H_partial.T: C = np.eye(n) - np.ones((n, n)) / n assert_allclose(U.dot(U.T), C) assert_allclose(U.T.dot(U), np.eye(n-1), atol=1e-12) class TestHilbert(object): def test_basic(self): h3 = array([[1.0, 1/2., 1/3.], [1/2., 1/3., 1/4.], [1/3., 1/4., 1/5.]]) assert_array_almost_equal(hilbert(3), h3) assert_array_equal(hilbert(1), [[1.0]]) h0 = hilbert(0) assert_equal(h0.shape, (0,0)) class TestInvHilbert(object): def test_basic(self): invh1 = array([[1]]) assert_array_equal(invhilbert(1, exact=True), invh1) assert_array_equal(invhilbert(1), invh1) invh2 = array([[4, -6], [-6, 12]]) assert_array_equal(invhilbert(2, exact=True), invh2) 
assert_array_almost_equal(invhilbert(2), invh2) invh3 = array([[9, -36, 30], [-36, 192, -180], [30, -180, 180]]) assert_array_equal(invhilbert(3, exact=True), invh3) assert_array_almost_equal(invhilbert(3), invh3) invh4 = array([[16, -120, 240, -140], [-120, 1200, -2700, 1680], [240, -2700, 6480, -4200], [-140, 1680, -4200, 2800]]) assert_array_equal(invhilbert(4, exact=True), invh4) assert_array_almost_equal(invhilbert(4), invh4) invh5 = array([[25, -300, 1050, -1400, 630], [-300, 4800, -18900, 26880, -12600], [1050, -18900, 79380, -117600, 56700], [-1400, 26880, -117600, 179200, -88200], [630, -12600, 56700, -88200, 44100]]) assert_array_equal(invhilbert(5, exact=True), invh5) assert_array_almost_equal(invhilbert(5), invh5) invh17 = array([ [289, -41616, 1976760, -46124400, 629598060, -5540462928, 33374693352, -143034400080, 446982500250, -1033026222800, 1774926873720, -2258997839280, 2099709530100, -1384423866000, 613101997800, -163493866080, 19835652870], [-41616, 7990272, -426980160, 10627061760, -151103534400, 1367702848512, -8410422724704, 36616806420480, -115857864064800, 270465047424000, -468580694662080, 600545887119360, -561522320049600, 372133135180800, -165537539406000, 44316454993920, -5395297580640], [1976760, -426980160, 24337869120, -630981792000, 9228108708000, -85267724461920, 532660105897920, -2348052711713280, 7504429831470000, -17664748409880000, 30818191841236800, -39732544853164800, 37341234283298400, -24857330514030000, 11100752642520000, -2982128117299200, 364182586693200], [-46124400, 10627061760, -630981792000, 16826181120000, -251209625940000, 2358021022156800, -14914482965141760, 66409571644416000, -214015221119700000, 507295338950400000, -890303319857952000, 1153715376477081600, -1089119333262870000, 727848632044800000, -326170262829600000, 87894302404608000, -10763618673376800], [629598060, -151103534400, 9228108708000, -251209625940000, 3810012660090000, -36210360321495360, 231343968720664800, -1038687206500944000, 
3370739732635275000, -8037460526495400000, 14178080368737885600, -18454939322943942000, 17489975175339030000, -11728977435138600000, 5272370630081100000, -1424711708039692800, 174908803442373000], [-5540462928, 1367702848512, -85267724461920, 2358021022156800, -36210360321495360, 347619459086355456, -2239409617216035264, 10124803292907663360, -33052510749726468000, 79217210949138662400, -140362995650505067440, 183420385176741672960, -174433352415381259200, 117339159519533952000, -52892422160973595200, 14328529177999196160, -1763080738699119840], [33374693352, -8410422724704, 532660105897920, -14914482965141760, 231343968720664800, -2239409617216035264, 14527452132196331328, -66072377044391477760, 216799987176909536400, -521925895055522958000, 928414062734059661760, -1217424500995626443520, 1161358898976091015200, -783401860847777371200, 354015418167362952000, -96120549902411274240, 11851820521255194480], [-143034400080, 36616806420480, -2348052711713280, 66409571644416000, -1038687206500944000, 10124803292907663360, -66072377044391477760, 302045152202932469760, -995510145200094810000, 2405996923185123840000, -4294704507885446054400, 5649058909023744614400, -5403874060541811254400, 3654352703663101440000, -1655137020003255360000, 450325202737117593600, -55630994283442749600], [446982500250, -115857864064800, 7504429831470000, -214015221119700000, 3370739732635275000, -33052510749726468000, 216799987176909536400, -995510145200094810000, 3293967392206196062500, -7988661659013106500000, 14303908928401362270000, -18866974090684772052000, 18093328327706957325000, -12263364009096700500000, 5565847995255512250000, -1517208935002984080000, 187754605706619279900], [-1033026222800, 270465047424000, -17664748409880000, 507295338950400000, -8037460526495400000, 79217210949138662400, -521925895055522958000, 2405996923185123840000, -7988661659013106500000, 19434404971634224000000, -34894474126569249192000, 46141453390504792320000, -44349976506971935800000, 
30121928988527376000000, -13697025107665828500000, 3740200989399948902400, -463591619028689580000], [1774926873720, -468580694662080, 30818191841236800, -890303319857952000, 14178080368737885600, -140362995650505067440, 928414062734059661760, -4294704507885446054400, 14303908928401362270000, -34894474126569249192000, 62810053427824648545600, -83243376594051600326400, 80177044485212743068000, -54558343880470209780000, 24851882355348879230400, -6797096028813368678400, 843736746632215035600], [-2258997839280, 600545887119360, -39732544853164800, 1153715376477081600, -18454939322943942000, 183420385176741672960, -1217424500995626443520, 5649058909023744614400, -18866974090684772052000, 46141453390504792320000, -83243376594051600326400, 110552468520163390156800, -106681852579497947388000, 72720410752415168870400, -33177973900974346080000, 9087761081682520473600, -1129631016152221783200], [2099709530100, -561522320049600, 37341234283298400, -1089119333262870000, 17489975175339030000, -174433352415381259200, 1161358898976091015200, -5403874060541811254400, 18093328327706957325000, -44349976506971935800000, 80177044485212743068000, -106681852579497947388000, 103125790826848015808400, -70409051543137015800000, 32171029219823375700000, -8824053728865840192000, 1098252376814660067000], [-1384423866000, 372133135180800, -24857330514030000, 727848632044800000, -11728977435138600000, 117339159519533952000, -783401860847777371200, 3654352703663101440000, -12263364009096700500000, 30121928988527376000000, -54558343880470209780000, 72720410752415168870400, -70409051543137015800000, 48142941226076592000000, -22027500987368499000000, 6049545098753157120000, -753830033789944188000], [613101997800, -165537539406000, 11100752642520000, -326170262829600000, 5272370630081100000, -52892422160973595200, 354015418167362952000, -1655137020003255360000, 5565847995255512250000, -13697025107665828500000, 24851882355348879230400, -33177973900974346080000, 32171029219823375700000, 
-22027500987368499000000, 10091416708498869000000, -2774765838662800128000, 346146444087219270000], [-163493866080, 44316454993920, -2982128117299200, 87894302404608000, -1424711708039692800, 14328529177999196160, -96120549902411274240, 450325202737117593600, -1517208935002984080000, 3740200989399948902400, -6797096028813368678400, 9087761081682520473600, -8824053728865840192000, 6049545098753157120000, -2774765838662800128000, 763806510427609497600, -95382575704033754400], [19835652870, -5395297580640, 364182586693200, -10763618673376800, 174908803442373000, -1763080738699119840, 11851820521255194480, -55630994283442749600, 187754605706619279900, -463591619028689580000, 843736746632215035600, -1129631016152221783200, 1098252376814660067000, -753830033789944188000, 346146444087219270000, -95382575704033754400, 11922821963004219300] ]) assert_array_equal(invhilbert(17, exact=True), invh17) assert_allclose(invhilbert(17), invh17.astype(float), rtol=1e-12) def test_inverse(self): for n in xrange(1, 10): a = hilbert(n) b = invhilbert(n) # The Hilbert matrix is increasingly badly conditioned, # so take that into account in the test c = cond(a) assert_allclose(a.dot(b), eye(n), atol=1e-15*c, rtol=1e-15*c) class TestPascal(object): cases = [ (1, array([[1]]), array([[1]])), (2, array([[1, 1], [1, 2]]), array([[1, 0], [1, 1]])), (3, array([[1, 1, 1], [1, 2, 3], [1, 3, 6]]), array([[1, 0, 0], [1, 1, 0], [1, 2, 1]])), (4, array([[1, 1, 1, 1], [1, 2, 3, 4], [1, 3, 6, 10], [1, 4, 10, 20]]), array([[1, 0, 0, 0], [1, 1, 0, 0], [1, 2, 1, 0], [1, 3, 3, 1]])), ] def check_case(self, n, sym, low): assert_array_equal(pascal(n), sym) assert_array_equal(pascal(n, kind='lower'), low) assert_array_equal(pascal(n, kind='upper'), low.T) assert_array_almost_equal(pascal(n, exact=False), sym) assert_array_almost_equal(pascal(n, exact=False, kind='lower'), low) assert_array_almost_equal(pascal(n, exact=False, kind='upper'), low.T) def test_cases(self): for n, sym, low in self.cases: 
self.check_case(n, sym, low) def test_big(self): p = pascal(50) assert_equal(p[-1, -1], comb(98, 49, exact=True)) def test_threshold(self): # Regression test. An early version of `pascal` returned an # array of type np.uint64 for n=35, but that data type is too small # to hold p[-1, -1]. The second assert_equal below would fail # because p[-1, -1] overflowed. p = pascal(34) assert_equal(2*p.item(-1, -2), p.item(-1, -1), err_msg="n = 34") p = pascal(35) assert_equal(2*p.item(-1, -2), p.item(-1, -1), err_msg="n = 35") def test_invpascal(): def check_invpascal(n, kind, exact): ip = invpascal(n, kind=kind, exact=exact) p = pascal(n, kind=kind, exact=exact) # Matrix-multiply ip and p, and check that we get the identity matrix. # We can't use the simple expression e = ip.dot(p), because when # n < 35 and exact is True, p.dtype is np.uint64 and ip.dtype is # np.int64. The product of those dtypes is np.float64, which loses # precision when n is greater than 18. Instead we'll cast both to # object arrays, and then multiply. e = ip.astype(object).dot(p.astype(object)) assert_array_equal(e, eye(n), err_msg="n=%d kind=%r exact=%r" % (n, kind, exact)) kinds = ['symmetric', 'lower', 'upper'] ns = [1, 2, 5, 18] for n in ns: for kind in kinds: for exact in [True, False]: check_invpascal(n, kind, exact) ns = [19, 34, 35, 50] for n in ns: for kind in kinds: check_invpascal(n, kind, True) def test_dft(): m = dft(2) expected = array([[1.0, 1.0], [1.0, -1.0]]) assert_array_almost_equal(m, expected) m = dft(2, scale='n') assert_array_almost_equal(m, expected/2.0) m = dft(2, scale='sqrtn') assert_array_almost_equal(m, expected/sqrt(2.0)) x = array([0, 1, 2, 3, 4, 5, 0, 1]) m = dft(8) mx = m.dot(x) fx = fftpack.fft(x) assert_array_almost_equal(mx, fx)
23,561
38.335559
86
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/tests/test_basic.py
# # Created by: Pearu Peterson, March 2002 # """ Test functions for linalg.basic module """ from __future__ import division, print_function, absolute_import import warnings import itertools import numpy as np from numpy import (arange, array, dot, zeros, identity, conjugate, transpose, float32) import numpy.linalg as linalg from numpy.random import random from numpy.testing import (assert_equal, assert_almost_equal, assert_, assert_array_almost_equal, assert_allclose, assert_array_equal) import pytest from pytest import raises as assert_raises from scipy._lib._numpy_compat import suppress_warnings from scipy.linalg import (solve, inv, det, lstsq, pinv, pinv2, pinvh, norm, solve_banded, solveh_banded, solve_triangular, solve_circulant, circulant, LinAlgError, block_diag, matrix_balance, LinAlgWarning) from scipy.linalg.basic import LstsqLapackError from scipy.linalg._testutils import assert_no_overwrite from scipy._lib._version import NumpyVersion """ Bugs: 1) solve.check_random_sym_complex fails if a is complex and transpose(a) = conjugate(a) (a is Hermitian). 
""" __usage__ = """ Build linalg: python setup_linalg.py build Run tests if scipy is installed: python -c 'import scipy;scipy.linalg.test()' Run tests if linalg is not installed: python tests/test_basic.py """ REAL_DTYPES = [np.float32, np.float64, np.longdouble] COMPLEX_DTYPES = [np.complex64, np.complex128, np.clongdouble] DTYPES = REAL_DTYPES + COMPLEX_DTYPES def _eps_cast(dtyp): """Get the epsilon for dtype, possibly downcast to BLAS types.""" dt = dtyp if dt == np.longdouble: dt = np.float64 elif dt == np.clongdouble: dt = np.complex128 return np.finfo(dt).eps class TestSolveBanded(object): def test_real(self): a = array([[1.0, 20, 0, 0], [-30, 4, 6, 0], [2, 1, 20, 2], [0, -1, 7, 14]]) ab = array([[0.0, 20, 6, 2], [1, 4, 20, 14], [-30, 1, 7, 0], [2, -1, 0, 0]]) l, u = 2, 1 b4 = array([10.0, 0.0, 2.0, 14.0]) b4by1 = b4.reshape(-1, 1) b4by2 = array([[2, 1], [-30, 4], [2, 3], [1, 3]]) b4by4 = array([[1, 0, 0, 0], [0, 0, 0, 1], [0, 1, 0, 0], [0, 1, 0, 0]]) for b in [b4, b4by1, b4by2, b4by4]: x = solve_banded((l, u), ab, b) assert_array_almost_equal(dot(a, x), b) def test_complex(self): a = array([[1.0, 20, 0, 0], [-30, 4, 6, 0], [2j, 1, 20, 2j], [0, -1, 7, 14]]) ab = array([[0.0, 20, 6, 2j], [1, 4, 20, 14], [-30, 1, 7, 0], [2j, -1, 0, 0]]) l, u = 2, 1 b4 = array([10.0, 0.0, 2.0, 14.0j]) b4by1 = b4.reshape(-1, 1) b4by2 = array([[2, 1], [-30, 4], [2, 3], [1, 3]]) b4by4 = array([[1, 0, 0, 0], [0, 0, 0, 1j], [0, 1, 0, 0], [0, 1, 0, 0]]) for b in [b4, b4by1, b4by2, b4by4]: x = solve_banded((l, u), ab, b) assert_array_almost_equal(dot(a, x), b) def test_tridiag_real(self): ab = array([[0.0, 20, 6, 2], [1, 4, 20, 14], [-30, 1, 7, 0]]) a = np.diag(ab[0, 1:], 1) + np.diag(ab[1, :], 0) + np.diag( ab[2, :-1], -1) b4 = array([10.0, 0.0, 2.0, 14.0]) b4by1 = b4.reshape(-1, 1) b4by2 = array([[2, 1], [-30, 4], [2, 3], [1, 3]]) b4by4 = array([[1, 0, 0, 0], [0, 0, 0, 1], [0, 1, 0, 0], [0, 1, 0, 0]]) for b in [b4, b4by1, b4by2, b4by4]: x = solve_banded((1, 1), ab, b) 
assert_array_almost_equal(dot(a, x), b) def test_tridiag_complex(self): ab = array([[0.0, 20, 6, 2j], [1, 4, 20, 14], [-30, 1, 7, 0]]) a = np.diag(ab[0, 1:], 1) + np.diag(ab[1, :], 0) + np.diag( ab[2, :-1], -1) b4 = array([10.0, 0.0, 2.0, 14.0j]) b4by1 = b4.reshape(-1, 1) b4by2 = array([[2, 1], [-30, 4], [2, 3], [1, 3]]) b4by4 = array([[1, 0, 0, 0], [0, 0, 0, 1], [0, 1, 0, 0], [0, 1, 0, 0]]) for b in [b4, b4by1, b4by2, b4by4]: x = solve_banded((1, 1), ab, b) assert_array_almost_equal(dot(a, x), b) def test_check_finite(self): a = array([[1.0, 20, 0, 0], [-30, 4, 6, 0], [2, 1, 20, 2], [0, -1, 7, 14]]) ab = array([[0.0, 20, 6, 2], [1, 4, 20, 14], [-30, 1, 7, 0], [2, -1, 0, 0]]) l, u = 2, 1 b4 = array([10.0, 0.0, 2.0, 14.0]) x = solve_banded((l, u), ab, b4, check_finite=False) assert_array_almost_equal(dot(a, x), b4) def test_bad_shape(self): ab = array([[0.0, 20, 6, 2], [1, 4, 20, 14], [-30, 1, 7, 0], [2, -1, 0, 0]]) l, u = 2, 1 bad = array([1.0, 2.0, 3.0, 4.0]).reshape(-1, 4) assert_raises(ValueError, solve_banded, (l, u), ab, bad) assert_raises(ValueError, solve_banded, (l, u), ab, [1.0, 2.0]) # Values of (l,u) are not compatible with ab. assert_raises(ValueError, solve_banded, (1, 1), ab, [1.0, 2.0]) def test_1x1(self): b = array([[1., 2., 3.]]) x = solve_banded((1, 1), [[0], [2], [0]], b) assert_array_equal(x, [[0.5, 1.0, 1.5]]) assert_equal(x.dtype, np.dtype('f8')) assert_array_equal(b, [[1.0, 2.0, 3.0]]) def test_native_list_arguments(self): a = [[1.0, 20, 0, 0], [-30, 4, 6, 0], [2, 1, 20, 2], [0, -1, 7, 14]] ab = [[0.0, 20, 6, 2], [1, 4, 20, 14], [-30, 1, 7, 0], [2, -1, 0, 0]] l, u = 2, 1 b = [10.0, 0.0, 2.0, 14.0] x = solve_banded((l, u), ab, b) assert_array_almost_equal(dot(a, x), b) class TestSolveHBanded(object): def test_01_upper(self): # Solve # [ 4 1 2 0] [1] # [ 1 4 1 2] X = [4] # [ 2 1 4 1] [1] # [ 0 2 1 4] [2] # with the RHS as a 1D array. 
ab = array([[0.0, 0.0, 2.0, 2.0], [-99, 1.0, 1.0, 1.0], [4.0, 4.0, 4.0, 4.0]]) b = array([1.0, 4.0, 1.0, 2.0]) x = solveh_banded(ab, b) assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0]) def test_02_upper(self): # Solve # [ 4 1 2 0] [1 6] # [ 1 4 1 2] X = [4 2] # [ 2 1 4 1] [1 6] # [ 0 2 1 4] [2 1] # ab = array([[0.0, 0.0, 2.0, 2.0], [-99, 1.0, 1.0, 1.0], [4.0, 4.0, 4.0, 4.0]]) b = array([[1.0, 6.0], [4.0, 2.0], [1.0, 6.0], [2.0, 1.0]]) x = solveh_banded(ab, b) expected = array([[0.0, 1.0], [1.0, 0.0], [0.0, 1.0], [0.0, 0.0]]) assert_array_almost_equal(x, expected) def test_03_upper(self): # Solve # [ 4 1 2 0] [1] # [ 1 4 1 2] X = [4] # [ 2 1 4 1] [1] # [ 0 2 1 4] [2] # with the RHS as a 2D array with shape (3,1). ab = array([[0.0, 0.0, 2.0, 2.0], [-99, 1.0, 1.0, 1.0], [4.0, 4.0, 4.0, 4.0]]) b = array([1.0, 4.0, 1.0, 2.0]).reshape(-1, 1) x = solveh_banded(ab, b) assert_array_almost_equal(x, array([0., 1., 0., 0.]).reshape(-1, 1)) def test_01_lower(self): # Solve # [ 4 1 2 0] [1] # [ 1 4 1 2] X = [4] # [ 2 1 4 1] [1] # [ 0 2 1 4] [2] # ab = array([[4.0, 4.0, 4.0, 4.0], [1.0, 1.0, 1.0, -99], [2.0, 2.0, 0.0, 0.0]]) b = array([1.0, 4.0, 1.0, 2.0]) x = solveh_banded(ab, b, lower=True) assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0]) def test_02_lower(self): # Solve # [ 4 1 2 0] [1 6] # [ 1 4 1 2] X = [4 2] # [ 2 1 4 1] [1 6] # [ 0 2 1 4] [2 1] # ab = array([[4.0, 4.0, 4.0, 4.0], [1.0, 1.0, 1.0, -99], [2.0, 2.0, 0.0, 0.0]]) b = array([[1.0, 6.0], [4.0, 2.0], [1.0, 6.0], [2.0, 1.0]]) x = solveh_banded(ab, b, lower=True) expected = array([[0.0, 1.0], [1.0, 0.0], [0.0, 1.0], [0.0, 0.0]]) assert_array_almost_equal(x, expected) def test_01_float32(self): # Solve # [ 4 1 2 0] [1] # [ 1 4 1 2] X = [4] # [ 2 1 4 1] [1] # [ 0 2 1 4] [2] # ab = array([[0.0, 0.0, 2.0, 2.0], [-99, 1.0, 1.0, 1.0], [4.0, 4.0, 4.0, 4.0]], dtype=float32) b = array([1.0, 4.0, 1.0, 2.0], dtype=float32) x = solveh_banded(ab, b) assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0]) def test_02_float32(self): 
# Solve # [ 4 1 2 0] [1 6] # [ 1 4 1 2] X = [4 2] # [ 2 1 4 1] [1 6] # [ 0 2 1 4] [2 1] # ab = array([[0.0, 0.0, 2.0, 2.0], [-99, 1.0, 1.0, 1.0], [4.0, 4.0, 4.0, 4.0]], dtype=float32) b = array([[1.0, 6.0], [4.0, 2.0], [1.0, 6.0], [2.0, 1.0]], dtype=float32) x = solveh_banded(ab, b) expected = array([[0.0, 1.0], [1.0, 0.0], [0.0, 1.0], [0.0, 0.0]]) assert_array_almost_equal(x, expected) def test_01_complex(self): # Solve # [ 4 -j 2 0] [2-j] # [ j 4 -j 2] X = [4-j] # [ 2 j 4 -j] [4+j] # [ 0 2 j 4] [2+j] # ab = array([[0.0, 0.0, 2.0, 2.0], [-99, -1.0j, -1.0j, -1.0j], [4.0, 4.0, 4.0, 4.0]]) b = array([2-1.0j, 4.0-1j, 4+1j, 2+1j]) x = solveh_banded(ab, b) assert_array_almost_equal(x, [0.0, 1.0, 1.0, 0.0]) def test_02_complex(self): # Solve # [ 4 -j 2 0] [2-j 2+4j] # [ j 4 -j 2] X = [4-j -1-j] # [ 2 j 4 -j] [4+j 4+2j] # [ 0 2 j 4] [2+j j] # ab = array([[0.0, 0.0, 2.0, 2.0], [-99, -1.0j, -1.0j, -1.0j], [4.0, 4.0, 4.0, 4.0]]) b = array([[2-1j, 2+4j], [4.0-1j, -1-1j], [4.0+1j, 4+2j], [2+1j, 1j]]) x = solveh_banded(ab, b) expected = array([[0.0, 1.0j], [1.0, 0.0], [1.0, 1.0], [0.0, 0.0]]) assert_array_almost_equal(x, expected) def test_tridiag_01_upper(self): # Solve # [ 4 1 0] [1] # [ 1 4 1] X = [4] # [ 0 1 4] [1] # with the RHS as a 1D array. ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]]) b = array([1.0, 4.0, 1.0]) x = solveh_banded(ab, b) assert_array_almost_equal(x, [0.0, 1.0, 0.0]) def test_tridiag_02_upper(self): # Solve # [ 4 1 0] [1 4] # [ 1 4 1] X = [4 2] # [ 0 1 4] [1 4] # ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]]) b = array([[1.0, 4.0], [4.0, 2.0], [1.0, 4.0]]) x = solveh_banded(ab, b) expected = array([[0.0, 1.0], [1.0, 0.0], [0.0, 1.0]]) assert_array_almost_equal(x, expected) def test_tridiag_03_upper(self): # Solve # [ 4 1 0] [1] # [ 1 4 1] X = [4] # [ 0 1 4] [1] # with the RHS as a 2D array with shape (3,1). 
ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]]) b = array([1.0, 4.0, 1.0]).reshape(-1, 1) x = solveh_banded(ab, b) assert_array_almost_equal(x, array([0.0, 1.0, 0.0]).reshape(-1, 1)) def test_tridiag_01_lower(self): # Solve # [ 4 1 0] [1] # [ 1 4 1] X = [4] # [ 0 1 4] [1] # ab = array([[4.0, 4.0, 4.0], [1.0, 1.0, -99]]) b = array([1.0, 4.0, 1.0]) x = solveh_banded(ab, b, lower=True) assert_array_almost_equal(x, [0.0, 1.0, 0.0]) def test_tridiag_02_lower(self): # Solve # [ 4 1 0] [1 4] # [ 1 4 1] X = [4 2] # [ 0 1 4] [1 4] # ab = array([[4.0, 4.0, 4.0], [1.0, 1.0, -99]]) b = array([[1.0, 4.0], [4.0, 2.0], [1.0, 4.0]]) x = solveh_banded(ab, b, lower=True) expected = array([[0.0, 1.0], [1.0, 0.0], [0.0, 1.0]]) assert_array_almost_equal(x, expected) def test_tridiag_01_float32(self): # Solve # [ 4 1 0] [1] # [ 1 4 1] X = [4] # [ 0 1 4] [1] # ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]], dtype=float32) b = array([1.0, 4.0, 1.0], dtype=float32) x = solveh_banded(ab, b) assert_array_almost_equal(x, [0.0, 1.0, 0.0]) def test_tridiag_02_float32(self): # Solve # [ 4 1 0] [1 4] # [ 1 4 1] X = [4 2] # [ 0 1 4] [1 4] # ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]], dtype=float32) b = array([[1.0, 4.0], [4.0, 2.0], [1.0, 4.0]], dtype=float32) x = solveh_banded(ab, b) expected = array([[0.0, 1.0], [1.0, 0.0], [0.0, 1.0]]) assert_array_almost_equal(x, expected) def test_tridiag_01_complex(self): # Solve # [ 4 -j 0] [ -j] # [ j 4 -j] X = [4-j] # [ 0 j 4] [4+j] # ab = array([[-99, -1.0j, -1.0j], [4.0, 4.0, 4.0]]) b = array([-1.0j, 4.0-1j, 4+1j]) x = solveh_banded(ab, b) assert_array_almost_equal(x, [0.0, 1.0, 1.0]) def test_tridiag_02_complex(self): # Solve # [ 4 -j 0] [ -j 4j] # [ j 4 -j] X = [4-j -1-j] # [ 0 j 4] [4+j 4 ] # ab = array([[-99, -1.0j, -1.0j], [4.0, 4.0, 4.0]]) b = array([[-1j, 4.0j], [4.0-1j, -1.0-1j], [4.0+1j, 4.0]]) x = solveh_banded(ab, b) expected = array([[0.0, 1.0j], [1.0, 0.0], [1.0, 1.0]]) assert_array_almost_equal(x, expected) def test_check_finite(self): # 
Solve # [ 4 1 0] [1] # [ 1 4 1] X = [4] # [ 0 1 4] [1] # with the RHS as a 1D array. ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]]) b = array([1.0, 4.0, 1.0]) x = solveh_banded(ab, b, check_finite=False) assert_array_almost_equal(x, [0.0, 1.0, 0.0]) def test_bad_shapes(self): ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]]) b = array([[1.0, 4.0], [4.0, 2.0]]) assert_raises(ValueError, solveh_banded, ab, b) assert_raises(ValueError, solveh_banded, ab, [1.0, 2.0]) assert_raises(ValueError, solveh_banded, ab, [1.0]) def test_1x1(self): x = solveh_banded([[1]], [[1, 2, 3]]) assert_array_equal(x, [[1.0, 2.0, 3.0]]) assert_equal(x.dtype, np.dtype('f8')) def test_native_list_arguments(self): # Same as test_01_upper, using python's native list. ab = [[0.0, 0.0, 2.0, 2.0], [-99, 1.0, 1.0, 1.0], [4.0, 4.0, 4.0, 4.0]] b = [1.0, 4.0, 1.0, 2.0] x = solveh_banded(ab, b) assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0]) class TestSolve(object): def setup_method(self): np.random.seed(1234) def test_20Feb04_bug(self): a = [[1, 1], [1.0, 0]] # ok x0 = solve(a, [1, 0j]) assert_array_almost_equal(dot(a, x0), [1, 0]) # gives failure with clapack.zgesv(..,rowmajor=0) a = [[1, 1], [1.2, 0]] b = [1, 0j] x0 = solve(a, b) assert_array_almost_equal(dot(a, x0), [1, 0]) def test_simple(self): a = [[1, 20], [-30, 4]] for b in ([[1, 0], [0, 1]], [1, 0], [[2, 1], [-30, 4]]): x = solve(a, b) assert_array_almost_equal(dot(a, x), b) def test_simple_sym(self): a = [[2, 3], [3, 5]] for lower in [0, 1]: for b in ([[1, 0], [0, 1]], [1, 0]): x = solve(a, b, sym_pos=1, lower=lower) assert_array_almost_equal(dot(a, x), b) def test_simple_sym_complex(self): a = [[5, 2], [2, 4]] for b in [[1j, 0], [[1j, 1j], [0, 2]], ]: x = solve(a, b, sym_pos=1) assert_array_almost_equal(dot(a, x), b) def test_simple_complex(self): a = array([[5, 2], [2j, 4]], 'D') for b in [[1j, 0], [[1j, 1j], [0, 2]], [1, 0j], array([1, 0], 'D'), ]: x = solve(a, b) assert_array_almost_equal(dot(a, x), b) def test_nils_20Feb04(self): n = 2 
A = random([n, n])+random([n, n])*1j X = zeros((n, n), 'D') Ainv = inv(A) R = identity(n)+identity(n)*0j for i in arange(0, n): r = R[:, i] X[:, i] = solve(A, r) assert_array_almost_equal(X, Ainv) def test_random(self): n = 20 a = random([n, n]) for i in range(n): a[i, i] = 20*(.1+a[i, i]) for i in range(4): b = random([n, 3]) x = solve(a, b) assert_array_almost_equal(dot(a, x), b) def test_random_complex(self): n = 20 a = random([n, n]) + 1j * random([n, n]) for i in range(n): a[i, i] = 20*(.1+a[i, i]) for i in range(2): b = random([n, 3]) x = solve(a, b) assert_array_almost_equal(dot(a, x), b) def test_random_sym(self): n = 20 a = random([n, n]) for i in range(n): a[i, i] = abs(20*(.1+a[i, i])) for j in range(i): a[i, j] = a[j, i] for i in range(4): b = random([n]) x = solve(a, b, sym_pos=1) assert_array_almost_equal(dot(a, x), b) def test_random_sym_complex(self): n = 20 a = random([n, n]) # XXX: with the following addition the accuracy will be very low a = a + 1j*random([n, n]) for i in range(n): a[i, i] = abs(20*(.1+a[i, i])) for j in range(i): a[i, j] = conjugate(a[j, i]) b = random([n])+2j*random([n]) for i in range(2): x = solve(a, b, sym_pos=1) assert_array_almost_equal(dot(a, x), b) def test_check_finite(self): a = [[1, 20], [-30, 4]] for b in ([[1, 0], [0, 1]], [1, 0], [[2, 1], [-30, 4]]): x = solve(a, b, check_finite=False) assert_array_almost_equal(dot(a, x), b) def test_scalar_a_and_1D_b(self): a = 1 b = [1, 2, 3] x = solve(a, b) assert_array_almost_equal(x.ravel(), b) assert_(x.shape == (3,), 'Scalar_a_1D_b test returned wrong shape') def test_simple2(self): a = np.array([[1.80, 2.88, 2.05, -0.89], [525.00, -295.00, -95.00, -380.00], [1.58, -2.69, -2.90, -1.04], [-1.11, -0.66, -0.59, 0.80]]) b = np.array([[9.52, 18.47], [2435.00, 225.00], [0.77, -13.28], [-6.22, -6.21]]) x = solve(a, b) assert_array_almost_equal(x, np.array([[1., -1, 3, -5], [3, 2, 4, 1]]).T) def test_simple_complex2(self): a = np.array([[-1.34+2.55j, 0.28+3.17j, -6.39-2.20j, 
0.72-0.92j], [-1.70-14.10j, 33.10-1.50j, -1.50+13.40j, 12.90+13.80j], [-3.29-2.39j, -1.91+4.42j, -0.14-1.35j, 1.72+1.35j], [2.41+0.39j, -0.56+1.47j, -0.83-0.69j, -1.96+0.67j]]) b = np.array([[26.26+51.78j, 31.32-6.70j], [64.30-86.80j, 158.60-14.20j], [-5.75+25.31j, -2.15+30.19j], [1.16+2.57j, -2.56+7.55j]]) x = solve(a, b) assert_array_almost_equal(x, np. array([[1+1.j, -1-2.j], [2-3.j, 5+1.j], [-4-5.j, -3+4.j], [6.j, 2-3.j]])) def test_hermitian(self): # An upper triangular matrix will be used for hermitian matrix a a = np.array([[-1.84, 0.11-0.11j, -1.78-1.18j, 3.91-1.50j], [0, -4.63, -1.84+0.03j, 2.21+0.21j], [0, 0, -8.87, 1.58-0.90j], [0, 0, 0, -1.36]]) b = np.array([[2.98-10.18j, 28.68-39.89j], [-9.58+3.88j, -24.79-8.40j], [-0.77-16.05j, 4.23-70.02j], [7.79+5.48j, -35.39+18.01j]]) res = np.array([[2.+1j, -8+6j], [3.-2j, 7-2j], [-1+2j, -1+5j], [1.-1j, 3-4j]]) x = solve(a, b, assume_a='her') assert_array_almost_equal(x, res) # Also conjugate a and test for lower triangular data x = solve(a.conj().T, b, assume_a='her', lower=True) assert_array_almost_equal(x, res) def test_pos_and_sym(self): A = np.arange(1, 10).reshape(3, 3) x = solve(np.tril(A)/9, np.ones(3), assume_a='pos') assert_array_almost_equal(x, [9., 1.8, 1.]) x = solve(np.tril(A)/9, np.ones(3), assume_a='sym') assert_array_almost_equal(x, [9., 1.8, 1.]) def test_singularity(self): a = np.array([[1, 0, 0, 0, 0, 0, 1, 0, 1], [1, 1, 1, 0, 0, 0, 1, 0, 1], [0, 1, 1, 0, 0, 0, 1, 0, 1], [1, 0, 1, 1, 1, 1, 0, 0, 0], [1, 0, 1, 1, 1, 1, 0, 0, 0], [1, 0, 1, 1, 1, 1, 0, 0, 0], [1, 0, 1, 1, 1, 1, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1]]) b = np.arange(9)[:, None] assert_raises(LinAlgError, solve, a, b) def test_ill_condition_warning(self): a = np.array([[1, 1], [1+1e-16, 1-1e-16]]) b = np.ones(2) with warnings.catch_warnings(): warnings.simplefilter('error') assert_raises(LinAlgWarning, solve, a, b) def test_empty_rhs(self): a = np.eye(2) b = [[], []] x = solve(a, b) assert_(x.size == 0, 
'Returned array is not empty') assert_(x.shape == (2, 0), 'Returned empty array shape is wrong') def test_multiple_rhs(self): a = np.eye(2) b = np.random.rand(2, 3, 4) x = solve(a, b) assert_array_almost_equal(x, b) def test_transposed_keyword(self): A = np.arange(9).reshape(3, 3) + 1 x = solve(np.tril(A)/9, np.ones(3), transposed=True) assert_array_almost_equal(x, [1.2, 0.2, 1]) x = solve(np.tril(A)/9, np.ones(3), transposed=False) assert_array_almost_equal(x, [9, -5.4, -1.2]) def test_transposed_notimplemented(self): a = np.eye(3).astype(complex) with assert_raises(NotImplementedError): solve(a, a, transposed=True) def test_nonsquare_a(self): assert_raises(ValueError, solve, [1, 2], 1) def test_size_mismatch_with_1D_b(self): assert_array_almost_equal(solve(np.eye(3), np.ones(3)), np.ones(3)) assert_raises(ValueError, solve, np.eye(3), np.ones(4)) def test_assume_a_keyword(self): assert_raises(ValueError, solve, 1, 1, assume_a='zxcv') @pytest.mark.skip(reason="Failure on OS X (gh-7500), " "crash on Windows (gh-8064)") def test_all_type_size_routine_combinations(self): sizes = [10, 100] assume_as = ['gen', 'sym', 'pos', 'her'] dtypes = [np.float32, np.float64, np.complex64, np.complex128] for size, assume_a, dtype in itertools.product(sizes, assume_as, dtypes): is_complex = dtype in (np.complex64, np.complex128) if assume_a == 'her' and not is_complex: continue err_msg = ("Failed for size: {}, assume_a: {}," "dtype: {}".format(size, assume_a, dtype)) a = np.random.randn(size, size).astype(dtype) b = np.random.randn(size).astype(dtype) if is_complex: a = a + (1j*np.random.randn(size, size)).astype(dtype) if assume_a == 'sym': # Can still be complex but only symmetric a = a + a.T elif assume_a == 'her': # Handle hermitian matrices here instead a = a + a.T.conj() elif assume_a == 'pos': a = a.conj().T.dot(a) + 0.1*np.eye(size) tol = 1e-12 if dtype in (np.float64, np.complex128) else 1e-6 if assume_a in ['gen', 'sym', 'her']: # We revert the tolerance from before # 
4b4a6e7c34fa4060533db38f9a819b98fa81476c if dtype in (np.float32, np.complex64): tol *= 10 x = solve(a, b, assume_a=assume_a) assert_allclose(a.dot(x), b, atol=tol * size, rtol=tol * size, err_msg=err_msg) if assume_a == 'sym' and dtype not in (np.complex64, np.complex128): x = solve(a, b, assume_a=assume_a, transposed=True) assert_allclose(a.dot(x), b, atol=tol * size, rtol=tol * size, err_msg=err_msg) class TestSolveTriangular(object): def test_simple(self): """ solve_triangular on a simple 2x2 matrix. """ A = array([[1, 0], [1, 2]]) b = [1, 1] sol = solve_triangular(A, b, lower=True) assert_array_almost_equal(sol, [1, 0]) # check that it works also for non-contiguous matrices sol = solve_triangular(A.T, b, lower=False) assert_array_almost_equal(sol, [.5, .5]) # and that it gives the same result as trans=1 sol = solve_triangular(A, b, lower=True, trans=1) assert_array_almost_equal(sol, [.5, .5]) b = identity(2) sol = solve_triangular(A, b, lower=True, trans=1) assert_array_almost_equal(sol, [[1., -.5], [0, 0.5]]) def test_simple_complex(self): """ solve_triangular on a simple 2x2 complex matrix """ A = array([[1+1j, 0], [1j, 2]]) b = identity(2) sol = solve_triangular(A, b, lower=True, trans=1) assert_array_almost_equal(sol, [[.5-.5j, -.25-.25j], [0, 0.5]]) def test_check_finite(self): """ solve_triangular on a simple 2x2 matrix. 
""" A = array([[1, 0], [1, 2]]) b = [1, 1] sol = solve_triangular(A, b, lower=True, check_finite=False) assert_array_almost_equal(sol, [1, 0]) class TestInv(object): def setup_method(self): np.random.seed(1234) def test_simple(self): a = [[1, 2], [3, 4]] a_inv = inv(a) assert_array_almost_equal(dot(a, a_inv), np.eye(2)) a = [[1, 2, 3], [4, 5, 6], [7, 8, 10]] a_inv = inv(a) assert_array_almost_equal(dot(a, a_inv), np.eye(3)) def test_random(self): n = 20 for i in range(4): a = random([n, n]) for i in range(n): a[i, i] = 20*(.1+a[i, i]) a_inv = inv(a) assert_array_almost_equal(dot(a, a_inv), identity(n)) def test_simple_complex(self): a = [[1, 2], [3, 4j]] a_inv = inv(a) assert_array_almost_equal(dot(a, a_inv), [[1, 0], [0, 1]]) def test_random_complex(self): n = 20 for i in range(4): a = random([n, n])+2j*random([n, n]) for i in range(n): a[i, i] = 20*(.1+a[i, i]) a_inv = inv(a) assert_array_almost_equal(dot(a, a_inv), identity(n)) def test_check_finite(self): a = [[1, 2], [3, 4]] a_inv = inv(a, check_finite=False) assert_array_almost_equal(dot(a, a_inv), [[1, 0], [0, 1]]) class TestDet(object): def setup_method(self): np.random.seed(1234) def test_simple(self): a = [[1, 2], [3, 4]] a_det = det(a) assert_almost_equal(a_det, -2.0) def test_simple_complex(self): a = [[1, 2], [3, 4j]] a_det = det(a) assert_almost_equal(a_det, -6+4j) def test_random(self): basic_det = linalg.det n = 20 for i in range(4): a = random([n, n]) d1 = det(a) d2 = basic_det(a) assert_almost_equal(d1, d2) def test_random_complex(self): basic_det = linalg.det n = 20 for i in range(4): a = random([n, n]) + 2j*random([n, n]) d1 = det(a) d2 = basic_det(a) assert_allclose(d1, d2, rtol=1e-13) def test_check_finite(self): a = [[1, 2], [3, 4]] a_det = det(a, check_finite=False) assert_almost_equal(a_det, -2.0) def direct_lstsq(a, b, cmplx=0): at = transpose(a) if cmplx: at = conjugate(at) a1 = dot(at, a) b1 = dot(at, b) return solve(a1, b1) class TestLstsq(object): lapack_drivers = ('gelsd', 'gelss', 
'gelsy', None) def setup_method(self): np.random.seed(1234) def test_simple_exact(self): for dtype in REAL_DTYPES: a = np.array([[1, 20], [-30, 4]], dtype=dtype) for lapack_driver in TestLstsq.lapack_drivers: for overwrite in (True, False): for bt in (((1, 0), (0, 1)), (1, 0), ((2, 1), (-30, 4))): # Store values in case they are overwritten # later a1 = a.copy() b = np.array(bt, dtype=dtype) b1 = b.copy() try: out = lstsq(a1, b1, lapack_driver=lapack_driver, overwrite_a=overwrite, overwrite_b=overwrite) except LstsqLapackError: if lapack_driver is None: mesg = ('LstsqLapackError raised with ' 'lapack_driver being None.') raise AssertionError(mesg) else: # can't proceed, skip to the next iteration continue x = out[0] r = out[2] assert_(r == 2, 'expected efficient rank 2, got %s' % r) assert_allclose( dot(a, x), b, atol=25 * _eps_cast(a1.dtype), rtol=25 * _eps_cast(a1.dtype), err_msg="driver: %s" % lapack_driver) def test_simple_overdet(self): for dtype in REAL_DTYPES: a = np.array([[1, 2], [4, 5], [3, 4]], dtype=dtype) b = np.array([1, 2, 3], dtype=dtype) for lapack_driver in TestLstsq.lapack_drivers: for overwrite in (True, False): # Store values in case they are overwritten later a1 = a.copy() b1 = b.copy() try: out = lstsq(a1, b1, lapack_driver=lapack_driver, overwrite_a=overwrite, overwrite_b=overwrite) except LstsqLapackError: if lapack_driver is None: mesg = ('LstsqLapackError raised with ' 'lapack_driver being None.') raise AssertionError(mesg) else: # can't proceed, skip to the next iteration continue x = out[0] if lapack_driver == 'gelsy': residuals = np.sum((b - a.dot(x))**2) else: residuals = out[1] r = out[2] assert_(r == 2, 'expected efficient rank 2, got %s' % r) assert_allclose(abs((dot(a, x) - b)**2).sum(axis=0), residuals, rtol=25 * _eps_cast(a1.dtype), atol=25 * _eps_cast(a1.dtype), err_msg="driver: %s" % lapack_driver) assert_allclose(x, (-0.428571428571429, 0.85714285714285), rtol=25 * _eps_cast(a1.dtype), atol=25 * _eps_cast(a1.dtype), 
err_msg="driver: %s" % lapack_driver) def test_simple_overdet_complex(self): for dtype in COMPLEX_DTYPES: a = np.array([[1+2j, 2], [4, 5], [3, 4]], dtype=dtype) b = np.array([1, 2+4j, 3], dtype=dtype) for lapack_driver in TestLstsq.lapack_drivers: for overwrite in (True, False): # Store values in case they are overwritten later a1 = a.copy() b1 = b.copy() try: out = lstsq(a1, b1, lapack_driver=lapack_driver, overwrite_a=overwrite, overwrite_b=overwrite) except LstsqLapackError: if lapack_driver is None: mesg = ('LstsqLapackError raised with ' 'lapack_driver being None.') raise AssertionError(mesg) else: # can't proceed, skip to the next iteration continue x = out[0] if lapack_driver == 'gelsy': res = b - a.dot(x) residuals = np.sum(res * res.conj()) else: residuals = out[1] r = out[2] assert_(r == 2, 'expected efficient rank 2, got %s' % r) assert_allclose(abs((dot(a, x) - b)**2).sum(axis=0), residuals, rtol=25 * _eps_cast(a1.dtype), atol=25 * _eps_cast(a1.dtype), err_msg="driver: %s" % lapack_driver) assert_allclose( x, (-0.4831460674157303 + 0.258426966292135j, 0.921348314606741 + 0.292134831460674j), rtol=25 * _eps_cast(a1.dtype), atol=25 * _eps_cast(a1.dtype), err_msg="driver: %s" % lapack_driver) def test_simple_underdet(self): for dtype in REAL_DTYPES: a = np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype) b = np.array([1, 2], dtype=dtype) for lapack_driver in TestLstsq.lapack_drivers: for overwrite in (True, False): # Store values in case they are overwritten later a1 = a.copy() b1 = b.copy() try: out = lstsq(a1, b1, lapack_driver=lapack_driver, overwrite_a=overwrite, overwrite_b=overwrite) except LstsqLapackError: if lapack_driver is None: mesg = ('LstsqLapackError raised with ' 'lapack_driver being None.') raise AssertionError(mesg) else: # can't proceed, skip to the next iteration continue x = out[0] r = out[2] assert_(r == 2, 'expected efficient rank 2, got %s' % r) assert_allclose(x, (-0.055555555555555, 0.111111111111111, 0.277777777777777), rtol=25 * 
_eps_cast(a1.dtype), atol=25 * _eps_cast(a1.dtype), err_msg="driver: %s" % lapack_driver) def test_random_exact(self): for dtype in REAL_DTYPES: for n in (20, 200): for lapack_driver in TestLstsq.lapack_drivers: for overwrite in (True, False): a = np.asarray(random([n, n]), dtype=dtype) for i in range(n): a[i, i] = 20 * (0.1 + a[i, i]) for i in range(4): b = np.asarray(random([n, 3]), dtype=dtype) # Store values in case they are overwritten later a1 = a.copy() b1 = b.copy() try: out = lstsq(a1, b1, lapack_driver=lapack_driver, overwrite_a=overwrite, overwrite_b=overwrite) except LstsqLapackError: if lapack_driver is None: mesg = ('LstsqLapackError raised with ' 'lapack_driver being None.') raise AssertionError(mesg) else: # can't proceed, skip to the next iteration continue x = out[0] r = out[2] assert_(r == n, 'expected efficient rank %s, ' 'got %s' % (n, r)) if dtype is np.float32: assert_allclose( dot(a, x), b, rtol=500 * _eps_cast(a1.dtype), atol=500 * _eps_cast(a1.dtype), err_msg="driver: %s" % lapack_driver) else: assert_allclose( dot(a, x), b, rtol=1000 * _eps_cast(a1.dtype), atol=1000 * _eps_cast(a1.dtype), err_msg="driver: %s" % lapack_driver) def test_random_complex_exact(self): for dtype in COMPLEX_DTYPES: for n in (20, 200): for lapack_driver in TestLstsq.lapack_drivers: for overwrite in (True, False): a = np.asarray(random([n, n]) + 1j*random([n, n]), dtype=dtype) for i in range(n): a[i, i] = 20 * (0.1 + a[i, i]) for i in range(2): b = np.asarray(random([n, 3]), dtype=dtype) # Store values in case they are overwritten later a1 = a.copy() b1 = b.copy() out = lstsq(a1, b1, lapack_driver=lapack_driver, overwrite_a=overwrite, overwrite_b=overwrite) x = out[0] r = out[2] assert_(r == n, 'expected efficient rank %s, ' 'got %s' % (n, r)) if dtype is np.complex64: assert_allclose( dot(a, x), b, rtol=400 * _eps_cast(a1.dtype), atol=400 * _eps_cast(a1.dtype), err_msg="driver: %s" % lapack_driver) else: assert_allclose( dot(a, x), b, rtol=1000 * 
_eps_cast(a1.dtype), atol=1000 * _eps_cast(a1.dtype), err_msg="driver: %s" % lapack_driver) def test_random_overdet(self): for dtype in REAL_DTYPES: for (n, m) in ((20, 15), (200, 2)): for lapack_driver in TestLstsq.lapack_drivers: for overwrite in (True, False): a = np.asarray(random([n, m]), dtype=dtype) for i in range(m): a[i, i] = 20 * (0.1 + a[i, i]) for i in range(4): b = np.asarray(random([n, 3]), dtype=dtype) # Store values in case they are overwritten later a1 = a.copy() b1 = b.copy() try: out = lstsq(a1, b1, lapack_driver=lapack_driver, overwrite_a=overwrite, overwrite_b=overwrite) except LstsqLapackError: if lapack_driver is None: mesg = ('LstsqLapackError raised with ' 'lapack_driver being None.') raise AssertionError(mesg) else: # can't proceed, skip to the next iteration continue x = out[0] r = out[2] assert_(r == m, 'expected efficient rank %s, ' 'got %s' % (m, r)) assert_allclose( x, direct_lstsq(a, b, cmplx=0), rtol=25 * _eps_cast(a1.dtype), atol=25 * _eps_cast(a1.dtype), err_msg="driver: %s" % lapack_driver) def test_random_complex_overdet(self): for dtype in COMPLEX_DTYPES: for (n, m) in ((20, 15), (200, 2)): for lapack_driver in TestLstsq.lapack_drivers: for overwrite in (True, False): a = np.asarray(random([n, m]) + 1j*random([n, m]), dtype=dtype) for i in range(m): a[i, i] = 20 * (0.1 + a[i, i]) for i in range(2): b = np.asarray(random([n, 3]), dtype=dtype) # Store values in case they are overwritten # later a1 = a.copy() b1 = b.copy() out = lstsq(a1, b1, lapack_driver=lapack_driver, overwrite_a=overwrite, overwrite_b=overwrite) x = out[0] r = out[2] assert_(r == m, 'expected efficient rank %s, ' 'got %s' % (m, r)) assert_allclose( x, direct_lstsq(a, b, cmplx=1), rtol=25 * _eps_cast(a1.dtype), atol=25 * _eps_cast(a1.dtype), err_msg="driver: %s" % lapack_driver) def test_check_finite(self): with suppress_warnings() as sup: # On (some) OSX this tests triggers a warning (gh-7538) sup.filter(RuntimeWarning, "internal gelsd driver lwork query 
error,.*" "Falling back to 'gelss' driver.") at = np.array(((1, 20), (-30, 4))) for dtype, bt, lapack_driver, overwrite, check_finite in \ itertools.product(REAL_DTYPES, (((1, 0), (0, 1)), (1, 0), ((2, 1), (-30, 4))), TestLstsq.lapack_drivers, (True, False), (True, False)): a = at.astype(dtype) b = np.array(bt, dtype=dtype) # Store values in case they are overwritten # later a1 = a.copy() b1 = b.copy() try: out = lstsq(a1, b1, lapack_driver=lapack_driver, check_finite=check_finite, overwrite_a=overwrite, overwrite_b=overwrite) except LstsqLapackError: if lapack_driver is None: raise AssertionError('LstsqLapackError raised with ' '"lapack_driver" being "None".') else: # can't proceed, # skip to the next iteration continue x = out[0] r = out[2] assert_(r == 2, 'expected efficient rank 2, got %s' % r) assert_allclose(dot(a, x), b, rtol=25 * _eps_cast(a.dtype), atol=25 * _eps_cast(a.dtype), err_msg="driver: %s" % lapack_driver) def test_zero_size(self): for a_shape, b_shape in (((0, 2), (0,)), ((0, 4), (0, 2)), ((4, 0), (4,)), ((4, 0), (4, 2))): b = np.ones(b_shape) x, residues, rank, s = lstsq(np.zeros(a_shape), b) assert_equal(x, np.zeros((a_shape[1],) + b_shape[1:])) residues_should_be = (np.empty((0,)) if a_shape[1] else np.linalg.norm(b, axis=0)**2) assert_equal(residues, residues_should_be) assert_(rank == 0, 'expected rank 0') assert_equal(s, np.empty((0,))) class TestPinv(object): def test_simple_real(self): a = array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float) a_pinv = pinv(a) assert_array_almost_equal(dot(a, a_pinv), np.eye(3)) a_pinv = pinv2(a) assert_array_almost_equal(dot(a, a_pinv), np.eye(3)) def test_simple_complex(self): a = (array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float) + 1j * array([[10, 8, 7], [6, 5, 4], [3, 2, 1]], dtype=float)) a_pinv = pinv(a) assert_array_almost_equal(dot(a, a_pinv), np.eye(3)) a_pinv = pinv2(a) assert_array_almost_equal(dot(a, a_pinv), np.eye(3)) def test_simple_singular(self): a = array([[1, 2, 3], [4, 5, 6], [7, 
8, 9]], dtype=float) a_pinv = pinv(a) a_pinv2 = pinv2(a) assert_array_almost_equal(a_pinv, a_pinv2) def test_simple_cols(self): a = array([[1, 2, 3], [4, 5, 6]], dtype=float) a_pinv = pinv(a) a_pinv2 = pinv2(a) assert_array_almost_equal(a_pinv, a_pinv2) def test_simple_rows(self): a = array([[1, 2], [3, 4], [5, 6]], dtype=float) a_pinv = pinv(a) a_pinv2 = pinv2(a) assert_array_almost_equal(a_pinv, a_pinv2) def test_check_finite(self): a = array([[1, 2, 3], [4, 5, 6.], [7, 8, 10]]) a_pinv = pinv(a, check_finite=False) assert_array_almost_equal(dot(a, a_pinv), np.eye(3)) a_pinv = pinv2(a, check_finite=False) assert_array_almost_equal(dot(a, a_pinv), np.eye(3)) def test_native_list_argument(self): a = [[1, 2, 3], [4, 5, 6], [7, 8, 9]] a_pinv = pinv(a) a_pinv2 = pinv2(a) assert_array_almost_equal(a_pinv, a_pinv2) class TestPinvSymmetric(object): def test_simple_real(self): a = array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float) a = np.dot(a, a.T) a_pinv = pinvh(a) assert_array_almost_equal(np.dot(a, a_pinv), np.eye(3)) def test_nonpositive(self): a = array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=float) a = np.dot(a, a.T) u, s, vt = np.linalg.svd(a) s[0] *= -1 a = np.dot(u * s, vt) # a is now symmetric non-positive and singular a_pinv = pinv2(a) a_pinvh = pinvh(a) assert_array_almost_equal(a_pinv, a_pinvh) def test_simple_complex(self): a = (array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float) + 1j * array([[10, 8, 7], [6, 5, 4], [3, 2, 1]], dtype=float)) a = np.dot(a, a.conj().T) a_pinv = pinvh(a) assert_array_almost_equal(np.dot(a, a_pinv), np.eye(3)) def test_native_list_argument(self): a = array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float) a = np.dot(a, a.T) a_pinv = pinvh(a.tolist()) assert_array_almost_equal(np.dot(a, a_pinv), np.eye(3)) class TestVectorNorms(object): def test_types(self): for dtype in np.typecodes['AllFloat']: x = np.array([1, 2, 3], dtype=dtype) tol = max(1e-15, np.finfo(dtype).eps.real * 20) assert_allclose(norm(x), np.sqrt(14), 
rtol=tol) assert_allclose(norm(x, 2), np.sqrt(14), rtol=tol) for dtype in np.typecodes['Complex']: x = np.array([1j, 2j, 3j], dtype=dtype) tol = max(1e-15, np.finfo(dtype).eps.real * 20) assert_allclose(norm(x), np.sqrt(14), rtol=tol) assert_allclose(norm(x, 2), np.sqrt(14), rtol=tol) def test_overflow(self): # unlike numpy's norm, this one is # safer on overflow a = array([1e20], dtype=float32) assert_almost_equal(norm(a), a) def test_stable(self): # more stable than numpy's norm a = array([1e4] + [1]*10000, dtype=float32) try: # snrm in double precision; we obtain the same as for float64 # -- large atol needed due to varying blas implementations assert_allclose(norm(a) - 1e4, 0.5, atol=1e-2) except AssertionError: # snrm implemented in single precision, == np.linalg.norm result msg = ": Result should equal either 0.0 or 0.5 (depending on " \ "implementation of snrm2)." assert_almost_equal(norm(a) - 1e4, 0.0, err_msg=msg) def test_zero_norm(self): assert_equal(norm([1, 0, 3], 0), 2) assert_equal(norm([1, 2, 3], 0), 3) def test_axis_kwd(self): a = np.array([[[2, 1], [3, 4]]] * 2, 'd') assert_allclose(norm(a, axis=1), [[3.60555128, 4.12310563]] * 2) assert_allclose(norm(a, 1, axis=1), [[5.] * 2] * 2) @pytest.mark.skipif(NumpyVersion(np.__version__) < '1.10.0', reason="") def test_keepdims_kwd(self): a = np.array([[[2, 1], [3, 4]]] * 2, 'd') b = norm(a, axis=1, keepdims=True) assert_allclose(b, [[[3.60555128, 4.12310563]]] * 2) assert_(b.shape == (2, 1, 2)) assert_allclose(norm(a, 1, axis=2, keepdims=True), [[[3.], [7.]]] * 2) class TestMatrixNorms(object): def test_matrix_norms(self): # Not all of these are matrix norms in the most technical sense. 
np.random.seed(1234) for n, m in (1, 1), (1, 3), (3, 1), (4, 4), (4, 5), (5, 4): for t in np.single, np.double, np.csingle, np.cdouble, np.int64: A = 10 * np.random.randn(n, m).astype(t) if np.issubdtype(A.dtype, np.complexfloating): A = (A + 10j * np.random.randn(n, m)).astype(t) t_high = np.cdouble else: t_high = np.double for order in (None, 'fro', 1, -1, 2, -2, np.inf, -np.inf): actual = norm(A, ord=order) desired = np.linalg.norm(A, ord=order) # SciPy may return higher precision matrix norms. # This is a consequence of using LAPACK. if not np.allclose(actual, desired): desired = np.linalg.norm(A.astype(t_high), ord=order) assert_allclose(actual, desired) def test_axis_kwd(self): a = np.array([[[2, 1], [3, 4]]] * 2, 'd') b = norm(a, ord=np.inf, axis=(1, 0)) c = norm(np.swapaxes(a, 0, 1), ord=np.inf, axis=(0, 1)) d = norm(a, ord=1, axis=(0, 1)) assert_allclose(b, c) assert_allclose(c, d) assert_allclose(b, d) assert_(b.shape == c.shape == d.shape) b = norm(a, ord=1, axis=(1, 0)) c = norm(np.swapaxes(a, 0, 1), ord=1, axis=(0, 1)) d = norm(a, ord=np.inf, axis=(0, 1)) assert_allclose(b, c) assert_allclose(c, d) assert_allclose(b, d) assert_(b.shape == c.shape == d.shape) @pytest.mark.skipif(NumpyVersion(np.__version__) < '1.10.0', reason="") def test_keepdims_kwd(self): a = np.arange(120, dtype='d').reshape(2, 3, 4, 5) b = norm(a, ord=np.inf, axis=(1, 0), keepdims=True) c = norm(a, ord=1, axis=(0, 1), keepdims=True) assert_allclose(b, c) assert_(b.shape == c.shape) class TestOverwrite(object): def test_solve(self): assert_no_overwrite(solve, [(3, 3), (3,)]) def test_solve_triangular(self): assert_no_overwrite(solve_triangular, [(3, 3), (3,)]) def test_solve_banded(self): assert_no_overwrite(lambda ab, b: solve_banded((2, 1), ab, b), [(4, 6), (6,)]) def test_solveh_banded(self): assert_no_overwrite(solveh_banded, [(2, 6), (6,)]) def test_inv(self): assert_no_overwrite(inv, [(3, 3)]) def test_det(self): assert_no_overwrite(det, [(3, 3)]) def test_lstsq(self): 
assert_no_overwrite(lstsq, [(3, 2), (3,)]) def test_pinv(self): assert_no_overwrite(pinv, [(3, 3)]) def test_pinv2(self): assert_no_overwrite(pinv2, [(3, 3)]) def test_pinvh(self): assert_no_overwrite(pinvh, [(3, 3)]) class TestSolveCirculant(object): def test_basic1(self): c = np.array([1, 2, 3, 5]) b = np.array([1, -1, 1, 0]) x = solve_circulant(c, b) y = solve(circulant(c), b) assert_allclose(x, y) def test_basic2(self): # b is a 2-d matrix. c = np.array([1, 2, -3, -5]) b = np.arange(12).reshape(4, 3) x = solve_circulant(c, b) y = solve(circulant(c), b) assert_allclose(x, y) def test_basic3(self): # b is a 3-d matrix. c = np.array([1, 2, -3, -5]) b = np.arange(24).reshape(4, 3, 2) x = solve_circulant(c, b) y = solve(circulant(c), b) assert_allclose(x, y) def test_complex(self): # Complex b and c c = np.array([1+2j, -3, 4j, 5]) b = np.arange(8).reshape(4, 2) + 0.5j x = solve_circulant(c, b) y = solve(circulant(c), b) assert_allclose(x, y) def test_random_b_and_c(self): # Random b and c np.random.seed(54321) c = np.random.randn(50) b = np.random.randn(50) x = solve_circulant(c, b) y = solve(circulant(c), b) assert_allclose(x, y) def test_singular(self): # c gives a singular circulant matrix. c = np.array([1, 1, 0, 0]) b = np.array([1, 2, 3, 4]) x = solve_circulant(c, b, singular='lstsq') y, res, rnk, s = lstsq(circulant(c), b) assert_allclose(x, y) assert_raises(LinAlgError, solve_circulant, x, y) def test_axis_args(self): # Test use of caxis, baxis and outaxis. 
# c has shape (2, 1, 4) c = np.array([[[-1, 2.5, 3, 3.5]], [[1, 6, 6, 6.5]]]) # b has shape (3, 4) b = np.array([[0, 0, 1, 1], [1, 1, 0, 0], [1, -1, 0, 0]]) x = solve_circulant(c, b, baxis=1) assert_equal(x.shape, (4, 2, 3)) expected = np.empty_like(x) expected[:, 0, :] = solve(circulant(c[0]), b.T) expected[:, 1, :] = solve(circulant(c[1]), b.T) assert_allclose(x, expected) x = solve_circulant(c, b, baxis=1, outaxis=-1) assert_equal(x.shape, (2, 3, 4)) assert_allclose(np.rollaxis(x, -1), expected) # np.swapaxes(c, 1, 2) has shape (2, 4, 1); b.T has shape (4, 3). x = solve_circulant(np.swapaxes(c, 1, 2), b.T, caxis=1) assert_equal(x.shape, (4, 2, 3)) assert_allclose(x, expected) def test_native_list_arguments(self): # Same as test_basic1 using python's native list. c = [1, 2, 3, 5] b = [1, -1, 1, 0] x = solve_circulant(c, b) y = solve(circulant(c), b) assert_allclose(x, y) class TestMatrix_Balance(object): def test_string_arg(self): assert_raises(ValueError, matrix_balance, 'Some string for fail') def test_infnan_arg(self): assert_raises(ValueError, matrix_balance, np.array([[1, 2], [3, np.inf]])) assert_raises(ValueError, matrix_balance, np.array([[1, 2], [3, np.nan]])) def test_scaling(self): _, y = matrix_balance(np.array([[1000, 1], [1000, 0]])) # Pre/post LAPACK 3.5.0 gives the same result up to an offset # since in each case col norm is x1000 greater and # 1000 / 32 ~= 1 * 32 hence balanced with 2 ** 5. 
assert_allclose(int(np.diff(np.log2(np.diag(y)))), 5) def test_scaling_order(self): A = np.array([[1, 0, 1e-4], [1, 1, 1e-2], [1e4, 1e2, 1]]) x, y = matrix_balance(A) assert_allclose(solve(y, A).dot(y), x) def test_separate(self): _, (y, z) = matrix_balance(np.array([[1000, 1], [1000, 0]]), separate=1) assert_equal(int(np.diff(np.log2(y))), 5) assert_allclose(z, np.arange(2)) def test_permutation(self): A = block_diag(np.ones((2, 2)), np.tril(np.ones((2, 2))), np.ones((3, 3))) x, (y, z) = matrix_balance(A, separate=1) assert_allclose(y, np.ones_like(y)) assert_allclose(z, np.array([0, 1, 6, 5, 4, 3, 2])) def test_perm_and_scaling(self): # Matrix with its diagonal removed cases = ( # Case 0 np.array([[0., 0., 0., 0., 0.000002], [0., 0., 0., 0., 0.], [2., 2., 0., 0., 0.], [2., 2., 0., 0., 0.], [0., 0., 0.000002, 0., 0.]]), # Case 1 user reported GH-7258 np.array([[-0.5, 0., 0., 0.], [0., -1., 0., 0.], [1., 0., -0.5, 0.], [0., 1., 0., -1.]]), # Case 2 user reported GH-7258 np.array([[-3., 0., 1., 0.], [-1., -1., -0., 1.], [-3., -0., -0., 0.], [-1., -0., 1., -1.]]) ) for A in cases: x, y = matrix_balance(A) x, (s, p) = matrix_balance(A, separate=1) ip = np.empty_like(p) ip[p] = np.arange(A.shape[0]) assert_allclose(y, np.diag(s)[ip, :]) assert_allclose(solve(y, A).dot(y), x)
63,089
37.143894
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/tests/test_fblas.py
# Test interfaces to fortran blas. # # The tests are more of interface than they are of the underlying blas. # Only very small matrices checked -- N=3 or so. # # !! Complex calculations really aren't checked that carefully. # !! Only real valued complex numbers are used in tests. from __future__ import division, print_function, absolute_import from numpy import float32, float64, complex64, complex128, arange, array, \ zeros, shape, transpose, newaxis, common_type, conjugate from scipy.linalg import _fblas as fblas from scipy._lib.six import xrange from numpy.testing import assert_array_equal, \ assert_allclose, assert_array_almost_equal, assert_ # decimal accuracy to require between Python and LAPACK/BLAS calculations accuracy = 5 # Since numpy.dot likely uses the same blas, use this routine # to check. def matrixmultiply(a, b): if len(b.shape) == 1: b_is_vector = True b = b[:, newaxis] else: b_is_vector = False assert_(a.shape[1] == b.shape[0]) c = zeros((a.shape[0], b.shape[1]), common_type(a, b)) for i in xrange(a.shape[0]): for j in xrange(b.shape[1]): s = 0 for k in xrange(a.shape[1]): s += a[i, k] * b[k, j] c[i, j] = s if b_is_vector: c = c.reshape((a.shape[0],)) return c ################################################## # Test blas ?axpy class BaseAxpy(object): ''' Mixin class for axpy tests ''' def test_default_a(self): x = arange(3., dtype=self.dtype) y = arange(3., dtype=x.dtype) real_y = x*1.+y y = self.blas_func(x, y) assert_array_equal(real_y, y) def test_simple(self): x = arange(3., dtype=self.dtype) y = arange(3., dtype=x.dtype) real_y = x*3.+y y = self.blas_func(x, y, a=3.) 
assert_array_equal(real_y, y) def test_x_stride(self): x = arange(6., dtype=self.dtype) y = zeros(3, x.dtype) y = arange(3., dtype=x.dtype) real_y = x[::2]*3.+y y = self.blas_func(x, y, a=3., n=3, incx=2) assert_array_equal(real_y, y) def test_y_stride(self): x = arange(3., dtype=self.dtype) y = zeros(6, x.dtype) real_y = x*3.+y[::2] y = self.blas_func(x, y, a=3., n=3, incy=2) assert_array_equal(real_y, y[::2]) def test_x_and_y_stride(self): x = arange(12., dtype=self.dtype) y = zeros(6, x.dtype) real_y = x[::4]*3.+y[::2] y = self.blas_func(x, y, a=3., n=3, incx=4, incy=2) assert_array_equal(real_y, y[::2]) def test_x_bad_size(self): x = arange(12., dtype=self.dtype) y = zeros(6, x.dtype) try: self.blas_func(x, y, n=4, incx=5) except: # what kind of error should be caught? return # should catch error and never get here assert_(0) def test_y_bad_size(self): x = arange(12., dtype=self.dtype) y = zeros(6, x.dtype) try: self.blas_func(x, y, n=3, incy=5) except: # what kind of error should be caught? return # should catch error and never get here assert_(0) try: class TestSaxpy(BaseAxpy): blas_func = fblas.saxpy dtype = float32 except AttributeError: class TestSaxpy: pass class TestDaxpy(BaseAxpy): blas_func = fblas.daxpy dtype = float64 try: class TestCaxpy(BaseAxpy): blas_func = fblas.caxpy dtype = complex64 except AttributeError: class TestCaxpy: pass class TestZaxpy(BaseAxpy): blas_func = fblas.zaxpy dtype = complex128 ################################################## # Test blas ?scal class BaseScal(object): ''' Mixin class for scal testing ''' def test_simple(self): x = arange(3., dtype=self.dtype) real_x = x*3. 
x = self.blas_func(3., x) assert_array_equal(real_x, x) def test_x_stride(self): x = arange(6., dtype=self.dtype) real_x = x.copy() real_x[::2] = x[::2]*array(3., self.dtype) x = self.blas_func(3., x, n=3, incx=2) assert_array_equal(real_x, x) def test_x_bad_size(self): x = arange(12., dtype=self.dtype) try: self.blas_func(2., x, n=4, incx=5) except: # what kind of error should be caught? return # should catch error and never get here assert_(0) try: class TestSscal(BaseScal): blas_func = fblas.sscal dtype = float32 except AttributeError: class TestSscal: pass class TestDscal(BaseScal): blas_func = fblas.dscal dtype = float64 try: class TestCscal(BaseScal): blas_func = fblas.cscal dtype = complex64 except AttributeError: class TestCscal: pass class TestZscal(BaseScal): blas_func = fblas.zscal dtype = complex128 ################################################## # Test blas ?copy class BaseCopy(object): ''' Mixin class for copy testing ''' def test_simple(self): x = arange(3., dtype=self.dtype) y = zeros(shape(x), x.dtype) y = self.blas_func(x, y) assert_array_equal(x, y) def test_x_stride(self): x = arange(6., dtype=self.dtype) y = zeros(3, x.dtype) y = self.blas_func(x, y, n=3, incx=2) assert_array_equal(x[::2], y) def test_y_stride(self): x = arange(3., dtype=self.dtype) y = zeros(6, x.dtype) y = self.blas_func(x, y, n=3, incy=2) assert_array_equal(x, y[::2]) def test_x_and_y_stride(self): x = arange(12., dtype=self.dtype) y = zeros(6, x.dtype) y = self.blas_func(x, y, n=3, incx=4, incy=2) assert_array_equal(x[::4], y[::2]) def test_x_bad_size(self): x = arange(12., dtype=self.dtype) y = zeros(6, x.dtype) try: self.blas_func(x, y, n=4, incx=5) except: # what kind of error should be caught? return # should catch error and never get here assert_(0) def test_y_bad_size(self): x = arange(12., dtype=self.dtype) y = zeros(6, x.dtype) try: self.blas_func(x, y, n=3, incy=5) except: # what kind of error should be caught? 
return # should catch error and never get here assert_(0) # def test_y_bad_type(self): ## Hmmm. Should this work? What should be the output. # x = arange(3.,dtype=self.dtype) # y = zeros(shape(x)) # self.blas_func(x,y) # assert_array_equal(x,y) try: class TestScopy(BaseCopy): blas_func = fblas.scopy dtype = float32 except AttributeError: class TestScopy: pass class TestDcopy(BaseCopy): blas_func = fblas.dcopy dtype = float64 try: class TestCcopy(BaseCopy): blas_func = fblas.ccopy dtype = complex64 except AttributeError: class TestCcopy: pass class TestZcopy(BaseCopy): blas_func = fblas.zcopy dtype = complex128 ################################################## # Test blas ?swap class BaseSwap(object): ''' Mixin class for swap tests ''' def test_simple(self): x = arange(3., dtype=self.dtype) y = zeros(shape(x), x.dtype) desired_x = y.copy() desired_y = x.copy() x, y = self.blas_func(x, y) assert_array_equal(desired_x, x) assert_array_equal(desired_y, y) def test_x_stride(self): x = arange(6., dtype=self.dtype) y = zeros(3, x.dtype) desired_x = y.copy() desired_y = x.copy()[::2] x, y = self.blas_func(x, y, n=3, incx=2) assert_array_equal(desired_x, x[::2]) assert_array_equal(desired_y, y) def test_y_stride(self): x = arange(3., dtype=self.dtype) y = zeros(6, x.dtype) desired_x = y.copy()[::2] desired_y = x.copy() x, y = self.blas_func(x, y, n=3, incy=2) assert_array_equal(desired_x, x) assert_array_equal(desired_y, y[::2]) def test_x_and_y_stride(self): x = arange(12., dtype=self.dtype) y = zeros(6, x.dtype) desired_x = y.copy()[::2] desired_y = x.copy()[::4] x, y = self.blas_func(x, y, n=3, incx=4, incy=2) assert_array_equal(desired_x, x[::4]) assert_array_equal(desired_y, y[::2]) def test_x_bad_size(self): x = arange(12., dtype=self.dtype) y = zeros(6, x.dtype) try: self.blas_func(x, y, n=4, incx=5) except: # what kind of error should be caught? 
return # should catch error and never get here assert_(0) def test_y_bad_size(self): x = arange(12., dtype=self.dtype) y = zeros(6, x.dtype) try: self.blas_func(x, y, n=3, incy=5) except: # what kind of error should be caught? return # should catch error and never get here assert_(0) try: class TestSswap(BaseSwap): blas_func = fblas.sswap dtype = float32 except AttributeError: class TestSswap: pass class TestDswap(BaseSwap): blas_func = fblas.dswap dtype = float64 try: class TestCswap(BaseSwap): blas_func = fblas.cswap dtype = complex64 except AttributeError: class TestCswap: pass class TestZswap(BaseSwap): blas_func = fblas.zswap dtype = complex128 ################################################## # Test blas ?gemv # This will be a mess to test all cases. class BaseGemv(object): ''' Mixin class for gemv tests ''' def get_data(self, x_stride=1, y_stride=1): mult = array(1, dtype=self.dtype) if self.dtype in [complex64, complex128]: mult = array(1+1j, dtype=self.dtype) from numpy.random import normal, seed seed(1234) alpha = array(1., dtype=self.dtype) * mult beta = array(1., dtype=self.dtype) * mult a = normal(0., 1., (3, 3)).astype(self.dtype) * mult x = arange(shape(a)[0]*x_stride, dtype=self.dtype) * mult y = arange(shape(a)[1]*y_stride, dtype=self.dtype) * mult return alpha, beta, a, x, y def test_simple(self): alpha, beta, a, x, y = self.get_data() desired_y = alpha*matrixmultiply(a, x)+beta*y y = self.blas_func(alpha, a, x, beta, y) assert_array_almost_equal(desired_y, y) def test_default_beta_y(self): alpha, beta, a, x, y = self.get_data() desired_y = matrixmultiply(a, x) y = self.blas_func(1, a, x) assert_array_almost_equal(desired_y, y) def test_simple_transpose(self): alpha, beta, a, x, y = self.get_data() desired_y = alpha*matrixmultiply(transpose(a), x)+beta*y y = self.blas_func(alpha, a, x, beta, y, trans=1) assert_array_almost_equal(desired_y, y) def test_simple_transpose_conj(self): alpha, beta, a, x, y = self.get_data() desired_y = 
alpha*matrixmultiply(transpose(conjugate(a)), x)+beta*y y = self.blas_func(alpha, a, x, beta, y, trans=2) assert_array_almost_equal(desired_y, y) def test_x_stride(self): alpha, beta, a, x, y = self.get_data(x_stride=2) desired_y = alpha*matrixmultiply(a, x[::2])+beta*y y = self.blas_func(alpha, a, x, beta, y, incx=2) assert_array_almost_equal(desired_y, y) def test_x_stride_transpose(self): alpha, beta, a, x, y = self.get_data(x_stride=2) desired_y = alpha*matrixmultiply(transpose(a), x[::2])+beta*y y = self.blas_func(alpha, a, x, beta, y, trans=1, incx=2) assert_array_almost_equal(desired_y, y) def test_x_stride_assert(self): # What is the use of this test? alpha, beta, a, x, y = self.get_data(x_stride=2) try: y = self.blas_func(1, a, x, 1, y, trans=0, incx=3) assert_(0) except: pass try: y = self.blas_func(1, a, x, 1, y, trans=1, incx=3) assert_(0) except: pass def test_y_stride(self): alpha, beta, a, x, y = self.get_data(y_stride=2) desired_y = y.copy() desired_y[::2] = alpha*matrixmultiply(a, x)+beta*y[::2] y = self.blas_func(alpha, a, x, beta, y, incy=2) assert_array_almost_equal(desired_y, y) def test_y_stride_transpose(self): alpha, beta, a, x, y = self.get_data(y_stride=2) desired_y = y.copy() desired_y[::2] = alpha*matrixmultiply(transpose(a), x)+beta*y[::2] y = self.blas_func(alpha, a, x, beta, y, trans=1, incy=2) assert_array_almost_equal(desired_y, y) def test_y_stride_assert(self): # What is the use of this test? 
alpha, beta, a, x, y = self.get_data(y_stride=2) try: y = self.blas_func(1, a, x, 1, y, trans=0, incy=3) assert_(0) except: pass try: y = self.blas_func(1, a, x, 1, y, trans=1, incy=3) assert_(0) except: pass try: class TestSgemv(BaseGemv): blas_func = fblas.sgemv dtype = float32 def test_sgemv_on_osx(self): from itertools import product import sys import numpy as np if sys.platform != 'darwin': return def aligned_array(shape, align, dtype, order='C'): # Make array shape `shape` with aligned at `align` bytes d = dtype() # Make array of correct size with `align` extra bytes N = np.prod(shape) tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8) address = tmp.__array_interface__["data"][0] # Find offset into array giving desired alignment for offset in range(align): if (address + offset) % align == 0: break tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype) return tmp.reshape(shape, order=order) def as_aligned(arr, align, dtype, order='C'): # Copy `arr` into an aligned array with same shape aligned = aligned_array(arr.shape, align, dtype, order) aligned[:] = arr[:] return aligned def assert_dot_close(A, X, desired): assert_allclose(self.blas_func(1.0, A, X), desired, rtol=1e-5, atol=1e-7) testdata = product((15, 32), (10000,), (200, 89), ('C', 'F')) for align, m, n, a_order in testdata: A_d = np.random.rand(m, n) X_d = np.random.rand(n) desired = np.dot(A_d, X_d) # Calculation with aligned single precision A_f = as_aligned(A_d, align, np.float32, order=a_order) X_f = as_aligned(X_d, align, np.float32, order=a_order) assert_dot_close(A_f, X_f, desired) except AttributeError: class TestSgemv: pass class TestDgemv(BaseGemv): blas_func = fblas.dgemv dtype = float64 try: class TestCgemv(BaseGemv): blas_func = fblas.cgemv dtype = complex64 except AttributeError: class TestCgemv: pass class TestZgemv(BaseGemv): blas_func = fblas.zgemv dtype = complex128 """ ################################################## ### Test blas ?ger ### This will be a mess to test all cases. 
class BaseGer(object): def get_data(self,x_stride=1,y_stride=1): from numpy.random import normal, seed seed(1234) alpha = array(1., dtype = self.dtype) a = normal(0.,1.,(3,3)).astype(self.dtype) x = arange(shape(a)[0]*x_stride,dtype=self.dtype) y = arange(shape(a)[1]*y_stride,dtype=self.dtype) return alpha,a,x,y def test_simple(self): alpha,a,x,y = self.get_data() # tranpose takes care of Fortran vs. C(and Python) memory layout desired_a = alpha*transpose(x[:,newaxis]*y) + a self.blas_func(x,y,a) assert_array_almost_equal(desired_a,a) def test_x_stride(self): alpha,a,x,y = self.get_data(x_stride=2) desired_a = alpha*transpose(x[::2,newaxis]*y) + a self.blas_func(x,y,a,incx=2) assert_array_almost_equal(desired_a,a) def test_x_stride_assert(self): alpha,a,x,y = self.get_data(x_stride=2) try: self.blas_func(x,y,a,incx=3) assert(0) except: pass def test_y_stride(self): alpha,a,x,y = self.get_data(y_stride=2) desired_a = alpha*transpose(x[:,newaxis]*y[::2]) + a self.blas_func(x,y,a,incy=2) assert_array_almost_equal(desired_a,a) def test_y_stride_assert(self): alpha,a,x,y = self.get_data(y_stride=2) try: self.blas_func(a,x,y,incy=3) assert(0) except: pass class TestSger(BaseGer): blas_func = fblas.sger dtype = float32 class TestDger(BaseGer): blas_func = fblas.dger dtype = float64 """ ################################################## # Test blas ?gerc # This will be a mess to test all cases. """ class BaseGerComplex(BaseGer): def get_data(self,x_stride=1,y_stride=1): from numpy.random import normal, seed seed(1234) alpha = array(1+1j, dtype = self.dtype) a = normal(0.,1.,(3,3)).astype(self.dtype) a = a + normal(0.,1.,(3,3)) * array(1j, dtype = self.dtype) x = normal(0.,1.,shape(a)[0]*x_stride).astype(self.dtype) x = x + x * array(1j, dtype = self.dtype) y = normal(0.,1.,shape(a)[1]*y_stride).astype(self.dtype) y = y + y * array(1j, dtype = self.dtype) return alpha,a,x,y def test_simple(self): alpha,a,x,y = self.get_data() # tranpose takes care of Fortran vs. 
C(and Python) memory layout a = a * array(0.,dtype = self.dtype) #desired_a = alpha*transpose(x[:,newaxis]*self.transform(y)) + a desired_a = alpha*transpose(x[:,newaxis]*y) + a #self.blas_func(x,y,a,alpha = alpha) fblas.cgeru(x,y,a,alpha = alpha) assert_array_almost_equal(desired_a,a) #def test_x_stride(self): # alpha,a,x,y = self.get_data(x_stride=2) # desired_a = alpha*transpose(x[::2,newaxis]*self.transform(y)) + a # self.blas_func(x,y,a,incx=2) # assert_array_almost_equal(desired_a,a) #def test_y_stride(self): # alpha,a,x,y = self.get_data(y_stride=2) # desired_a = alpha*transpose(x[:,newaxis]*self.transform(y[::2])) + a # self.blas_func(x,y,a,incy=2) # assert_array_almost_equal(desired_a,a) class TestCgeru(BaseGerComplex): blas_func = fblas.cgeru dtype = complex64 def transform(self,x): return x class TestZgeru(BaseGerComplex): blas_func = fblas.zgeru dtype = complex128 def transform(self,x): return x class TestCgerc(BaseGerComplex): blas_func = fblas.cgerc dtype = complex64 def transform(self,x): return conjugate(x) class TestZgerc(BaseGerComplex): blas_func = fblas.zgerc dtype = complex128 def transform(self,x): return conjugate(x) """
19,430
28.620427
77
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/tests/test_decomp_ldl.py
from __future__ import division, print_function, absolute_import import itertools from numpy.testing import assert_array_almost_equal, assert_allclose, assert_ from numpy import (array, eye, zeros, empty_like, empty, tril_indices_from, tril, triu_indices_from, spacing, float32, float64, complex64, complex128) from numpy.random import rand, randint, seed from scipy.linalg import ldl from pytest import raises as assert_raises, warns from numpy import ComplexWarning def test_args(): A = eye(3) # Nonsquare array assert_raises(ValueError, ldl, A[:, :2]) # Complex matrix with imaginary diagonal entries with "hermitian=True" with warns(ComplexWarning): ldl(A*1j) def test_empty_array(): a = empty((0, 0), dtype=complex) l, d, p = ldl(empty((0, 0))) assert_array_almost_equal(l, empty_like(a)) assert_array_almost_equal(d, empty_like(a)) assert_array_almost_equal(p, array([], dtype=int)) def test_simple(): a = array([[-0.39-0.71j, 5.14-0.64j, -7.86-2.96j, 3.80+0.92j], [5.14-0.64j, 8.86+1.81j, -3.52+0.58j, 5.32-1.59j], [-7.86-2.96j, -3.52+0.58j, -2.83-0.03j, -1.54-2.86j], [3.80+0.92j, 5.32-1.59j, -1.54-2.86j, -0.56+0.12j]]) b = array([[5., 10, 1, 18], [10., 2, 11, 1], [1., 11, 19, 9], [18., 1, 9, 0]]) c = array([[52., 97, 112, 107, 50], [97., 114, 89, 98, 13], [112., 89, 64, 33, 6], [107., 98, 33, 60, 73], [50., 13, 6, 73, 77]]) d = array([[2., 2, -4, 0, 4], [2., -2, -2, 10, -8], [-4., -2, 6, -8, -4], [0., 10, -8, 6, -6], [4., -8, -4, -6, 10]]) e = array([[-1.36+0.00j, 0+0j, 0+0j, 0+0j], [1.58-0.90j, -8.87+0j, 0+0j, 0+0j], [2.21+0.21j, -1.84+0.03j, -4.63+0j, 0+0j], [3.91-1.50j, -1.78-1.18j, 0.11-0.11j, -1.84+0.00j]]) for x in (b, c, d): l, d, p = ldl(x) assert_allclose(l.dot(d).dot(l.T), x, atol=spacing(1000.), rtol=0) u, d, p = ldl(x, lower=False) assert_allclose(u.dot(d).dot(u.T), x, atol=spacing(1000.), rtol=0) l, d, p = ldl(a, hermitian=False) assert_allclose(l.dot(d).dot(l.T), a, atol=spacing(1000.), rtol=0) u, d, p = ldl(a, lower=False, hermitian=False) 
assert_allclose(u.dot(d).dot(u.T), a, atol=spacing(1000.), rtol=0) # Use upper part for the computation and use the lower part for comparison l, d, p = ldl(e.conj().T, lower=0) assert_allclose(tril(l.dot(d).dot(l.conj().T)-e), zeros((4, 4)), atol=spacing(1000.), rtol=0) def test_permutations(): seed(1234) for _ in range(10): n = randint(1, 100) # Random real/complex array x = rand(n, n) if randint(2) else rand(n, n) + rand(n, n)*1j x = x + x.conj().T x += eye(n)*randint(5, 1e6) l_ind = tril_indices_from(x, k=-1) u_ind = triu_indices_from(x, k=1) # Test whether permutations lead to a triangular array u, d, p = ldl(x, lower=0) # lower part should be zero assert_(not any(u[p, :][l_ind]), 'Spin {} failed'.format(_)) l, d, p = ldl(x, lower=1) # upper part should be zero assert_(not any(l[p, :][u_ind]), 'Spin {} failed'.format(_)) def test_ldl_type_size_combinations(): seed(1234) sizes = [30, 750] real_dtypes = [float32, float64] complex_dtypes = [complex64, complex128] for n, dtype in itertools.product(sizes, real_dtypes): msg = ("Failed for size: {}, dtype: {}".format(n, dtype)) x = rand(n, n).astype(dtype) x = x + x.T x += eye(n, dtype=dtype)*dtype(randint(5, 1e6)) l, d1, p = ldl(x) u, d2, p = ldl(x, lower=0) rtol = 1e-4 if dtype is float32 else 1e-10 assert_allclose(l.dot(d1).dot(l.T), x, rtol=rtol, err_msg=msg) assert_allclose(u.dot(d2).dot(u.T), x, rtol=rtol, err_msg=msg) for n, dtype in itertools.product(sizes, complex_dtypes): msg1 = ("Her failed for size: {}, dtype: {}".format(n, dtype)) msg2 = ("Sym failed for size: {}, dtype: {}".format(n, dtype)) # Complex hermitian upper/lower x = (rand(n, n)+1j*rand(n, n)).astype(dtype) x = x+x.conj().T x += eye(n, dtype=dtype)*dtype(randint(5, 1e6)) l, d1, p = ldl(x) u, d2, p = ldl(x, lower=0) rtol = 1e-4 if dtype is complex64 else 1e-10 assert_allclose(l.dot(d1).dot(l.conj().T), x, rtol=rtol, err_msg=msg1) assert_allclose(u.dot(d2).dot(u.conj().T), x, rtol=rtol, err_msg=msg1) # Complex symmetric upper/lower x = (rand(n, 
n)+1j*rand(n, n)).astype(dtype) x = x+x.T x += eye(n, dtype=dtype)*dtype(randint(5, 1e6)) l, d1, p = ldl(x, hermitian=0) u, d2, p = ldl(x, lower=0, hermitian=0) assert_allclose(l.dot(d1).dot(l.T), x, rtol=rtol, err_msg=msg2) assert_allclose(u.dot(d2).dot(u.T), x, rtol=rtol, err_msg=msg2)
5,107
36.014493
78
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/tests/test_build.py
from __future__ import division, print_function, absolute_import from subprocess import call, PIPE, Popen import sys import re import pytest from numpy.testing import assert_ from numpy.compat import asbytes from scipy.linalg import _flapack as flapack # XXX: this is copied from numpy trunk. Can be removed when we will depend on # numpy 1.3 class FindDependenciesLdd: def __init__(self): self.cmd = ['ldd'] try: st = call(self.cmd, stdout=PIPE, stderr=PIPE) except OSError: raise RuntimeError("command %s cannot be run" % self.cmd) def get_dependencies(self, file): p = Popen(self.cmd + [file], stdout=PIPE, stderr=PIPE) stdout, stderr = p.communicate() if not (p.returncode == 0): raise RuntimeError("Failed to check dependencies for %s" % file) return stdout def grep_dependencies(self, file, deps): stdout = self.get_dependencies(file) rdeps = dict([(asbytes(dep), re.compile(asbytes(dep))) for dep in deps]) founds = [] for l in stdout.splitlines(): for k, v in rdeps.items(): if v.search(l): founds.append(k) return founds class TestF77Mismatch(object): @pytest.mark.skipif(not(sys.platform[:5] == 'linux'), reason="Skipping fortran compiler mismatch on non Linux platform") def test_lapack(self): f = FindDependenciesLdd() deps = f.grep_dependencies(flapack.__file__, ['libg2c', 'libgfortran']) assert_(not (len(deps) > 1), """Both g77 and gfortran runtimes linked in scipy.linalg.flapack ! This is likely to cause random crashes and wrong results. See numpy INSTALL.rst.txt for more information.""")
1,806
30.155172
90
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/tests/test_blas.py
# # Created by: Pearu Peterson, April 2002 # from __future__ import division, print_function, absolute_import __usage__ = """ Build linalg: python setup.py build Run tests if scipy is installed: python -c 'import scipy;scipy.linalg.test()' """ import math import numpy as np from numpy.testing import (assert_equal, assert_almost_equal, assert_, assert_array_almost_equal, assert_allclose) from pytest import raises as assert_raises from numpy import float32, float64, complex64, complex128, arange, triu, \ tril, zeros, tril_indices, ones, mod, diag, append, eye, \ nonzero from numpy.random import rand, seed from scipy.linalg import _fblas as fblas, get_blas_funcs, toeplitz, solve, \ solve_triangular try: from scipy.linalg import _cblas as cblas except ImportError: cblas = None REAL_DTYPES = [float32, float64] COMPLEX_DTYPES = [complex64, complex128] DTYPES = REAL_DTYPES + COMPLEX_DTYPES def test_get_blas_funcs(): # check that it returns Fortran code for arrays that are # fortran-ordered f1, f2, f3 = get_blas_funcs( ('axpy', 'axpy', 'axpy'), (np.empty((2, 2), dtype=np.complex64, order='F'), np.empty((2, 2), dtype=np.complex128, order='C')) ) # get_blas_funcs will choose libraries depending on most generic # array assert_equal(f1.typecode, 'z') assert_equal(f2.typecode, 'z') if cblas is not None: assert_equal(f1.module_name, 'cblas') assert_equal(f2.module_name, 'cblas') # check defaults. 
f1 = get_blas_funcs('rotg') assert_equal(f1.typecode, 'd') # check also dtype interface f1 = get_blas_funcs('gemm', dtype=np.complex64) assert_equal(f1.typecode, 'c') f1 = get_blas_funcs('gemm', dtype='F') assert_equal(f1.typecode, 'c') # extended precision complex f1 = get_blas_funcs('gemm', dtype=np.longcomplex) assert_equal(f1.typecode, 'z') # check safe complex upcasting f1 = get_blas_funcs('axpy', (np.empty((2, 2), dtype=np.float64), np.empty((2, 2), dtype=np.complex64)) ) assert_equal(f1.typecode, 'z') def test_get_blas_funcs_alias(): # check alias for get_blas_funcs f, g = get_blas_funcs(('nrm2', 'dot'), dtype=np.complex64) assert f.typecode == 'c' assert g.typecode == 'c' f, g, h = get_blas_funcs(('dot', 'dotc', 'dotu'), dtype=np.float64) assert f is g assert f is h class TestCBLAS1Simple(object): def test_axpy(self): for p in 'sd': f = getattr(cblas, p+'axpy', None) if f is None: continue assert_array_almost_equal(f([1, 2, 3], [2, -1, 3], a=5), [7, 9, 18]) for p in 'cz': f = getattr(cblas, p+'axpy', None) if f is None: continue assert_array_almost_equal(f([1, 2j, 3], [2, -1, 3], a=5), [7, 10j-1, 18]) class TestFBLAS1Simple(object): def test_axpy(self): for p in 'sd': f = getattr(fblas, p+'axpy', None) if f is None: continue assert_array_almost_equal(f([1, 2, 3], [2, -1, 3], a=5), [7, 9, 18]) for p in 'cz': f = getattr(fblas, p+'axpy', None) if f is None: continue assert_array_almost_equal(f([1, 2j, 3], [2, -1, 3], a=5), [7, 10j-1, 18]) def test_copy(self): for p in 'sd': f = getattr(fblas, p+'copy', None) if f is None: continue assert_array_almost_equal(f([3, 4, 5], [8]*3), [3, 4, 5]) for p in 'cz': f = getattr(fblas, p+'copy', None) if f is None: continue assert_array_almost_equal(f([3, 4j, 5+3j], [8]*3), [3, 4j, 5+3j]) def test_asum(self): for p in 'sd': f = getattr(fblas, p+'asum', None) if f is None: continue assert_almost_equal(f([3, -4, 5]), 12) for p in ['sc', 'dz']: f = getattr(fblas, p+'asum', None) if f is None: continue 
assert_almost_equal(f([3j, -4, 3-4j]), 14) def test_dot(self): for p in 'sd': f = getattr(fblas, p+'dot', None) if f is None: continue assert_almost_equal(f([3, -4, 5], [2, 5, 1]), -9) def test_complex_dotu(self): for p in 'cz': f = getattr(fblas, p+'dotu', None) if f is None: continue assert_almost_equal(f([3j, -4, 3-4j], [2, 3, 1]), -9+2j) def test_complex_dotc(self): for p in 'cz': f = getattr(fblas, p+'dotc', None) if f is None: continue assert_almost_equal(f([3j, -4, 3-4j], [2, 3j, 1]), 3-14j) def test_nrm2(self): for p in 'sd': f = getattr(fblas, p+'nrm2', None) if f is None: continue assert_almost_equal(f([3, -4, 5]), math.sqrt(50)) for p in ['c', 'z', 'sc', 'dz']: f = getattr(fblas, p+'nrm2', None) if f is None: continue assert_almost_equal(f([3j, -4, 3-4j]), math.sqrt(50)) def test_scal(self): for p in 'sd': f = getattr(fblas, p+'scal', None) if f is None: continue assert_array_almost_equal(f(2, [3, -4, 5]), [6, -8, 10]) for p in 'cz': f = getattr(fblas, p+'scal', None) if f is None: continue assert_array_almost_equal(f(3j, [3j, -4, 3-4j]), [-9, -12j, 12+9j]) for p in ['cs', 'zd']: f = getattr(fblas, p+'scal', None) if f is None: continue assert_array_almost_equal(f(3, [3j, -4, 3-4j]), [9j, -12, 9-12j]) def test_swap(self): for p in 'sd': f = getattr(fblas, p+'swap', None) if f is None: continue x, y = [2, 3, 1], [-2, 3, 7] x1, y1 = f(x, y) assert_array_almost_equal(x1, y) assert_array_almost_equal(y1, x) for p in 'cz': f = getattr(fblas, p+'swap', None) if f is None: continue x, y = [2, 3j, 1], [-2, 3, 7-3j] x1, y1 = f(x, y) assert_array_almost_equal(x1, y) assert_array_almost_equal(y1, x) def test_amax(self): for p in 'sd': f = getattr(fblas, 'i'+p+'amax') assert_equal(f([-2, 4, 3]), 1) for p in 'cz': f = getattr(fblas, 'i'+p+'amax') assert_equal(f([-5, 4+3j, 6]), 1) # XXX: need tests for rot,rotm,rotg,rotmg class TestFBLAS2Simple(object): def test_gemv(self): for p in 'sd': f = getattr(fblas, p+'gemv', None) if f is None: continue 
assert_array_almost_equal(f(3, [[3]], [-4]), [-36]) assert_array_almost_equal(f(3, [[3]], [-4], 3, [5]), [-21]) for p in 'cz': f = getattr(fblas, p+'gemv', None) if f is None: continue assert_array_almost_equal(f(3j, [[3-4j]], [-4]), [-48-36j]) assert_array_almost_equal(f(3j, [[3-4j]], [-4], 3, [5j]), [-48-21j]) def test_ger(self): for p in 'sd': f = getattr(fblas, p+'ger', None) if f is None: continue assert_array_almost_equal(f(1, [1, 2], [3, 4]), [[3, 4], [6, 8]]) assert_array_almost_equal(f(2, [1, 2, 3], [3, 4]), [[6, 8], [12, 16], [18, 24]]) assert_array_almost_equal(f(1, [1, 2], [3, 4], a=[[1, 2], [3, 4]]), [[4, 6], [9, 12]]) for p in 'cz': f = getattr(fblas, p+'geru', None) if f is None: continue assert_array_almost_equal(f(1, [1j, 2], [3, 4]), [[3j, 4j], [6, 8]]) assert_array_almost_equal(f(-2, [1j, 2j, 3j], [3j, 4j]), [[6, 8], [12, 16], [18, 24]]) for p in 'cz': for name in ('ger', 'gerc'): f = getattr(fblas, p+name, None) if f is None: continue assert_array_almost_equal(f(1, [1j, 2], [3, 4]), [[3j, 4j], [6, 8]]) assert_array_almost_equal(f(2, [1j, 2j, 3j], [3j, 4j]), [[6, 8], [12, 16], [18, 24]]) def test_syr_her(self): x = np.arange(1, 5, dtype='d') resx = np.triu(x[:, np.newaxis] * x) resx_reverse = np.triu(x[::-1, np.newaxis] * x[::-1]) y = np.linspace(0, 8.5, 17, endpoint=False) z = np.arange(1, 9, dtype='d').view('D') resz = np.triu(z[:, np.newaxis] * z) resz_reverse = np.triu(z[::-1, np.newaxis] * z[::-1]) rehz = np.triu(z[:, np.newaxis] * z.conj()) rehz_reverse = np.triu(z[::-1, np.newaxis] * z[::-1].conj()) w = np.c_[np.zeros(4), z, np.zeros(4)].ravel() for p, rtol in zip('sd', [1e-7, 1e-14]): f = getattr(fblas, p+'syr', None) if f is None: continue assert_allclose(f(1.0, x), resx, rtol=rtol) assert_allclose(f(1.0, x, lower=True), resx.T, rtol=rtol) assert_allclose(f(1.0, y, incx=2, offx=2, n=4), resx, rtol=rtol) # negative increments imply reversed vectors in blas assert_allclose(f(1.0, y, incx=-2, offx=2, n=4), resx_reverse, rtol=rtol) a = 
np.zeros((4, 4), 'f' if p == 's' else 'd', 'F') b = f(1.0, x, a=a, overwrite_a=True) assert_allclose(a, resx, rtol=rtol) b = f(2.0, x, a=a) assert_(a is not b) assert_allclose(b, 3*resx, rtol=rtol) assert_raises(Exception, f, 1.0, x, incx=0) assert_raises(Exception, f, 1.0, x, offx=5) assert_raises(Exception, f, 1.0, x, offx=-2) assert_raises(Exception, f, 1.0, x, n=-2) assert_raises(Exception, f, 1.0, x, n=5) assert_raises(Exception, f, 1.0, x, lower=2) assert_raises(Exception, f, 1.0, x, a=np.zeros((2, 2), 'd', 'F')) for p, rtol in zip('cz', [1e-7, 1e-14]): f = getattr(fblas, p+'syr', None) if f is None: continue assert_allclose(f(1.0, z), resz, rtol=rtol) assert_allclose(f(1.0, z, lower=True), resz.T, rtol=rtol) assert_allclose(f(1.0, w, incx=3, offx=1, n=4), resz, rtol=rtol) # negative increments imply reversed vectors in blas assert_allclose(f(1.0, w, incx=-3, offx=1, n=4), resz_reverse, rtol=rtol) a = np.zeros((4, 4), 'F' if p == 'c' else 'D', 'F') b = f(1.0, z, a=a, overwrite_a=True) assert_allclose(a, resz, rtol=rtol) b = f(2.0, z, a=a) assert_(a is not b) assert_allclose(b, 3*resz, rtol=rtol) assert_raises(Exception, f, 1.0, x, incx=0) assert_raises(Exception, f, 1.0, x, offx=5) assert_raises(Exception, f, 1.0, x, offx=-2) assert_raises(Exception, f, 1.0, x, n=-2) assert_raises(Exception, f, 1.0, x, n=5) assert_raises(Exception, f, 1.0, x, lower=2) assert_raises(Exception, f, 1.0, x, a=np.zeros((2, 2), 'd', 'F')) for p, rtol in zip('cz', [1e-7, 1e-14]): f = getattr(fblas, p+'her', None) if f is None: continue assert_allclose(f(1.0, z), rehz, rtol=rtol) assert_allclose(f(1.0, z, lower=True), rehz.T.conj(), rtol=rtol) assert_allclose(f(1.0, w, incx=3, offx=1, n=4), rehz, rtol=rtol) # negative increments imply reversed vectors in blas assert_allclose(f(1.0, w, incx=-3, offx=1, n=4), rehz_reverse, rtol=rtol) a = np.zeros((4, 4), 'F' if p == 'c' else 'D', 'F') b = f(1.0, z, a=a, overwrite_a=True) assert_allclose(a, rehz, rtol=rtol) b = f(2.0, z, a=a) assert_(a 
is not b) assert_allclose(b, 3*rehz, rtol=rtol) assert_raises(Exception, f, 1.0, x, incx=0) assert_raises(Exception, f, 1.0, x, offx=5) assert_raises(Exception, f, 1.0, x, offx=-2) assert_raises(Exception, f, 1.0, x, n=-2) assert_raises(Exception, f, 1.0, x, n=5) assert_raises(Exception, f, 1.0, x, lower=2) assert_raises(Exception, f, 1.0, x, a=np.zeros((2, 2), 'd', 'F')) def test_syr2(self): x = np.arange(1, 5, dtype='d') y = np.arange(5, 9, dtype='d') resxy = np.triu(x[:, np.newaxis] * y + y[:, np.newaxis] * x) resxy_reverse = np.triu(x[::-1, np.newaxis] * y[::-1] + y[::-1, np.newaxis] * x[::-1]) q = np.linspace(0, 8.5, 17, endpoint=False) for p, rtol in zip('sd', [1e-7, 1e-14]): f = getattr(fblas, p+'syr2', None) if f is None: continue assert_allclose(f(1.0, x, y), resxy, rtol=rtol) assert_allclose(f(1.0, x, y, n=3), resxy[:3, :3], rtol=rtol) assert_allclose(f(1.0, x, y, lower=True), resxy.T, rtol=rtol) assert_allclose(f(1.0, q, q, incx=2, offx=2, incy=2, offy=10), resxy, rtol=rtol) assert_allclose(f(1.0, q, q, incx=2, offx=2, incy=2, offy=10, n=3), resxy[:3, :3], rtol=rtol) # negative increments imply reversed vectors in blas assert_allclose(f(1.0, q, q, incx=-2, offx=2, incy=-2, offy=10), resxy_reverse, rtol=rtol) a = np.zeros((4, 4), 'f' if p == 's' else 'd', 'F') b = f(1.0, x, y, a=a, overwrite_a=True) assert_allclose(a, resxy, rtol=rtol) b = f(2.0, x, y, a=a) assert_(a is not b) assert_allclose(b, 3*resxy, rtol=rtol) assert_raises(Exception, f, 1.0, x, y, incx=0) assert_raises(Exception, f, 1.0, x, y, offx=5) assert_raises(Exception, f, 1.0, x, y, offx=-2) assert_raises(Exception, f, 1.0, x, y, incy=0) assert_raises(Exception, f, 1.0, x, y, offy=5) assert_raises(Exception, f, 1.0, x, y, offy=-2) assert_raises(Exception, f, 1.0, x, y, n=-2) assert_raises(Exception, f, 1.0, x, y, n=5) assert_raises(Exception, f, 1.0, x, y, lower=2) assert_raises(Exception, f, 1.0, x, y, a=np.zeros((2, 2), 'd', 'F')) def test_her2(self): x = np.arange(1, 9, 
dtype='d').view('D') y = np.arange(9, 17, dtype='d').view('D') resxy = x[:, np.newaxis] * y.conj() + y[:, np.newaxis] * x.conj() resxy = np.triu(resxy) resxy_reverse = x[::-1, np.newaxis] * y[::-1].conj() resxy_reverse += y[::-1, np.newaxis] * x[::-1].conj() resxy_reverse = np.triu(resxy_reverse) u = np.c_[np.zeros(4), x, np.zeros(4)].ravel() v = np.c_[np.zeros(4), y, np.zeros(4)].ravel() for p, rtol in zip('cz', [1e-7, 1e-14]): f = getattr(fblas, p+'her2', None) if f is None: continue assert_allclose(f(1.0, x, y), resxy, rtol=rtol) assert_allclose(f(1.0, x, y, n=3), resxy[:3, :3], rtol=rtol) assert_allclose(f(1.0, x, y, lower=True), resxy.T.conj(), rtol=rtol) assert_allclose(f(1.0, u, v, incx=3, offx=1, incy=3, offy=1), resxy, rtol=rtol) assert_allclose(f(1.0, u, v, incx=3, offx=1, incy=3, offy=1, n=3), resxy[:3, :3], rtol=rtol) # negative increments imply reversed vectors in blas assert_allclose(f(1.0, u, v, incx=-3, offx=1, incy=-3, offy=1), resxy_reverse, rtol=rtol) a = np.zeros((4, 4), 'F' if p == 'c' else 'D', 'F') b = f(1.0, x, y, a=a, overwrite_a=True) assert_allclose(a, resxy, rtol=rtol) b = f(2.0, x, y, a=a) assert_(a is not b) assert_allclose(b, 3*resxy, rtol=rtol) assert_raises(Exception, f, 1.0, x, y, incx=0) assert_raises(Exception, f, 1.0, x, y, offx=5) assert_raises(Exception, f, 1.0, x, y, offx=-2) assert_raises(Exception, f, 1.0, x, y, incy=0) assert_raises(Exception, f, 1.0, x, y, offy=5) assert_raises(Exception, f, 1.0, x, y, offy=-2) assert_raises(Exception, f, 1.0, x, y, n=-2) assert_raises(Exception, f, 1.0, x, y, n=5) assert_raises(Exception, f, 1.0, x, y, lower=2) assert_raises(Exception, f, 1.0, x, y, a=np.zeros((2, 2), 'd', 'F')) def test_gbmv(self): seed(1234) for ind, dtype in enumerate(DTYPES): n = 7 m = 5 kl = 1 ku = 2 # fake a banded matrix via toeplitz A = toeplitz(append(rand(kl+1), zeros(m-kl-1)), append(rand(ku+1), zeros(n-ku-1))) A = A.astype(dtype) Ab = zeros((kl+ku+1, n), dtype=dtype) # Form the banded storage Ab[2, :5] = A[0, 
0] # diag Ab[1, 1:6] = A[0, 1] # sup1 Ab[0, 2:7] = A[0, 2] # sup2 Ab[3, :4] = A[1, 0] # sub1 x = rand(n).astype(dtype) y = rand(m).astype(dtype) alpha, beta = dtype(3), dtype(-5) func, = get_blas_funcs(('gbmv',), dtype=dtype) y1 = func(m=m, n=n, ku=ku, kl=kl, alpha=alpha, a=Ab, x=x, y=y, beta=beta) y2 = alpha * A.dot(x) + beta * y assert_array_almost_equal(y1, y2) def test_sbmv_hbmv(self): seed(1234) for ind, dtype in enumerate(DTYPES): n = 6 k = 2 A = zeros((n, n), dtype=dtype) Ab = zeros((k+1, n), dtype=dtype) # Form the array and its packed banded storage A[arange(n), arange(n)] = rand(n) for ind2 in range(1, k+1): temp = rand(n-ind2) A[arange(n-ind2), arange(ind2, n)] = temp Ab[-1-ind2, ind2:] = temp A = A.astype(dtype) A = A + A.T if ind < 2 else A + A.conj().T Ab[-1, :] = diag(A) x = rand(n).astype(dtype) y = rand(n).astype(dtype) alpha, beta = dtype(1.25), dtype(3) if ind > 1: func, = get_blas_funcs(('hbmv',), dtype=dtype) else: func, = get_blas_funcs(('sbmv',), dtype=dtype) y1 = func(k=k, alpha=alpha, a=Ab, x=x, y=y, beta=beta) y2 = alpha * A.dot(x) + beta * y assert_array_almost_equal(y1, y2) def test_spmv_hpmv(self): seed(1234) for ind, dtype in enumerate(DTYPES+COMPLEX_DTYPES): n = 3 A = rand(n, n).astype(dtype) if ind > 1: A += rand(n, n)*1j A = A.astype(dtype) A = A + A.T if ind < 4 else A + A.conj().T c, r = tril_indices(n) Ap = A[r, c] x = rand(n).astype(dtype) y = rand(n).astype(dtype) xlong = arange(2*n).astype(dtype) ylong = ones(2*n).astype(dtype) alpha, beta = dtype(1.25), dtype(2) if ind > 3: func, = get_blas_funcs(('hpmv',), dtype=dtype) else: func, = get_blas_funcs(('spmv',), dtype=dtype) y1 = func(n=n, alpha=alpha, ap=Ap, x=x, y=y, beta=beta) y2 = alpha * A.dot(x) + beta * y assert_array_almost_equal(y1, y2) # Test inc and offsets y1 = func(n=n-1, alpha=alpha, beta=beta, x=xlong, y=ylong, ap=Ap, incx=2, incy=2, offx=n, offy=n) y2 = (alpha * A[:-1, :-1]).dot(xlong[3::2]) + beta * ylong[3::2] assert_array_almost_equal(y1[3::2], y2) 
assert_almost_equal(y1[4], ylong[4]) def test_spr_hpr(self): seed(1234) for ind, dtype in enumerate(DTYPES+COMPLEX_DTYPES): n = 3 A = rand(n, n).astype(dtype) if ind > 1: A += rand(n, n)*1j A = A.astype(dtype) A = A + A.T if ind < 4 else A + A.conj().T c, r = tril_indices(n) Ap = A[r, c] x = rand(n).astype(dtype) alpha = (DTYPES+COMPLEX_DTYPES)[mod(ind, 4)](2.5) if ind > 3: func, = get_blas_funcs(('hpr',), dtype=dtype) y2 = alpha * x[:, None].dot(x[None, :].conj()) + A else: func, = get_blas_funcs(('spr',), dtype=dtype) y2 = alpha * x[:, None].dot(x[None, :]) + A y1 = func(n=n, alpha=alpha, ap=Ap, x=x) y1f = zeros((3, 3), dtype=dtype) y1f[r, c] = y1 y1f[c, r] = y1.conj() if ind > 3 else y1 assert_array_almost_equal(y1f, y2) def test_spr2_hpr2(self): seed(1234) for ind, dtype in enumerate(DTYPES): n = 3 A = rand(n, n).astype(dtype) if ind > 1: A += rand(n, n)*1j A = A.astype(dtype) A = A + A.T if ind < 2 else A + A.conj().T c, r = tril_indices(n) Ap = A[r, c] x = rand(n).astype(dtype) y = rand(n).astype(dtype) alpha = dtype(2) if ind > 1: func, = get_blas_funcs(('hpr2',), dtype=dtype) else: func, = get_blas_funcs(('spr2',), dtype=dtype) u = alpha.conj() * x[:, None].dot(y[None, :].conj()) y2 = A + u + u.conj().T y1 = func(n=n, alpha=alpha, x=x, y=y, ap=Ap) y1f = zeros((3, 3), dtype=dtype) y1f[r, c] = y1 y1f[[1, 2, 2], [0, 0, 1]] = y1[[1, 3, 4]].conj() assert_array_almost_equal(y1f, y2) def test_tbmv(self): seed(1234) for ind, dtype in enumerate(DTYPES): n = 10 k = 3 x = rand(n).astype(dtype) A = zeros((n, n), dtype=dtype) # Banded upper triangular array for sup in range(k+1): A[arange(n-sup), arange(sup, n)] = rand(n-sup) # Add complex parts for c,z if ind > 1: A[nonzero(A)] += 1j * rand((k+1)*n-(k*(k+1)//2)).astype(dtype) # Form the banded storage Ab = zeros((k+1, n), dtype=dtype) for row in range(k+1): Ab[-row-1, row:] = diag(A, k=row) func, = get_blas_funcs(('tbmv',), dtype=dtype) y1 = func(k=k, a=Ab, x=x) y2 = A.dot(x) assert_array_almost_equal(y1, y2) y1 = 
func(k=k, a=Ab, x=x, diag=1) A[arange(n), arange(n)] = dtype(1) y2 = A.dot(x) assert_array_almost_equal(y1, y2) y1 = func(k=k, a=Ab, x=x, diag=1, trans=1) y2 = A.T.dot(x) assert_array_almost_equal(y1, y2) y1 = func(k=k, a=Ab, x=x, diag=1, trans=2) y2 = A.conj().T.dot(x) assert_array_almost_equal(y1, y2) def test_tbsv(self): seed(1234) for ind, dtype in enumerate(DTYPES): n = 6 k = 3 x = rand(n).astype(dtype) A = zeros((n, n), dtype=dtype) # Banded upper triangular array for sup in range(k+1): A[arange(n-sup), arange(sup, n)] = rand(n-sup) # Add complex parts for c,z if ind > 1: A[nonzero(A)] += 1j * rand((k+1)*n-(k*(k+1)//2)).astype(dtype) # Form the banded storage Ab = zeros((k+1, n), dtype=dtype) for row in range(k+1): Ab[-row-1, row:] = diag(A, k=row) func, = get_blas_funcs(('tbsv',), dtype=dtype) y1 = func(k=k, a=Ab, x=x) y2 = solve(A, x) assert_array_almost_equal(y1, y2) y1 = func(k=k, a=Ab, x=x, diag=1) A[arange(n), arange(n)] = dtype(1) y2 = solve(A, x) assert_array_almost_equal(y1, y2) y1 = func(k=k, a=Ab, x=x, diag=1, trans=1) y2 = solve(A.T, x) assert_array_almost_equal(y1, y2) y1 = func(k=k, a=Ab, x=x, diag=1, trans=2) y2 = solve(A.conj().T, x) assert_array_almost_equal(y1, y2) def test_tpmv(self): seed(1234) for ind, dtype in enumerate(DTYPES): n = 10 x = rand(n).astype(dtype) # Upper triangular array A = triu(rand(n, n)) if ind < 2 else triu(rand(n, n)+rand(n, n)*1j) # Form the packed storage c, r = tril_indices(n) Ap = A[r, c] func, = get_blas_funcs(('tpmv',), dtype=dtype) y1 = func(n=n, ap=Ap, x=x) y2 = A.dot(x) assert_array_almost_equal(y1, y2) y1 = func(n=n, ap=Ap, x=x, diag=1) A[arange(n), arange(n)] = dtype(1) y2 = A.dot(x) assert_array_almost_equal(y1, y2) y1 = func(n=n, ap=Ap, x=x, diag=1, trans=1) y2 = A.T.dot(x) assert_array_almost_equal(y1, y2) y1 = func(n=n, ap=Ap, x=x, diag=1, trans=2) y2 = A.conj().T.dot(x) assert_array_almost_equal(y1, y2) def test_tpsv(self): seed(1234) for ind, dtype in enumerate(DTYPES): n = 10 x = 
rand(n).astype(dtype) # Upper triangular array A = triu(rand(n, n)) if ind < 2 else triu(rand(n, n)+rand(n, n)*1j) A += eye(n) # Form the packed storage c, r = tril_indices(n) Ap = A[r, c] func, = get_blas_funcs(('tpsv',), dtype=dtype) y1 = func(n=n, ap=Ap, x=x) y2 = solve(A, x) assert_array_almost_equal(y1, y2) y1 = func(n=n, ap=Ap, x=x, diag=1) A[arange(n), arange(n)] = dtype(1) y2 = solve(A, x) assert_array_almost_equal(y1, y2) y1 = func(n=n, ap=Ap, x=x, diag=1, trans=1) y2 = solve(A.T, x) assert_array_almost_equal(y1, y2) y1 = func(n=n, ap=Ap, x=x, diag=1, trans=2) y2 = solve(A.conj().T, x) assert_array_almost_equal(y1, y2) def test_trmv(self): seed(1234) for ind, dtype in enumerate(DTYPES): n = 3 A = (rand(n, n)+eye(n)).astype(dtype) x = rand(3).astype(dtype) func, = get_blas_funcs(('trmv',), dtype=dtype) y1 = func(a=A, x=x) y2 = triu(A).dot(x) assert_array_almost_equal(y1, y2) y1 = func(a=A, x=x, diag=1) A[arange(n), arange(n)] = dtype(1) y2 = triu(A).dot(x) assert_array_almost_equal(y1, y2) y1 = func(a=A, x=x, diag=1, trans=1) y2 = triu(A).T.dot(x) assert_array_almost_equal(y1, y2) y1 = func(a=A, x=x, diag=1, trans=2) y2 = triu(A).conj().T.dot(x) assert_array_almost_equal(y1, y2) def test_trsv(self): seed(1234) for ind, dtype in enumerate(DTYPES): n = 15 A = (rand(n, n)+eye(n)).astype(dtype) x = rand(n).astype(dtype) func, = get_blas_funcs(('trsv',), dtype=dtype) y1 = func(a=A, x=x) y2 = solve(triu(A), x) assert_array_almost_equal(y1, y2) y1 = func(a=A, x=x, lower=1) y2 = solve(tril(A), x) assert_array_almost_equal(y1, y2) y1 = func(a=A, x=x, diag=1) A[arange(n), arange(n)] = dtype(1) y2 = solve(triu(A), x) assert_array_almost_equal(y1, y2) y1 = func(a=A, x=x, diag=1, trans=1) y2 = solve(triu(A).T, x) assert_array_almost_equal(y1, y2) y1 = func(a=A, x=x, diag=1, trans=2) y2 = solve(triu(A).conj().T, x) assert_array_almost_equal(y1, y2) class TestFBLAS3Simple(object): def test_gemm(self): for p in 'sd': f = getattr(fblas, p+'gemm', None) if f is None: 
continue assert_array_almost_equal(f(3, [3], [-4]), [[-36]]) assert_array_almost_equal(f(3, [3], [-4], 3, [5]), [-21]) for p in 'cz': f = getattr(fblas, p+'gemm', None) if f is None: continue assert_array_almost_equal(f(3j, [3-4j], [-4]), [[-48-36j]]) assert_array_almost_equal(f(3j, [3-4j], [-4], 3, [5j]), [-48-21j]) def _get_func(func, ps='sdzc'): """Just a helper: return a specified BLAS function w/typecode.""" for p in ps: f = getattr(fblas, p+func, None) if f is None: continue yield f class TestBLAS3Symm(object): def setup_method(self): self.a = np.array([[1., 2.], [0., 1.]]) self.b = np.array([[1., 0., 3.], [0., -1., 2.]]) self.c = np.ones((2, 3)) self.t = np.array([[2., -1., 8.], [3., 0., 9.]]) def test_symm(self): for f in _get_func('symm'): res = f(a=self.a, b=self.b, c=self.c, alpha=1., beta=1.) assert_array_almost_equal(res, self.t) res = f(a=self.a.T, b=self.b, lower=1, c=self.c, alpha=1., beta=1.) assert_array_almost_equal(res, self.t) res = f(a=self.a, b=self.b.T, side=1, c=self.c.T, alpha=1., beta=1.) assert_array_almost_equal(res, self.t.T) def test_summ_wrong_side(self): f = getattr(fblas, 'dsymm', None) if f is not None: assert_raises(Exception, f, **{'a': self.a, 'b': self.b, 'alpha': 1, 'side': 1}) # `side=1` means C <- B*A, hence shapes of A and B are to be # compatible. Otherwise, f2py exception is raised def test_symm_wrong_uplo(self): """SYMM only considers the upper/lower part of A. Hence setting wrong value for `lower` (default is lower=0, meaning upper triangle) gives a wrong result. """ f = getattr(fblas, 'dsymm', None) if f is not None: res = f(a=self.a, b=self.b, c=self.c, alpha=1., beta=1.) assert np.allclose(res, self.t) res = f(a=self.a, b=self.b, lower=1, c=self.c, alpha=1., beta=1.) 
assert not np.allclose(res, self.t) class TestBLAS3Syrk(object): def setup_method(self): self.a = np.array([[1., 0.], [0., -2.], [2., 3.]]) self.t = np.array([[1., 0., 2.], [0., 4., -6.], [2., -6., 13.]]) self.tt = np.array([[5., 6.], [6., 13.]]) def test_syrk(self): for f in _get_func('syrk'): c = f(a=self.a, alpha=1.) assert_array_almost_equal(np.triu(c), np.triu(self.t)) c = f(a=self.a, alpha=1., lower=1) assert_array_almost_equal(np.tril(c), np.tril(self.t)) c0 = np.ones(self.t.shape) c = f(a=self.a, alpha=1., beta=1., c=c0) assert_array_almost_equal(np.triu(c), np.triu(self.t+c0)) c = f(a=self.a, alpha=1., trans=1) assert_array_almost_equal(np.triu(c), np.triu(self.tt)) # prints '0-th dimension must be fixed to 3 but got 5', # FIXME: suppress? # FIXME: how to catch the _fblas.error? def test_syrk_wrong_c(self): f = getattr(fblas, 'dsyrk', None) if f is not None: assert_raises(Exception, f, **{'a': self.a, 'alpha': 1., 'c': np.ones((5, 8))}) # if C is supplied, it must have compatible dimensions class TestBLAS3Syr2k(object): def setup_method(self): self.a = np.array([[1., 0.], [0., -2.], [2., 3.]]) self.b = np.array([[0., 1.], [1., 0.], [0, 1.]]) self.t = np.array([[0., -1., 3.], [-1., 0., 0.], [3., 0., 6.]]) self.tt = np.array([[0., 1.], [1., 6]]) def test_syr2k(self): for f in _get_func('syr2k'): c = f(a=self.a, b=self.b, alpha=1.) assert_array_almost_equal(np.triu(c), np.triu(self.t)) c = f(a=self.a, b=self.b, alpha=1., lower=1) assert_array_almost_equal(np.tril(c), np.tril(self.t)) c0 = np.ones(self.t.shape) c = f(a=self.a, b=self.b, alpha=1., beta=1., c=c0) assert_array_almost_equal(np.triu(c), np.triu(self.t+c0)) c = f(a=self.a, b=self.b, alpha=1., trans=1) assert_array_almost_equal(np.triu(c), np.triu(self.tt)) # prints '0-th dimension must be fixed to 3 but got 5', FIXME: suppress? 
def test_syr2k_wrong_c(self): f = getattr(fblas, 'dsyr2k', None) if f is not None: assert_raises(Exception, f, **{'a': self.a, 'b': self.b, 'alpha': 1., 'c': np.zeros((15, 8))}) # if C is supplied, it must have compatible dimensions class TestSyHe(object): """Quick and simple tests for (zc)-symm, syrk, syr2k.""" def setup_method(self): self.sigma_y = np.array([[0., -1.j], [1.j, 0.]]) def test_symm_zc(self): for f in _get_func('symm', 'zc'): # NB: a is symmetric w/upper diag of ONLY res = f(a=self.sigma_y, b=self.sigma_y, alpha=1.) assert_array_almost_equal(np.triu(res), np.diag([1, -1])) def test_hemm_zc(self): for f in _get_func('hemm', 'zc'): # NB: a is hermitian w/upper diag of ONLY res = f(a=self.sigma_y, b=self.sigma_y, alpha=1.) assert_array_almost_equal(np.triu(res), np.diag([1, 1])) def test_syrk_zr(self): for f in _get_func('syrk', 'zc'): res = f(a=self.sigma_y, alpha=1.) assert_array_almost_equal(np.triu(res), np.diag([-1, -1])) def test_herk_zr(self): for f in _get_func('herk', 'zc'): res = f(a=self.sigma_y, alpha=1.) assert_array_almost_equal(np.triu(res), np.diag([1, 1])) def test_syr2k_zr(self): for f in _get_func('syr2k', 'zc'): res = f(a=self.sigma_y, b=self.sigma_y, alpha=1.) assert_array_almost_equal(np.triu(res), 2.*np.diag([-1, -1])) def test_her2k_zr(self): for f in _get_func('her2k', 'zc'): res = f(a=self.sigma_y, b=self.sigma_y, alpha=1.) 
assert_array_almost_equal(np.triu(res), 2.*np.diag([1, 1])) class TestTRMM(object): """Quick and simple tests for dtrmm.""" def setup_method(self): self.a = np.array([[1., 2., ], [-2., 1.]]) self.b = np.array([[3., 4., -1.], [5., 6., -2.]]) def test_ab(self): f = getattr(fblas, 'dtrmm', None) if f is not None: result = f(1., self.a, self.b) # default a is upper triangular expected = np.array([[13., 16., -5.], [5., 6., -2.]]) assert_array_almost_equal(result, expected) def test_ab_lower(self): f = getattr(fblas, 'dtrmm', None) if f is not None: result = f(1., self.a, self.b, lower=True) expected = np.array([[3., 4., -1.], [-1., -2., 0.]]) # now a is lower triangular assert_array_almost_equal(result, expected) def test_b_overwrites(self): # BLAS dtrmm modifies B argument in-place. # Here the default is to copy, but this can be overridden f = getattr(fblas, 'dtrmm', None) if f is not None: for overwr in [True, False]: bcopy = self.b.copy() result = f(1., self.a, bcopy, overwrite_b=overwr) # C-contiguous arrays are copied assert_(bcopy.flags.f_contiguous is False and np.may_share_memory(bcopy, result) is False) assert_equal(bcopy, self.b) bcopy = np.asfortranarray(self.b.copy()) # or just transpose it result = f(1., self.a, bcopy, overwrite_b=True) assert_(bcopy.flags.f_contiguous is True and np.may_share_memory(bcopy, result) is True) assert_array_almost_equal(bcopy, result) def test_trsm(): seed(1234) for ind, dtype in enumerate(DTYPES): tol = np.finfo(dtype).eps*1000 func, = get_blas_funcs(('trsm',), dtype=dtype) # Test protection against size mismatches A = rand(4, 5).astype(dtype) B = rand(4, 4).astype(dtype) alpha = dtype(1) assert_raises(Exception, func, alpha, A, B) assert_raises(Exception, func, alpha, A.T, B) n = 8 m = 7 alpha = dtype(-2.5) A = (rand(m, m) if ind < 2 else rand(m, m) + rand(m, m)*1j) + eye(m) A = A.astype(dtype) Au = triu(A) Al = tril(A) B1 = rand(m, n).astype(dtype) B2 = rand(n, m).astype(dtype) x1 = func(alpha=alpha, a=A, b=B1) 
assert_equal(B1.shape, x1.shape) x2 = solve(Au, alpha*B1) assert_allclose(x1, x2, atol=tol) x1 = func(alpha=alpha, a=A, b=B1, trans_a=1) x2 = solve(Au.T, alpha*B1) assert_allclose(x1, x2, atol=tol) x1 = func(alpha=alpha, a=A, b=B1, trans_a=2) x2 = solve(Au.conj().T, alpha*B1) assert_allclose(x1, x2, atol=tol) x1 = func(alpha=alpha, a=A, b=B1, diag=1) Au[arange(m), arange(m)] = dtype(1) x2 = solve(Au, alpha*B1) assert_allclose(x1, x2, atol=tol) x1 = func(alpha=alpha, a=A, b=B2, diag=1, side=1) x2 = solve(Au.conj().T, alpha*B2.conj().T) assert_allclose(x1, x2.conj().T, atol=tol) x1 = func(alpha=alpha, a=A, b=B2, diag=1, side=1, lower=1) Al[arange(m), arange(m)] = dtype(1) x2 = solve(Al.conj().T, alpha*B2.conj().T) assert_allclose(x1, x2.conj().T, atol=tol)
39,580
35.649074
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/tests/test_sketches.py
"""Tests for _sketches.py.""" from __future__ import division, print_function, absolute_import import numpy as np from scipy.linalg import clarkson_woodruff_transform from numpy.testing import assert_ def make_random_dense_gaussian_matrix(n_rows, n_columns, mu=0, sigma=0.01): """ Make some random data with Gaussian distributed values """ np.random.seed(142352345) res = np.random.normal(mu, sigma, n_rows*n_columns) return np.reshape(res, (n_rows, n_columns)) class TestClarksonWoodruffTransform(object): """ Testing the Clarkson Woodruff Transform """ # Big dense matrix dimensions n_matrix_rows = 2000 n_matrix_columns = 100 # Sketch matrix dimensions n_sketch_rows = 100 # Error threshold threshold = 0.1 dense_big_matrix = make_random_dense_gaussian_matrix(n_matrix_rows, n_matrix_columns) def test_sketch_dimensions(self): sketch = clarkson_woodruff_transform(self.dense_big_matrix, self.n_sketch_rows) assert_(sketch.shape == (self.n_sketch_rows, self.dense_big_matrix.shape[1])) def test_sketch_rows_norm(self): # Given the probabilistic nature of the sketches # we run the 'test' multiple times and check that # we pass all/almost all the tries n_errors = 0 seeds = [1755490010, 934377150, 1391612830, 1752708722, 2008891431, 1302443994, 1521083269, 1501189312, 1126232505, 1533465685] for seed_ in seeds: sketch = clarkson_woodruff_transform(self.dense_big_matrix, self.n_sketch_rows, seed_) # We could use other norms (like L2) err = np.linalg.norm(self.dense_big_matrix) - np.linalg.norm(sketch) if err > self.threshold: n_errors += 1 assert_(n_errors == 0)
2,000
31.274194
80
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/tests/test_decomp_polar.py
from __future__ import division, print_function, absolute_import import numpy as np from numpy.linalg import norm from numpy.testing import (assert_, assert_allclose, assert_equal) from scipy.linalg import polar, eigh diag2 = np.array([[2, 0], [0, 3]]) a13 = np.array([[1, 2, 2]]) precomputed_cases = [ [[[0]], 'right', [[1]], [[0]]], [[[0]], 'left', [[1]], [[0]]], [[[9]], 'right', [[1]], [[9]]], [[[9]], 'left', [[1]], [[9]]], [diag2, 'right', np.eye(2), diag2], [diag2, 'left', np.eye(2), diag2], [a13, 'right', a13/norm(a13[0]), a13.T.dot(a13)/norm(a13[0])], ] verify_cases = [ [[1, 2], [3, 4]], [[1, 2, 3]], [[1], [2], [3]], [[1, 2, 3], [3, 4, 0]], [[1, 2], [3, 4], [5, 5]], [[1, 2], [3, 4+5j]], [[1, 2, 3j]], [[1], [2], [3j]], [[1, 2, 3+2j], [3, 4-1j, -4j]], [[1, 2], [3-2j, 4+0.5j], [5, 5]], [[10000, 10, 1], [-1, 2, 3j], [0, 1, 2]], ] def check_precomputed_polar(a, side, expected_u, expected_p): # Compare the result of the polar decomposition to a # precomputed result. u, p = polar(a, side=side) assert_allclose(u, expected_u, atol=1e-15) assert_allclose(p, expected_p, atol=1e-15) def verify_polar(a): # Compute the polar decomposition, and then verify that # the result has all the expected properties. product_atol = np.sqrt(np.finfo(float).eps) aa = np.asarray(a) m, n = aa.shape u, p = polar(a, side='right') assert_equal(u.shape, (m, n)) assert_equal(p.shape, (n, n)) # a = up assert_allclose(u.dot(p), a, atol=product_atol) if m >= n: assert_allclose(u.conj().T.dot(u), np.eye(n), atol=1e-15) else: assert_allclose(u.dot(u.conj().T), np.eye(m), atol=1e-15) # p is Hermitian positive semidefinite. 
assert_allclose(p.conj().T, p) evals = eigh(p, eigvals_only=True) nonzero_evals = evals[abs(evals) > 1e-14] assert_((nonzero_evals >= 0).all()) u, p = polar(a, side='left') assert_equal(u.shape, (m, n)) assert_equal(p.shape, (m, m)) # a = pu assert_allclose(p.dot(u), a, atol=product_atol) if m >= n: assert_allclose(u.conj().T.dot(u), np.eye(n), atol=1e-15) else: assert_allclose(u.dot(u.conj().T), np.eye(m), atol=1e-15) # p is Hermitian positive semidefinite. assert_allclose(p.conj().T, p) evals = eigh(p, eigvals_only=True) nonzero_evals = evals[abs(evals) > 1e-14] assert_((nonzero_evals >= 0).all()) def test_precomputed_cases(): for a, side, expected_u, expected_p in precomputed_cases: check_precomputed_polar(a, side, expected_u, expected_p) def test_verify_cases(): for a in verify_cases: verify_polar(a)
2,712
28.172043
66
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/tests/test_decomp_cholesky.py
from __future__ import division, print_function, absolute_import from numpy.testing import assert_array_almost_equal, assert_array_equal from pytest import raises as assert_raises from numpy import array, transpose, dot, conjugate, zeros_like, empty from numpy.random import random from scipy.linalg import cholesky, cholesky_banded, cho_solve_banded, \ cho_factor, cho_solve from scipy.linalg._testutils import assert_no_overwrite class TestCholesky(object): def test_simple(self): a = [[8, 2, 3], [2, 9, 3], [3, 3, 6]] c = cholesky(a) assert_array_almost_equal(dot(transpose(c), c), a) c = transpose(c) a = dot(c, transpose(c)) assert_array_almost_equal(cholesky(a, lower=1), c) def test_check_finite(self): a = [[8, 2, 3], [2, 9, 3], [3, 3, 6]] c = cholesky(a, check_finite=False) assert_array_almost_equal(dot(transpose(c), c), a) c = transpose(c) a = dot(c, transpose(c)) assert_array_almost_equal(cholesky(a, lower=1, check_finite=False), c) def test_simple_complex(self): m = array([[3+1j, 3+4j, 5], [0, 2+2j, 2+7j], [0, 0, 7+4j]]) a = dot(transpose(conjugate(m)), m) c = cholesky(a) a1 = dot(transpose(conjugate(c)), c) assert_array_almost_equal(a, a1) c = transpose(c) a = dot(c, transpose(conjugate(c))) assert_array_almost_equal(cholesky(a, lower=1), c) def test_random(self): n = 20 for k in range(2): m = random([n, n]) for i in range(n): m[i, i] = 20*(.1+m[i, i]) a = dot(transpose(m), m) c = cholesky(a) a1 = dot(transpose(c), c) assert_array_almost_equal(a, a1) c = transpose(c) a = dot(c, transpose(c)) assert_array_almost_equal(cholesky(a, lower=1), c) def test_random_complex(self): n = 20 for k in range(2): m = random([n, n])+1j*random([n, n]) for i in range(n): m[i, i] = 20*(.1+abs(m[i, i])) a = dot(transpose(conjugate(m)), m) c = cholesky(a) a1 = dot(transpose(conjugate(c)), c) assert_array_almost_equal(a, a1) c = transpose(c) a = dot(c, transpose(conjugate(c))) assert_array_almost_equal(cholesky(a, lower=1), c) class TestCholeskyBanded(object): """Tests for 
cholesky_banded() and cho_solve_banded.""" def test_check_finite(self): # Symmetric positive definite banded matrix `a` a = array([[4.0, 1.0, 0.0, 0.0], [1.0, 4.0, 0.5, 0.0], [0.0, 0.5, 4.0, 0.2], [0.0, 0.0, 0.2, 4.0]]) # Banded storage form of `a`. ab = array([[-1.0, 1.0, 0.5, 0.2], [4.0, 4.0, 4.0, 4.0]]) c = cholesky_banded(ab, lower=False, check_finite=False) ufac = zeros_like(a) ufac[list(range(4)), list(range(4))] = c[-1] ufac[(0, 1, 2), (1, 2, 3)] = c[0, 1:] assert_array_almost_equal(a, dot(ufac.T, ufac)) b = array([0.0, 0.5, 4.2, 4.2]) x = cho_solve_banded((c, False), b, check_finite=False) assert_array_almost_equal(x, [0.0, 0.0, 1.0, 1.0]) def test_upper_real(self): # Symmetric positive definite banded matrix `a` a = array([[4.0, 1.0, 0.0, 0.0], [1.0, 4.0, 0.5, 0.0], [0.0, 0.5, 4.0, 0.2], [0.0, 0.0, 0.2, 4.0]]) # Banded storage form of `a`. ab = array([[-1.0, 1.0, 0.5, 0.2], [4.0, 4.0, 4.0, 4.0]]) c = cholesky_banded(ab, lower=False) ufac = zeros_like(a) ufac[list(range(4)), list(range(4))] = c[-1] ufac[(0, 1, 2), (1, 2, 3)] = c[0, 1:] assert_array_almost_equal(a, dot(ufac.T, ufac)) b = array([0.0, 0.5, 4.2, 4.2]) x = cho_solve_banded((c, False), b) assert_array_almost_equal(x, [0.0, 0.0, 1.0, 1.0]) def test_upper_complex(self): # Hermitian positive definite banded matrix `a` a = array([[4.0, 1.0, 0.0, 0.0], [1.0, 4.0, 0.5, 0.0], [0.0, 0.5, 4.0, -0.2j], [0.0, 0.0, 0.2j, 4.0]]) # Banded storage form of `a`. 
ab = array([[-1.0, 1.0, 0.5, -0.2j], [4.0, 4.0, 4.0, 4.0]]) c = cholesky_banded(ab, lower=False) ufac = zeros_like(a) ufac[list(range(4)), list(range(4))] = c[-1] ufac[(0, 1, 2), (1, 2, 3)] = c[0, 1:] assert_array_almost_equal(a, dot(ufac.conj().T, ufac)) b = array([0.0, 0.5, 4.0-0.2j, 0.2j + 4.0]) x = cho_solve_banded((c, False), b) assert_array_almost_equal(x, [0.0, 0.0, 1.0, 1.0]) def test_lower_real(self): # Symmetric positive definite banded matrix `a` a = array([[4.0, 1.0, 0.0, 0.0], [1.0, 4.0, 0.5, 0.0], [0.0, 0.5, 4.0, 0.2], [0.0, 0.0, 0.2, 4.0]]) # Banded storage form of `a`. ab = array([[4.0, 4.0, 4.0, 4.0], [1.0, 0.5, 0.2, -1.0]]) c = cholesky_banded(ab, lower=True) lfac = zeros_like(a) lfac[list(range(4)), list(range(4))] = c[0] lfac[(1, 2, 3), (0, 1, 2)] = c[1, :3] assert_array_almost_equal(a, dot(lfac, lfac.T)) b = array([0.0, 0.5, 4.2, 4.2]) x = cho_solve_banded((c, True), b) assert_array_almost_equal(x, [0.0, 0.0, 1.0, 1.0]) def test_lower_complex(self): # Hermitian positive definite banded matrix `a` a = array([[4.0, 1.0, 0.0, 0.0], [1.0, 4.0, 0.5, 0.0], [0.0, 0.5, 4.0, -0.2j], [0.0, 0.0, 0.2j, 4.0]]) # Banded storage form of `a`. 
ab = array([[4.0, 4.0, 4.0, 4.0], [1.0, 0.5, 0.2j, -1.0]]) c = cholesky_banded(ab, lower=True) lfac = zeros_like(a) lfac[list(range(4)), list(range(4))] = c[0] lfac[(1, 2, 3), (0, 1, 2)] = c[1, :3] assert_array_almost_equal(a, dot(lfac, lfac.conj().T)) b = array([0.0, 0.5j, 3.8j, 3.8]) x = cho_solve_banded((c, True), b) assert_array_almost_equal(x, [0.0, 0.0, 1.0j, 1.0]) class TestOverwrite(object): def test_cholesky(self): assert_no_overwrite(cholesky, [(3, 3)]) def test_cho_factor(self): assert_no_overwrite(cho_factor, [(3, 3)]) def test_cho_solve(self): x = array([[2, -1, 0], [-1, 2, -1], [0, -1, 2]]) xcho = cho_factor(x) assert_no_overwrite(lambda b: cho_solve(xcho, b), [(3,)]) def test_cholesky_banded(self): assert_no_overwrite(cholesky_banded, [(2, 3)]) def test_cho_solve_banded(self): x = array([[0, -1, -1], [2, 2, 2]]) xcho = cholesky_banded(x) assert_no_overwrite(lambda b: cho_solve_banded((xcho, False), b), [(3,)]) class TestEmptyArray(object): def test_cho_factor_empty_square(self): a = empty((0, 0)) b = array([]) c = array([[]]) d = [] e = [[]] x, _ = cho_factor(a) assert_array_equal(x, a) for x in ([b, c, d, e]): assert_raises(ValueError, cho_factor, x)
7,363
34.921951
78
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/tests/test_solve_toeplitz.py
"""Test functions for linalg._solve_toeplitz module """ from __future__ import division, print_function, absolute_import import numpy as np from scipy.linalg._solve_toeplitz import levinson from scipy.linalg import solve, toeplitz, solve_toeplitz from numpy.testing import assert_equal, assert_allclose import pytest from pytest import raises as assert_raises def test_solve_equivalence(): # For toeplitz matrices, solve_toeplitz() should be equivalent to solve(). random = np.random.RandomState(1234) for n in (1, 2, 3, 10): c = random.randn(n) if random.rand() < 0.5: c = c + 1j * random.randn(n) r = random.randn(n) if random.rand() < 0.5: r = r + 1j * random.randn(n) y = random.randn(n) if random.rand() < 0.5: y = y + 1j * random.randn(n) # Check equivalence when both the column and row are provided. actual = solve_toeplitz((c,r), y) desired = solve(toeplitz(c, r=r), y) assert_allclose(actual, desired) # Check equivalence when the column is provided but not the row. actual = solve_toeplitz(c, b=y) desired = solve(toeplitz(c), y) assert_allclose(actual, desired) def test_multiple_rhs(): random = np.random.RandomState(1234) c = random.randn(4) r = random.randn(4) for offset in [0, 1j]: for yshape in ((4,), (4, 3), (4, 3, 2)): y = random.randn(*yshape) + offset actual = solve_toeplitz((c,r), b=y) desired = solve(toeplitz(c, r=r), y) assert_equal(actual.shape, yshape) assert_equal(desired.shape, yshape) assert_allclose(actual, desired) def test_native_list_arguments(): c = [1,2,4,7] r = [1,3,9,12] y = [5,1,4,2] actual = solve_toeplitz((c,r), y) desired = solve(toeplitz(c, r=r), y) assert_allclose(actual, desired) def test_zero_diag_error(): # The Levinson-Durbin implementation fails when the diagonal is zero. 
random = np.random.RandomState(1234) n = 4 c = random.randn(n) r = random.randn(n) y = random.randn(n) c[0] = 0 assert_raises(np.linalg.LinAlgError, solve_toeplitz, (c, r), b=y) def test_wikipedia_counterexample(): # The Levinson-Durbin implementation also fails in other cases. # This example is from the talk page of the wikipedia article. random = np.random.RandomState(1234) c = [2, 2, 1] y = random.randn(3) assert_raises(np.linalg.LinAlgError, solve_toeplitz, c, b=y) def test_reflection_coeffs(): # check that that the partial solutions are given by the reflection # coefficients random = np.random.RandomState(1234) y_d = random.randn(10) y_z = random.randn(10) + 1j reflection_coeffs_d = [1] reflection_coeffs_z = [1] for i in range(2, 10): reflection_coeffs_d.append(solve_toeplitz(y_d[:(i-1)], b=y_d[1:i])[-1]) reflection_coeffs_z.append(solve_toeplitz(y_z[:(i-1)], b=y_z[1:i])[-1]) y_d_concat = np.concatenate((y_d[-2:0:-1], y_d[:-1])) y_z_concat = np.concatenate((y_z[-2:0:-1].conj(), y_z[:-1])) _, ref_d = levinson(y_d_concat, b=y_d[1:]) _, ref_z = levinson(y_z_concat, b=y_z[1:]) assert_allclose(reflection_coeffs_d, ref_d[:-1]) assert_allclose(reflection_coeffs_z, ref_z[:-1]) @pytest.mark.xfail(reason='Instability of Levinson iteration') def test_unstable(): # this is a "Gaussian Toeplitz matrix", as mentioned in Example 2 of # I. Gohbert, T. Kailath and V. Olshevsky "Fast Gaussian Elimination with # Partial Pivoting for Matrices with Displacement Structure" # Mathematics of Computation, 64, 212 (1995), pp 1557-1576 # which can be unstable for levinson recursion. # other fast toeplitz solvers such as GKO or Burg should be better. random = np.random.RandomState(1234) n = 100 c = 0.9 ** (np.arange(n)**2) y = random.randn(n) solution1 = solve_toeplitz(c, b=y) solution2 = solve(toeplitz(c), y) assert_allclose(solution1, solution2)
4,105
32.112903
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/tests/test_cython_blas.py
import numpy as np from numpy.testing import (assert_allclose, assert_equal) import scipy.linalg.cython_blas as blas class TestDGEMM(object): def test_transposes(self): a = np.arange(12, dtype='d').reshape((3, 4))[:2,:2] b = np.arange(1, 13, dtype='d').reshape((4, 3))[:2,:2] c = np.empty((2, 4))[:2,:2] blas._test_dgemm(1., a, b, 0., c) assert_allclose(c, a.dot(b)) blas._test_dgemm(1., a.T, b, 0., c) assert_allclose(c, a.T.dot(b)) blas._test_dgemm(1., a, b.T, 0., c) assert_allclose(c, a.dot(b.T)) blas._test_dgemm(1., a.T, b.T, 0., c) assert_allclose(c, a.T.dot(b.T)) blas._test_dgemm(1., a, b, 0., c.T) assert_allclose(c, a.dot(b).T) blas._test_dgemm(1., a.T, b, 0., c.T) assert_allclose(c, a.T.dot(b).T) blas._test_dgemm(1., a, b.T, 0., c.T) assert_allclose(c, a.dot(b.T).T) blas._test_dgemm(1., a.T, b.T, 0., c.T) assert_allclose(c, a.T.dot(b.T).T) def test_shapes(self): a = np.arange(6, dtype='d').reshape((3, 2)) b = np.arange(-6, 2, dtype='d').reshape((2, 4)) c = np.empty((3, 4)) blas._test_dgemm(1., a, b, 0., c) assert_allclose(c, a.dot(b)) blas._test_dgemm(1., b.T, a.T, 0., c.T) assert_allclose(c, b.T.dot(a.T).T) class TestWfuncPointers(object): """ Test the function pointers that are expected to fail on Mac OS X without the additional entry statement in their definitions in fblas_l1.pyf.src. """ def test_complex_args(self): cx = np.array([.5 + 1.j, .25 - .375j, 12.5 - 4.j], np.complex64) cy = np.array([.8 + 2.j, .875 - .625j, -1. 
+ 2.j], np.complex64) assert_allclose(blas._test_cdotc(cx, cy), -17.6468753815+21.3718757629j, 5) assert_allclose(blas._test_cdotu(cx, cy), -6.11562538147+30.3156242371j, 5) assert_equal(blas._test_icamax(cx), 3) assert_allclose(blas._test_scasum(cx), 18.625, 5) assert_allclose(blas._test_scnrm2(cx), 13.1796483994, 5) assert_allclose(blas._test_cdotc(cx[::2], cy[::2]), -18.1000003815+21.2000007629j, 5) assert_allclose(blas._test_cdotu(cx[::2], cy[::2]), -6.10000038147+30.7999992371j, 5) assert_allclose(blas._test_scasum(cx[::2]), 18., 5) assert_allclose(blas._test_scnrm2(cx[::2]), 13.1719398499, 5) def test_double_args(self): x = np.array([5., -3, -.5], np.float64) y = np.array([2, 1, .5], np.float64) assert_allclose(blas._test_dasum(x), 8.5, 10) assert_allclose(blas._test_ddot(x, y), 6.75, 10) assert_allclose(blas._test_dnrm2(x), 5.85234975815, 10) assert_allclose(blas._test_dasum(x[::2]), 5.5, 10) assert_allclose(blas._test_ddot(x[::2], y[::2]), 9.75, 10) assert_allclose(blas._test_dnrm2(x[::2]), 5.0249376297, 10) assert_equal(blas._test_idamax(x), 1) def test_float_args(self): x = np.array([5., -3, -.5], np.float32) y = np.array([2, 1, .5], np.float32) assert_equal(blas._test_isamax(x), 1) assert_allclose(blas._test_sasum(x), 8.5, 5) assert_allclose(blas._test_sdot(x, y), 6.75, 5) assert_allclose(blas._test_snrm2(x), 5.85234975815, 5) assert_allclose(blas._test_sasum(x[::2]), 5.5, 5) assert_allclose(blas._test_sdot(x[::2], y[::2]), 9.75, 5) assert_allclose(blas._test_snrm2(x[::2]), 5.0249376297, 5) def test_double_complex_args(self): cx = np.array([.5 + 1.j, .25 - .375j, 13. - 4.j], np.complex128) cy = np.array([.875 + 2.j, .875 - .625j, -1. 
+ 2.j], np.complex128) assert_equal(blas._test_izamax(cx), 3) assert_allclose(blas._test_zdotc(cx, cy), -18.109375+22.296875j, 10) assert_allclose(blas._test_zdotu(cx, cy), -6.578125+31.390625j, 10) assert_allclose(blas._test_zdotc(cx[::2], cy[::2]), -18.5625+22.125j, 10) assert_allclose(blas._test_zdotu(cx[::2], cy[::2]), -6.5625+31.875j, 10)
4,233
33.991736
76
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/tests/test_cython_lapack.py
from numpy.testing import assert_allclose from scipy.linalg import cython_lapack as cython_lapack from scipy.linalg import lapack class TestLamch(object): def test_slamch(self): for c in [b'e', b's', b'b', b'p', b'n', b'r', b'm', b'u', b'l', b'o']: assert_allclose(cython_lapack._test_slamch(c), lapack.slamch(c)) def test_dlamch(self): for c in [b'e', b's', b'b', b'p', b'n', b'r', b'm', b'u', b'l', b'o']: assert_allclose(cython_lapack._test_dlamch(c), lapack.dlamch(c))
582
31.388889
78
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/tests/test_lapack.py
# # Created by: Pearu Peterson, September 2002 # from __future__ import division, print_function, absolute_import import sys import subprocess import time from numpy.testing import (assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal) import pytest from pytest import raises as assert_raises import numpy as np from numpy.random import rand, seed from scipy.linalg import _flapack as flapack from scipy.linalg import inv from scipy.linalg import svd from scipy.linalg.lapack import _compute_lwork try: from scipy.linalg import _clapack as clapack except ImportError: clapack = None from scipy.linalg.lapack import get_lapack_funcs from scipy.linalg.blas import get_blas_funcs REAL_DTYPES = [np.float32, np.float64] COMPLEX_DTYPES = [np.complex64, np.complex128] DTYPES = REAL_DTYPES + COMPLEX_DTYPES class TestFlapackSimple(object): def test_gebal(self): a = [[1, 2, 3], [4, 5, 6], [7, 8, 9]] a1 = [[1, 0, 0, 3e-4], [4, 0, 0, 2e-3], [7, 1, 0, 0], [0, 1, 0, 0]] for p in 'sdzc': f = getattr(flapack, p+'gebal', None) if f is None: continue ba, lo, hi, pivscale, info = f(a) assert_(not info, repr(info)) assert_array_almost_equal(ba, a) assert_equal((lo, hi), (0, len(a[0])-1)) assert_array_almost_equal(pivscale, np.ones(len(a))) ba, lo, hi, pivscale, info = f(a1, permute=1, scale=1) assert_(not info, repr(info)) # print(a1) # print(ba, lo, hi, pivscale) def test_gehrd(self): a = [[-149, -50, -154], [537, 180, 546], [-27, -9, -25]] for p in 'd': f = getattr(flapack, p+'gehrd', None) if f is None: continue ht, tau, info = f(a) assert_(not info, repr(info)) def test_trsyl(self): a = np.array([[1, 2], [0, 4]]) b = np.array([[5, 6], [0, 8]]) c = np.array([[9, 10], [11, 12]]) trans = 'T' # Test single and double implementations, including most # of the options for dtype in 'fdFD': a1, b1, c1 = a.astype(dtype), b.astype(dtype), c.astype(dtype) trsyl, = get_lapack_funcs(('trsyl',), (a1,)) if dtype.isupper(): # is complex dtype a1[0] += 
1j trans = 'C' x, scale, info = trsyl(a1, b1, c1) assert_array_almost_equal(np.dot(a1, x) + np.dot(x, b1), scale * c1) x, scale, info = trsyl(a1, b1, c1, trana=trans, tranb=trans) assert_array_almost_equal( np.dot(a1.conjugate().T, x) + np.dot(x, b1.conjugate().T), scale * c1, decimal=4) x, scale, info = trsyl(a1, b1, c1, isgn=-1) assert_array_almost_equal(np.dot(a1, x) - np.dot(x, b1), scale * c1, decimal=4) def test_lange(self): a = np.array([ [-149, -50, -154], [537, 180, 546], [-27, -9, -25]]) for dtype in 'fdFD': for norm in 'Mm1OoIiFfEe': a1 = a.astype(dtype) if dtype.isupper(): # is complex dtype a1[0, 0] += 1j lange, = get_lapack_funcs(('lange',), (a1,)) value = lange(norm, a1) if norm in 'FfEe': if dtype in 'Ff': decimal = 3 else: decimal = 7 ref = np.sqrt(np.sum(np.square(np.abs(a1)))) assert_almost_equal(value, ref, decimal) else: if norm in 'Mm': ref = np.max(np.abs(a1)) elif norm in '1Oo': ref = np.max(np.sum(np.abs(a1), axis=0)) elif norm in 'Ii': ref = np.max(np.sum(np.abs(a1), axis=1)) assert_equal(value, ref) class TestLapack(object): def test_flapack(self): if hasattr(flapack, 'empty_module'): # flapack module is empty pass def test_clapack(self): if hasattr(clapack, 'empty_module'): # clapack module is empty pass class TestLeastSquaresSolvers(object): def test_gels(self): seed(1234) # Test fat/tall matrix argument handling - gh-issue #8329 for ind, dtype in enumerate(DTYPES): m = 10 n = 20 nrhs = 1 a1 = rand(m, n).astype(dtype) b1 = rand(n).astype(dtype) gls, glslw = get_lapack_funcs(('gels', 'gels_lwork'), dtype=dtype) # Request of sizes lwork = _compute_lwork(glslw, m, n, nrhs) _, _, info = gls(a1, b1, lwork=lwork) assert_(info >= 0) _, _, info = gls(a1, b1, trans='TTCC'[ind], lwork=lwork) assert_(info >= 0) for dtype in REAL_DTYPES: a1 = np.array([[1.0, 2.0], [4.0, 5.0], [7.0, 8.0]], dtype=dtype) b1 = np.array([16.0, 17.0, 20.0], dtype=dtype) gels, gels_lwork, geqrf = get_lapack_funcs( ('gels', 'gels_lwork', 'geqrf'), (a1, b1)) m, n = a1.shape 
if len(b1.shape) == 2: nrhs = b1.shape[1] else: nrhs = 1 # Request of sizes lwork = _compute_lwork(gels_lwork, m, n, nrhs) lqr, x, info = gels(a1, b1, lwork=lwork) assert_allclose(x[:-1], np.array([-14.333333333333323, 14.999999999999991], dtype=dtype), rtol=25*np.finfo(dtype).eps) lqr_truth, _, _, _ = geqrf(a1) assert_array_equal(lqr, lqr_truth) for dtype in COMPLEX_DTYPES: a1 = np.array([[1.0+4.0j, 2.0], [4.0+0.5j, 5.0-3.0j], [7.0-2.0j, 8.0+0.7j]], dtype=dtype) b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype) gels, gels_lwork, geqrf = get_lapack_funcs( ('gels', 'gels_lwork', 'geqrf'), (a1, b1)) m, n = a1.shape if len(b1.shape) == 2: nrhs = b1.shape[1] else: nrhs = 1 # Request of sizes lwork = _compute_lwork(gels_lwork, m, n, nrhs) lqr, x, info = gels(a1, b1, lwork=lwork) assert_allclose(x[:-1], np.array([1.161753632288328-1.901075709391912j, 1.735882340522193+1.521240901196909j], dtype=dtype), rtol=25*np.finfo(dtype).eps) lqr_truth, _, _, _ = geqrf(a1) assert_array_equal(lqr, lqr_truth) def test_gelsd(self): for dtype in REAL_DTYPES: a1 = np.array([[1.0, 2.0], [4.0, 5.0], [7.0, 8.0]], dtype=dtype) b1 = np.array([16.0, 17.0, 20.0], dtype=dtype) gelsd, gelsd_lwork = get_lapack_funcs(('gelsd', 'gelsd_lwork'), (a1, b1)) m, n = a1.shape if len(b1.shape) == 2: nrhs = b1.shape[1] else: nrhs = 1 # Request of sizes work, iwork, info = gelsd_lwork(m, n, nrhs, -1) lwork = int(np.real(work)) iwork_size = iwork x, s, rank, info = gelsd(a1, b1, lwork, iwork_size, -1, False, False) assert_allclose(x[:-1], np.array([-14.333333333333323, 14.999999999999991], dtype=dtype), rtol=25*np.finfo(dtype).eps) assert_allclose(s, np.array([12.596017180511966, 0.583396253199685], dtype=dtype), rtol=25*np.finfo(dtype).eps) for dtype in COMPLEX_DTYPES: a1 = np.array([[1.0+4.0j, 2.0], [4.0+0.5j, 5.0-3.0j], [7.0-2.0j, 8.0+0.7j]], dtype=dtype) b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype) gelsd, gelsd_lwork = get_lapack_funcs(('gelsd', 'gelsd_lwork'), (a1, b1)) m, n = a1.shape 
if len(b1.shape) == 2: nrhs = b1.shape[1] else: nrhs = 1 # Request of sizes work, rwork, iwork, info = gelsd_lwork(m, n, nrhs, -1) lwork = int(np.real(work)) rwork_size = int(rwork) iwork_size = iwork x, s, rank, info = gelsd(a1, b1, lwork, rwork_size, iwork_size, -1, False, False) assert_allclose(x[:-1], np.array([1.161753632288328-1.901075709391912j, 1.735882340522193+1.521240901196909j], dtype=dtype), rtol=25*np.finfo(dtype).eps) assert_allclose(s, np.array([13.035514762572043, 4.337666985231382], dtype=dtype), rtol=25*np.finfo(dtype).eps) def test_gelss(self): for dtype in REAL_DTYPES: a1 = np.array([[1.0, 2.0], [4.0, 5.0], [7.0, 8.0]], dtype=dtype) b1 = np.array([16.0, 17.0, 20.0], dtype=dtype) gelss, gelss_lwork = get_lapack_funcs(('gelss', 'gelss_lwork'), (a1, b1)) m, n = a1.shape if len(b1.shape) == 2: nrhs = b1.shape[1] else: nrhs = 1 # Request of sizes work, info = gelss_lwork(m, n, nrhs, -1) lwork = int(np.real(work)) v, x, s, rank, work, info = gelss(a1, b1, -1, lwork, False, False) assert_allclose(x[:-1], np.array([-14.333333333333323, 14.999999999999991], dtype=dtype), rtol=25*np.finfo(dtype).eps) assert_allclose(s, np.array([12.596017180511966, 0.583396253199685], dtype=dtype), rtol=25*np.finfo(dtype).eps) for dtype in COMPLEX_DTYPES: a1 = np.array([[1.0+4.0j, 2.0], [4.0+0.5j, 5.0-3.0j], [7.0-2.0j, 8.0+0.7j]], dtype=dtype) b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype) gelss, gelss_lwork = get_lapack_funcs(('gelss', 'gelss_lwork'), (a1, b1)) m, n = a1.shape if len(b1.shape) == 2: nrhs = b1.shape[1] else: nrhs = 1 # Request of sizes work, info = gelss_lwork(m, n, nrhs, -1) lwork = int(np.real(work)) v, x, s, rank, work, info = gelss(a1, b1, -1, lwork, False, False) assert_allclose(x[:-1], np.array([1.161753632288328-1.901075709391912j, 1.735882340522193+1.521240901196909j], dtype=dtype), rtol=25*np.finfo(dtype).eps) assert_allclose(s, np.array([13.035514762572043, 4.337666985231382], dtype=dtype), rtol=25*np.finfo(dtype).eps) def 
test_gelsy(self): for dtype in REAL_DTYPES: a1 = np.array([[1.0, 2.0], [4.0, 5.0], [7.0, 8.0]], dtype=dtype) b1 = np.array([16.0, 17.0, 20.0], dtype=dtype) gelsy, gelsy_lwork = get_lapack_funcs(('gelsy', 'gelss_lwork'), (a1, b1)) m, n = a1.shape if len(b1.shape) == 2: nrhs = b1.shape[1] else: nrhs = 1 # Request of sizes work, info = gelsy_lwork(m, n, nrhs, 10*np.finfo(dtype).eps) lwork = int(np.real(work)) jptv = np.zeros((a1.shape[1], 1), dtype=np.int32) v, x, j, rank, info = gelsy(a1, b1, jptv, np.finfo(dtype).eps, lwork, False, False) assert_allclose(x[:-1], np.array([-14.333333333333323, 14.999999999999991], dtype=dtype), rtol=25*np.finfo(dtype).eps) for dtype in COMPLEX_DTYPES: a1 = np.array([[1.0+4.0j, 2.0], [4.0+0.5j, 5.0-3.0j], [7.0-2.0j, 8.0+0.7j]], dtype=dtype) b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype) gelsy, gelsy_lwork = get_lapack_funcs(('gelsy', 'gelss_lwork'), (a1, b1)) m, n = a1.shape if len(b1.shape) == 2: nrhs = b1.shape[1] else: nrhs = 1 # Request of sizes work, info = gelsy_lwork(m, n, nrhs, 10*np.finfo(dtype).eps) lwork = int(np.real(work)) jptv = np.zeros((a1.shape[1], 1), dtype=np.int32) v, x, j, rank, info = gelsy(a1, b1, jptv, np.finfo(dtype).eps, lwork, False, False) assert_allclose(x[:-1], np.array([1.161753632288328-1.901075709391912j, 1.735882340522193+1.521240901196909j], dtype=dtype), rtol=25*np.finfo(dtype).eps) class TestRegression(object): def test_ticket_1645(self): # Check that RQ routines have correct lwork for dtype in DTYPES: a = np.zeros((300, 2), dtype=dtype) gerqf, = get_lapack_funcs(['gerqf'], [a]) assert_raises(Exception, gerqf, a, lwork=2) rq, tau, work, info = gerqf(a) if dtype in REAL_DTYPES: orgrq, = get_lapack_funcs(['orgrq'], [a]) assert_raises(Exception, orgrq, rq[-2:], tau, lwork=1) orgrq(rq[-2:], tau, lwork=2) elif dtype in COMPLEX_DTYPES: ungrq, = get_lapack_funcs(['ungrq'], [a]) assert_raises(Exception, ungrq, rq[-2:], tau, lwork=1) ungrq(rq[-2:], tau, lwork=2) class TestDpotr(object): def 
test_gh_2691(self): # 'lower' argument of dportf/dpotri for lower in [True, False]: for clean in [True, False]: np.random.seed(42) x = np.random.normal(size=(3, 3)) a = x.dot(x.T) dpotrf, dpotri = get_lapack_funcs(("potrf", "potri"), (a, )) c, info = dpotrf(a, lower, clean=clean) dpt = dpotri(c, lower)[0] if lower: assert_allclose(np.tril(dpt), np.tril(inv(a))) else: assert_allclose(np.triu(dpt), np.triu(inv(a))) class TestDlasd4(object): def test_sing_val_update(self): sigmas = np.array([4., 3., 2., 0]) m_vec = np.array([3.12, 5.7, -4.8, -2.2]) M = np.hstack((np.vstack((np.diag(sigmas[0:-1]), np.zeros((1, len(m_vec) - 1)))), m_vec[:, np.newaxis])) SM = svd(M, full_matrices=False, compute_uv=False, overwrite_a=False, check_finite=False) it_len = len(sigmas) sgm = np.concatenate((sigmas[::-1], (sigmas[0] + it_len*np.sqrt(np.sum(np.power(m_vec, 2))),))) mvc = np.concatenate((m_vec[::-1], (0,))) lasd4 = get_lapack_funcs('lasd4', (sigmas,)) roots = [] for i in range(0, it_len): res = lasd4(i, sgm, mvc) roots.append(res[1]) assert_((res[3] <= 0), "LAPACK root finding dlasd4 failed to find \ the singular value %i" % i) roots = np.array(roots)[::-1] assert_((not np.any(np.isnan(roots)), "There are NaN roots")) assert_allclose(SM, roots, atol=100*np.finfo(np.float64).eps, rtol=100*np.finfo(np.float64).eps) def test_lartg(): for dtype in 'fdFD': lartg = get_lapack_funcs('lartg', dtype=dtype) f = np.array(3, dtype) g = np.array(4, dtype) if np.iscomplexobj(g): g *= 1j cs, sn, r = lartg(f, g) assert_allclose(cs, 3.0/5.0) assert_allclose(r, 5.0) if np.iscomplexobj(g): assert_allclose(sn, -4.0j/5.0) assert_(type(r) == complex) assert_(type(cs) == float) else: assert_allclose(sn, 4.0/5.0) def test_rot(): # srot, drot from blas and crot and zrot from lapack. 
for dtype in 'fdFD': c = 0.6 s = 0.8 u = np.ones(4, dtype) * 3 v = np.ones(4, dtype) * 4 atol = 10**-(np.finfo(dtype).precision-1) if dtype in 'fd': rot = get_blas_funcs('rot', dtype=dtype) f = 4 else: rot = get_lapack_funcs('rot', dtype=dtype) s *= -1j v *= 1j f = 4j assert_allclose(rot(u, v, c, s), [[5, 5, 5, 5], [0, 0, 0, 0]], atol=atol) assert_allclose(rot(u, v, c, s, n=2), [[5, 5, 3, 3], [0, 0, f, f]], atol=atol) assert_allclose(rot(u, v, c, s, offx=2, offy=2), [[3, 3, 5, 5], [f, f, 0, 0]], atol=atol) assert_allclose(rot(u, v, c, s, incx=2, offy=2, n=2), [[5, 3, 5, 3], [f, f, 0, 0]], atol=atol) assert_allclose(rot(u, v, c, s, offx=2, incy=2, n=2), [[3, 3, 5, 5], [0, f, 0, f]], atol=atol) assert_allclose(rot(u, v, c, s, offx=2, incx=2, offy=2, incy=2, n=1), [[3, 3, 5, 3], [f, f, 0, f]], atol=atol) assert_allclose(rot(u, v, c, s, incx=-2, incy=-2, n=2), [[5, 3, 5, 3], [0, f, 0, f]], atol=atol) a, b = rot(u, v, c, s, overwrite_x=1, overwrite_y=1) assert_(a is u) assert_(b is v) assert_allclose(a, [5, 5, 5, 5], atol=atol) assert_allclose(b, [0, 0, 0, 0], atol=atol) def test_larfg_larf(): np.random.seed(1234) a0 = np.random.random((4, 4)) a0 = a0.T.dot(a0) a0j = np.random.random((4, 4)) + 1j*np.random.random((4, 4)) a0j = a0j.T.conj().dot(a0j) # our test here will be to do one step of reducing a hermetian matrix to # tridiagonal form using householder transforms. 
for dtype in 'fdFD': larfg, larf = get_lapack_funcs(['larfg', 'larf'], dtype=dtype) if dtype in 'FD': a = a0j.copy() else: a = a0.copy() # generate a householder transform to clear a[2:,0] alpha, x, tau = larfg(a.shape[0]-1, a[1, 0], a[2:, 0]) # create expected output expected = np.zeros_like(a[:, 0]) expected[0] = a[0, 0] expected[1] = alpha # assemble householder vector v = np.zeros_like(a[1:, 0]) v[0] = 1.0 v[1:] = x # apply transform from the left a[1:, :] = larf(v, tau.conjugate(), a[1:, :], np.zeros(a.shape[1])) # apply transform from the right a[:, 1:] = larf(v, tau, a[:,1:], np.zeros(a.shape[0]), side='R') assert_allclose(a[:, 0], expected, atol=1e-5) assert_allclose(a[0, :], expected, atol=1e-5) @pytest.mark.xslow def test_sgesdd_lwork_bug_workaround(): # Test that SGESDD lwork is sufficiently large for LAPACK. # # This checks that workaround around an apparent LAPACK bug # actually works. cf. gh-5401 # # xslow: requires 1GB+ of memory p = subprocess.Popen([sys.executable, '-c', 'import numpy as np; ' 'from scipy.linalg import svd; ' 'a = np.zeros([9537, 9537], dtype=np.float32); ' 'svd(a)'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) # Check if it an error occurred within 5 sec; the computation can # take substantially longer, and we will not wait for it to finish for j in range(50): time.sleep(0.1) if p.poll() is not None: returncode = p.returncode break else: # Didn't exit in time -- probably entered computation. The # error is raised before entering computation, so things are # probably OK. 
returncode = 0 p.terminate() assert_equal(returncode, 0, "Code apparently failed: " + p.stdout.read()) class TestSytrd(object): def test_sytrd(self): for dtype in REAL_DTYPES: # Assert that a 0x0 matrix raises an error A = np.zeros((0, 0), dtype=dtype) sytrd, sytrd_lwork = \ get_lapack_funcs(('sytrd', 'sytrd_lwork'), (A,)) assert_raises(ValueError, sytrd, A) # Tests for n = 1 currently fail with # ``` # ValueError: failed to create intent(cache|hide)|optional array-- # must have defined dimensions but got (0,) # ``` # This is a NumPy issue # <https://github.com/numpy/numpy/issues/9617>. # TODO once the issue has been resolved, test for n=1 # some upper triangular array n = 3 A = np.zeros((n, n), dtype=dtype) A[np.triu_indices_from(A)] = \ np.arange(1, n*(n+1)//2+1, dtype=dtype) # query lwork lwork, info = sytrd_lwork(n) assert_equal(info, 0) # check lower=1 behavior (shouldn't do much since the matrix is # upper triangular) data, d, e, tau, info = sytrd(A, lower=1, lwork=lwork) assert_equal(info, 0) assert_allclose(data, A, atol=5*np.finfo(dtype).eps, rtol=1.0) assert_allclose(d, np.diag(A)) assert_allclose(e, 0.0) assert_allclose(tau, 0.0) # and now for the proper test (lower=0 is the default) data, d, e, tau, info = sytrd(A, lwork=lwork) assert_equal(info, 0) # assert Q^T*A*Q = tridiag(e, d, e) # build tridiagonal matrix T = np.zeros_like(A, dtype=dtype) k = np.arange(A.shape[0]) T[k, k] = d k2 = np.arange(A.shape[0]-1) T[k2+1, k2] = e T[k2, k2+1] = e # build Q Q = np.eye(n, n, dtype=dtype) for i in range(n-1): v = np.zeros(n, dtype=dtype) v[:i] = data[:i, i+1] v[i] = 1.0 H = np.eye(n, n, dtype=dtype) - tau[i] * np.outer(v, v) Q = np.dot(H, Q) # Make matrix fully symmetric i_lower = np.tril_indices(n, -1) A[i_lower] = A.T[i_lower] QTAQ = np.dot(Q.T, np.dot(A, Q)) # disable rtol here since some values in QTAQ and T are very close # to 0. 
assert_allclose(QTAQ, T, atol=5*np.finfo(dtype).eps, rtol=1.0) class TestHetrd(object): def test_hetrd(self): for real_dtype, complex_dtype in zip(REAL_DTYPES, COMPLEX_DTYPES): # Assert that a 0x0 matrix raises an error A = np.zeros((0, 0), dtype=complex_dtype) hetrd, hetrd_lwork = \ get_lapack_funcs(('hetrd', 'hetrd_lwork'), (A,)) assert_raises(ValueError, hetrd, A) # Tests for n = 1 currently fail with # ``` # ValueError: failed to create intent(cache|hide)|optional array-- # must have defined dimensions but got (0,) # ``` # This is a NumPy issue # <https://github.com/numpy/numpy/issues/9617>. # TODO once the issue has been resolved, test for n=1 # some upper triangular array n = 3 A = np.zeros((n, n), dtype=complex_dtype) A[np.triu_indices_from(A)] = ( np.arange(1, n*(n+1)//2+1, dtype=real_dtype) + 1j * np.arange(1, n*(n+1)//2+1, dtype=real_dtype) ) np.fill_diagonal(A, np.real(np.diag(A))) # query lwork lwork, info = hetrd_lwork(n) assert_equal(info, 0) # check lower=1 behavior (shouldn't do much since the matrix is # upper triangular) data, d, e, tau, info = hetrd(A, lower=1, lwork=lwork) assert_equal(info, 0) assert_allclose(data, A, atol=5*np.finfo(real_dtype).eps, rtol=1.0) assert_allclose(d, np.real(np.diag(A))) assert_allclose(e, 0.0) assert_allclose(tau, 0.0) # and now for the proper test (lower=0 is the default) data, d, e, tau, info = hetrd(A, lwork=lwork) assert_equal(info, 0) # assert Q^T*A*Q = tridiag(e, d, e) # build tridiagonal matrix T = np.zeros_like(A, dtype=real_dtype) k = np.arange(A.shape[0], dtype=int) T[k, k] = d k2 = np.arange(A.shape[0]-1, dtype=int) T[k2+1, k2] = e T[k2, k2+1] = e # build Q Q = np.eye(n, n, dtype=complex_dtype) for i in range(n-1): v = np.zeros(n, dtype=complex_dtype) v[:i] = data[:i, i+1] v[i] = 1.0 H = np.eye(n, n, dtype=complex_dtype) \ - tau[i] * np.outer(v, np.conj(v)) Q = np.dot(H, Q) # Make matrix fully Hermetian i_lower = np.tril_indices(n, -1) A[i_lower] = np.conj(A.T[i_lower]) QHAQ = np.dot(np.conj(Q.T), 
np.dot(A, Q)) # disable rtol here since some values in QTAQ and T are very close # to 0. assert_allclose( QHAQ, T, atol=10*np.finfo(real_dtype).eps, rtol=1.0 ) def test_gglse(): # Example data taken from NAG manual for ind, dtype in enumerate(DTYPES): # DTYPES = <s,d,c,z> gglse func, func_lwork = get_lapack_funcs(('gglse', 'gglse_lwork'), dtype=dtype) lwork = _compute_lwork(func_lwork, m=6, n=4, p=2) # For <s,d>gglse if ind < 2: a = np.array([[-0.57, -1.28, -0.39, 0.25], [-1.93, 1.08, -0.31, -2.14], [2.30, 0.24, 0.40, -0.35], [-1.93, 0.64, -0.66, 0.08], [0.15, 0.30, 0.15, -2.13], [-0.02, 1.03, -1.43, 0.50]], dtype=dtype) c = np.array([-1.50, -2.14, 1.23, -0.54, -1.68, 0.82], dtype=dtype) d = np.array([0., 0.], dtype=dtype) # For <s,d>gglse else: a = np.array([[0.96-0.81j, -0.03+0.96j, -0.91+2.06j, -0.05+0.41j], [-0.98+1.98j, -1.20+0.19j, -0.66+0.42j, -0.81+0.56j], [0.62-0.46j, 1.01+0.02j, 0.63-0.17j, -1.11+0.60j], [0.37+0.38j, 0.19-0.54j, -0.98-0.36j, 0.22-0.20j], [0.83+0.51j, 0.20+0.01j, -0.17-0.46j, 1.47+1.59j], [1.08-0.28j, 0.20-0.12j, -0.07+1.23j, 0.26+0.26j]]) c = np.array([[-2.54+0.09j], [1.65-2.26j], [-2.11-3.96j], [1.82+3.30j], [-6.41+3.77j], [2.07+0.66j]]) d = np.zeros(2, dtype=dtype) b = np.array([[1., 0., -1., 0.], [0., 1., 0., -1.]], dtype=dtype) _, _, _, result, _ = func(a, b, c, d, lwork=lwork) if ind < 2: expected = np.array([0.48904455, 0.99754786, 0.48904455, 0.99754786]) else: expected = np.array([1.08742917-1.96205783j, -0.74093902+3.72973919j, 1.08742917-1.96205759j, -0.74093896+3.72973895j]) assert_array_almost_equal(result, expected, decimal=4) def test_sycon_hecon(): seed(1234) for ind, dtype in enumerate(DTYPES+COMPLEX_DTYPES): # DTYPES + COMPLEX DTYPES = <s,d,c,z> sycon + <c,z>hecon n = 10 # For <s,d,c,z>sycon if ind < 4: func_lwork = get_lapack_funcs('sytrf_lwork', dtype=dtype) funcon, functrf = get_lapack_funcs(('sycon', 'sytrf'), dtype=dtype) A = (rand(n, n)).astype(dtype) # For <c,z>hecon else: func_lwork = 
get_lapack_funcs('hetrf_lwork', dtype=dtype) funcon, functrf = get_lapack_funcs(('hecon', 'hetrf'), dtype=dtype) A = (rand(n, n) + rand(n, n)*1j).astype(dtype) # Since sycon only refers to upper/lower part, conj() is safe here. A = (A + A.conj().T)/2 + 2*np.eye(n, dtype=dtype) anorm = np.linalg.norm(A, 1) lwork = _compute_lwork(func_lwork, n) ldu, ipiv, _ = functrf(A, lwork=lwork, lower=1) rcond, _ = funcon(a=ldu, ipiv=ipiv, anorm=anorm, lower=1) # The error is at most 1-fold assert_(abs(1/rcond - np.linalg.cond(A, p=1))*rcond < 1) def test_sygst(): seed(1234) for ind, dtype in enumerate(REAL_DTYPES): # DTYPES = <s,d> sygst n = 10 potrf, sygst, syevd, sygvd = get_lapack_funcs(('potrf', 'sygst', 'syevd', 'sygvd'), dtype=dtype) A = rand(n, n).astype(dtype) A = (A + A.T)/2 # B must be positive definite B = rand(n, n).astype(dtype) B = (B + B.T)/2 + 2 * np.eye(n, dtype=dtype) # Perform eig (sygvd) _, eig_gvd, info = sygvd(A, B) assert_(info == 0) # Convert to std problem potrf b, info = potrf(B) assert_(info == 0) a, info = sygst(A, b) assert_(info == 0) eig, _, info = syevd(a) assert_(info == 0) assert_allclose(eig, eig_gvd, rtol=1e-4) def test_hegst(): seed(1234) for ind, dtype in enumerate(COMPLEX_DTYPES): # DTYPES = <c,z> hegst n = 10 potrf, hegst, heevd, hegvd = get_lapack_funcs(('potrf', 'hegst', 'heevd', 'hegvd'), dtype=dtype) A = rand(n, n).astype(dtype) + 1j * rand(n, n).astype(dtype) A = (A + A.conj().T)/2 # B must be positive definite B = rand(n, n).astype(dtype) + 1j * rand(n, n).astype(dtype) B = (B + B.conj().T)/2 + 2 * np.eye(n, dtype=dtype) # Perform eig (hegvd) _, eig_gvd, info = hegvd(A, B) assert_(info == 0) # Convert to std problem potrf b, info = potrf(B) assert_(info == 0) a, info = hegst(A, b) assert_(info == 0) eig, _, info = heevd(a) assert_(info == 0) assert_allclose(eig, eig_gvd, rtol=1e-4)
32,030
35.234163
104
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/tests/__init__.py
0
0
0
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/linalg/tests/test_decomp_update.py
from __future__ import division, print_function, absolute_import

import itertools

import numpy as np
from numpy.testing import assert_, assert_allclose, assert_equal
from pytest import raises as assert_raises

from scipy import linalg
import scipy.linalg._decomp_update as _decomp_update
from scipy.linalg._decomp_update import *


def assert_unitary(a, rtol=None, atol=None, assert_sqr=True):
    """Assert that the columns of ``a`` are orthonormal.

    Tolerances default to values scaled to the precision of ``a.dtype``.
    With ``assert_sqr`` true, ``a`` must also be square (fully unitary).
    """
    if rtol is None:
        rtol = 10.0 ** -(np.finfo(a.dtype).precision - 2)
    if atol is None:
        atol = 2 * np.finfo(a.dtype).eps

    if assert_sqr:
        assert_(a.shape[0] == a.shape[1], 'unitary matrices must be square')
    # a^H a equals the identity exactly when the columns are orthonormal.
    gram = np.dot(a.T.conj(), a)
    assert_allclose(gram, np.eye(a.shape[1]), rtol=rtol, atol=atol)


def assert_upper_tri(a, rtol=None, atol=None):
    """Assert that ``a`` is zero strictly below its main diagonal."""
    if rtol is None:
        rtol = 10.0 ** -(np.finfo(a.dtype).precision - 2)
    if atol is None:
        atol = 2 * np.finfo(a.dtype).eps
    # Boolean mask selecting the strictly-lower-triangular entries.
    below_diag = np.tri(a.shape[0], a.shape[1], -1, np.bool_)
    assert_allclose(a[below_diag], 0.0, rtol=rtol, atol=atol)


def check_qr(q, r, a, rtol, atol, assert_sqr=True):
    """Assert that ``(q, r)`` is a valid QR factorization of ``a``."""
    assert_unitary(q, rtol, atol, assert_sqr)
    assert_upper_tri(r, rtol, atol)
    assert_allclose(q.dot(r), a, rtol=rtol, atol=atol)


def make_strided(arrs):
    """Return equal-valued copies of the arrays with non-unit strides.

    Each result is a view into a larger zero buffer, starting at a nonzero
    offset and stepping by more than one element, cycling through a fixed
    table of (step, offset) pairs. Only 1-D and 2-D inputs are supported.
    """
    strides = [(3, 7), (2, 2), (3, 4), (4, 2), (5, 4), (2, 3), (2, 1), (4, 5)]
    kmax = len(strides)
    k = 0
    out = []
    for a in arrs:
        if a.ndim == 1:
            step, off = strides[k % kmax]
            k += 1
            backing = np.zeros(step * a.shape[0] + off, a.dtype)
            window = backing[off::step]
            window[...] = a
        elif a.ndim == 2:
            step0, off0 = strides[k % kmax]
            step1, off1 = strides[(k + 1) % kmax]
            k += 2
            backing = np.zeros((step0 * a.shape[0] + off0,
                                step1 * a.shape[1] + off1), a.dtype)
            window = backing[off0::step0, off1::step1]
            window[...] = a
        else:
            raise ValueError('make_strided only works for ndim = 1 or 2 arrays')
        out.append(window)
    return out


def negate_strides(arrs):
    # Return equal-valued copies of the arrays with negative strides, built
    # by filling a reversed view of a fresh buffer.
    ret = []
    for a in arrs:
        b = np.zeros_like(a)
        if b.ndim == 2:
            b = b[::-1, ::-1]
        elif b.ndim == 1:
            b = b[::-1]
        else:
            raise ValueError('negate_strides only works for ndim = 1 or 2 arrays')
        b[...]
= a ret.append(b) return ret def nonitemsize_strides(arrs): out = [] for a in arrs: a_dtype = a.dtype b = np.zeros(a.shape, [('a', a_dtype), ('junk', 'S1')]) c = b.getfield(a_dtype) c[...] = a out.append(c) return out def make_nonnative(arrs): out = [] for a in arrs: out.append(a.astype(a.dtype.newbyteorder())) return out class BaseQRdeltas(object): def setup_method(self): self.rtol = 10.0 ** -(np.finfo(self.dtype).precision-2) self.atol = 10 * np.finfo(self.dtype).eps def generate(self, type, mode='full'): np.random.seed(29382) shape = {'sqr': (8, 8), 'tall': (12, 7), 'fat': (7, 12), 'Mx1': (8, 1), '1xN': (1, 8), '1x1': (1, 1)}[type] a = np.random.random(shape) if np.iscomplexobj(self.dtype.type(1)): b = np.random.random(shape) a = a + 1j * b a = a.astype(self.dtype) q, r = linalg.qr(a, mode=mode) return a, q, r class BaseQRdelete(BaseQRdeltas): def test_sqr_1_row(self): a, q, r = self.generate('sqr') for row in range(r.shape[0]): q1, r1 = qr_delete(q, r, row, overwrite_qr=False) a1 = np.delete(a, row, 0) check_qr(q1, r1, a1, self.rtol, self.atol) def test_sqr_p_row(self): a, q, r = self.generate('sqr') for ndel in range(2, 6): for row in range(a.shape[0]-ndel): q1, r1 = qr_delete(q, r, row, ndel, overwrite_qr=False) a1 = np.delete(a, slice(row, row+ndel), 0) check_qr(q1, r1, a1, self.rtol, self.atol) def test_sqr_1_col(self): a, q, r = self.generate('sqr') for col in range(r.shape[1]): q1, r1 = qr_delete(q, r, col, which='col', overwrite_qr=False) a1 = np.delete(a, col, 1) check_qr(q1, r1, a1, self.rtol, self.atol) def test_sqr_p_col(self): a, q, r = self.generate('sqr') for ndel in range(2, 6): for col in range(r.shape[1]-ndel): q1, r1 = qr_delete(q, r, col, ndel, which='col', overwrite_qr=False) a1 = np.delete(a, slice(col, col+ndel), 1) check_qr(q1, r1, a1, self.rtol, self.atol) def test_tall_1_row(self): a, q, r = self.generate('tall') for row in range(r.shape[0]): q1, r1 = qr_delete(q, r, row, overwrite_qr=False) a1 = np.delete(a, row, 0) check_qr(q1, r1, 
a1, self.rtol, self.atol) def test_tall_p_row(self): a, q, r = self.generate('tall') for ndel in range(2, 6): for row in range(a.shape[0]-ndel): q1, r1 = qr_delete(q, r, row, ndel, overwrite_qr=False) a1 = np.delete(a, slice(row, row+ndel), 0) check_qr(q1, r1, a1, self.rtol, self.atol) def test_tall_1_col(self): a, q, r = self.generate('tall') for col in range(r.shape[1]): q1, r1 = qr_delete(q, r, col, which='col', overwrite_qr=False) a1 = np.delete(a, col, 1) check_qr(q1, r1, a1, self.rtol, self.atol) def test_tall_p_col(self): a, q, r = self.generate('tall') for ndel in range(2, 6): for col in range(r.shape[1]-ndel): q1, r1 = qr_delete(q, r, col, ndel, which='col', overwrite_qr=False) a1 = np.delete(a, slice(col, col+ndel), 1) check_qr(q1, r1, a1, self.rtol, self.atol) def test_fat_1_row(self): a, q, r = self.generate('fat') for row in range(r.shape[0]): q1, r1 = qr_delete(q, r, row, overwrite_qr=False) a1 = np.delete(a, row, 0) check_qr(q1, r1, a1, self.rtol, self.atol) def test_fat_p_row(self): a, q, r = self.generate('fat') for ndel in range(2, 6): for row in range(a.shape[0]-ndel): q1, r1 = qr_delete(q, r, row, ndel, overwrite_qr=False) a1 = np.delete(a, slice(row, row+ndel), 0) check_qr(q1, r1, a1, self.rtol, self.atol) def test_fat_1_col(self): a, q, r = self.generate('fat') for col in range(r.shape[1]): q1, r1 = qr_delete(q, r, col, which='col', overwrite_qr=False) a1 = np.delete(a, col, 1) check_qr(q1, r1, a1, self.rtol, self.atol) def test_fat_p_col(self): a, q, r = self.generate('fat') for ndel in range(2, 6): for col in range(r.shape[1]-ndel): q1, r1 = qr_delete(q, r, col, ndel, which='col', overwrite_qr=False) a1 = np.delete(a, slice(col, col+ndel), 1) check_qr(q1, r1, a1, self.rtol, self.atol) def test_economic_1_row(self): # this test always starts and ends with an economic decomp. 
a, q, r = self.generate('tall', 'economic') for row in range(r.shape[0]): q1, r1 = qr_delete(q, r, row, overwrite_qr=False) a1 = np.delete(a, row, 0) check_qr(q1, r1, a1, self.rtol, self.atol, False) # for economic row deletes # eco - prow = eco # eco - prow = sqr # eco - prow = fat def base_economic_p_row_xxx(self, ndel): a, q, r = self.generate('tall', 'economic') for row in range(a.shape[0]-ndel): q1, r1 = qr_delete(q, r, row, ndel, overwrite_qr=False) a1 = np.delete(a, slice(row, row+ndel), 0) check_qr(q1, r1, a1, self.rtol, self.atol, False) def test_economic_p_row_economic(self): # (12, 7) - (3, 7) = (9,7) --> stays economic self.base_economic_p_row_xxx(3) def test_economic_p_row_sqr(self): # (12, 7) - (5, 7) = (7, 7) --> becomes square self.base_economic_p_row_xxx(5) def test_economic_p_row_fat(self): # (12, 7) - (7,7) = (5, 7) --> becomes fat self.base_economic_p_row_xxx(7) def test_economic_1_col(self): a, q, r = self.generate('tall', 'economic') for col in range(r.shape[1]): q1, r1 = qr_delete(q, r, col, which='col', overwrite_qr=False) a1 = np.delete(a, col, 1) check_qr(q1, r1, a1, self.rtol, self.atol, False) def test_economic_p_col(self): a, q, r = self.generate('tall', 'economic') for ndel in range(2, 6): for col in range(r.shape[1]-ndel): q1, r1 = qr_delete(q, r, col, ndel, which='col', overwrite_qr=False) a1 = np.delete(a, slice(col, col+ndel), 1) check_qr(q1, r1, a1, self.rtol, self.atol, False) def test_Mx1_1_row(self): a, q, r = self.generate('Mx1') for row in range(r.shape[0]): q1, r1 = qr_delete(q, r, row, overwrite_qr=False) a1 = np.delete(a, row, 0) check_qr(q1, r1, a1, self.rtol, self.atol) def test_Mx1_p_row(self): a, q, r = self.generate('Mx1') for ndel in range(2, 6): for row in range(a.shape[0]-ndel): q1, r1 = qr_delete(q, r, row, ndel, overwrite_qr=False) a1 = np.delete(a, slice(row, row+ndel), 0) check_qr(q1, r1, a1, self.rtol, self.atol) def test_1xN_1_col(self): a, q, r = self.generate('1xN') for col in range(r.shape[1]): q1, r1 = 
qr_delete(q, r, col, which='col', overwrite_qr=False) a1 = np.delete(a, col, 1) check_qr(q1, r1, a1, self.rtol, self.atol) def test_1xN_p_col(self): a, q, r = self.generate('1xN') for ndel in range(2, 6): for col in range(r.shape[1]-ndel): q1, r1 = qr_delete(q, r, col, ndel, which='col', overwrite_qr=False) a1 = np.delete(a, slice(col, col+ndel), 1) check_qr(q1, r1, a1, self.rtol, self.atol) def test_Mx1_economic_1_row(self): a, q, r = self.generate('Mx1', 'economic') for row in range(r.shape[0]): q1, r1 = qr_delete(q, r, row, overwrite_qr=False) a1 = np.delete(a, row, 0) check_qr(q1, r1, a1, self.rtol, self.atol, False) def test_Mx1_economic_p_row(self): a, q, r = self.generate('Mx1', 'economic') for ndel in range(2, 6): for row in range(a.shape[0]-ndel): q1, r1 = qr_delete(q, r, row, ndel, overwrite_qr=False) a1 = np.delete(a, slice(row, row+ndel), 0) check_qr(q1, r1, a1, self.rtol, self.atol, False) def test_delete_last_1_row(self): # full and eco are the same for 1xN a, q, r = self.generate('1xN') q1, r1 = qr_delete(q, r, 0, 1, 'row') assert_equal(q1, np.ndarray(shape=(0, 0), dtype=q.dtype)) assert_equal(r1, np.ndarray(shape=(0, r.shape[1]), dtype=r.dtype)) def test_delete_last_p_row(self): a, q, r = self.generate('tall', 'full') q1, r1 = qr_delete(q, r, 0, a.shape[0], 'row') assert_equal(q1, np.ndarray(shape=(0, 0), dtype=q.dtype)) assert_equal(r1, np.ndarray(shape=(0, r.shape[1]), dtype=r.dtype)) a, q, r = self.generate('tall', 'economic') q1, r1 = qr_delete(q, r, 0, a.shape[0], 'row') assert_equal(q1, np.ndarray(shape=(0, 0), dtype=q.dtype)) assert_equal(r1, np.ndarray(shape=(0, r.shape[1]), dtype=r.dtype)) def test_delete_last_1_col(self): a, q, r = self.generate('Mx1', 'economic') q1, r1 = qr_delete(q, r, 0, 1, 'col') assert_equal(q1, np.ndarray(shape=(q.shape[0], 0), dtype=q.dtype)) assert_equal(r1, np.ndarray(shape=(0, 0), dtype=r.dtype)) a, q, r = self.generate('Mx1', 'full') q1, r1 = qr_delete(q, r, 0, 1, 'col') assert_unitary(q1) assert_(q1.dtype == 
q.dtype) assert_(q1.shape == q.shape) assert_equal(r1, np.ndarray(shape=(r.shape[0], 0), dtype=r.dtype)) def test_delete_last_p_col(self): a, q, r = self.generate('tall', 'full') q1, r1 = qr_delete(q, r, 0, a.shape[1], 'col') assert_unitary(q1) assert_(q1.dtype == q.dtype) assert_(q1.shape == q.shape) assert_equal(r1, np.ndarray(shape=(r.shape[0], 0), dtype=r.dtype)) a, q, r = self.generate('tall', 'economic') q1, r1 = qr_delete(q, r, 0, a.shape[1], 'col') assert_equal(q1, np.ndarray(shape=(q.shape[0], 0), dtype=q.dtype)) assert_equal(r1, np.ndarray(shape=(0, 0), dtype=r.dtype)) def test_delete_1x1_row_col(self): a, q, r = self.generate('1x1') q1, r1 = qr_delete(q, r, 0, 1, 'row') assert_equal(q1, np.ndarray(shape=(0, 0), dtype=q.dtype)) assert_equal(r1, np.ndarray(shape=(0, r.shape[1]), dtype=r.dtype)) a, q, r = self.generate('1x1') q1, r1 = qr_delete(q, r, 0, 1, 'col') assert_unitary(q1) assert_(q1.dtype == q.dtype) assert_(q1.shape == q.shape) assert_equal(r1, np.ndarray(shape=(r.shape[0], 0), dtype=r.dtype)) # all full qr, row deletes and single column deletes should be able to # handle any non negative strides. (only row and column vector # operations are used.) p column delete require fortran ordered # Q and R and will make a copy as necessary. Economic qr row deletes # requre a contigous q. def base_non_simple_strides(self, adjust_strides, ks, p, which, overwriteable): if which == 'row': qind = (slice(p,None), slice(p,None)) rind = (slice(p,None), slice(None)) else: qind = (slice(None), slice(None)) rind = (slice(None), slice(None,-p)) for type, k in itertools.product(['sqr', 'tall', 'fat'], ks): a, q0, r0, = self.generate(type) qs, rs = adjust_strides((q0, r0)) if p == 1: a1 = np.delete(a, k, 0 if which == 'row' else 1) else: s = slice(k,k+p) if k < 0: s = slice(k, k + p + (a.shape[0] if which == 'row' else a.shape[1])) a1 = np.delete(a, s, 0 if which == 'row' else 1) # for each variable, q, r we try with it strided and # overwrite=False. 
Then we try with overwrite=True, and make # sure that q and r are still overwritten. q = q0.copy('F') r = r0.copy('F') q1, r1 = qr_delete(qs, r, k, p, which, False) check_qr(q1, r1, a1, self.rtol, self.atol) q1o, r1o = qr_delete(qs, r, k, p, which, True) check_qr(q1o, r1o, a1, self.rtol, self.atol) if overwriteable: assert_allclose(q1o, qs[qind], rtol=self.rtol, atol=self.atol) assert_allclose(r1o, r[rind], rtol=self.rtol, atol=self.atol) q = q0.copy('F') r = r0.copy('F') q2, r2 = qr_delete(q, rs, k, p, which, False) check_qr(q2, r2, a1, self.rtol, self.atol) q2o, r2o = qr_delete(q, rs, k, p, which, True) check_qr(q2o, r2o, a1, self.rtol, self.atol) if overwriteable: assert_allclose(q2o, q[qind], rtol=self.rtol, atol=self.atol) assert_allclose(r2o, rs[rind], rtol=self.rtol, atol=self.atol) q = q0.copy('F') r = r0.copy('F') # since some of these were consumed above qs, rs = adjust_strides((q, r)) q3, r3 = qr_delete(qs, rs, k, p, which, False) check_qr(q3, r3, a1, self.rtol, self.atol) q3o, r3o = qr_delete(qs, rs, k, p, which, True) check_qr(q3o, r3o, a1, self.rtol, self.atol) if overwriteable: assert_allclose(q2o, qs[qind], rtol=self.rtol, atol=self.atol) assert_allclose(r3o, rs[rind], rtol=self.rtol, atol=self.atol) def test_non_unit_strides_1_row(self): self.base_non_simple_strides(make_strided, [0], 1, 'row', True) def test_non_unit_strides_p_row(self): self.base_non_simple_strides(make_strided, [0], 3, 'row', True) def test_non_unit_strides_1_col(self): self.base_non_simple_strides(make_strided, [0], 1, 'col', True) def test_non_unit_strides_p_col(self): self.base_non_simple_strides(make_strided, [0], 3, 'col', False) def test_neg_strides_1_row(self): self.base_non_simple_strides(negate_strides, [0], 1, 'row', False) def test_neg_strides_p_row(self): self.base_non_simple_strides(negate_strides, [0], 3, 'row', False) def test_neg_strides_1_col(self): self.base_non_simple_strides(negate_strides, [0], 1, 'col', False) def test_neg_strides_p_col(self): 
self.base_non_simple_strides(negate_strides, [0], 3, 'col', False) def test_non_itemize_strides_1_row(self): self.base_non_simple_strides(nonitemsize_strides, [0], 1, 'row', False) def test_non_itemize_strides_p_row(self): self.base_non_simple_strides(nonitemsize_strides, [0], 3, 'row', False) def test_non_itemize_strides_1_col(self): self.base_non_simple_strides(nonitemsize_strides, [0], 1, 'col', False) def test_non_itemize_strides_p_col(self): self.base_non_simple_strides(nonitemsize_strides, [0], 3, 'col', False) def test_non_native_byte_order_1_row(self): self.base_non_simple_strides(make_nonnative, [0], 1, 'row', False) def test_non_native_byte_order_p_row(self): self.base_non_simple_strides(make_nonnative, [0], 3, 'row', False) def test_non_native_byte_order_1_col(self): self.base_non_simple_strides(make_nonnative, [0], 1, 'col', False) def test_non_native_byte_order_p_col(self): self.base_non_simple_strides(make_nonnative, [0], 3, 'col', False) def test_neg_k(self): a, q, r = self.generate('sqr') for k, p, w in itertools.product([-3, -7], [1, 3], ['row', 'col']): q1, r1 = qr_delete(q, r, k, p, w, overwrite_qr=False) if w == 'row': a1 = np.delete(a, slice(k+a.shape[0], k+p+a.shape[0]), 0) else: a1 = np.delete(a, slice(k+a.shape[0], k+p+a.shape[1]), 1) check_qr(q1, r1, a1, self.rtol, self.atol) def base_overwrite_qr(self, which, p, test_C, test_F, mode='full'): assert_sqr = True if mode == 'full' else False if which == 'row': qind = (slice(p,None), slice(p,None)) rind = (slice(p,None), slice(None)) else: qind = (slice(None), slice(None)) rind = (slice(None), slice(None,-p)) a, q0, r0 = self.generate('sqr', mode) if p == 1: a1 = np.delete(a, 3, 0 if which == 'row' else 1) else: a1 = np.delete(a, slice(3, 3+p), 0 if which == 'row' else 1) # don't overwrite q = q0.copy('F') r = r0.copy('F') q1, r1 = qr_delete(q, r, 3, p, which, False) check_qr(q1, r1, a1, self.rtol, self.atol, assert_sqr) check_qr(q, r, a, self.rtol, self.atol, assert_sqr) if test_F: q = 
q0.copy('F') r = r0.copy('F') q2, r2 = qr_delete(q, r, 3, p, which, True) check_qr(q2, r2, a1, self.rtol, self.atol, assert_sqr) # verify the overwriting assert_allclose(q2, q[qind], rtol=self.rtol, atol=self.atol) assert_allclose(r2, r[rind], rtol=self.rtol, atol=self.atol) if test_C: q = q0.copy('C') r = r0.copy('C') q3, r3 = qr_delete(q, r, 3, p, which, True) check_qr(q3, r3, a1, self.rtol, self.atol, assert_sqr) assert_allclose(q3, q[qind], rtol=self.rtol, atol=self.atol) assert_allclose(r3, r[rind], rtol=self.rtol, atol=self.atol) def test_overwrite_qr_1_row(self): # any positively strided q and r. self.base_overwrite_qr('row', 1, True, True) def test_overwrite_economic_qr_1_row(self): # Any contiguous q and positively strided r. self.base_overwrite_qr('row', 1, True, True, 'economic') def test_overwrite_qr_1_col(self): # any positively strided q and r. # full and eco share code paths self.base_overwrite_qr('col', 1, True, True) def test_overwrite_qr_p_row(self): # any positively strided q and r. 
self.base_overwrite_qr('row', 3, True, True) def test_overwrite_economic_qr_p_row(self): # any contiguous q and positively strided r self.base_overwrite_qr('row', 3, True, True, 'economic') def test_overwrite_qr_p_col(self): # only F orderd q and r can be overwritten for cols # full and eco share code paths self.base_overwrite_qr('col', 3, False, True) def test_bad_which(self): a, q, r = self.generate('sqr') assert_raises(ValueError, qr_delete, q, r, 0, which='foo') def test_bad_k(self): a, q, r = self.generate('tall') assert_raises(ValueError, qr_delete, q, r, q.shape[0], 1) assert_raises(ValueError, qr_delete, q, r, -q.shape[0]-1, 1) assert_raises(ValueError, qr_delete, q, r, r.shape[0], 1, 'col') assert_raises(ValueError, qr_delete, q, r, -r.shape[0]-1, 1, 'col') def test_bad_p(self): a, q, r = self.generate('tall') # p must be positive assert_raises(ValueError, qr_delete, q, r, 0, -1) assert_raises(ValueError, qr_delete, q, r, 0, -1, 'col') # and nonzero assert_raises(ValueError, qr_delete, q, r, 0, 0) assert_raises(ValueError, qr_delete, q, r, 0, 0, 'col') # must have at least k+p rows or cols, depending. 
assert_raises(ValueError, qr_delete, q, r, 3, q.shape[0]-2) assert_raises(ValueError, qr_delete, q, r, 3, r.shape[1]-2, 'col') def test_empty_q(self): a, q, r = self.generate('tall') # same code path for 'row' and 'col' assert_raises(ValueError, qr_delete, np.array([]), r, 0, 1) def test_empty_r(self): a, q, r = self.generate('tall') # same code path for 'row' and 'col' assert_raises(ValueError, qr_delete, q, np.array([]), 0, 1) def test_mismatched_q_and_r(self): a, q, r = self.generate('tall') r = r[1:] assert_raises(ValueError, qr_delete, q, r, 0, 1) def test_unsupported_dtypes(self): dts = ['int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32', 'uint64', 'float16', 'longdouble', 'longcomplex', 'bool'] a, q0, r0 = self.generate('tall') for dtype in dts: q = q0.real.astype(dtype) r = r0.real.astype(dtype) assert_raises(ValueError, qr_delete, q, r0, 0, 1, 'row') assert_raises(ValueError, qr_delete, q, r0, 0, 2, 'row') assert_raises(ValueError, qr_delete, q, r0, 0, 1, 'col') assert_raises(ValueError, qr_delete, q, r0, 0, 2, 'col') assert_raises(ValueError, qr_delete, q0, r, 0, 1, 'row') assert_raises(ValueError, qr_delete, q0, r, 0, 2, 'row') assert_raises(ValueError, qr_delete, q0, r, 0, 1, 'col') assert_raises(ValueError, qr_delete, q0, r, 0, 2, 'col') def test_check_finite(self): a0, q0, r0 = self.generate('tall') q = q0.copy('F') q[1,1] = np.nan assert_raises(ValueError, qr_delete, q, r0, 0, 1, 'row') assert_raises(ValueError, qr_delete, q, r0, 0, 3, 'row') assert_raises(ValueError, qr_delete, q, r0, 0, 1, 'col') assert_raises(ValueError, qr_delete, q, r0, 0, 3, 'col') r = r0.copy('F') r[1,1] = np.nan assert_raises(ValueError, qr_delete, q0, r, 0, 1, 'row') assert_raises(ValueError, qr_delete, q0, r, 0, 3, 'row') assert_raises(ValueError, qr_delete, q0, r, 0, 1, 'col') assert_raises(ValueError, qr_delete, q0, r, 0, 3, 'col') def test_qr_scalar(self): a, q, r = self.generate('1x1') assert_raises(ValueError, qr_delete, q[0, 0], r, 0, 1, 'row') 
assert_raises(ValueError, qr_delete, q, r[0, 0], 0, 1, 'row') assert_raises(ValueError, qr_delete, q[0, 0], r, 0, 1, 'col') assert_raises(ValueError, qr_delete, q, r[0, 0], 0, 1, 'col') class TestQRdelete_f(BaseQRdelete): dtype = np.dtype('f') class TestQRdelete_F(BaseQRdelete): dtype = np.dtype('F') class TestQRdelete_d(BaseQRdelete): dtype = np.dtype('d') class TestQRdelete_D(BaseQRdelete): dtype = np.dtype('D') class BaseQRinsert(BaseQRdeltas): def generate(self, type, mode='full', which='row', p=1): a, q, r = super(BaseQRinsert, self).generate(type, mode) assert_(p > 0) # super call set the seed... if which == 'row': if p == 1: u = np.random.random(a.shape[1]) else: u = np.random.random((p, a.shape[1])) elif which == 'col': if p == 1: u = np.random.random(a.shape[0]) else: u = np.random.random((a.shape[0], p)) else: ValueError('which should be either "row" or "col"') if np.iscomplexobj(self.dtype.type(1)): b = np.random.random(u.shape) u = u + 1j * b u = u.astype(self.dtype) return a, q, r, u def test_sqr_1_row(self): a, q, r, u = self.generate('sqr', which='row') for row in range(r.shape[0] + 1): q1, r1 = qr_insert(q, r, u, row) a1 = np.insert(a, row, u, 0) check_qr(q1, r1, a1, self.rtol, self.atol) def test_sqr_p_row(self): # sqr + rows --> fat always a, q, r, u = self.generate('sqr', which='row', p=3) for row in range(r.shape[0] + 1): q1, r1 = qr_insert(q, r, u, row) a1 = np.insert(a, row*np.ones(3, np.intp), u, 0) check_qr(q1, r1, a1, self.rtol, self.atol) def test_sqr_1_col(self): a, q, r, u = self.generate('sqr', which='col') for col in range(r.shape[1] + 1): q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False) a1 = np.insert(a, col, u, 1) check_qr(q1, r1, a1, self.rtol, self.atol) def test_sqr_p_col(self): # sqr + cols --> fat always a, q, r, u = self.generate('sqr', which='col', p=3) for col in range(r.shape[1] + 1): q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False) a1 = np.insert(a, col*np.ones(3, np.intp), u, 1) check_qr(q1, r1, a1, 
self.rtol, self.atol) def test_tall_1_row(self): a, q, r, u = self.generate('tall', which='row') for row in range(r.shape[0] + 1): q1, r1 = qr_insert(q, r, u, row) a1 = np.insert(a, row, u, 0) check_qr(q1, r1, a1, self.rtol, self.atol) def test_tall_p_row(self): # tall + rows --> tall always a, q, r, u = self.generate('tall', which='row', p=3) for row in range(r.shape[0] + 1): q1, r1 = qr_insert(q, r, u, row) a1 = np.insert(a, row*np.ones(3, np.intp), u, 0) check_qr(q1, r1, a1, self.rtol, self.atol) def test_tall_1_col(self): a, q, r, u = self.generate('tall', which='col') for col in range(r.shape[1] + 1): q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False) a1 = np.insert(a, col, u, 1) check_qr(q1, r1, a1, self.rtol, self.atol) # for column adds to tall matrices there are three cases to test # tall + pcol --> tall # tall + pcol --> sqr # tall + pcol --> fat def base_tall_p_col_xxx(self, p): a, q, r, u = self.generate('tall', which='col', p=p) for col in range(r.shape[1] + 1): q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False) a1 = np.insert(a, col*np.ones(p, np.intp), u, 1) check_qr(q1, r1, a1, self.rtol, self.atol) def test_tall_p_col_tall(self): # 12x7 + 12x3 = 12x10 --> stays tall self.base_tall_p_col_xxx(3) def test_tall_p_col_sqr(self): # 12x7 + 12x5 = 12x12 --> becomes sqr self.base_tall_p_col_xxx(5) def test_tall_p_col_fat(self): # 12x7 + 12x7 = 12x14 --> becomes fat self.base_tall_p_col_xxx(7) def test_fat_1_row(self): a, q, r, u = self.generate('fat', which='row') for row in range(r.shape[0] + 1): q1, r1 = qr_insert(q, r, u, row) a1 = np.insert(a, row, u, 0) check_qr(q1, r1, a1, self.rtol, self.atol) # for row adds to fat matrices there are three cases to test # fat + prow --> fat # fat + prow --> sqr # fat + prow --> tall def base_fat_p_row_xxx(self, p): a, q, r, u = self.generate('fat', which='row', p=p) for row in range(r.shape[0] + 1): q1, r1 = qr_insert(q, r, u, row) a1 = np.insert(a, row*np.ones(p, np.intp), u, 0) check_qr(q1, r1, 
a1, self.rtol, self.atol) def test_fat_p_row_fat(self): # 7x12 + 3x12 = 10x12 --> stays fat self.base_fat_p_row_xxx(3) def test_fat_p_row_sqr(self): # 7x12 + 5x12 = 12x12 --> becomes sqr self.base_fat_p_row_xxx(5) def test_fat_p_row_tall(self): # 7x12 + 7x12 = 14x12 --> becomes tall self.base_fat_p_row_xxx(7) def test_fat_1_col(self): a, q, r, u = self.generate('fat', which='col') for col in range(r.shape[1] + 1): q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False) a1 = np.insert(a, col, u, 1) check_qr(q1, r1, a1, self.rtol, self.atol) def test_fat_p_col(self): # fat + cols --> fat always a, q, r, u = self.generate('fat', which='col', p=3) for col in range(r.shape[1] + 1): q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False) a1 = np.insert(a, col*np.ones(3, np.intp), u, 1) check_qr(q1, r1, a1, self.rtol, self.atol) def test_economic_1_row(self): a, q, r, u = self.generate('tall', 'economic', 'row') for row in range(r.shape[0] + 1): q1, r1 = qr_insert(q, r, u, row, overwrite_qru=False) a1 = np.insert(a, row, u, 0) check_qr(q1, r1, a1, self.rtol, self.atol, False) def test_economic_p_row(self): # tall + rows --> tall always a, q, r, u = self.generate('tall', 'economic', 'row', 3) for row in range(r.shape[0] + 1): q1, r1 = qr_insert(q, r, u, row, overwrite_qru=False) a1 = np.insert(a, row*np.ones(3, np.intp), u, 0) check_qr(q1, r1, a1, self.rtol, self.atol, False) def test_economic_1_col(self): a, q, r, u = self.generate('tall', 'economic', which='col') for col in range(r.shape[1] + 1): q1, r1 = qr_insert(q, r, u.copy(), col, 'col', overwrite_qru=False) a1 = np.insert(a, col, u, 1) check_qr(q1, r1, a1, self.rtol, self.atol, False) def test_economic_1_col_bad_update(self): # When the column to be added lies in the span of Q, the update is # not meaningful. This is detected, and a LinAlgError is issued. 
q = np.eye(5, 3, dtype=self.dtype) r = np.eye(3, dtype=self.dtype) u = np.array([1, 0, 0, 0, 0], self.dtype) assert_raises(linalg.LinAlgError, qr_insert, q, r, u, 0, 'col') # for column adds to economic matrices there are three cases to test # eco + pcol --> eco # eco + pcol --> sqr # eco + pcol --> fat def base_economic_p_col_xxx(self, p): a, q, r, u = self.generate('tall', 'economic', which='col', p=p) for col in range(r.shape[1] + 1): q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False) a1 = np.insert(a, col*np.ones(p, np.intp), u, 1) check_qr(q1, r1, a1, self.rtol, self.atol, False) def test_economic_p_col_eco(self): # 12x7 + 12x3 = 12x10 --> stays eco self.base_economic_p_col_xxx(3) def test_economic_p_col_sqr(self): # 12x7 + 12x5 = 12x12 --> becomes sqr self.base_economic_p_col_xxx(5) def test_economic_p_col_fat(self): # 12x7 + 12x7 = 12x14 --> becomes fat self.base_economic_p_col_xxx(7) def test_Mx1_1_row(self): a, q, r, u = self.generate('Mx1', which='row') for row in range(r.shape[0] + 1): q1, r1 = qr_insert(q, r, u, row) a1 = np.insert(a, row, u, 0) check_qr(q1, r1, a1, self.rtol, self.atol) def test_Mx1_p_row(self): a, q, r, u = self.generate('Mx1', which='row', p=3) for row in range(r.shape[0] + 1): q1, r1 = qr_insert(q, r, u, row) a1 = np.insert(a, row*np.ones(3, np.intp), u, 0) check_qr(q1, r1, a1, self.rtol, self.atol) def test_Mx1_1_col(self): a, q, r, u = self.generate('Mx1', which='col') for col in range(r.shape[1] + 1): q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False) a1 = np.insert(a, col, u, 1) check_qr(q1, r1, a1, self.rtol, self.atol) def test_Mx1_p_col(self): a, q, r, u = self.generate('Mx1', which='col', p=3) for col in range(r.shape[1] + 1): q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False) a1 = np.insert(a, col*np.ones(3, np.intp), u, 1) check_qr(q1, r1, a1, self.rtol, self.atol) def test_Mx1_economic_1_row(self): a, q, r, u = self.generate('Mx1', 'economic', 'row') for row in range(r.shape[0] + 1): q1, r1 = 
qr_insert(q, r, u, row) a1 = np.insert(a, row, u, 0) check_qr(q1, r1, a1, self.rtol, self.atol, False) def test_Mx1_economic_p_row(self): a, q, r, u = self.generate('Mx1', 'economic', 'row', 3) for row in range(r.shape[0] + 1): q1, r1 = qr_insert(q, r, u, row) a1 = np.insert(a, row*np.ones(3, np.intp), u, 0) check_qr(q1, r1, a1, self.rtol, self.atol, False) def test_Mx1_economic_1_col(self): a, q, r, u = self.generate('Mx1', 'economic', 'col') for col in range(r.shape[1] + 1): q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False) a1 = np.insert(a, col, u, 1) check_qr(q1, r1, a1, self.rtol, self.atol, False) def test_Mx1_economic_p_col(self): a, q, r, u = self.generate('Mx1', 'economic', 'col', 3) for col in range(r.shape[1] + 1): q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False) a1 = np.insert(a, col*np.ones(3, np.intp), u, 1) check_qr(q1, r1, a1, self.rtol, self.atol, False) def test_1xN_1_row(self): a, q, r, u = self.generate('1xN', which='row') for row in range(r.shape[0] + 1): q1, r1 = qr_insert(q, r, u, row) a1 = np.insert(a, row, u, 0) check_qr(q1, r1, a1, self.rtol, self.atol) def test_1xN_p_row(self): a, q, r, u = self.generate('1xN', which='row', p=3) for row in range(r.shape[0] + 1): q1, r1 = qr_insert(q, r, u, row) a1 = np.insert(a, row*np.ones(3, np.intp), u, 0) check_qr(q1, r1, a1, self.rtol, self.atol) def test_1xN_1_col(self): a, q, r, u = self.generate('1xN', which='col') for col in range(r.shape[1] + 1): q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False) a1 = np.insert(a, col, u, 1) check_qr(q1, r1, a1, self.rtol, self.atol) def test_1xN_p_col(self): a, q, r, u = self.generate('1xN', which='col', p=3) for col in range(r.shape[1] + 1): q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False) a1 = np.insert(a, col*np.ones(3, np.intp), u, 1) check_qr(q1, r1, a1, self.rtol, self.atol) def test_1x1_1_row(self): a, q, r, u = self.generate('1x1', which='row') for row in range(r.shape[0] + 1): q1, r1 = qr_insert(q, r, u, 
row) a1 = np.insert(a, row, u, 0) check_qr(q1, r1, a1, self.rtol, self.atol) def test_1x1_p_row(self): a, q, r, u = self.generate('1x1', which='row', p=3) for row in range(r.shape[0] + 1): q1, r1 = qr_insert(q, r, u, row) a1 = np.insert(a, row*np.ones(3, np.intp), u, 0) check_qr(q1, r1, a1, self.rtol, self.atol) def test_1x1_1_col(self): a, q, r, u = self.generate('1x1', which='col') for col in range(r.shape[1] + 1): q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False) a1 = np.insert(a, col, u, 1) check_qr(q1, r1, a1, self.rtol, self.atol) def test_1x1_p_col(self): a, q, r, u = self.generate('1x1', which='col', p=3) for col in range(r.shape[1] + 1): q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False) a1 = np.insert(a, col*np.ones(3, np.intp), u, 1) check_qr(q1, r1, a1, self.rtol, self.atol) def test_1x1_1_scalar(self): a, q, r, u = self.generate('1x1', which='row') assert_raises(ValueError, qr_insert, q[0, 0], r, u, 0, 'row') assert_raises(ValueError, qr_insert, q, r[0, 0], u, 0, 'row') assert_raises(ValueError, qr_insert, q, r, u[0], 0, 'row') assert_raises(ValueError, qr_insert, q[0, 0], r, u, 0, 'col') assert_raises(ValueError, qr_insert, q, r[0, 0], u, 0, 'col') assert_raises(ValueError, qr_insert, q, r, u[0], 0, 'col') def base_non_simple_strides(self, adjust_strides, k, p, which): for type in ['sqr', 'tall', 'fat']: a, q0, r0, u0 = self.generate(type, which=which, p=p) qs, rs, us = adjust_strides((q0, r0, u0)) if p == 1: ai = np.insert(a, k, u0, 0 if which == 'row' else 1) else: ai = np.insert(a, k*np.ones(p, np.intp), u0 if which == 'row' else u0, 0 if which == 'row' else 1) # for each variable, q, r, u we try with it strided and # overwrite=False. Then we try with overwrite=True. Nothing # is checked to see if it can be overwritten, since only # F ordered Q can be overwritten when adding columns. 
q = q0.copy('F') r = r0.copy('F') u = u0.copy('F') q1, r1 = qr_insert(qs, r, u, k, which, overwrite_qru=False) check_qr(q1, r1, ai, self.rtol, self.atol) q1o, r1o = qr_insert(qs, r, u, k, which, overwrite_qru=True) check_qr(q1o, r1o, ai, self.rtol, self.atol) q = q0.copy('F') r = r0.copy('F') u = u0.copy('F') q2, r2 = qr_insert(q, rs, u, k, which, overwrite_qru=False) check_qr(q2, r2, ai, self.rtol, self.atol) q2o, r2o = qr_insert(q, rs, u, k, which, overwrite_qru=True) check_qr(q2o, r2o, ai, self.rtol, self.atol) q = q0.copy('F') r = r0.copy('F') u = u0.copy('F') q3, r3 = qr_insert(q, r, us, k, which, overwrite_qru=False) check_qr(q3, r3, ai, self.rtol, self.atol) q3o, r3o = qr_insert(q, r, us, k, which, overwrite_qru=True) check_qr(q3o, r3o, ai, self.rtol, self.atol) q = q0.copy('F') r = r0.copy('F') u = u0.copy('F') # since some of these were consumed above qs, rs, us = adjust_strides((q, r, u)) q5, r5 = qr_insert(qs, rs, us, k, which, overwrite_qru=False) check_qr(q5, r5, ai, self.rtol, self.atol) q5o, r5o = qr_insert(qs, rs, us, k, which, overwrite_qru=True) check_qr(q5o, r5o, ai, self.rtol, self.atol) def test_non_unit_strides_1_row(self): self.base_non_simple_strides(make_strided, 0, 1, 'row') def test_non_unit_strides_p_row(self): self.base_non_simple_strides(make_strided, 0, 3, 'row') def test_non_unit_strides_1_col(self): self.base_non_simple_strides(make_strided, 0, 1, 'col') def test_non_unit_strides_p_col(self): self.base_non_simple_strides(make_strided, 0, 3, 'col') def test_neg_strides_1_row(self): self.base_non_simple_strides(negate_strides, 0, 1, 'row') def test_neg_strides_p_row(self): self.base_non_simple_strides(negate_strides, 0, 3, 'row') def test_neg_strides_1_col(self): self.base_non_simple_strides(negate_strides, 0, 1, 'col') def test_neg_strides_p_col(self): self.base_non_simple_strides(negate_strides, 0, 3, 'col') def test_non_itemsize_strides_1_row(self): self.base_non_simple_strides(nonitemsize_strides, 0, 1, 'row') def 
test_non_itemsize_strides_p_row(self): self.base_non_simple_strides(nonitemsize_strides, 0, 3, 'row') def test_non_itemsize_strides_1_col(self): self.base_non_simple_strides(nonitemsize_strides, 0, 1, 'col') def test_non_itemsize_strides_p_col(self): self.base_non_simple_strides(nonitemsize_strides, 0, 3, 'col') def test_non_native_byte_order_1_row(self): self.base_non_simple_strides(make_nonnative, 0, 1, 'row') def test_non_native_byte_order_p_row(self): self.base_non_simple_strides(make_nonnative, 0, 3, 'row') def test_non_native_byte_order_1_col(self): self.base_non_simple_strides(make_nonnative, 0, 1, 'col') def test_non_native_byte_order_p_col(self): self.base_non_simple_strides(make_nonnative, 0, 3, 'col') def test_overwrite_qu_rank_1(self): # when inserting rows, the size of both Q and R change, so only # column inserts can overwrite q. Only complex column inserts # with C ordered Q overwrite u. Any contiguous Q is overwritten # when inserting 1 column a, q0, r, u, = self.generate('sqr', which='col', p=1) q = q0.copy('C') u0 = u.copy() # don't overwrite q1, r1 = qr_insert(q, r, u, 0, 'col', overwrite_qru=False) a1 = np.insert(a, 0, u0, 1) check_qr(q1, r1, a1, self.rtol, self.atol) check_qr(q, r, a, self.rtol, self.atol) # try overwriting q2, r2 = qr_insert(q, r, u, 0, 'col', overwrite_qru=True) check_qr(q2, r2, a1, self.rtol, self.atol) # verify the overwriting assert_allclose(q2, q, rtol=self.rtol, atol=self.atol) assert_allclose(u, u0.conj(), self.rtol, self.atol) # now try with a fortran ordered Q qF = q0.copy('F') u1 = u0.copy() q3, r3 = qr_insert(qF, r, u1, 0, 'col', overwrite_qru=False) check_qr(q3, r3, a1, self.rtol, self.atol) check_qr(qF, r, a, self.rtol, self.atol) # try overwriting q4, r4 = qr_insert(qF, r, u1, 0, 'col', overwrite_qru=True) check_qr(q4, r4, a1, self.rtol, self.atol) assert_allclose(q4, qF, rtol=self.rtol, atol=self.atol) def test_overwrite_qu_rank_p(self): # when inserting rows, the size of both Q and R change, so only # column 
inserts can potentially overwrite Q. In practice, only # F ordered Q are overwritten with a rank p update. a, q0, r, u, = self.generate('sqr', which='col', p=3) q = q0.copy('F') a1 = np.insert(a, np.zeros(3, np.intp), u, 1) # don't overwrite q1, r1 = qr_insert(q, r, u, 0, 'col', overwrite_qru=False) check_qr(q1, r1, a1, self.rtol, self.atol) check_qr(q, r, a, self.rtol, self.atol) # try overwriting q2, r2 = qr_insert(q, r, u, 0, 'col', overwrite_qru=True) check_qr(q2, r2, a1, self.rtol, self.atol) assert_allclose(q2, q, rtol=self.rtol, atol=self.atol) def test_empty_inputs(self): a, q, r, u = self.generate('sqr', which='row') assert_raises(ValueError, qr_insert, np.array([]), r, u, 0, 'row') assert_raises(ValueError, qr_insert, q, np.array([]), u, 0, 'row') assert_raises(ValueError, qr_insert, q, r, np.array([]), 0, 'row') assert_raises(ValueError, qr_insert, np.array([]), r, u, 0, 'col') assert_raises(ValueError, qr_insert, q, np.array([]), u, 0, 'col') assert_raises(ValueError, qr_insert, q, r, np.array([]), 0, 'col') def test_mismatched_shapes(self): a, q, r, u = self.generate('tall', which='row') assert_raises(ValueError, qr_insert, q, r[1:], u, 0, 'row') assert_raises(ValueError, qr_insert, q[:-2], r, u, 0, 'row') assert_raises(ValueError, qr_insert, q, r, u[1:], 0, 'row') assert_raises(ValueError, qr_insert, q, r[1:], u, 0, 'col') assert_raises(ValueError, qr_insert, q[:-2], r, u, 0, 'col') assert_raises(ValueError, qr_insert, q, r, u[1:], 0, 'col') def test_unsupported_dtypes(self): dts = ['int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32', 'uint64', 'float16', 'longdouble', 'longcomplex', 'bool'] a, q0, r0, u0 = self.generate('sqr', which='row') for dtype in dts: q = q0.real.astype(dtype) r = r0.real.astype(dtype) u = u0.real.astype(dtype) assert_raises(ValueError, qr_insert, q, r0, u0, 0, 'row') assert_raises(ValueError, qr_insert, q, r0, u0, 0, 'col') assert_raises(ValueError, qr_insert, q0, r, u0, 0, 'row') assert_raises(ValueError, 
qr_insert, q0, r, u0, 0, 'col') assert_raises(ValueError, qr_insert, q0, r0, u, 0, 'row') assert_raises(ValueError, qr_insert, q0, r0, u, 0, 'col') def test_check_finite(self): a0, q0, r0, u0 = self.generate('sqr', which='row', p=3) q = q0.copy('F') q[1,1] = np.nan assert_raises(ValueError, qr_insert, q, r0, u0[:,0], 0, 'row') assert_raises(ValueError, qr_insert, q, r0, u0, 0, 'row') assert_raises(ValueError, qr_insert, q, r0, u0[:,0], 0, 'col') assert_raises(ValueError, qr_insert, q, r0, u0, 0, 'col') r = r0.copy('F') r[1,1] = np.nan assert_raises(ValueError, qr_insert, q0, r, u0[:,0], 0, 'row') assert_raises(ValueError, qr_insert, q0, r, u0, 0, 'row') assert_raises(ValueError, qr_insert, q0, r, u0[:,0], 0, 'col') assert_raises(ValueError, qr_insert, q0, r, u0, 0, 'col') u = u0.copy('F') u[0,0] = np.nan assert_raises(ValueError, qr_insert, q0, r0, u[:,0], 0, 'row') assert_raises(ValueError, qr_insert, q0, r0, u, 0, 'row') assert_raises(ValueError, qr_insert, q0, r0, u[:,0], 0, 'col') assert_raises(ValueError, qr_insert, q0, r0, u, 0, 'col') class TestQRinsert_f(BaseQRinsert): dtype = np.dtype('f') class TestQRinsert_F(BaseQRinsert): dtype = np.dtype('F') class TestQRinsert_d(BaseQRinsert): dtype = np.dtype('d') class TestQRinsert_D(BaseQRinsert): dtype = np.dtype('D') class BaseQRupdate(BaseQRdeltas): def generate(self, type, mode='full', p=1): a, q, r = super(BaseQRupdate, self).generate(type, mode) # super call set the seed... 
if p == 1: u = np.random.random(q.shape[0]) v = np.random.random(r.shape[1]) else: u = np.random.random((q.shape[0], p)) v = np.random.random((r.shape[1], p)) if np.iscomplexobj(self.dtype.type(1)): b = np.random.random(u.shape) u = u + 1j * b c = np.random.random(v.shape) v = v + 1j * c u = u.astype(self.dtype) v = v.astype(self.dtype) return a, q, r, u, v def test_sqr_rank_1(self): a, q, r, u, v = self.generate('sqr') q1, r1 = qr_update(q, r, u, v, False) a1 = a + np.outer(u, v.conj()) check_qr(q1, r1, a1, self.rtol, self.atol) def test_sqr_rank_p(self): # test ndim = 2, rank 1 updates here too for p in [1, 2, 3, 5]: a, q, r, u, v = self.generate('sqr', p=p) if p == 1: u = u.reshape(u.size, 1) v = v.reshape(v.size, 1) q1, r1 = qr_update(q, r, u, v, False) a1 = a + np.dot(u, v.T.conj()) check_qr(q1, r1, a1, self.rtol, self.atol) def test_tall_rank_1(self): a, q, r, u, v = self.generate('tall') q1, r1 = qr_update(q, r, u, v, False) a1 = a + np.outer(u, v.conj()) check_qr(q1, r1, a1, self.rtol, self.atol) def test_tall_rank_p(self): for p in [1, 2, 3, 5]: a, q, r, u, v = self.generate('tall', p=p) if p == 1: u = u.reshape(u.size, 1) v = v.reshape(v.size, 1) q1, r1 = qr_update(q, r, u, v, False) a1 = a + np.dot(u, v.T.conj()) check_qr(q1, r1, a1, self.rtol, self.atol) def test_fat_rank_1(self): a, q, r, u, v = self.generate('fat') q1, r1 = qr_update(q, r, u, v, False) a1 = a + np.outer(u, v.conj()) check_qr(q1, r1, a1, self.rtol, self.atol) def test_fat_rank_p(self): for p in [1, 2, 3, 5]: a, q, r, u, v = self.generate('fat', p=p) if p == 1: u = u.reshape(u.size, 1) v = v.reshape(v.size, 1) q1, r1 = qr_update(q, r, u, v, False) a1 = a + np.dot(u, v.T.conj()) check_qr(q1, r1, a1, self.rtol, self.atol) def test_economic_rank_1(self): a, q, r, u, v = self.generate('tall', 'economic') q1, r1 = qr_update(q, r, u, v, False) a1 = a + np.outer(u, v.conj()) check_qr(q1, r1, a1, self.rtol, self.atol, False) def test_economic_rank_p(self): for p in [1, 2, 3, 5]: a, q, r, u, v = 
self.generate('tall', 'economic', p) if p == 1: u = u.reshape(u.size, 1) v = v.reshape(v.size, 1) q1, r1 = qr_update(q, r, u, v, False) a1 = a + np.dot(u, v.T.conj()) check_qr(q1, r1, a1, self.rtol, self.atol, False) def test_Mx1_rank_1(self): a, q, r, u, v = self.generate('Mx1') q1, r1 = qr_update(q, r, u, v, False) a1 = a + np.outer(u, v.conj()) check_qr(q1, r1, a1, self.rtol, self.atol) def test_Mx1_rank_p(self): # when M or N == 1, only a rank 1 update is allowed. This isn't # fundamental limitation, but the code does not support it. a, q, r, u, v = self.generate('Mx1', p=1) u = u.reshape(u.size, 1) v = v.reshape(v.size, 1) q1, r1 = qr_update(q, r, u, v, False) a1 = a + np.dot(u, v.T.conj()) check_qr(q1, r1, a1, self.rtol, self.atol) def test_Mx1_economic_rank_1(self): a, q, r, u, v = self.generate('Mx1', 'economic') q1, r1 = qr_update(q, r, u, v, False) a1 = a + np.outer(u, v.conj()) check_qr(q1, r1, a1, self.rtol, self.atol, False) def test_Mx1_economic_rank_p(self): # when M or N == 1, only a rank 1 update is allowed. This isn't # fundamental limitation, but the code does not support it. a, q, r, u, v = self.generate('Mx1', 'economic', p=1) u = u.reshape(u.size, 1) v = v.reshape(v.size, 1) q1, r1 = qr_update(q, r, u, v, False) a1 = a + np.dot(u, v.T.conj()) check_qr(q1, r1, a1, self.rtol, self.atol, False) def test_1xN_rank_1(self): a, q, r, u, v = self.generate('1xN') q1, r1 = qr_update(q, r, u, v, False) a1 = a + np.outer(u, v.conj()) check_qr(q1, r1, a1, self.rtol, self.atol) def test_1xN_rank_p(self): # when M or N == 1, only a rank 1 update is allowed. This isn't # fundamental limitation, but the code does not support it. 
a, q, r, u, v = self.generate('1xN', p=1) u = u.reshape(u.size, 1) v = v.reshape(v.size, 1) q1, r1 = qr_update(q, r, u, v, False) a1 = a + np.dot(u, v.T.conj()) check_qr(q1, r1, a1, self.rtol, self.atol) def test_1x1_rank_1(self): a, q, r, u, v = self.generate('1x1') q1, r1 = qr_update(q, r, u, v, False) a1 = a + np.outer(u, v.conj()) check_qr(q1, r1, a1, self.rtol, self.atol) def test_1x1_rank_p(self): # when M or N == 1, only a rank 1 update is allowed. This isn't # fundamental limitation, but the code does not support it. a, q, r, u, v = self.generate('1x1', p=1) u = u.reshape(u.size, 1) v = v.reshape(v.size, 1) q1, r1 = qr_update(q, r, u, v, False) a1 = a + np.dot(u, v.T.conj()) check_qr(q1, r1, a1, self.rtol, self.atol) def test_1x1_rank_1_scalar(self): a, q, r, u, v = self.generate('1x1') assert_raises(ValueError, qr_update, q[0, 0], r, u, v) assert_raises(ValueError, qr_update, q, r[0, 0], u, v) assert_raises(ValueError, qr_update, q, r, u[0], v) assert_raises(ValueError, qr_update, q, r, u, v[0]) def base_non_simple_strides(self, adjust_strides, mode, p, overwriteable): assert_sqr = False if mode == 'economic' else True for type in ['sqr', 'tall', 'fat']: a, q0, r0, u0, v0 = self.generate(type, mode, p) qs, rs, us, vs = adjust_strides((q0, r0, u0, v0)) if p == 1: aup = a + np.outer(u0, v0.conj()) else: aup = a + np.dot(u0, v0.T.conj()) # for each variable, q, r, u, v we try with it strided and # overwrite=False. Then we try with overwrite=True, and make # sure that if p == 1, r and v are still overwritten. # a strided q and u must always be copied. 
q = q0.copy('F') r = r0.copy('F') u = u0.copy('F') v = v0.copy('C') q1, r1 = qr_update(qs, r, u, v, False) check_qr(q1, r1, aup, self.rtol, self.atol, assert_sqr) q1o, r1o = qr_update(qs, r, u, v, True) check_qr(q1o, r1o, aup, self.rtol, self.atol, assert_sqr) if overwriteable: assert_allclose(r1o, r, rtol=self.rtol, atol=self.atol) assert_allclose(v, v0.conj(), rtol=self.rtol, atol=self.atol) q = q0.copy('F') r = r0.copy('F') u = u0.copy('F') v = v0.copy('C') q2, r2 = qr_update(q, rs, u, v, False) check_qr(q2, r2, aup, self.rtol, self.atol, assert_sqr) q2o, r2o = qr_update(q, rs, u, v, True) check_qr(q2o, r2o, aup, self.rtol, self.atol, assert_sqr) if overwriteable: assert_allclose(r2o, rs, rtol=self.rtol, atol=self.atol) assert_allclose(v, v0.conj(), rtol=self.rtol, atol=self.atol) q = q0.copy('F') r = r0.copy('F') u = u0.copy('F') v = v0.copy('C') q3, r3 = qr_update(q, r, us, v, False) check_qr(q3, r3, aup, self.rtol, self.atol, assert_sqr) q3o, r3o = qr_update(q, r, us, v, True) check_qr(q3o, r3o, aup, self.rtol, self.atol, assert_sqr) if overwriteable: assert_allclose(r3o, r, rtol=self.rtol, atol=self.atol) assert_allclose(v, v0.conj(), rtol=self.rtol, atol=self.atol) q = q0.copy('F') r = r0.copy('F') u = u0.copy('F') v = v0.copy('C') q4, r4 = qr_update(q, r, u, vs, False) check_qr(q4, r4, aup, self.rtol, self.atol, assert_sqr) q4o, r4o = qr_update(q, r, u, vs, True) check_qr(q4o, r4o, aup, self.rtol, self.atol, assert_sqr) if overwriteable: assert_allclose(r4o, r, rtol=self.rtol, atol=self.atol) assert_allclose(vs, v0.conj(), rtol=self.rtol, atol=self.atol) q = q0.copy('F') r = r0.copy('F') u = u0.copy('F') v = v0.copy('C') # since some of these were consumed above qs, rs, us, vs = adjust_strides((q, r, u, v)) q5, r5 = qr_update(qs, rs, us, vs, False) check_qr(q5, r5, aup, self.rtol, self.atol, assert_sqr) q5o, r5o = qr_update(qs, rs, us, vs, True) check_qr(q5o, r5o, aup, self.rtol, self.atol, assert_sqr) if overwriteable: assert_allclose(r5o, rs, 
rtol=self.rtol, atol=self.atol) assert_allclose(vs, v0.conj(), rtol=self.rtol, atol=self.atol) def test_non_unit_strides_rank_1(self): self.base_non_simple_strides(make_strided, 'full', 1, True) def test_non_unit_strides_economic_rank_1(self): self.base_non_simple_strides(make_strided, 'economic', 1, True) def test_non_unit_strides_rank_p(self): self.base_non_simple_strides(make_strided, 'full', 3, False) def test_non_unit_strides_economic_rank_p(self): self.base_non_simple_strides(make_strided, 'economic', 3, False) def test_neg_strides_rank_1(self): self.base_non_simple_strides(negate_strides, 'full', 1, False) def test_neg_strides_economic_rank_1(self): self.base_non_simple_strides(negate_strides, 'economic', 1, False) def test_neg_strides_rank_p(self): self.base_non_simple_strides(negate_strides, 'full', 3, False) def test_neg_strides_economic_rank_p(self): self.base_non_simple_strides(negate_strides, 'economic', 3, False) def test_non_itemsize_strides_rank_1(self): self.base_non_simple_strides(nonitemsize_strides, 'full', 1, False) def test_non_itemsize_strides_economic_rank_1(self): self.base_non_simple_strides(nonitemsize_strides, 'economic', 1, False) def test_non_itemsize_strides_rank_p(self): self.base_non_simple_strides(nonitemsize_strides, 'full', 3, False) def test_non_itemsize_strides_economic_rank_p(self): self.base_non_simple_strides(nonitemsize_strides, 'economic', 3, False) def test_non_native_byte_order_rank_1(self): self.base_non_simple_strides(make_nonnative, 'full', 1, False) def test_non_native_byte_order_economic_rank_1(self): self.base_non_simple_strides(make_nonnative, 'economic', 1, False) def test_non_native_byte_order_rank_p(self): self.base_non_simple_strides(make_nonnative, 'full', 3, False) def test_non_native_byte_order_economic_rank_p(self): self.base_non_simple_strides(make_nonnative, 'economic', 3, False) def test_overwrite_qruv_rank_1(self): # Any positive strided q, r, u, and v can be overwritten for a rank 1 # update, only 
checking C and F contiguous. a, q0, r0, u0, v0 = self.generate('sqr') a1 = a + np.outer(u0, v0.conj()) q = q0.copy('F') r = r0.copy('F') u = u0.copy('F') v = v0.copy('F') # don't overwrite q1, r1 = qr_update(q, r, u, v, False) check_qr(q1, r1, a1, self.rtol, self.atol) check_qr(q, r, a, self.rtol, self.atol) q2, r2 = qr_update(q, r, u, v, True) check_qr(q2, r2, a1, self.rtol, self.atol) # verify the overwriting, no good way to check u and v. assert_allclose(q2, q, rtol=self.rtol, atol=self.atol) assert_allclose(r2, r, rtol=self.rtol, atol=self.atol) q = q0.copy('C') r = r0.copy('C') u = u0.copy('C') v = v0.copy('C') q3, r3 = qr_update(q, r, u, v, True) check_qr(q3, r3, a1, self.rtol, self.atol) assert_allclose(q3, q, rtol=self.rtol, atol=self.atol) assert_allclose(r3, r, rtol=self.rtol, atol=self.atol) def test_overwrite_qruv_rank_1_economic(self): # updating economic decompositions can overwrite any contigous r, # and positively strided r and u. V is only ever read. # only checking C and F contiguous. a, q0, r0, u0, v0 = self.generate('tall', 'economic') a1 = a + np.outer(u0, v0.conj()) q = q0.copy('F') r = r0.copy('F') u = u0.copy('F') v = v0.copy('F') # don't overwrite q1, r1 = qr_update(q, r, u, v, False) check_qr(q1, r1, a1, self.rtol, self.atol, False) check_qr(q, r, a, self.rtol, self.atol, False) q2, r2 = qr_update(q, r, u, v, True) check_qr(q2, r2, a1, self.rtol, self.atol, False) # verify the overwriting, no good way to check u and v. 
assert_allclose(q2, q, rtol=self.rtol, atol=self.atol) assert_allclose(r2, r, rtol=self.rtol, atol=self.atol) q = q0.copy('C') r = r0.copy('C') u = u0.copy('C') v = v0.copy('C') q3, r3 = qr_update(q, r, u, v, True) check_qr(q3, r3, a1, self.rtol, self.atol, False) assert_allclose(q3, q, rtol=self.rtol, atol=self.atol) assert_allclose(r3, r, rtol=self.rtol, atol=self.atol) def test_overwrite_qruv_rank_p(self): # for rank p updates, q r must be F contiguous, v must be C (v.T --> F) # and u can be C or F, but is only overwritten if Q is C and complex a, q0, r0, u0, v0 = self.generate('sqr', p=3) a1 = a + np.dot(u0, v0.T.conj()) q = q0.copy('F') r = r0.copy('F') u = u0.copy('F') v = v0.copy('C') # don't overwrite q1, r1 = qr_update(q, r, u, v, False) check_qr(q1, r1, a1, self.rtol, self.atol) check_qr(q, r, a, self.rtol, self.atol) q2, r2 = qr_update(q, r, u, v, True) check_qr(q2, r2, a1, self.rtol, self.atol) # verify the overwriting, no good way to check u and v. assert_allclose(q2, q, rtol=self.rtol, atol=self.atol) assert_allclose(r2, r, rtol=self.rtol, atol=self.atol) def test_empty_inputs(self): a, q, r, u, v = self.generate('tall') assert_raises(ValueError, qr_update, np.array([]), r, u, v) assert_raises(ValueError, qr_update, q, np.array([]), u, v) assert_raises(ValueError, qr_update, q, r, np.array([]), v) assert_raises(ValueError, qr_update, q, r, u, np.array([])) def test_mismatched_shapes(self): a, q, r, u, v = self.generate('tall') assert_raises(ValueError, qr_update, q, r[1:], u, v) assert_raises(ValueError, qr_update, q[:-2], r, u, v) assert_raises(ValueError, qr_update, q, r, u[1:], v) assert_raises(ValueError, qr_update, q, r, u, v[1:]) def test_unsupported_dtypes(self): dts = ['int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32', 'uint64', 'float16', 'longdouble', 'longcomplex', 'bool'] a, q0, r0, u0, v0 = self.generate('tall') for dtype in dts: q = q0.real.astype(dtype) r = r0.real.astype(dtype) u = u0.real.astype(dtype) v = 
v0.real.astype(dtype) assert_raises(ValueError, qr_update, q, r0, u0, v0) assert_raises(ValueError, qr_update, q0, r, u0, v0) assert_raises(ValueError, qr_update, q0, r0, u, v0) assert_raises(ValueError, qr_update, q0, r0, u0, v) def test_integer_input(self): q = np.arange(16).reshape(4, 4) r = q.copy() # doesn't matter u = q[:, 0].copy() v = r[0, :].copy() assert_raises(ValueError, qr_update, q, r, u, v) def test_check_finite(self): a0, q0, r0, u0, v0 = self.generate('tall', p=3) q = q0.copy('F') q[1,1] = np.nan assert_raises(ValueError, qr_update, q, r0, u0[:,0], v0[:,0]) assert_raises(ValueError, qr_update, q, r0, u0, v0) r = r0.copy('F') r[1,1] = np.nan assert_raises(ValueError, qr_update, q0, r, u0[:,0], v0[:,0]) assert_raises(ValueError, qr_update, q0, r, u0, v0) u = u0.copy('F') u[0,0] = np.nan assert_raises(ValueError, qr_update, q0, r0, u[:,0], v0[:,0]) assert_raises(ValueError, qr_update, q0, r0, u, v0) v = v0.copy('F') v[0,0] = np.nan assert_raises(ValueError, qr_update, q0, r0, u[:,0], v[:,0]) assert_raises(ValueError, qr_update, q0, r0, u, v) def test_economic_check_finite(self): a0, q0, r0, u0, v0 = self.generate('tall', mode='economic', p=3) q = q0.copy('F') q[1,1] = np.nan assert_raises(ValueError, qr_update, q, r0, u0[:,0], v0[:,0]) assert_raises(ValueError, qr_update, q, r0, u0, v0) r = r0.copy('F') r[1,1] = np.nan assert_raises(ValueError, qr_update, q0, r, u0[:,0], v0[:,0]) assert_raises(ValueError, qr_update, q0, r, u0, v0) u = u0.copy('F') u[0,0] = np.nan assert_raises(ValueError, qr_update, q0, r0, u[:,0], v0[:,0]) assert_raises(ValueError, qr_update, q0, r0, u, v0) v = v0.copy('F') v[0,0] = np.nan assert_raises(ValueError, qr_update, q0, r0, u[:,0], v[:,0]) assert_raises(ValueError, qr_update, q0, r0, u, v) class TestQRupdate_f(BaseQRupdate): dtype = np.dtype('f') class TestQRupdate_F(BaseQRupdate): dtype = np.dtype('F') class TestQRupdate_d(BaseQRupdate): dtype = np.dtype('d') class TestQRupdate_D(BaseQRupdate): dtype = np.dtype('D') def 
test_form_qTu(): # We want to ensure that all of the code paths through this function are # tested. Most of them should be hit with the rest of test suite, but # explicit tests make clear precisely what is being tested. # # This function expects that Q is either C or F contiguous and square. # Economic mode decompositions (Q is (M, N), M != N) do not go through this # function. U may have any positive strides. # # Some of these test are duplicates, since contiguous 1d arrays are both C # and F. q_order = ['F', 'C'] q_shape = [(8, 8), ] u_order = ['F', 'C', 'A'] # here A means is not F not C u_shape = [1, 3] dtype = ['f', 'd', 'F', 'D'] for qo, qs, uo, us, d in \ itertools.product(q_order, q_shape, u_order, u_shape, dtype): if us == 1: check_form_qTu(qo, qs, uo, us, 1, d) check_form_qTu(qo, qs, uo, us, 2, d) else: check_form_qTu(qo, qs, uo, us, 2, d) def check_form_qTu(q_order, q_shape, u_order, u_shape, u_ndim, dtype): np.random.seed(47) if u_shape == 1 and u_ndim == 1: u_shape = (q_shape[0],) else: u_shape = (q_shape[0], u_shape) dtype = np.dtype(dtype) if dtype.char in 'fd': q = np.random.random(q_shape) u = np.random.random(u_shape) elif dtype.char in 'FD': q = np.random.random(q_shape) + 1j*np.random.random(q_shape) u = np.random.random(u_shape) + 1j*np.random.random(u_shape) else: ValueError("form_qTu doesn't support this dtype") q = np.require(q, dtype, q_order) if u_order != 'A': u = np.require(u, dtype, u_order) else: u, = make_strided((u.astype(dtype),)) rtol = 10.0 ** -(np.finfo(dtype).precision-2) atol = 2*np.finfo(dtype).eps expected = np.dot(q.T.conj(), u) res = _decomp_update._form_qTu(q, u) assert_allclose(res, expected, rtol=rtol, atol=atol)
67,945
39.228538
88
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/fftpack/pseudo_diffs.py
""" Differential and pseudo-differential operators. """ # Created by Pearu Peterson, September 2002 from __future__ import division, print_function, absolute_import __all__ = ['diff', 'tilbert','itilbert','hilbert','ihilbert', 'cs_diff','cc_diff','sc_diff','ss_diff', 'shift'] from numpy import pi, asarray, sin, cos, sinh, cosh, tanh, iscomplexobj from . import convolve from scipy.fftpack.basic import _datacopied import atexit atexit.register(convolve.destroy_convolve_cache) del atexit _cache = {} def diff(x,order=1,period=None, _cache=_cache): """ Return k-th derivative (or integral) of a periodic sequence x. If x_j and y_j are Fourier coefficients of periodic functions x and y, respectively, then:: y_j = pow(sqrt(-1)*j*2*pi/period, order) * x_j y_0 = 0 if order is not 0. Parameters ---------- x : array_like Input array. order : int, optional The order of differentiation. Default order is 1. If order is negative, then integration is carried out under the assumption that ``x_0 == 0``. period : float, optional The assumed period of the sequence. Default is ``2*pi``. Notes ----- If ``sum(x, axis=0) = 0`` then ``diff(diff(x, k), -k) == x`` (within numerical accuracy). For odd order and even ``len(x)``, the Nyquist mode is taken zero. """ tmp = asarray(x) if order == 0: return tmp if iscomplexobj(tmp): return diff(tmp.real,order,period)+1j*diff(tmp.imag,order,period) if period is not None: c = 2*pi/period else: c = 1.0 n = len(x) omega = _cache.get((n,order,c)) if omega is None: if len(_cache) > 20: while _cache: _cache.popitem() def kernel(k,order=order,c=c): if k: return pow(c*k,order) return 0 omega = convolve.init_convolution_kernel(n,kernel,d=order, zero_nyquist=1) _cache[(n,order,c)] = omega overwrite_x = _datacopied(tmp, x) return convolve.convolve(tmp,omega,swap_real_imag=order % 2, overwrite_x=overwrite_x) del _cache _cache = {} def tilbert(x, h, period=None, _cache=_cache): """ Return h-Tilbert transform of a periodic sequence x. 
If x_j and y_j are Fourier coefficients of periodic functions x and y, respectively, then:: y_j = sqrt(-1)*coth(j*h*2*pi/period) * x_j y_0 = 0 Parameters ---------- x : array_like The input array to transform. h : float Defines the parameter of the Tilbert transform. period : float, optional The assumed period of the sequence. Default period is ``2*pi``. Returns ------- tilbert : ndarray The result of the transform. Notes ----- If ``sum(x, axis=0) == 0`` and ``n = len(x)`` is odd then ``tilbert(itilbert(x)) == x``. If ``2 * pi * h / period`` is approximately 10 or larger, then numerically ``tilbert == hilbert`` (theoretically oo-Tilbert == Hilbert). For even ``len(x)``, the Nyquist mode of ``x`` is taken zero. """ tmp = asarray(x) if iscomplexobj(tmp): return tilbert(tmp.real, h, period) + \ 1j * tilbert(tmp.imag, h, period) if period is not None: h = h * 2 * pi / period n = len(x) omega = _cache.get((n, h)) if omega is None: if len(_cache) > 20: while _cache: _cache.popitem() def kernel(k, h=h): if k: return 1.0/tanh(h*k) return 0 omega = convolve.init_convolution_kernel(n, kernel, d=1) _cache[(n,h)] = omega overwrite_x = _datacopied(tmp, x) return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x) del _cache _cache = {} def itilbert(x,h,period=None, _cache=_cache): """ Return inverse h-Tilbert transform of a periodic sequence x. If ``x_j`` and ``y_j`` are Fourier coefficients of periodic functions x and y, respectively, then:: y_j = -sqrt(-1)*tanh(j*h*2*pi/period) * x_j y_0 = 0 For more details, see `tilbert`. 
""" tmp = asarray(x) if iscomplexobj(tmp): return itilbert(tmp.real,h,period) + \ 1j*itilbert(tmp.imag,h,period) if period is not None: h = h*2*pi/period n = len(x) omega = _cache.get((n,h)) if omega is None: if len(_cache) > 20: while _cache: _cache.popitem() def kernel(k,h=h): if k: return -tanh(h*k) return 0 omega = convolve.init_convolution_kernel(n,kernel,d=1) _cache[(n,h)] = omega overwrite_x = _datacopied(tmp, x) return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x) del _cache _cache = {} def hilbert(x, _cache=_cache): """ Return Hilbert transform of a periodic sequence x. If x_j and y_j are Fourier coefficients of periodic functions x and y, respectively, then:: y_j = sqrt(-1)*sign(j) * x_j y_0 = 0 Parameters ---------- x : array_like The input array, should be periodic. _cache : dict, optional Dictionary that contains the kernel used to do a convolution with. Returns ------- y : ndarray The transformed input. See Also -------- scipy.signal.hilbert : Compute the analytic signal, using the Hilbert transform. Notes ----- If ``sum(x, axis=0) == 0`` then ``hilbert(ihilbert(x)) == x``. For even len(x), the Nyquist mode of x is taken zero. The sign of the returned transform does not have a factor -1 that is more often than not found in the definition of the Hilbert transform. Note also that `scipy.signal.hilbert` does have an extra -1 factor compared to this function. """ tmp = asarray(x) if iscomplexobj(tmp): return hilbert(tmp.real)+1j*hilbert(tmp.imag) n = len(x) omega = _cache.get(n) if omega is None: if len(_cache) > 20: while _cache: _cache.popitem() def kernel(k): if k > 0: return 1.0 elif k < 0: return -1.0 return 0.0 omega = convolve.init_convolution_kernel(n,kernel,d=1) _cache[n] = omega overwrite_x = _datacopied(tmp, x) return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x) del _cache def ihilbert(x): """ Return inverse Hilbert transform of a periodic sequence x. 
If ``x_j`` and ``y_j`` are Fourier coefficients of periodic functions x and y, respectively, then:: y_j = -sqrt(-1)*sign(j) * x_j y_0 = 0 """ return -hilbert(x) _cache = {} def cs_diff(x, a, b, period=None, _cache=_cache): """ Return (a,b)-cosh/sinh pseudo-derivative of a periodic sequence. If ``x_j`` and ``y_j`` are Fourier coefficients of periodic functions x and y, respectively, then:: y_j = -sqrt(-1)*cosh(j*a*2*pi/period)/sinh(j*b*2*pi/period) * x_j y_0 = 0 Parameters ---------- x : array_like The array to take the pseudo-derivative from. a, b : float Defines the parameters of the cosh/sinh pseudo-differential operator. period : float, optional The period of the sequence. Default period is ``2*pi``. Returns ------- cs_diff : ndarray Pseudo-derivative of periodic sequence `x`. Notes ----- For even len(`x`), the Nyquist mode of `x` is taken as zero. """ tmp = asarray(x) if iscomplexobj(tmp): return cs_diff(tmp.real,a,b,period) + \ 1j*cs_diff(tmp.imag,a,b,period) if period is not None: a = a*2*pi/period b = b*2*pi/period n = len(x) omega = _cache.get((n,a,b)) if omega is None: if len(_cache) > 20: while _cache: _cache.popitem() def kernel(k,a=a,b=b): if k: return -cosh(a*k)/sinh(b*k) return 0 omega = convolve.init_convolution_kernel(n,kernel,d=1) _cache[(n,a,b)] = omega overwrite_x = _datacopied(tmp, x) return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x) del _cache _cache = {} def sc_diff(x, a, b, period=None, _cache=_cache): """ Return (a,b)-sinh/cosh pseudo-derivative of a periodic sequence x. If x_j and y_j are Fourier coefficients of periodic functions x and y, respectively, then:: y_j = sqrt(-1)*sinh(j*a*2*pi/period)/cosh(j*b*2*pi/period) * x_j y_0 = 0 Parameters ---------- x : array_like Input array. a,b : float Defines the parameters of the sinh/cosh pseudo-differential operator. period : float, optional The period of the sequence x. Default is 2*pi. 
Notes ----- ``sc_diff(cs_diff(x,a,b),b,a) == x`` For even ``len(x)``, the Nyquist mode of x is taken as zero. """ tmp = asarray(x) if iscomplexobj(tmp): return sc_diff(tmp.real,a,b,period) + \ 1j*sc_diff(tmp.imag,a,b,period) if period is not None: a = a*2*pi/period b = b*2*pi/period n = len(x) omega = _cache.get((n,a,b)) if omega is None: if len(_cache) > 20: while _cache: _cache.popitem() def kernel(k,a=a,b=b): if k: return sinh(a*k)/cosh(b*k) return 0 omega = convolve.init_convolution_kernel(n,kernel,d=1) _cache[(n,a,b)] = omega overwrite_x = _datacopied(tmp, x) return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x) del _cache _cache = {} def ss_diff(x, a, b, period=None, _cache=_cache): """ Return (a,b)-sinh/sinh pseudo-derivative of a periodic sequence x. If x_j and y_j are Fourier coefficients of periodic functions x and y, respectively, then:: y_j = sinh(j*a*2*pi/period)/sinh(j*b*2*pi/period) * x_j y_0 = a/b * x_0 Parameters ---------- x : array_like The array to take the pseudo-derivative from. a,b Defines the parameters of the sinh/sinh pseudo-differential operator. period : float, optional The period of the sequence x. Default is ``2*pi``. Notes ----- ``ss_diff(ss_diff(x,a,b),b,a) == x`` """ tmp = asarray(x) if iscomplexobj(tmp): return ss_diff(tmp.real,a,b,period) + \ 1j*ss_diff(tmp.imag,a,b,period) if period is not None: a = a*2*pi/period b = b*2*pi/period n = len(x) omega = _cache.get((n,a,b)) if omega is None: if len(_cache) > 20: while _cache: _cache.popitem() def kernel(k,a=a,b=b): if k: return sinh(a*k)/sinh(b*k) return float(a)/b omega = convolve.init_convolution_kernel(n,kernel) _cache[(n,a,b)] = omega overwrite_x = _datacopied(tmp, x) return convolve.convolve(tmp,omega,overwrite_x=overwrite_x) del _cache _cache = {} def cc_diff(x, a, b, period=None, _cache=_cache): """ Return (a,b)-cosh/cosh pseudo-derivative of a periodic sequence. 
If x_j and y_j are Fourier coefficients of periodic functions x and y, respectively, then:: y_j = cosh(j*a*2*pi/period)/cosh(j*b*2*pi/period) * x_j Parameters ---------- x : array_like The array to take the pseudo-derivative from. a,b : float Defines the parameters of the sinh/sinh pseudo-differential operator. period : float, optional The period of the sequence x. Default is ``2*pi``. Returns ------- cc_diff : ndarray Pseudo-derivative of periodic sequence `x`. Notes ----- ``cc_diff(cc_diff(x,a,b),b,a) == x`` """ tmp = asarray(x) if iscomplexobj(tmp): return cc_diff(tmp.real,a,b,period) + \ 1j*cc_diff(tmp.imag,a,b,period) if period is not None: a = a*2*pi/period b = b*2*pi/period n = len(x) omega = _cache.get((n,a,b)) if omega is None: if len(_cache) > 20: while _cache: _cache.popitem() def kernel(k,a=a,b=b): return cosh(a*k)/cosh(b*k) omega = convolve.init_convolution_kernel(n,kernel) _cache[(n,a,b)] = omega overwrite_x = _datacopied(tmp, x) return convolve.convolve(tmp,omega,overwrite_x=overwrite_x) del _cache _cache = {} def shift(x, a, period=None, _cache=_cache): """ Shift periodic sequence x by a: y(u) = x(u+a). If x_j and y_j are Fourier coefficients of periodic functions x and y, respectively, then:: y_j = exp(j*a*2*pi/period*sqrt(-1)) * x_f Parameters ---------- x : array_like The array to take the pseudo-derivative from. a : float Defines the parameters of the sinh/sinh pseudo-differential period : float, optional The period of the sequences x and y. Default period is ``2*pi``. 
""" tmp = asarray(x) if iscomplexobj(tmp): return shift(tmp.real,a,period)+1j*shift(tmp.imag,a,period) if period is not None: a = a*2*pi/period n = len(x) omega = _cache.get((n,a)) if omega is None: if len(_cache) > 20: while _cache: _cache.popitem() def kernel_real(k,a=a): return cos(a*k) def kernel_imag(k,a=a): return sin(a*k) omega_real = convolve.init_convolution_kernel(n,kernel_real,d=0, zero_nyquist=0) omega_imag = convolve.init_convolution_kernel(n,kernel_imag,d=1, zero_nyquist=0) _cache[(n,a)] = omega_real,omega_imag else: omega_real,omega_imag = omega overwrite_x = _datacopied(tmp, x) return convolve.convolve_z(tmp,omega_real,omega_imag, overwrite_x=overwrite_x) del _cache
14,335
24.691756
80
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/fftpack/setup.py
# Created by Pearu Peterson, August 2002 from __future__ import division, print_function, absolute_import from os.path import join def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('fftpack',parent_package, top_path) config.add_data_dir('tests') dfftpack_src = [join('src/dfftpack','*.f')] config.add_library('dfftpack', sources=dfftpack_src) fftpack_src = [join('src/fftpack','*.f')] config.add_library('fftpack', sources=fftpack_src) sources = ['fftpack.pyf','src/zfft.c','src/drfft.c','src/zrfft.c', 'src/zfftnd.c', 'src/dct.c.src', 'src/dst.c.src'] config.add_extension('_fftpack', sources=sources, libraries=['dfftpack', 'fftpack'], include_dirs=['src'], depends=(dfftpack_src + fftpack_src)) config.add_extension('convolve', sources=['convolve.pyf','src/convolve.c'], libraries=['dfftpack'], depends=dfftpack_src, ) return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict())
1,164
27.414634
70
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/fftpack/helper.py
from __future__ import division, print_function, absolute_import import operator from numpy import arange from numpy.fft.helper import fftshift, ifftshift, fftfreq from bisect import bisect_left __all__ = ['fftshift', 'ifftshift', 'fftfreq', 'rfftfreq', 'next_fast_len'] def rfftfreq(n, d=1.0): """DFT sample frequencies (for usage with rfft, irfft). The returned float array contains the frequency bins in cycles/unit (with zero at the start) given a window length `n` and a sample spacing `d`:: f = [0,1,1,2,2,...,n/2-1,n/2-1,n/2]/(d*n) if n is even f = [0,1,1,2,2,...,n/2-1,n/2-1,n/2,n/2]/(d*n) if n is odd Parameters ---------- n : int Window length. d : scalar, optional Sample spacing. Default is 1. Returns ------- out : ndarray The array of length `n`, containing the sample frequencies. Examples -------- >>> from scipy import fftpack >>> sig = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=float) >>> sig_fft = fftpack.rfft(sig) >>> n = sig_fft.size >>> timestep = 0.1 >>> freq = fftpack.rfftfreq(n, d=timestep) >>> freq array([ 0. , 1.25, 1.25, 2.5 , 2.5 , 3.75, 3.75, 5. ]) """ n = operator.index(n) if n < 0: raise ValueError("n = %s is not valid. " "n must be a nonnegative integer." % n) return (arange(1, n + 1, dtype=int) // 2) / float(n * d) def next_fast_len(target): """ Find the next fast size of input data to `fft`, for zero-padding, etc. SciPy's FFTPACK has efficient functions for radix {2, 3, 4, 5}, so this returns the next composite of the prime factors 2, 3, and 5 which is greater than or equal to `target`. (These are also known as 5-smooth numbers, regular numbers, or Hamming numbers.) Parameters ---------- target : int Length to start searching from. Must be a positive integer. Returns ------- out : int The first 5-smooth number greater than or equal to `target`. Notes ----- .. 
versionadded:: 0.18.0 Examples -------- On a particular machine, an FFT of prime length takes 133 ms: >>> from scipy import fftpack >>> min_len = 10007 # prime length is worst case for speed >>> a = np.random.randn(min_len) >>> b = fftpack.fft(a) Zero-padding to the next 5-smooth length reduces computation time to 211 us, a speedup of 630 times: >>> fftpack.helper.next_fast_len(min_len) 10125 >>> b = fftpack.fft(a, 10125) Rounding up to the next power of 2 is not optimal, taking 367 us to compute, 1.7 times as long as the 5-smooth size: >>> b = fftpack.fft(a, 16384) """ hams = (8, 9, 10, 12, 15, 16, 18, 20, 24, 25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80, 81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192, 200, 216, 225, 240, 243, 250, 256, 270, 288, 300, 320, 324, 360, 375, 384, 400, 405, 432, 450, 480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675, 720, 729, 750, 768, 800, 810, 864, 900, 960, 972, 1000, 1024, 1080, 1125, 1152, 1200, 1215, 1250, 1280, 1296, 1350, 1440, 1458, 1500, 1536, 1600, 1620, 1728, 1800, 1875, 1920, 1944, 2000, 2025, 2048, 2160, 2187, 2250, 2304, 2400, 2430, 2500, 2560, 2592, 2700, 2880, 2916, 3000, 3072, 3125, 3200, 3240, 3375, 3456, 3600, 3645, 3750, 3840, 3888, 4000, 4050, 4096, 4320, 4374, 4500, 4608, 4800, 4860, 5000, 5120, 5184, 5400, 5625, 5760, 5832, 6000, 6075, 6144, 6250, 6400, 6480, 6561, 6750, 6912, 7200, 7290, 7500, 7680, 7776, 8000, 8100, 8192, 8640, 8748, 9000, 9216, 9375, 9600, 9720, 10000) if target <= 6: return target # Quickly check if it's already a power of 2 if not (target & (target-1)): return target # Get result quickly for small sizes, since FFT itself is similarly fast. 
if target <= hams[-1]: return hams[bisect_left(hams, target)] match = float('inf') # Anything found will be smaller p5 = 1 while p5 < target: p35 = p5 while p35 < target: # Ceiling integer division, avoiding conversion to float # (quotient = ceil(target / p35)) quotient = -(-target // p35) # Quickly find next power of 2 >= quotient p2 = 2**((quotient - 1).bit_length()) N = p2 * p35 if N == target: return N elif N < match: match = N p35 *= 3 if p35 == target: return p35 if p35 < match: match = p35 p5 *= 5 if p5 == target: return p5 if p5 < match: match = p5 return match
4,892
31.190789
77
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/fftpack/realtransforms.py
""" Real spectrum transforms (DCT, DST, MDCT) """ from __future__ import division, print_function, absolute_import __all__ = ['dct', 'idct', 'dst', 'idst', 'dctn', 'idctn', 'dstn', 'idstn'] import numpy as np from scipy.fftpack import _fftpack from scipy.fftpack.basic import _datacopied, _fix_shape, _asfarray import atexit atexit.register(_fftpack.destroy_ddct1_cache) atexit.register(_fftpack.destroy_ddct2_cache) atexit.register(_fftpack.destroy_dct1_cache) atexit.register(_fftpack.destroy_dct2_cache) atexit.register(_fftpack.destroy_ddst1_cache) atexit.register(_fftpack.destroy_ddst2_cache) atexit.register(_fftpack.destroy_dst1_cache) atexit.register(_fftpack.destroy_dst2_cache) def _init_nd_shape_and_axes(x, shape, axes): """Handle shape and axes arguments for dctn, idctn, dstn, idstn.""" if shape is None: if axes is None: shape = x.shape else: shape = np.take(x.shape, axes) shape = tuple(shape) for dim in shape: if dim < 1: raise ValueError("Invalid number of DCT data points " "(%s) specified." % (shape,)) if axes is None: axes = list(range(-x.ndim, 0)) elif np.isscalar(axes): axes = [axes, ] if len(axes) != len(shape): raise ValueError("when given, axes and shape arguments " "have to be of the same length") if len(np.unique(axes)) != len(axes): raise ValueError("All axes must be unique.") return shape, axes def dctn(x, type=2, shape=None, axes=None, norm=None, overwrite_x=False): """ Return multidimensional Discrete Cosine Transform along the specified axes. Parameters ---------- x : array_like The input array. type : {1, 2, 3}, optional Type of the DCT (see Notes). Default type is 2. shape : tuple of ints, optional The shape of the result. If both `shape` and `axes` (see below) are None, `shape` is ``x.shape``; if `shape` is None but `axes` is not None, then `shape` is ``scipy.take(x.shape, axes, axis=0)``. If ``shape[i] > x.shape[i]``, the i-th dimension is padded with zeros. 
If ``shape[i] < x.shape[i]``, the i-th dimension is truncated to length ``shape[i]``. axes : tuple or None, optional Axes along which the DCT is computed; the default is over all axes. norm : {None, 'ortho'}, optional Normalization mode (see Notes). Default is None. overwrite_x : bool, optional If True, the contents of `x` can be destroyed; the default is False. Returns ------- y : ndarray of real The transformed input array. See Also -------- idctn : Inverse multidimensional DCT Notes ----- For full details of the DCT types and normalization modes, as well as references, see `dct`. Examples -------- >>> from scipy.fftpack import dctn, idctn >>> y = np.random.randn(16, 16) >>> np.allclose(y, idctn(dctn(y, norm='ortho'), norm='ortho')) True """ x = np.asanyarray(x) shape, axes = _init_nd_shape_and_axes(x, shape, axes) for n, ax in zip(shape, axes): x = dct(x, type=type, n=n, axis=ax, norm=norm, overwrite_x=overwrite_x) return x def idctn(x, type=2, shape=None, axes=None, norm=None, overwrite_x=False): """ Return multidimensional Discrete Cosine Transform along the specified axes. Parameters ---------- x : array_like The input array. type : {1, 2, 3}, optional Type of the DCT (see Notes). Default type is 2. shape : tuple of ints, optional The shape of the result. If both `shape` and `axes` (see below) are None, `shape` is ``x.shape``; if `shape` is None but `axes` is not None, then `shape` is ``scipy.take(x.shape, axes, axis=0)``. If ``shape[i] > x.shape[i]``, the i-th dimension is padded with zeros. If ``shape[i] < x.shape[i]``, the i-th dimension is truncated to length ``shape[i]``. axes : tuple or None, optional Axes along which the IDCT is computed; the default is over all axes. norm : {None, 'ortho'}, optional Normalization mode (see Notes). Default is None. overwrite_x : bool, optional If True, the contents of `x` can be destroyed; the default is False. Returns ------- y : ndarray of real The transformed input array. 
See Also -------- dctn : multidimensional DCT Notes ----- For full details of the IDCT types and normalization modes, as well as references, see `idct`. Examples -------- >>> from scipy.fftpack import dctn, idctn >>> y = np.random.randn(16, 16) >>> np.allclose(y, idctn(dctn(y, norm='ortho'), norm='ortho')) True """ x = np.asanyarray(x) shape, axes = _init_nd_shape_and_axes(x, shape, axes) for n, ax in zip(shape, axes): x = idct(x, type=type, n=n, axis=ax, norm=norm, overwrite_x=overwrite_x) return x def dstn(x, type=2, shape=None, axes=None, norm=None, overwrite_x=False): """ Return multidimensional Discrete Sine Transform along the specified axes. Parameters ---------- x : array_like The input array. type : {1, 2, 3}, optional Type of the DCT (see Notes). Default type is 2. shape : tuple of ints, optional The shape of the result. If both `shape` and `axes` (see below) are None, `shape` is ``x.shape``; if `shape` is None but `axes` is not None, then `shape` is ``scipy.take(x.shape, axes, axis=0)``. If ``shape[i] > x.shape[i]``, the i-th dimension is padded with zeros. If ``shape[i] < x.shape[i]``, the i-th dimension is truncated to length ``shape[i]``. axes : tuple or None, optional Axes along which the DCT is computed; the default is over all axes. norm : {None, 'ortho'}, optional Normalization mode (see Notes). Default is None. overwrite_x : bool, optional If True, the contents of `x` can be destroyed; the default is False. Returns ------- y : ndarray of real The transformed input array. See Also -------- idstn : Inverse multidimensional DST Notes ----- For full details of the DST types and normalization modes, as well as references, see `dst`. 
Examples -------- >>> from scipy.fftpack import dstn, idstn >>> y = np.random.randn(16, 16) >>> np.allclose(y, idstn(dstn(y, norm='ortho'), norm='ortho')) True """ x = np.asanyarray(x) shape, axes = _init_nd_shape_and_axes(x, shape, axes) for n, ax in zip(shape, axes): x = dst(x, type=type, n=n, axis=ax, norm=norm, overwrite_x=overwrite_x) return x def idstn(x, type=2, shape=None, axes=None, norm=None, overwrite_x=False): """ Return multidimensional Discrete Sine Transform along the specified axes. Parameters ---------- x : array_like The input array. type : {1, 2, 3}, optional Type of the DCT (see Notes). Default type is 2. shape : tuple of ints, optional The shape of the result. If both `shape` and `axes` (see below) are None, `shape` is ``x.shape``; if `shape` is None but `axes` is not None, then `shape` is ``scipy.take(x.shape, axes, axis=0)``. If ``shape[i] > x.shape[i]``, the i-th dimension is padded with zeros. If ``shape[i] < x.shape[i]``, the i-th dimension is truncated to length ``shape[i]``. axes : tuple or None, optional Axes along which the IDCT is computed; the default is over all axes. norm : {None, 'ortho'}, optional Normalization mode (see Notes). Default is None. overwrite_x : bool, optional If True, the contents of `x` can be destroyed; the default is False. Returns ------- y : ndarray of real The transformed input array. See Also -------- dctn : multidimensional DST Notes ----- For full details of the IDST types and normalization modes, as well as references, see `idst`. 
Examples -------- >>> from scipy.fftpack import dstn, idstn >>> y = np.random.randn(16, 16) >>> np.allclose(y, idstn(dstn(y, norm='ortho'), norm='ortho')) True """ x = np.asanyarray(x) shape, axes = _init_nd_shape_and_axes(x, shape, axes) for n, ax in zip(shape, axes): x = idst(x, type=type, n=n, axis=ax, norm=norm, overwrite_x=overwrite_x) return x def dct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False): """ Return the Discrete Cosine Transform of arbitrary type sequence x. Parameters ---------- x : array_like The input array. type : {1, 2, 3}, optional Type of the DCT (see Notes). Default type is 2. n : int, optional Length of the transform. If ``n < x.shape[axis]``, `x` is truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The default results in ``n = x.shape[axis]``. axis : int, optional Axis along which the dct is computed; the default is over the last axis (i.e., ``axis=-1``). norm : {None, 'ortho'}, optional Normalization mode (see Notes). Default is None. overwrite_x : bool, optional If True, the contents of `x` can be destroyed; the default is False. Returns ------- y : ndarray of real The transformed input array. See Also -------- idct : Inverse DCT Notes ----- For a single dimension array ``x``, ``dct(x, norm='ortho')`` is equal to MATLAB ``dct(x)``. There are theoretically 8 types of the DCT, only the first 3 types are implemented in scipy. 'The' DCT generally refers to DCT type 2, and 'the' Inverse DCT generally refers to DCT type 3. **Type I** There are several definitions of the DCT-I; we use the following (for ``norm=None``):: N-2 y[k] = x[0] + (-1)**k x[N-1] + 2 * sum x[n]*cos(pi*k*n/(N-1)) n=1 Only None is supported as normalization mode for DCT-I. Note also that the DCT-I is only supported for input size > 1 **Type II** There are several definitions of the DCT-II; we use the following (for ``norm=None``):: N-1 y[k] = 2* sum x[n]*cos(pi*k*(2n+1)/(2*N)), 0 <= k < N. 
n=0 If ``norm='ortho'``, ``y[k]`` is multiplied by a scaling factor `f`:: f = sqrt(1/(4*N)) if k = 0, f = sqrt(1/(2*N)) otherwise. Which makes the corresponding matrix of coefficients orthonormal (``OO' = Id``). **Type III** There are several definitions, we use the following (for ``norm=None``):: N-1 y[k] = x[0] + 2 * sum x[n]*cos(pi*(k+0.5)*n/N), 0 <= k < N. n=1 or, for ``norm='ortho'`` and 0 <= k < N:: N-1 y[k] = x[0] / sqrt(N) + sqrt(2/N) * sum x[n]*cos(pi*(k+0.5)*n/N) n=1 The (unnormalized) DCT-III is the inverse of the (unnormalized) DCT-II, up to a factor `2N`. The orthonormalized DCT-III is exactly the inverse of the orthonormalized DCT-II. References ---------- .. [1] 'A Fast Cosine Transform in One and Two Dimensions', by J. Makhoul, `IEEE Transactions on acoustics, speech and signal processing` vol. 28(1), pp. 27-34, http://dx.doi.org/10.1109/TASSP.1980.1163351 (1980). .. [2] Wikipedia, "Discrete cosine transform", http://en.wikipedia.org/wiki/Discrete_cosine_transform Examples -------- The Type 1 DCT is equivalent to the FFT (though faster) for real, even-symmetrical inputs. The output is also real and even-symmetrical. Half of the FFT input is used to generate half of the FFT output: >>> from scipy.fftpack import fft, dct >>> fft(np.array([4., 3., 5., 10., 5., 3.])).real array([ 30., -8., 6., -2., 6., -8.]) >>> dct(np.array([4., 3., 5., 10.]), 1) array([ 30., -8., 6., -2.]) """ if type == 1 and norm is not None: raise NotImplementedError( "Orthonormalization not yet supported for DCT-I") return _dct(x, type, n, axis, normalize=norm, overwrite_x=overwrite_x) def idct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False): """ Return the Inverse Discrete Cosine Transform of an arbitrary type sequence. Parameters ---------- x : array_like The input array. type : {1, 2, 3}, optional Type of the DCT (see Notes). Default type is 2. n : int, optional Length of the transform. If ``n < x.shape[axis]``, `x` is truncated. 
If ``n > x.shape[axis]``, `x` is zero-padded. The default results in ``n = x.shape[axis]``. axis : int, optional Axis along which the idct is computed; the default is over the last axis (i.e., ``axis=-1``). norm : {None, 'ortho'}, optional Normalization mode (see Notes). Default is None. overwrite_x : bool, optional If True, the contents of `x` can be destroyed; the default is False. Returns ------- idct : ndarray of real The transformed input array. See Also -------- dct : Forward DCT Notes ----- For a single dimension array `x`, ``idct(x, norm='ortho')`` is equal to MATLAB ``idct(x)``. 'The' IDCT is the IDCT of type 2, which is the same as DCT of type 3. IDCT of type 1 is the DCT of type 1, IDCT of type 2 is the DCT of type 3, and IDCT of type 3 is the DCT of type 2. For the definition of these types, see `dct`. Examples -------- The Type 1 DCT is equivalent to the DFT for real, even-symmetrical inputs. The output is also real and even-symmetrical. Half of the IFFT input is used to generate half of the IFFT output: >>> from scipy.fftpack import ifft, idct >>> ifft(np.array([ 30., -8., 6., -2., 6., -8.])).real array([ 4., 3., 5., 10., 5., 3.]) >>> idct(np.array([ 30., -8., 6., -2.]), 1) / 6 array([ 4., 3., 5., 10.]) """ if type == 1 and norm is not None: raise NotImplementedError( "Orthonormalization not yet supported for IDCT-I") # Inverse/forward type table _TP = {1:1, 2:3, 3:2} return _dct(x, _TP[type], n, axis, normalize=norm, overwrite_x=overwrite_x) def _get_dct_fun(type, dtype): try: name = {'float64':'ddct%d', 'float32':'dct%d'}[dtype.name] except KeyError: raise ValueError("dtype %s not supported" % dtype) try: f = getattr(_fftpack, name % type) except AttributeError as e: raise ValueError(str(e) + ". 
Type %d not understood" % type) return f def _get_norm_mode(normalize): try: nm = {None:0, 'ortho':1}[normalize] except KeyError: raise ValueError("Unknown normalize mode %s" % normalize) return nm def __fix_shape(x, n, axis, dct_or_dst): tmp = _asfarray(x) copy_made = _datacopied(tmp, x) if n is None: n = tmp.shape[axis] elif n != tmp.shape[axis]: tmp, copy_made2 = _fix_shape(tmp, n, axis) copy_made = copy_made or copy_made2 if n < 1: raise ValueError("Invalid number of %s data points " "(%d) specified." % (dct_or_dst, n)) return tmp, n, copy_made def _raw_dct(x0, type, n, axis, nm, overwrite_x): f = _get_dct_fun(type, x0.dtype) return _eval_fun(f, x0, n, axis, nm, overwrite_x) def _raw_dst(x0, type, n, axis, nm, overwrite_x): f = _get_dst_fun(type, x0.dtype) return _eval_fun(f, x0, n, axis, nm, overwrite_x) def _eval_fun(f, tmp, n, axis, nm, overwrite_x): if axis == -1 or axis == len(tmp.shape) - 1: return f(tmp, n, nm, overwrite_x) tmp = np.swapaxes(tmp, axis, -1) tmp = f(tmp, n, nm, overwrite_x) return np.swapaxes(tmp, axis, -1) def _dct(x, type, n=None, axis=-1, overwrite_x=False, normalize=None): """ Return Discrete Cosine Transform of arbitrary type sequence x. Parameters ---------- x : array_like input array. n : int, optional Length of the transform. If ``n < x.shape[axis]``, `x` is truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The default results in ``n = x.shape[axis]``. axis : int, optional Axis along which the dct is computed; the default is over the last axis (i.e., ``axis=-1``). overwrite_x : bool, optional If True, the contents of `x` can be destroyed; the default is False. 
Returns ------- z : ndarray """ x0, n, copy_made = __fix_shape(x, n, axis, 'DCT') if type == 1 and n < 2: raise ValueError("DCT-I is not defined for size < 2") overwrite_x = overwrite_x or copy_made nm = _get_norm_mode(normalize) if np.iscomplexobj(x0): return (_raw_dct(x0.real, type, n, axis, nm, overwrite_x) + 1j * _raw_dct(x0.imag, type, n, axis, nm, overwrite_x)) else: return _raw_dct(x0, type, n, axis, nm, overwrite_x) def dst(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False): """ Return the Discrete Sine Transform of arbitrary type sequence x. Parameters ---------- x : array_like The input array. type : {1, 2, 3}, optional Type of the DST (see Notes). Default type is 2. n : int, optional Length of the transform. If ``n < x.shape[axis]``, `x` is truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The default results in ``n = x.shape[axis]``. axis : int, optional Axis along which the dst is computed; the default is over the last axis (i.e., ``axis=-1``). norm : {None, 'ortho'}, optional Normalization mode (see Notes). Default is None. overwrite_x : bool, optional If True, the contents of `x` can be destroyed; the default is False. Returns ------- dst : ndarray of reals The transformed input array. See Also -------- idst : Inverse DST Notes ----- For a single dimension array ``x``. There are theoretically 8 types of the DST for different combinations of even/odd boundary conditions and boundary off sets [1]_, only the first 3 types are implemented in scipy. **Type I** There are several definitions of the DST-I; we use the following for ``norm=None``. DST-I assumes the input is odd around n=-1 and n=N. :: N-1 y[k] = 2 * sum x[n]*sin(pi*(k+1)*(n+1)/(N+1)) n=0 Only None is supported as normalization mode for DCT-I. Note also that the DCT-I is only supported for input size > 1 The (unnormalized) DCT-I is its own inverse, up to a factor `2(N+1)`. **Type II** There are several definitions of the DST-II; we use the following for ``norm=None``. 
DST-II assumes the input is odd around n=-1/2 and n=N-1/2; the output is odd around k=-1 and even around k=N-1 :: N-1 y[k] = 2* sum x[n]*sin(pi*(k+1)*(n+0.5)/N), 0 <= k < N. n=0 if ``norm='ortho'``, ``y[k]`` is multiplied by a scaling factor `f` :: f = sqrt(1/(4*N)) if k == 0 f = sqrt(1/(2*N)) otherwise. **Type III** There are several definitions of the DST-III, we use the following (for ``norm=None``). DST-III assumes the input is odd around n=-1 and even around n=N-1 :: N-2 y[k] = x[N-1]*(-1)**k + 2* sum x[n]*sin(pi*(k+0.5)*(n+1)/N), 0 <= k < N. n=0 The (unnormalized) DCT-III is the inverse of the (unnormalized) DCT-II, up to a factor `2N`. The orthonormalized DST-III is exactly the inverse of the orthonormalized DST-II. .. versionadded:: 0.11.0 References ---------- .. [1] Wikipedia, "Discrete sine transform", http://en.wikipedia.org/wiki/Discrete_sine_transform """ if type == 1 and norm is not None: raise NotImplementedError( "Orthonormalization not yet supported for IDCT-I") return _dst(x, type, n, axis, normalize=norm, overwrite_x=overwrite_x) def idst(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False): """ Return the Inverse Discrete Sine Transform of an arbitrary type sequence. Parameters ---------- x : array_like The input array. type : {1, 2, 3}, optional Type of the DST (see Notes). Default type is 2. n : int, optional Length of the transform. If ``n < x.shape[axis]``, `x` is truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The default results in ``n = x.shape[axis]``. axis : int, optional Axis along which the idst is computed; the default is over the last axis (i.e., ``axis=-1``). norm : {None, 'ortho'}, optional Normalization mode (see Notes). Default is None. overwrite_x : bool, optional If True, the contents of `x` can be destroyed; the default is False. Returns ------- idst : ndarray of real The transformed input array. 
See Also -------- dst : Forward DST Notes ----- 'The' IDST is the IDST of type 2, which is the same as DST of type 3. IDST of type 1 is the DST of type 1, IDST of type 2 is the DST of type 3, and IDST of type 3 is the DST of type 2. For the definition of these types, see `dst`. .. versionadded:: 0.11.0 """ if type == 1 and norm is not None: raise NotImplementedError( "Orthonormalization not yet supported for IDCT-I") # Inverse/forward type table _TP = {1:1, 2:3, 3:2} return _dst(x, _TP[type], n, axis, normalize=norm, overwrite_x=overwrite_x) def _get_dst_fun(type, dtype): try: name = {'float64':'ddst%d', 'float32':'dst%d'}[dtype.name] except KeyError: raise ValueError("dtype %s not supported" % dtype) try: f = getattr(_fftpack, name % type) except AttributeError as e: raise ValueError(str(e) + ". Type %d not understood" % type) return f def _dst(x, type, n=None, axis=-1, overwrite_x=False, normalize=None): """ Return Discrete Sine Transform of arbitrary type sequence x. Parameters ---------- x : array_like input array. n : int, optional Length of the transform. axis : int, optional Axis along which the dst is computed. (default=-1) overwrite_x : bool, optional If True the contents of x can be destroyed. (default=False) Returns ------- z : real ndarray """ x0, n, copy_made = __fix_shape(x, n, axis, 'DST') if type == 1 and n < 2: raise ValueError("DST-I is not defined for size < 2") overwrite_x = overwrite_x or copy_made nm = _get_norm_mode(normalize) if np.iscomplexobj(x0): return (_raw_dst(x0.real, type, n, axis, nm, overwrite_x) + 1j * _raw_dst(x0.imag, type, n, axis, nm, overwrite_x)) else: return _raw_dst(x0, type, n, axis, nm, overwrite_x)
23,351
31.523677
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/fftpack/__init__.py
""" ================================================== Discrete Fourier transforms (:mod:`scipy.fftpack`) ================================================== Fast Fourier Transforms (FFTs) ============================== .. autosummary:: :toctree: generated/ fft - Fast (discrete) Fourier Transform (FFT) ifft - Inverse FFT fft2 - Two dimensional FFT ifft2 - Two dimensional inverse FFT fftn - n-dimensional FFT ifftn - n-dimensional inverse FFT rfft - FFT of strictly real-valued sequence irfft - Inverse of rfft dct - Discrete cosine transform idct - Inverse discrete cosine transform dctn - n-dimensional Discrete cosine transform idctn - n-dimensional Inverse discrete cosine transform dst - Discrete sine transform idst - Inverse discrete sine transform dstn - n-dimensional Discrete sine transform idstn - n-dimensional Inverse discrete sine transform Differential and pseudo-differential operators ============================================== .. autosummary:: :toctree: generated/ diff - Differentiation and integration of periodic sequences tilbert - Tilbert transform: cs_diff(x,h,h) itilbert - Inverse Tilbert transform: sc_diff(x,h,h) hilbert - Hilbert transform: cs_diff(x,inf,inf) ihilbert - Inverse Hilbert transform: sc_diff(x,inf,inf) cs_diff - cosh/sinh pseudo-derivative of periodic sequences sc_diff - sinh/cosh pseudo-derivative of periodic sequences ss_diff - sinh/sinh pseudo-derivative of periodic sequences cc_diff - cosh/cosh pseudo-derivative of periodic sequences shift - Shift periodic sequences Helper functions ================ .. 
autosummary:: :toctree: generated/ fftshift - Shift the zero-frequency component to the center of the spectrum ifftshift - The inverse of `fftshift` fftfreq - Return the Discrete Fourier Transform sample frequencies rfftfreq - DFT sample frequencies (for usage with rfft, irfft) next_fast_len - Find the optimal length to zero-pad an FFT for speed Note that ``fftshift``, ``ifftshift`` and ``fftfreq`` are numpy functions exposed by ``fftpack``; importing them from ``numpy`` should be preferred. Convolutions (:mod:`scipy.fftpack.convolve`) ============================================ .. module:: scipy.fftpack.convolve .. autosummary:: :toctree: generated/ convolve convolve_z init_convolution_kernel destroy_convolve_cache """ # List of possibly useful functions in scipy.fftpack._fftpack: # drfft # zfft # zrfft # zfftnd # destroy_drfft_cache # destroy_zfft_cache # destroy_zfftnd_cache from __future__ import division, print_function, absolute_import __all__ = ['fft','ifft','fftn','ifftn','rfft','irfft', 'fft2','ifft2', 'diff', 'tilbert','itilbert','hilbert','ihilbert', 'sc_diff','cs_diff','cc_diff','ss_diff', 'shift', 'fftfreq', 'rfftfreq', 'fftshift', 'ifftshift', 'next_fast_len', ] from .basic import * from .pseudo_diffs import * from .helper import * from numpy.dual import register_func for k in ['fft', 'ifft', 'fftn', 'ifftn', 'fft2', 'ifft2']: register_func(k, eval(k)) del k, register_func from .realtransforms import * __all__.extend(['dct', 'idct', 'dst', 'idst', 'dctn', 'idctn', 'dstn', 'idstn']) from scipy._lib._testutils import PytestTester test = PytestTester(__name__) del PytestTester
3,466
29.147826
78
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/fftpack/basic.py
""" Discrete Fourier Transforms - basic.py """ # Created by Pearu Peterson, August,September 2002 from __future__ import division, print_function, absolute_import __all__ = ['fft','ifft','fftn','ifftn','rfft','irfft', 'fft2','ifft2'] from numpy import zeros, swapaxes import numpy from . import _fftpack import atexit atexit.register(_fftpack.destroy_zfft_cache) atexit.register(_fftpack.destroy_zfftnd_cache) atexit.register(_fftpack.destroy_drfft_cache) atexit.register(_fftpack.destroy_cfft_cache) atexit.register(_fftpack.destroy_cfftnd_cache) atexit.register(_fftpack.destroy_rfft_cache) del atexit def istype(arr, typeclass): return issubclass(arr.dtype.type, typeclass) def _datacopied(arr, original): """ Strict check for `arr` not sharing any data with `original`, under the assumption that arr = asarray(original) """ if arr is original: return False if not isinstance(original, numpy.ndarray) and hasattr(original, '__array__'): return False return arr.base is None # XXX: single precision FFTs partially disabled due to accuracy issues # for large prime-sized inputs. # # See http://permalink.gmane.org/gmane.comp.python.scientific.devel/13834 # ("fftpack test failures for 0.8.0b1", Ralf Gommers, 17 Jun 2010, # @ scipy-dev) # # These should be re-enabled once the problems are resolved def _is_safe_size(n): """ Is the size of FFT such that FFTPACK can handle it in single precision with sufficient accuracy? 
Composite numbers of 2, 3, and 5 are accepted, as FFTPACK has those """ n = int(n) if n == 0: return True # Divide by 3 until you can't, then by 5 until you can't for c in (3, 5): while n % c == 0: n //= c # Return True if the remainder is a power of 2 return not n & (n-1) def _fake_crfft(x, n, *a, **kw): if _is_safe_size(n): return _fftpack.crfft(x, n, *a, **kw) else: return _fftpack.zrfft(x, n, *a, **kw).astype(numpy.complex64) def _fake_cfft(x, n, *a, **kw): if _is_safe_size(n): return _fftpack.cfft(x, n, *a, **kw) else: return _fftpack.zfft(x, n, *a, **kw).astype(numpy.complex64) def _fake_rfft(x, n, *a, **kw): if _is_safe_size(n): return _fftpack.rfft(x, n, *a, **kw) else: return _fftpack.drfft(x, n, *a, **kw).astype(numpy.float32) def _fake_cfftnd(x, shape, *a, **kw): if numpy.all(list(map(_is_safe_size, shape))): return _fftpack.cfftnd(x, shape, *a, **kw) else: return _fftpack.zfftnd(x, shape, *a, **kw).astype(numpy.complex64) _DTYPE_TO_FFT = { # numpy.dtype(numpy.float32): _fftpack.crfft, numpy.dtype(numpy.float32): _fake_crfft, numpy.dtype(numpy.float64): _fftpack.zrfft, # numpy.dtype(numpy.complex64): _fftpack.cfft, numpy.dtype(numpy.complex64): _fake_cfft, numpy.dtype(numpy.complex128): _fftpack.zfft, } _DTYPE_TO_RFFT = { # numpy.dtype(numpy.float32): _fftpack.rfft, numpy.dtype(numpy.float32): _fake_rfft, numpy.dtype(numpy.float64): _fftpack.drfft, } _DTYPE_TO_FFTN = { # numpy.dtype(numpy.complex64): _fftpack.cfftnd, numpy.dtype(numpy.complex64): _fake_cfftnd, numpy.dtype(numpy.complex128): _fftpack.zfftnd, # numpy.dtype(numpy.float32): _fftpack.cfftnd, numpy.dtype(numpy.float32): _fake_cfftnd, numpy.dtype(numpy.float64): _fftpack.zfftnd, } def _asfarray(x): """Like numpy asfarray, except that it does not modify x dtype if x is already an array with a float dtype, and do not cast complex types to real.""" if hasattr(x, "dtype") and x.dtype.char in numpy.typecodes["AllFloat"]: # 'dtype' attribute does not ensure that the # object is an ndarray (e.g. 
Series class # from the pandas library) if x.dtype == numpy.half: # no half-precision routines, so convert to single precision return numpy.asarray(x, dtype=numpy.float32) return numpy.asarray(x, dtype=x.dtype) else: # We cannot use asfarray directly because it converts sequences of # complex to sequence of real ret = numpy.asarray(x) if ret.dtype == numpy.half: return numpy.asarray(ret, dtype=numpy.float32) elif ret.dtype.char not in numpy.typecodes["AllFloat"]: return numpy.asfarray(x) return ret def _fix_shape(x, n, axis): """ Internal auxiliary function for _raw_fft, _raw_fftnd.""" s = list(x.shape) if s[axis] > n: index = [slice(None)]*len(s) index[axis] = slice(0,n) x = x[index] return x, False else: index = [slice(None)]*len(s) index[axis] = slice(0,s[axis]) s[axis] = n z = zeros(s,x.dtype.char) z[index] = x return z, True def _raw_fft(x, n, axis, direction, overwrite_x, work_function): """ Internal auxiliary function for fft, ifft, rfft, irfft.""" if n is None: n = x.shape[axis] elif n != x.shape[axis]: x, copy_made = _fix_shape(x,n,axis) overwrite_x = overwrite_x or copy_made if n < 1: raise ValueError("Invalid number of FFT data points " "(%d) specified." % n) if axis == -1 or axis == len(x.shape)-1: r = work_function(x,n,direction,overwrite_x=overwrite_x) else: x = swapaxes(x, axis, -1) r = work_function(x,n,direction,overwrite_x=overwrite_x) r = swapaxes(r, axis, -1) return r def fft(x, n=None, axis=-1, overwrite_x=False): """ Return discrete Fourier transform of real or complex sequence. The returned complex array contains ``y(0), y(1),..., y(n-1)`` where ``y(j) = (x * exp(-2*pi*sqrt(-1)*j*np.arange(n)/n)).sum()``. Parameters ---------- x : array_like Array to Fourier transform. n : int, optional Length of the Fourier transform. If ``n < x.shape[axis]``, `x` is truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The default results in ``n = x.shape[axis]``. 
axis : int, optional Axis along which the fft's are computed; the default is over the last axis (i.e., ``axis=-1``). overwrite_x : bool, optional If True, the contents of `x` can be destroyed; the default is False. Returns ------- z : complex ndarray with the elements:: [y(0),y(1),..,y(n/2),y(1-n/2),...,y(-1)] if n is even [y(0),y(1),..,y((n-1)/2),y(-(n-1)/2),...,y(-1)] if n is odd where:: y(j) = sum[k=0..n-1] x[k] * exp(-sqrt(-1)*j*k* 2*pi/n), j = 0..n-1 See Also -------- ifft : Inverse FFT rfft : FFT of a real sequence Notes ----- The packing of the result is "standard": If ``A = fft(a, n)``, then ``A[0]`` contains the zero-frequency term, ``A[1:n/2]`` contains the positive-frequency terms, and ``A[n/2:]`` contains the negative-frequency terms, in order of decreasingly negative frequency. So for an 8-point transform, the frequencies of the result are [0, 1, 2, 3, -4, -3, -2, -1]. To rearrange the fft output so that the zero-frequency component is centered, like [-4, -3, -2, -1, 0, 1, 2, 3], use `fftshift`. Both single and double precision routines are implemented. Half precision inputs will be converted to single precision. Non floating-point inputs will be converted to double precision. Long-double precision inputs are not supported. This function is most efficient when `n` is a power of two, and least efficient when `n` is prime. Note that if ``x`` is real-valued then ``A[j] == A[n-j].conjugate()``. If ``x`` is real-valued and ``n`` is even then ``A[n/2]`` is real. If the data type of `x` is real, a "real FFT" algorithm is automatically used, which roughly halves the computation time. To increase efficiency a little further, use `rfft`, which does the same calculation, but only outputs half of the symmetrical spectrum. If the data is both real and symmetrical, the `dct` can again double the efficiency, by generating half of the spectrum from half of the signal. 
Examples -------- >>> from scipy.fftpack import fft, ifft >>> x = np.arange(5) >>> np.allclose(fft(ifft(x)), x, atol=1e-15) # within numerical accuracy. True """ tmp = _asfarray(x) try: work_function = _DTYPE_TO_FFT[tmp.dtype] except KeyError: raise ValueError("type %s is not supported" % tmp.dtype) if not (istype(tmp, numpy.complex64) or istype(tmp, numpy.complex128)): overwrite_x = 1 overwrite_x = overwrite_x or _datacopied(tmp, x) if n is None: n = tmp.shape[axis] elif n != tmp.shape[axis]: tmp, copy_made = _fix_shape(tmp,n,axis) overwrite_x = overwrite_x or copy_made if n < 1: raise ValueError("Invalid number of FFT data points " "(%d) specified." % n) if axis == -1 or axis == len(tmp.shape) - 1: return work_function(tmp,n,1,0,overwrite_x) tmp = swapaxes(tmp, axis, -1) tmp = work_function(tmp,n,1,0,overwrite_x) return swapaxes(tmp, axis, -1) def ifft(x, n=None, axis=-1, overwrite_x=False): """ Return discrete inverse Fourier transform of real or complex sequence. The returned complex array contains ``y(0), y(1),..., y(n-1)`` where ``y(j) = (x * exp(2*pi*sqrt(-1)*j*np.arange(n)/n)).mean()``. Parameters ---------- x : array_like Transformed data to invert. n : int, optional Length of the inverse Fourier transform. If ``n < x.shape[axis]``, `x` is truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The default results in ``n = x.shape[axis]``. axis : int, optional Axis along which the ifft's are computed; the default is over the last axis (i.e., ``axis=-1``). overwrite_x : bool, optional If True, the contents of `x` can be destroyed; the default is False. Returns ------- ifft : ndarray of floats The inverse discrete Fourier transform. See Also -------- fft : Forward FFT Notes ----- Both single and double precision routines are implemented. Half precision inputs will be converted to single precision. Non floating-point inputs will be converted to double precision. Long-double precision inputs are not supported. 
This function is most efficient when `n` is a power of two, and least efficient when `n` is prime. If the data type of `x` is real, a "real IFFT" algorithm is automatically used, which roughly halves the computation time. Examples -------- >>> from scipy.fftpack import fft, ifft >>> import numpy as np >>> x = np.arange(5) >>> np.allclose(ifft(fft(x)), x, atol=1e-15) # within numerical accuracy. True """ tmp = _asfarray(x) try: work_function = _DTYPE_TO_FFT[tmp.dtype] except KeyError: raise ValueError("type %s is not supported" % tmp.dtype) if not (istype(tmp, numpy.complex64) or istype(tmp, numpy.complex128)): overwrite_x = 1 overwrite_x = overwrite_x or _datacopied(tmp, x) if n is None: n = tmp.shape[axis] elif n != tmp.shape[axis]: tmp, copy_made = _fix_shape(tmp,n,axis) overwrite_x = overwrite_x or copy_made if n < 1: raise ValueError("Invalid number of FFT data points " "(%d) specified." % n) if axis == -1 or axis == len(tmp.shape) - 1: return work_function(tmp,n,-1,1,overwrite_x) tmp = swapaxes(tmp, axis, -1) tmp = work_function(tmp,n,-1,1,overwrite_x) return swapaxes(tmp, axis, -1) def rfft(x, n=None, axis=-1, overwrite_x=False): """ Discrete Fourier transform of a real sequence. Parameters ---------- x : array_like, real-valued The data to transform. n : int, optional Defines the length of the Fourier transform. If `n` is not specified (the default) then ``n = x.shape[axis]``. If ``n < x.shape[axis]``, `x` is truncated, if ``n > x.shape[axis]``, `x` is zero-padded. axis : int, optional The axis along which the transform is applied. The default is the last axis. overwrite_x : bool, optional If set to true, the contents of `x` can be overwritten. Default is False. 
Returns ------- z : real ndarray The returned real array contains:: [y(0),Re(y(1)),Im(y(1)),...,Re(y(n/2))] if n is even [y(0),Re(y(1)),Im(y(1)),...,Re(y(n/2)),Im(y(n/2))] if n is odd where:: y(j) = sum[k=0..n-1] x[k] * exp(-sqrt(-1)*j*k*2*pi/n) j = 0..n-1 See Also -------- fft, irfft, numpy.fft.rfft Notes ----- Within numerical accuracy, ``y == rfft(irfft(y))``. Both single and double precision routines are implemented. Half precision inputs will be converted to single precision. Non floating-point inputs will be converted to double precision. Long-double precision inputs are not supported. To get an output with a complex datatype, consider using the related function `numpy.fft.rfft`. Examples -------- >>> from scipy.fftpack import fft, rfft >>> a = [9, -9, 1, 3] >>> fft(a) array([ 4. +0.j, 8.+12.j, 16. +0.j, 8.-12.j]) >>> rfft(a) array([ 4., 8., 12., 16.]) """ tmp = _asfarray(x) if not numpy.isrealobj(tmp): raise TypeError("1st argument must be real sequence") try: work_function = _DTYPE_TO_RFFT[tmp.dtype] except KeyError: raise ValueError("type %s is not supported" % tmp.dtype) overwrite_x = overwrite_x or _datacopied(tmp, x) return _raw_fft(tmp,n,axis,1,overwrite_x,work_function) def irfft(x, n=None, axis=-1, overwrite_x=False): """ Return inverse discrete Fourier transform of real sequence x. The contents of `x` are interpreted as the output of the `rfft` function. Parameters ---------- x : array_like Transformed data to invert. n : int, optional Length of the inverse Fourier transform. If n < x.shape[axis], x is truncated. If n > x.shape[axis], x is zero-padded. The default results in n = x.shape[axis]. axis : int, optional Axis along which the ifft's are computed; the default is over the last axis (i.e., axis=-1). overwrite_x : bool, optional If True, the contents of `x` can be destroyed; the default is False. Returns ------- irfft : ndarray of floats The inverse discrete Fourier transform. 
See Also -------- rfft, ifft, numpy.fft.irfft Notes ----- The returned real array contains:: [y(0),y(1),...,y(n-1)] where for n is even:: y(j) = 1/n (sum[k=1..n/2-1] (x[2*k-1]+sqrt(-1)*x[2*k]) * exp(sqrt(-1)*j*k* 2*pi/n) + c.c. + x[0] + (-1)**(j) x[n-1]) and for n is odd:: y(j) = 1/n (sum[k=1..(n-1)/2] (x[2*k-1]+sqrt(-1)*x[2*k]) * exp(sqrt(-1)*j*k* 2*pi/n) + c.c. + x[0]) c.c. denotes complex conjugate of preceding expression. For details on input parameters, see `rfft`. To process (conjugate-symmetric) frequency-domain data with a complex datatype, consider using the related function `numpy.fft.irfft`. """ tmp = _asfarray(x) if not numpy.isrealobj(tmp): raise TypeError("1st argument must be real sequence") try: work_function = _DTYPE_TO_RFFT[tmp.dtype] except KeyError: raise ValueError("type %s is not supported" % tmp.dtype) overwrite_x = overwrite_x or _datacopied(tmp, x) return _raw_fft(tmp,n,axis,-1,overwrite_x,work_function) def _raw_fftnd(x, s, axes, direction, overwrite_x, work_function): """ Internal auxiliary function for fftnd, ifftnd.""" if s is None: if axes is None: s = x.shape else: s = numpy.take(x.shape, axes) s = tuple(s) if axes is None: noaxes = True axes = list(range(-x.ndim, 0)) else: noaxes = False if len(axes) != len(s): raise ValueError("when given, axes and shape arguments " "have to be of the same length") for dim in s: if dim < 1: raise ValueError("Invalid number of FFT data points " "(%s) specified." % (s,)) # No need to swap axes, array is in C order if noaxes: for i in axes: x, copy_made = _fix_shape(x, s[i], i) overwrite_x = overwrite_x or copy_made return work_function(x,s,direction,overwrite_x=overwrite_x) # We ordered axes, because the code below to push axes at the end of the # array assumes axes argument is in ascending order. a = numpy.array(axes, numpy.intc) abs_axes = numpy.where(a < 0, a + x.ndim, a) id_ = numpy.argsort(abs_axes) axes = [axes[i] for i in id_] s = [s[i] for i in id_] # Swap the request axes, last first (i.e. 
First swap the axis which ends up # at -1, then at -2, etc...), such as the request axes on which the # operation is carried become the last ones for i in range(1, len(axes)+1): x = numpy.swapaxes(x, axes[-i], -i) # We can now operate on the axes waxes, the p last axes (p = len(axes)), by # fixing the shape of the input array to 1 for any axis the fft is not # carried upon. waxes = list(range(x.ndim - len(axes), x.ndim)) shape = numpy.ones(x.ndim) shape[waxes] = s for i in range(len(waxes)): x, copy_made = _fix_shape(x, s[i], waxes[i]) overwrite_x = overwrite_x or copy_made r = work_function(x, shape, direction, overwrite_x=overwrite_x) # reswap in the reverse order (first axis first, etc...) to get original # order for i in range(len(axes), 0, -1): r = numpy.swapaxes(r, -i, axes[-i]) return r def fftn(x, shape=None, axes=None, overwrite_x=False): """ Return multidimensional discrete Fourier transform. The returned array contains:: y[j_1,..,j_d] = sum[k_1=0..n_1-1, ..., k_d=0..n_d-1] x[k_1,..,k_d] * prod[i=1..d] exp(-sqrt(-1)*2*pi/n_i * j_i * k_i) where d = len(x.shape) and n = x.shape. Parameters ---------- x : array_like The (n-dimensional) array to transform. shape : tuple of ints, optional The shape of the result. If both `shape` and `axes` (see below) are None, `shape` is ``x.shape``; if `shape` is None but `axes` is not None, then `shape` is ``scipy.take(x.shape, axes, axis=0)``. If ``shape[i] > x.shape[i]``, the i-th dimension is padded with zeros. If ``shape[i] < x.shape[i]``, the i-th dimension is truncated to length ``shape[i]``. axes : array_like of ints, optional The axes of `x` (`y` if `shape` is not None) along which the transform is applied. overwrite_x : bool, optional If True, the contents of `x` can be destroyed. Default is False. Returns ------- y : complex-valued n-dimensional numpy array The (n-dimensional) DFT of the input array. See Also -------- ifftn Notes ----- If ``x`` is real-valued, then ``y[..., j_i, ...] 
== y[..., n_i-j_i, ...].conjugate()``. Both single and double precision routines are implemented. Half precision inputs will be converted to single precision. Non floating-point inputs will be converted to double precision. Long-double precision inputs are not supported. Examples -------- >>> from scipy.fftpack import fftn, ifftn >>> y = (-np.arange(16), 8 - np.arange(16), np.arange(16)) >>> np.allclose(y, fftn(ifftn(y))) True """ return _raw_fftn_dispatch(x, shape, axes, overwrite_x, 1) def _raw_fftn_dispatch(x, shape, axes, overwrite_x, direction): tmp = _asfarray(x) try: work_function = _DTYPE_TO_FFTN[tmp.dtype] except KeyError: raise ValueError("type %s is not supported" % tmp.dtype) if not (istype(tmp, numpy.complex64) or istype(tmp, numpy.complex128)): overwrite_x = 1 overwrite_x = overwrite_x or _datacopied(tmp, x) return _raw_fftnd(tmp,shape,axes,direction,overwrite_x,work_function) def ifftn(x, shape=None, axes=None, overwrite_x=False): """ Return inverse multi-dimensional discrete Fourier transform of arbitrary type sequence x. The returned array contains:: y[j_1,..,j_d] = 1/p * sum[k_1=0..n_1-1, ..., k_d=0..n_d-1] x[k_1,..,k_d] * prod[i=1..d] exp(sqrt(-1)*2*pi/n_i * j_i * k_i) where ``d = len(x.shape)``, ``n = x.shape``, and ``p = prod[i=1..d] n_i``. For description of parameters see `fftn`. See Also -------- fftn : for detailed information. Examples -------- >>> from scipy.fftpack import fftn, ifftn >>> import numpy as np >>> y = (-np.arange(16), 8 - np.arange(16), np.arange(16)) >>> np.allclose(y, ifftn(fftn(y))) True """ return _raw_fftn_dispatch(x, shape, axes, overwrite_x, -1) def fft2(x, shape=None, axes=(-2,-1), overwrite_x=False): """ 2-D discrete Fourier transform. Return the two-dimensional discrete Fourier transform of the 2-D argument `x`. See Also -------- fftn : for detailed information. 
""" return fftn(x,shape,axes,overwrite_x) def ifft2(x, shape=None, axes=(-2,-1), overwrite_x=False): """ 2-D discrete inverse Fourier transform of real or complex sequence. Return inverse two-dimensional discrete Fourier transform of arbitrary type sequence x. See `ifft` for more information. See also -------- fft2, ifft """ return ifftn(x,shape,axes,overwrite_x)
22,156
30.032213
82
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/fftpack/tests/test_real_transforms.py
from __future__ import division, print_function, absolute_import from os.path import join, dirname import numpy as np from numpy.testing import assert_array_almost_equal, assert_equal from pytest import raises as assert_raises from scipy.fftpack.realtransforms import ( dct, idct, dst, idst, dctn, idctn, dstn, idstn) # Matlab reference data MDATA = np.load(join(dirname(__file__), 'test.npz')) X = [MDATA['x%d' % i] for i in range(8)] Y = [MDATA['y%d' % i] for i in range(8)] # FFTW reference data: the data are organized as follows: # * SIZES is an array containing all available sizes # * for every type (1, 2, 3, 4) and every size, the array dct_type_size # contains the output of the DCT applied to the input np.linspace(0, size-1, # size) FFTWDATA_DOUBLE = np.load(join(dirname(__file__), 'fftw_double_ref.npz')) FFTWDATA_SINGLE = np.load(join(dirname(__file__), 'fftw_single_ref.npz')) FFTWDATA_SIZES = FFTWDATA_DOUBLE['sizes'] def fftw_dct_ref(type, size, dt): x = np.linspace(0, size-1, size).astype(dt) dt = np.result_type(np.float32, dt) if dt == np.double: data = FFTWDATA_DOUBLE elif dt == np.float32: data = FFTWDATA_SINGLE else: raise ValueError() y = (data['dct_%d_%d' % (type, size)]).astype(dt) return x, y, dt def fftw_dst_ref(type, size, dt): x = np.linspace(0, size-1, size).astype(dt) dt = np.result_type(np.float32, dt) if dt == np.double: data = FFTWDATA_DOUBLE elif dt == np.float32: data = FFTWDATA_SINGLE else: raise ValueError() y = (data['dst_%d_%d' % (type, size)]).astype(dt) return x, y, dt def dct_2d_ref(x, **kwargs): """ used as a reference in testing dct2. """ x = np.array(x, copy=True) for row in range(x.shape[0]): x[row, :] = dct(x[row, :], **kwargs) for col in range(x.shape[1]): x[:, col] = dct(x[:, col], **kwargs) return x def idct_2d_ref(x, **kwargs): """ used as a reference in testing idct2. 
""" x = np.array(x, copy=True) for row in range(x.shape[0]): x[row, :] = idct(x[row, :], **kwargs) for col in range(x.shape[1]): x[:, col] = idct(x[:, col], **kwargs) return x def dst_2d_ref(x, **kwargs): """ used as a reference in testing dst2. """ x = np.array(x, copy=True) for row in range(x.shape[0]): x[row, :] = dst(x[row, :], **kwargs) for col in range(x.shape[1]): x[:, col] = dst(x[:, col], **kwargs) return x def idst_2d_ref(x, **kwargs): """ used as a reference in testing idst2. """ x = np.array(x, copy=True) for row in range(x.shape[0]): x[row, :] = idst(x[row, :], **kwargs) for col in range(x.shape[1]): x[:, col] = idst(x[:, col], **kwargs) return x class TestComplex(object): def test_dct_complex64(self): y = dct(1j*np.arange(5, dtype=np.complex64)) x = 1j*dct(np.arange(5)) assert_array_almost_equal(x, y) def test_dct_complex(self): y = dct(np.arange(5)*1j) x = 1j*dct(np.arange(5)) assert_array_almost_equal(x, y) def test_idct_complex(self): y = idct(np.arange(5)*1j) x = 1j*idct(np.arange(5)) assert_array_almost_equal(x, y) def test_dst_complex64(self): y = dst(np.arange(5, dtype=np.complex64)*1j) x = 1j*dst(np.arange(5)) assert_array_almost_equal(x, y) def test_dst_complex(self): y = dst(np.arange(5)*1j) x = 1j*dst(np.arange(5)) assert_array_almost_equal(x, y) def test_idst_complex(self): y = idst(np.arange(5)*1j) x = 1j*idst(np.arange(5)) assert_array_almost_equal(x, y) class _TestDCTBase(object): def setup_method(self): self.rdt = None self.dec = 14 self.type = None def test_definition(self): for i in FFTWDATA_SIZES: x, yr, dt = fftw_dct_ref(self.type, i, self.rdt) y = dct(x, type=self.type) assert_equal(y.dtype, dt) # XXX: we divide by np.max(y) because the tests fail otherwise. We # should really use something like assert_array_approx_equal. The # difference is due to fftw using a better algorithm w.r.t error # propagation compared to the ones from fftpack. 
assert_array_almost_equal(y / np.max(y), yr / np.max(y), decimal=self.dec, err_msg="Size %d failed" % i) def test_axis(self): nt = 2 for i in [7, 8, 9, 16, 32, 64]: x = np.random.randn(nt, i) y = dct(x, type=self.type) for j in range(nt): assert_array_almost_equal(y[j], dct(x[j], type=self.type), decimal=self.dec) x = x.T y = dct(x, axis=0, type=self.type) for j in range(nt): assert_array_almost_equal(y[:,j], dct(x[:,j], type=self.type), decimal=self.dec) class _TestDCTIIBase(_TestDCTBase): def test_definition_matlab(self): # Test correspondence with matlab (orthornomal mode). for i in range(len(X)): dt = np.result_type(np.float32, self.rdt) x = np.array(X[i], dtype=dt) yr = Y[i] y = dct(x, norm="ortho", type=2) assert_equal(y.dtype, dt) assert_array_almost_equal(y, yr, decimal=self.dec) class _TestDCTIIIBase(_TestDCTBase): def test_definition_ortho(self): # Test orthornomal mode. for i in range(len(X)): x = np.array(X[i], dtype=self.rdt) dt = np.result_type(np.float32, self.rdt) y = dct(x, norm='ortho', type=2) xi = dct(y, norm="ortho", type=3) assert_equal(xi.dtype, dt) assert_array_almost_equal(xi, x, decimal=self.dec) class TestDCTIDouble(_TestDCTBase): def setup_method(self): self.rdt = np.double self.dec = 10 self.type = 1 class TestDCTIFloat(_TestDCTBase): def setup_method(self): self.rdt = np.float32 self.dec = 5 self.type = 1 class TestDCTIInt(_TestDCTBase): def setup_method(self): self.rdt = int self.dec = 5 self.type = 1 class TestDCTIIDouble(_TestDCTIIBase): def setup_method(self): self.rdt = np.double self.dec = 10 self.type = 2 class TestDCTIIFloat(_TestDCTIIBase): def setup_method(self): self.rdt = np.float32 self.dec = 5 self.type = 2 class TestDCTIIInt(_TestDCTIIBase): def setup_method(self): self.rdt = int self.dec = 5 self.type = 2 class TestDCTIIIDouble(_TestDCTIIIBase): def setup_method(self): self.rdt = np.double self.dec = 14 self.type = 3 class TestDCTIIIFloat(_TestDCTIIIBase): def setup_method(self): self.rdt = np.float32 self.dec = 5 
self.type = 3 class TestDCTIIIInt(_TestDCTIIIBase): def setup_method(self): self.rdt = int self.dec = 5 self.type = 3 class _TestIDCTBase(object): def setup_method(self): self.rdt = None self.dec = 14 self.type = None def test_definition(self): for i in FFTWDATA_SIZES: xr, yr, dt = fftw_dct_ref(self.type, i, self.rdt) x = idct(yr, type=self.type) if self.type == 1: x /= 2 * (i-1) else: x /= 2 * i assert_equal(x.dtype, dt) # XXX: we divide by np.max(y) because the tests fail otherwise. We # should really use something like assert_array_approx_equal. The # difference is due to fftw using a better algorithm w.r.t error # propagation compared to the ones from fftpack. assert_array_almost_equal(x / np.max(x), xr / np.max(x), decimal=self.dec, err_msg="Size %d failed" % i) class TestIDCTIDouble(_TestIDCTBase): def setup_method(self): self.rdt = np.double self.dec = 10 self.type = 1 class TestIDCTIFloat(_TestIDCTBase): def setup_method(self): self.rdt = np.float32 self.dec = 4 self.type = 1 class TestIDCTIInt(_TestIDCTBase): def setup_method(self): self.rdt = int self.dec = 4 self.type = 1 class TestIDCTIIDouble(_TestIDCTBase): def setup_method(self): self.rdt = np.double self.dec = 10 self.type = 2 class TestIDCTIIFloat(_TestIDCTBase): def setup_method(self): self.rdt = np.float32 self.dec = 5 self.type = 2 class TestIDCTIIInt(_TestIDCTBase): def setup_method(self): self.rdt = int self.dec = 5 self.type = 2 class TestIDCTIIIDouble(_TestIDCTBase): def setup_method(self): self.rdt = np.double self.dec = 14 self.type = 3 class TestIDCTIIIFloat(_TestIDCTBase): def setup_method(self): self.rdt = np.float32 self.dec = 5 self.type = 3 class TestIDCTIIIInt(_TestIDCTBase): def setup_method(self): self.rdt = int self.dec = 5 self.type = 3 class _TestDSTBase(object): def setup_method(self): self.rdt = None # dtype self.dec = None # number of decimals to match self.type = None # dst type def test_definition(self): for i in FFTWDATA_SIZES: xr, yr, dt = fftw_dst_ref(self.type, i, 
self.rdt) y = dst(xr, type=self.type) assert_equal(y.dtype, dt) # XXX: we divide by np.max(y) because the tests fail otherwise. We # should really use something like assert_array_approx_equal. The # difference is due to fftw using a better algorithm w.r.t error # propagation compared to the ones from fftpack. assert_array_almost_equal(y / np.max(y), yr / np.max(y), decimal=self.dec, err_msg="Size %d failed" % i) class TestDSTIDouble(_TestDSTBase): def setup_method(self): self.rdt = np.double self.dec = 14 self.type = 1 class TestDSTIFloat(_TestDSTBase): def setup_method(self): self.rdt = np.float32 self.dec = 5 self.type = 1 class TestDSTIInt(_TestDSTBase): def setup_method(self): self.rdt = int self.dec = 5 self.type = 1 class TestDSTIIDouble(_TestDSTBase): def setup_method(self): self.rdt = np.double self.dec = 14 self.type = 2 class TestDSTIIFloat(_TestDSTBase): def setup_method(self): self.rdt = np.float32 self.dec = 6 self.type = 2 class TestDSTIIInt(_TestDSTBase): def setup_method(self): self.rdt = int self.dec = 6 self.type = 2 class TestDSTIIIDouble(_TestDSTBase): def setup_method(self): self.rdt = np.double self.dec = 14 self.type = 3 class TestDSTIIIFloat(_TestDSTBase): def setup_method(self): self.rdt = np.float32 self.dec = 7 self.type = 3 class TestDSTIIIInt(_TestDSTBase): def setup_method(self): self.rdt = int self.dec = 7 self.type = 3 class _TestIDSTBase(object): def setup_method(self): self.rdt = None self.dec = None self.type = None def test_definition(self): for i in FFTWDATA_SIZES: xr, yr, dt = fftw_dst_ref(self.type, i, self.rdt) x = idst(yr, type=self.type) if self.type == 1: x /= 2 * (i+1) else: x /= 2 * i assert_equal(x.dtype, dt) # XXX: we divide by np.max(x) because the tests fail otherwise. We # should really use something like assert_array_approx_equal. The # difference is due to fftw using a better algorithm w.r.t error # propagation compared to the ones from fftpack. 
assert_array_almost_equal(x / np.max(x), xr / np.max(x), decimal=self.dec, err_msg="Size %d failed" % i) class TestIDSTIDouble(_TestIDSTBase): def setup_method(self): self.rdt = np.double self.dec = 12 self.type = 1 class TestIDSTIFloat(_TestIDSTBase): def setup_method(self): self.rdt = np.float32 self.dec = 4 self.type = 1 class TestIDSTIInt(_TestIDSTBase): def setup_method(self): self.rdt = int self.dec = 4 self.type = 1 class TestIDSTIIDouble(_TestIDSTBase): def setup_method(self): self.rdt = np.double self.dec = 14 self.type = 2 class TestIDSTIIFloat(_TestIDSTBase): def setup_method(self): self.rdt = np.float32 self.dec = 6 self.type = 2 class TestIDSTIIInt(_TestIDSTBase): def setup_method(self): self.rdt = int self.dec = 6 self.type = 2 class TestIDSTIIIDouble(_TestIDSTBase): def setup_method(self): self.rdt = np.double self.dec = 14 self.type = 3 class TestIDSTIIIFloat(_TestIDSTBase): def setup_method(self): self.rdt = np.float32 self.dec = 6 self.type = 3 class TestIDSTIIIInt(_TestIDSTBase): def setup_method(self): self.rdt = int self.dec = 6 self.type = 3 class TestOverwrite(object): """Check input overwrite behavior """ real_dtypes = [np.float32, np.float64] def _check(self, x, routine, type, fftsize, axis, norm, overwrite_x, should_overwrite, **kw): x2 = x.copy() routine(x2, type, fftsize, axis, norm, overwrite_x=overwrite_x) sig = "%s(%s%r, %r, axis=%r, overwrite_x=%r)" % ( routine.__name__, x.dtype, x.shape, fftsize, axis, overwrite_x) if not should_overwrite: assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig) def _check_1d(self, routine, dtype, shape, axis, overwritable_dtypes): np.random.seed(1234) if np.issubdtype(dtype, np.complexfloating): data = np.random.randn(*shape) + 1j*np.random.randn(*shape) else: data = np.random.randn(*shape) data = data.astype(dtype) for type in [1, 2, 3]: for overwrite_x in [True, False]: for norm in [None, 'ortho']: if type == 1 and norm == 'ortho': continue should_overwrite = (overwrite_x and dtype in 
overwritable_dtypes and (len(shape) == 1 or (axis % len(shape) == len(shape)-1 ))) self._check(data, routine, type, None, axis, norm, overwrite_x, should_overwrite) def test_dct(self): overwritable = self.real_dtypes for dtype in self.real_dtypes: self._check_1d(dct, dtype, (16,), -1, overwritable) self._check_1d(dct, dtype, (16, 2), 0, overwritable) self._check_1d(dct, dtype, (2, 16), 1, overwritable) def test_idct(self): overwritable = self.real_dtypes for dtype in self.real_dtypes: self._check_1d(idct, dtype, (16,), -1, overwritable) self._check_1d(idct, dtype, (16, 2), 0, overwritable) self._check_1d(idct, dtype, (2, 16), 1, overwritable) def test_dst(self): overwritable = self.real_dtypes for dtype in self.real_dtypes: self._check_1d(dst, dtype, (16,), -1, overwritable) self._check_1d(dst, dtype, (16, 2), 0, overwritable) self._check_1d(dst, dtype, (2, 16), 1, overwritable) def test_idst(self): overwritable = self.real_dtypes for dtype in self.real_dtypes: self._check_1d(idst, dtype, (16,), -1, overwritable) self._check_1d(idst, dtype, (16, 2), 0, overwritable) self._check_1d(idst, dtype, (2, 16), 1, overwritable) class Test_DCTN_IDCTN(object): dec = 14 types = [1, 2, 3] norms = [None, 'ortho'] rstate = np.random.RandomState(1234) shape = (32, 16) data = rstate.randn(*shape) # Sets of functions to test function_sets = [dict(forward=dctn, inverse=idctn, forward_ref=dct_2d_ref, inverse_ref=idct_2d_ref), dict(forward=dstn, inverse=idstn, forward_ref=dst_2d_ref, inverse_ref=idst_2d_ref), ] def test_axes_round_trip(self): norm = 'ortho' for function_set in self.function_sets: fforward = function_set['forward'] finverse = function_set['inverse'] for axes in [None, (1, ), (0, ), (0, 1), (-2, -1)]: for dct_type in self.types: if norm == 'ortho' and dct_type == 1: continue # 'ortho' not supported by DCT-I tmp = fforward(self.data, type=dct_type, axes=axes, norm=norm) tmp = finverse(tmp, type=dct_type, axes=axes, norm=norm) assert_array_almost_equal(self.data, tmp, 
decimal=self.dec) def test_dctn_vs_2d_reference(self): for function_set in self.function_sets: fforward = function_set['forward'] fforward_ref = function_set['forward_ref'] for dct_type in self.types: for norm in self.norms: if norm == 'ortho' and dct_type == 1: continue # 'ortho' not supported by DCT-I y1 = fforward(self.data, type=dct_type, axes=None, norm=norm) y2 = fforward_ref(self.data, type=dct_type, norm=norm) assert_array_almost_equal(y1, y2, decimal=11) def test_idctn_vs_2d_reference(self): for function_set in self.function_sets: finverse = function_set['inverse'] finverse_ref = function_set['inverse_ref'] for dct_type in self.types: for norm in self.norms: print(function_set, dct_type, norm) if norm == 'ortho' and dct_type == 1: continue # 'ortho' not supported by DCT-I fdata = dctn(self.data, type=dct_type, norm=norm) y1 = finverse(fdata, type=dct_type, norm=norm) y2 = finverse_ref(fdata, type=dct_type, norm=norm) assert_array_almost_equal(y1, y2, decimal=11) def test_axes_and_shape(self): for function_set in self.function_sets: fforward = function_set['forward'] finverse = function_set['inverse'] # shape must match the number of axes assert_raises(ValueError, fforward, self.data, shape=(self.data.shape[0], ), axes=(0, 1)) assert_raises(ValueError, fforward, self.data, shape=(self.data.shape[0], ), axes=None) assert_raises(ValueError, fforward, self.data, shape=self.data.shape, axes=(0, )) # shape must be a tuple assert_raises(TypeError, fforward, self.data, shape=self.data.shape[0], axes=(0, 1)) # shape=None works with a subset of axes for axes in [(0, ), (1, )]: tmp = fforward(self.data, shape=None, axes=axes, norm='ortho') tmp = finverse(tmp, shape=None, axes=axes, norm='ortho') assert_array_almost_equal(self.data, tmp, decimal=self.dec) # non-default shape tmp = fforward(self.data, shape=(128, 128), axes=None) assert_equal(tmp.shape, (128, 128))
20,163
29.831804
86
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/fftpack/tests/gen_fftw_ref.py
from __future__ import division, print_function, absolute_import from subprocess import Popen, PIPE, STDOUT import numpy as np SZ = [2, 3, 4, 8, 12, 15, 16, 17, 32, 64, 128, 256, 512, 1024] def gen_data(dt): arrays = {} if dt == np.double: pg = './fftw_double' elif dt == np.float32: pg = './fftw_single' else: raise ValueError("unknown: %s" % dt) # Generate test data using FFTW for reference for type in [1, 2, 3, 4, 5, 6, 7, 8]: arrays[type] = {} for sz in SZ: a = Popen([pg, str(type), str(sz)], stdout=PIPE, stderr=STDOUT) st = [i.strip() for i in a.stdout.readlines()] arrays[type][sz] = np.fromstring(",".join(st), sep=',', dtype=dt) return arrays # generate single precision data data = gen_data(np.float32) filename = 'fftw_single_ref' # Save ref data into npz format d = {'sizes': SZ} for type in [1, 2, 3, 4]: for sz in SZ: d['dct_%d_%d' % (type, sz)] = data[type][sz] d['sizes'] = SZ for type in [5, 6, 7, 8]: for sz in SZ: d['dst_%d_%d' % (type-4, sz)] = data[type][sz] np.savez(filename, **d) # generate double precision data data = gen_data(np.float64) filename = 'fftw_double_ref' # Save ref data into npz format d = {'sizes': SZ} for type in [1, 2, 3, 4]: for sz in SZ: d['dct_%d_%d' % (type, sz)] = data[type][sz] d['sizes'] = SZ for type in [5, 6, 7, 8]: for sz in SZ: d['dst_%d_%d' % (type-4, sz)] = data[type][sz] np.savez(filename, **d)
1,524
24.416667
77
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/fftpack/tests/test_basic.py
# Created by Pearu Peterson, September 2002 from __future__ import division, print_function, absolute_import __usage__ = """ Build fftpack: python setup_fftpack.py build Run tests if scipy is installed: python -c 'import scipy;scipy.fftpack.test()' Run tests if fftpack is not installed: python tests/test_basic.py """ from numpy.testing import (assert_equal, assert_array_almost_equal, assert_array_almost_equal_nulp, assert_array_less) import pytest from pytest import raises as assert_raises from scipy.fftpack import ifft,fft,fftn,ifftn,rfft,irfft, fft2 from scipy.fftpack import _fftpack as fftpack from scipy.fftpack.basic import _is_safe_size from numpy import (arange, add, array, asarray, zeros, dot, exp, pi, swapaxes, double, cdouble) import numpy as np import numpy.fft # "large" composite numbers supported by FFTPACK LARGE_COMPOSITE_SIZES = [ 2**13, 2**5 * 3**5, 2**3 * 3**3 * 5**2, ] SMALL_COMPOSITE_SIZES = [ 2, 2*3*5, 2*2*3*3, ] # prime LARGE_PRIME_SIZES = [ 2011 ] SMALL_PRIME_SIZES = [ 29 ] from numpy.random import rand def _assert_close_in_norm(x, y, rtol, size, rdt): # helper function for testing err_msg = "size: %s rdt: %s" % (size, rdt) assert_array_less(np.linalg.norm(x - y), rtol*np.linalg.norm(x), err_msg) def random(size): return rand(*size) def get_mat(n): data = arange(n) data = add.outer(data,data) return data def direct_dft(x): x = asarray(x) n = len(x) y = zeros(n,dtype=cdouble) w = -arange(n)*(2j*pi/n) for i in range(n): y[i] = dot(exp(i*w),x) return y def direct_idft(x): x = asarray(x) n = len(x) y = zeros(n,dtype=cdouble) w = arange(n)*(2j*pi/n) for i in range(n): y[i] = dot(exp(i*w),x)/n return y def direct_dftn(x): x = asarray(x) for axis in range(len(x.shape)): x = fft(x,axis=axis) return x def direct_idftn(x): x = asarray(x) for axis in range(len(x.shape)): x = ifft(x,axis=axis) return x def direct_rdft(x): x = asarray(x) n = len(x) w = -arange(n)*(2j*pi/n) r = zeros(n,dtype=double) for i in range(n//2+1): y = dot(exp(i*w),x) if i: r[2*i-1] 
= y.real if 2*i < n: r[2*i] = y.imag else: r[0] = y.real return r def direct_irdft(x): x = asarray(x) n = len(x) x1 = zeros(n,dtype=cdouble) for i in range(n//2+1): if i: if 2*i < n: x1[i] = x[2*i-1] + 1j*x[2*i] x1[n-i] = x[2*i-1] - 1j*x[2*i] else: x1[i] = x[2*i-1] else: x1[0] = x[0] return direct_idft(x1).real class _TestFFTBase(object): def setup_method(self): self.cdt = None self.rdt = None np.random.seed(1234) def test_definition(self): x = np.array([1,2,3,4+1j,1,2,3,4+2j], dtype=self.cdt) y = fft(x) assert_equal(y.dtype, self.cdt) y1 = direct_dft(x) assert_array_almost_equal(y,y1) x = np.array([1,2,3,4+0j,5], dtype=self.cdt) assert_array_almost_equal(fft(x),direct_dft(x)) def test_n_argument_real(self): x1 = np.array([1,2,3,4], dtype=self.rdt) x2 = np.array([1,2,3,4], dtype=self.rdt) y = fft([x1,x2],n=4) assert_equal(y.dtype, self.cdt) assert_equal(y.shape,(2,4)) assert_array_almost_equal(y[0],direct_dft(x1)) assert_array_almost_equal(y[1],direct_dft(x2)) def _test_n_argument_complex(self): x1 = np.array([1,2,3,4+1j], dtype=self.cdt) x2 = np.array([1,2,3,4+1j], dtype=self.cdt) y = fft([x1,x2],n=4) assert_equal(y.dtype, self.cdt) assert_equal(y.shape,(2,4)) assert_array_almost_equal(y[0],direct_dft(x1)) assert_array_almost_equal(y[1],direct_dft(x2)) def test_djbfft(self): for i in range(2,14): n = 2**i x = list(range(n)) y = fftpack.zfft(x) y2 = numpy.fft.fft(x) assert_array_almost_equal(y,y2) y = fftpack.zrfft(x) assert_array_almost_equal(y,y2) def test_invalid_sizes(self): assert_raises(ValueError, fft, []) assert_raises(ValueError, fft, [[1,1],[2,2]], -5) def test__is_safe_size(self): vals = [(0, True), (1, True), (2, True), (3, True), (4, True), (5, True), (6, True), (7, False), (15, True), (16, True), (17, False), (18, True), (21, False), (25, True), (50, True), (120, True), (210, False)] for n, is_safe in vals: assert_equal(_is_safe_size(n), is_safe) class TestDoubleFFT(_TestFFTBase): def setup_method(self): self.cdt = np.cdouble self.rdt = np.double 
class TestSingleFFT(_TestFFTBase): def setup_method(self): self.cdt = np.complex64 self.rdt = np.float32 @pytest.mark.xfail(run=False, reason="single-precision FFT implementation is partially disabled, until accuracy issues with large prime powers are resolved") def test_notice(self): pass class TestFloat16FFT(object): def test_1_argument_real(self): x1 = np.array([1, 2, 3, 4], dtype=np.float16) y = fft(x1, n=4) assert_equal(y.dtype, np.complex64) assert_equal(y.shape, (4, )) assert_array_almost_equal(y, direct_dft(x1.astype(np.float32))) def test_n_argument_real(self): x1 = np.array([1, 2, 3, 4], dtype=np.float16) x2 = np.array([1, 2, 3, 4], dtype=np.float16) y = fft([x1, x2], n=4) assert_equal(y.dtype, np.complex64) assert_equal(y.shape, (2, 4)) assert_array_almost_equal(y[0], direct_dft(x1.astype(np.float32))) assert_array_almost_equal(y[1], direct_dft(x2.astype(np.float32))) class _TestIFFTBase(object): def setup_method(self): np.random.seed(1234) def test_definition(self): x = np.array([1,2,3,4+1j,1,2,3,4+2j], self.cdt) y = ifft(x) y1 = direct_idft(x) assert_equal(y.dtype, self.cdt) assert_array_almost_equal(y,y1) x = np.array([1,2,3,4+0j,5], self.cdt) assert_array_almost_equal(ifft(x),direct_idft(x)) def test_definition_real(self): x = np.array([1,2,3,4,1,2,3,4], self.rdt) y = ifft(x) assert_equal(y.dtype, self.cdt) y1 = direct_idft(x) assert_array_almost_equal(y,y1) x = np.array([1,2,3,4,5], dtype=self.rdt) assert_equal(y.dtype, self.cdt) assert_array_almost_equal(ifft(x),direct_idft(x)) def test_djbfft(self): for i in range(2,14): n = 2**i x = list(range(n)) y = fftpack.zfft(x,direction=-1) y2 = numpy.fft.ifft(x) assert_array_almost_equal(y,y2) y = fftpack.zrfft(x,direction=-1) assert_array_almost_equal(y,y2) def test_random_complex(self): for size in [1,51,111,100,200,64,128,256,1024]: x = random([size]).astype(self.cdt) x = random([size]).astype(self.cdt) + 1j*x y1 = ifft(fft(x)) y2 = fft(ifft(x)) assert_equal(y1.dtype, self.cdt) assert_equal(y2.dtype, 
self.cdt) assert_array_almost_equal(y1, x) assert_array_almost_equal(y2, x) def test_random_real(self): for size in [1,51,111,100,200,64,128,256,1024]: x = random([size]).astype(self.rdt) y1 = ifft(fft(x)) y2 = fft(ifft(x)) assert_equal(y1.dtype, self.cdt) assert_equal(y2.dtype, self.cdt) assert_array_almost_equal(y1, x) assert_array_almost_equal(y2, x) def test_size_accuracy(self): # Sanity check for the accuracy for prime and non-prime sized inputs if self.rdt == np.float32: rtol = 1e-5 elif self.rdt == np.float64: rtol = 1e-10 for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES: np.random.seed(1234) x = np.random.rand(size).astype(self.rdt) y = ifft(fft(x)) _assert_close_in_norm(x, y, rtol, size, self.rdt) y = fft(ifft(x)) _assert_close_in_norm(x, y, rtol, size, self.rdt) x = (x + 1j*np.random.rand(size)).astype(self.cdt) y = ifft(fft(x)) _assert_close_in_norm(x, y, rtol, size, self.rdt) y = fft(ifft(x)) _assert_close_in_norm(x, y, rtol, size, self.rdt) def test_invalid_sizes(self): assert_raises(ValueError, ifft, []) assert_raises(ValueError, ifft, [[1,1],[2,2]], -5) class TestDoubleIFFT(_TestIFFTBase): def setup_method(self): self.cdt = np.cdouble self.rdt = np.double class TestSingleIFFT(_TestIFFTBase): def setup_method(self): self.cdt = np.complex64 self.rdt = np.float32 class _TestRFFTBase(object): def setup_method(self): np.random.seed(1234) def test_definition(self): for t in [[1, 2, 3, 4, 1, 2, 3, 4], [1, 2, 3, 4, 1, 2, 3, 4, 5]]: x = np.array(t, dtype=self.rdt) y = rfft(x) y1 = direct_rdft(x) assert_array_almost_equal(y,y1) assert_equal(y.dtype, self.rdt) def test_djbfft(self): from numpy.fft import fft as numpy_fft for i in range(2,14): n = 2**i x = list(range(n)) y2 = numpy_fft(x) y1 = zeros((n,),dtype=double) y1[0] = y2[0].real y1[-1] = y2[n//2].real for k in range(1, n//2): y1[2*k-1] = y2[k].real y1[2*k] = y2[k].imag y = fftpack.drfft(x) assert_array_almost_equal(y,y1) def test_invalid_sizes(self): assert_raises(ValueError, rfft, []) 
assert_raises(ValueError, rfft, [[1,1],[2,2]], -5) # See gh-5790 class MockSeries(object): def __init__(self, data): self.data = np.asarray(data) def __getattr__(self, item): try: return getattr(self.data, item) except AttributeError: raise AttributeError(("'MockSeries' object " "has no attribute '{attr}'". format(attr=item))) def test_non_ndarray_with_dtype(self): x = np.array([1., 2., 3., 4., 5.]) xs = _TestRFFTBase.MockSeries(x) expected = [1, 2, 3, 4, 5] out = rfft(xs) # Data should not have been overwritten assert_equal(x, expected) assert_equal(xs.data, expected) class TestRFFTDouble(_TestRFFTBase): def setup_method(self): self.cdt = np.cdouble self.rdt = np.double class TestRFFTSingle(_TestRFFTBase): def setup_method(self): self.cdt = np.complex64 self.rdt = np.float32 class _TestIRFFTBase(object): def setup_method(self): np.random.seed(1234) def test_definition(self): x1 = [1,2,3,4,1,2,3,4] x1_1 = [1,2+3j,4+1j,2+3j,4,2-3j,4-1j,2-3j] x2 = [1,2,3,4,1,2,3,4,5] x2_1 = [1,2+3j,4+1j,2+3j,4+5j,4-5j,2-3j,4-1j,2-3j] def _test(x, xr): y = irfft(np.array(x, dtype=self.rdt)) y1 = direct_irdft(x) assert_equal(y.dtype, self.rdt) assert_array_almost_equal(y,y1, decimal=self.ndec) assert_array_almost_equal(y,ifft(xr), decimal=self.ndec) _test(x1, x1_1) _test(x2, x2_1) def test_djbfft(self): from numpy.fft import ifft as numpy_ifft for i in range(2,14): n = 2**i x = list(range(n)) x1 = zeros((n,),dtype=cdouble) x1[0] = x[0] for k in range(1, n//2): x1[k] = x[2*k-1]+1j*x[2*k] x1[n-k] = x[2*k-1]-1j*x[2*k] x1[n//2] = x[-1] y1 = numpy_ifft(x1) y = fftpack.drfft(x,direction=-1) assert_array_almost_equal(y,y1) def test_random_real(self): for size in [1,51,111,100,200,64,128,256,1024]: x = random([size]).astype(self.rdt) y1 = irfft(rfft(x)) y2 = rfft(irfft(x)) assert_equal(y1.dtype, self.rdt) assert_equal(y2.dtype, self.rdt) assert_array_almost_equal(y1, x, decimal=self.ndec, err_msg="size=%d" % size) assert_array_almost_equal(y2, x, decimal=self.ndec, err_msg="size=%d" % size) 
def test_size_accuracy(self): # Sanity check for the accuracy for prime and non-prime sized inputs if self.rdt == np.float32: rtol = 1e-5 elif self.rdt == np.float64: rtol = 1e-10 for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES: np.random.seed(1234) x = np.random.rand(size).astype(self.rdt) y = irfft(rfft(x)) _assert_close_in_norm(x, y, rtol, size, self.rdt) y = rfft(irfft(x)) _assert_close_in_norm(x, y, rtol, size, self.rdt) def test_invalid_sizes(self): assert_raises(ValueError, irfft, []) assert_raises(ValueError, irfft, [[1,1],[2,2]], -5) # self.ndec is bogus; we should have a assert_array_approx_equal for number of # significant digits class TestIRFFTDouble(_TestIRFFTBase): def setup_method(self): self.cdt = np.cdouble self.rdt = np.double self.ndec = 14 class TestIRFFTSingle(_TestIRFFTBase): def setup_method(self): self.cdt = np.complex64 self.rdt = np.float32 self.ndec = 5 class Testfft2(object): def setup_method(self): np.random.seed(1234) def test_regression_244(self): """fft returns wrong result with axes parameter.""" # fftn (and hence fft2) used to break when both axes and shape were # used x = numpy.ones((4,4,2)) y = fft2(x, shape=(8,8), axes=(-3,-2)) y_r = numpy.fft.fftn(x, s=(8, 8), axes=(-3, -2)) assert_array_almost_equal(y, y_r) def test_invalid_sizes(self): assert_raises(ValueError, fft2, [[]]) assert_raises(ValueError, fft2, [[1,1],[2,2]], (4, -3)) class TestFftnSingle(object): def setup_method(self): np.random.seed(1234) def test_definition(self): x = [[1,2,3],[4,5,6],[7,8,9]] y = fftn(np.array(x, np.float32)) if not y.dtype == np.complex64: raise ValueError("double precision output with single precision") y_r = np.array(fftn(x), np.complex64) assert_array_almost_equal_nulp(y, y_r) def test_size_accuracy(self): for size in SMALL_COMPOSITE_SIZES + SMALL_PRIME_SIZES: np.random.seed(1234) x = np.random.rand(size, size) + 1j*np.random.rand(size, size) y1 = fftn(x.real.astype(np.float32)) y2 = 
fftn(x.real.astype(np.float64)).astype(np.complex64) assert_equal(y1.dtype, np.complex64) assert_array_almost_equal_nulp(y1, y2, 2000) for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES: np.random.seed(1234) x = np.random.rand(size, 3) + 1j*np.random.rand(size, 3) y1 = fftn(x.real.astype(np.float32)) y2 = fftn(x.real.astype(np.float64)).astype(np.complex64) assert_equal(y1.dtype, np.complex64) assert_array_almost_equal_nulp(y1, y2, 2000) def test_definition_float16(self): x = [[1, 2, 3], [4, 5, 6], [7, 8, 9]] y = fftn(np.array(x, np.float16)) assert_equal(y.dtype, np.complex64) y_r = np.array(fftn(x), np.complex64) assert_array_almost_equal_nulp(y, y_r) def test_float16_input(self): for size in SMALL_COMPOSITE_SIZES + SMALL_PRIME_SIZES: np.random.seed(1234) x = np.random.rand(size, size) + 1j*np.random.rand(size, size) y1 = fftn(x.real.astype(np.float16)) y2 = fftn(x.real.astype(np.float64)).astype(np.complex64) assert_equal(y1.dtype, np.complex64) assert_array_almost_equal_nulp(y1, y2, 5e5) for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES: np.random.seed(1234) x = np.random.rand(size, 3) + 1j*np.random.rand(size, 3) y1 = fftn(x.real.astype(np.float16)) y2 = fftn(x.real.astype(np.float64)).astype(np.complex64) assert_equal(y1.dtype, np.complex64) assert_array_almost_equal_nulp(y1, y2, 2e6) class TestFftn(object): def setup_method(self): np.random.seed(1234) def test_definition(self): x = [[1,2,3],[4,5,6],[7,8,9]] y = fftn(x) assert_array_almost_equal(y,direct_dftn(x)) x = random((20,26)) assert_array_almost_equal(fftn(x),direct_dftn(x)) x = random((5,4,3,20)) assert_array_almost_equal(fftn(x),direct_dftn(x)) def test_axes_argument(self): # plane == ji_plane, x== kji_space plane1 = [[1,2,3],[4,5,6],[7,8,9]] plane2 = [[10,11,12],[13,14,15],[16,17,18]] plane3 = [[19,20,21],[22,23,24],[25,26,27]] ki_plane1 = [[1,2,3],[10,11,12],[19,20,21]] ki_plane2 = [[4,5,6],[13,14,15],[22,23,24]] ki_plane3 = [[7,8,9],[16,17,18],[25,26,27]] jk_plane1 = 
[[1,10,19],[4,13,22],[7,16,25]] jk_plane2 = [[2,11,20],[5,14,23],[8,17,26]] jk_plane3 = [[3,12,21],[6,15,24],[9,18,27]] kj_plane1 = [[1,4,7],[10,13,16],[19,22,25]] kj_plane2 = [[2,5,8],[11,14,17],[20,23,26]] kj_plane3 = [[3,6,9],[12,15,18],[21,24,27]] ij_plane1 = [[1,4,7],[2,5,8],[3,6,9]] ij_plane2 = [[10,13,16],[11,14,17],[12,15,18]] ij_plane3 = [[19,22,25],[20,23,26],[21,24,27]] ik_plane1 = [[1,10,19],[2,11,20],[3,12,21]] ik_plane2 = [[4,13,22],[5,14,23],[6,15,24]] ik_plane3 = [[7,16,25],[8,17,26],[9,18,27]] ijk_space = [jk_plane1,jk_plane2,jk_plane3] ikj_space = [kj_plane1,kj_plane2,kj_plane3] jik_space = [ik_plane1,ik_plane2,ik_plane3] jki_space = [ki_plane1,ki_plane2,ki_plane3] kij_space = [ij_plane1,ij_plane2,ij_plane3] x = array([plane1,plane2,plane3]) assert_array_almost_equal(fftn(x),fftn(x,axes=(-3,-2,-1))) # kji_space assert_array_almost_equal(fftn(x),fftn(x,axes=(0,1,2))) assert_array_almost_equal(fftn(x,axes=(0, 2)),fftn(x,axes=(0,-1))) y = fftn(x,axes=(2,1,0)) # ijk_space assert_array_almost_equal(swapaxes(y,-1,-3),fftn(ijk_space)) y = fftn(x,axes=(2,0,1)) # ikj_space assert_array_almost_equal(swapaxes(swapaxes(y,-1,-3), -1,-2), fftn(ikj_space)) y = fftn(x,axes=(1,2,0)) # jik_space assert_array_almost_equal(swapaxes(swapaxes(y,-1,-3), -3,-2), fftn(jik_space)) y = fftn(x,axes=(1,0,2)) # jki_space assert_array_almost_equal(swapaxes(y,-2,-3),fftn(jki_space)) y = fftn(x,axes=(0,2,1)) # kij_space assert_array_almost_equal(swapaxes(y,-2,-1), fftn(kij_space)) y = fftn(x,axes=(-2,-1)) # ji_plane assert_array_almost_equal(fftn(plane1),y[0]) assert_array_almost_equal(fftn(plane2),y[1]) assert_array_almost_equal(fftn(plane3),y[2]) y = fftn(x,axes=(1,2)) # ji_plane assert_array_almost_equal(fftn(plane1),y[0]) assert_array_almost_equal(fftn(plane2),y[1]) assert_array_almost_equal(fftn(plane3),y[2]) y = fftn(x,axes=(-3,-2)) # kj_plane assert_array_almost_equal(fftn(x[:,:,0]),y[:,:,0]) assert_array_almost_equal(fftn(x[:,:,1]),y[:,:,1]) 
assert_array_almost_equal(fftn(x[:,:,2]),y[:,:,2]) y = fftn(x,axes=(-3,-1)) # ki_plane assert_array_almost_equal(fftn(x[:,0,:]),y[:,0,:]) assert_array_almost_equal(fftn(x[:,1,:]),y[:,1,:]) assert_array_almost_equal(fftn(x[:,2,:]),y[:,2,:]) y = fftn(x,axes=(-1,-2)) # ij_plane assert_array_almost_equal(fftn(ij_plane1),swapaxes(y[0],-2,-1)) assert_array_almost_equal(fftn(ij_plane2),swapaxes(y[1],-2,-1)) assert_array_almost_equal(fftn(ij_plane3),swapaxes(y[2],-2,-1)) y = fftn(x,axes=(-1,-3)) # ik_plane assert_array_almost_equal(fftn(ik_plane1),swapaxes(y[:,0,:],-1,-2)) assert_array_almost_equal(fftn(ik_plane2),swapaxes(y[:,1,:],-1,-2)) assert_array_almost_equal(fftn(ik_plane3),swapaxes(y[:,2,:],-1,-2)) y = fftn(x,axes=(-2,-3)) # jk_plane assert_array_almost_equal(fftn(jk_plane1),swapaxes(y[:,:,0],-1,-2)) assert_array_almost_equal(fftn(jk_plane2),swapaxes(y[:,:,1],-1,-2)) assert_array_almost_equal(fftn(jk_plane3),swapaxes(y[:,:,2],-1,-2)) y = fftn(x,axes=(-1,)) # i_line for i in range(3): for j in range(3): assert_array_almost_equal(fft(x[i,j,:]),y[i,j,:]) y = fftn(x,axes=(-2,)) # j_line for i in range(3): for j in range(3): assert_array_almost_equal(fft(x[i,:,j]),y[i,:,j]) y = fftn(x,axes=(0,)) # k_line for i in range(3): for j in range(3): assert_array_almost_equal(fft(x[:,i,j]),y[:,i,j]) y = fftn(x,axes=()) # point assert_array_almost_equal(y,x) def test_shape_argument(self): small_x = [[1,2,3],[4,5,6]] large_x1 = [[1,2,3,0],[4,5,6,0],[0,0,0,0],[0,0,0,0]] y = fftn(small_x,shape=(4,4)) assert_array_almost_equal(y,fftn(large_x1)) y = fftn(small_x,shape=(3,4)) assert_array_almost_equal(y,fftn(large_x1[:-1])) def test_shape_axes_argument(self): small_x = [[1,2,3],[4,5,6],[7,8,9]] large_x1 = array([[1,2,3,0], [4,5,6,0], [7,8,9,0], [0,0,0,0]]) # Disable tests with shape and axes of different lengths # y = fftn(small_x,shape=(4,4),axes=(-1,)) # for i in range(4): # assert_array_almost_equal (y[i],fft(large_x1[i])) # y = fftn(small_x,shape=(4,4),axes=(-2,)) # for i in 
range(4): # assert_array_almost_equal (y[:,i],fft(large_x1[:,i])) y = fftn(small_x,shape=(4,4),axes=(-2,-1)) assert_array_almost_equal(y,fftn(large_x1)) y = fftn(small_x,shape=(4,4),axes=(-1,-2)) assert_array_almost_equal(y,swapaxes( fftn(swapaxes(large_x1,-1,-2)),-1,-2)) def test_shape_axes_argument2(self): # Change shape of the last axis x = numpy.random.random((10, 5, 3, 7)) y = fftn(x, axes=(-1,), shape=(8,)) assert_array_almost_equal(y, fft(x, axis=-1, n=8)) # Change shape of an arbitrary axis which is not the last one x = numpy.random.random((10, 5, 3, 7)) y = fftn(x, axes=(-2,), shape=(8,)) assert_array_almost_equal(y, fft(x, axis=-2, n=8)) # Change shape of axes: cf #244, where shape and axes were mixed up x = numpy.random.random((4,4,2)) y = fftn(x, axes=(-3,-2), shape=(8,8)) assert_array_almost_equal(y, numpy.fft.fftn(x, axes=(-3, -2), s=(8, 8))) def test_shape_argument_more(self): """Test that fftn raises ValueError when s.shape is longer than x.shape""" x = zeros((4, 4, 2)) assert_raises(ValueError, fftn, x, shape=(8, 8, 2, 1)) def test_invalid_sizes(self): assert_raises(ValueError, fftn, [[]]) assert_raises(ValueError, fftn, [[1,1],[2,2]], (4, -3)) class _TestIfftn(object): dtype = None cdtype = None def setup_method(self): np.random.seed(1234) def test_definition(self): x = np.array([[1,2,3],[4,5,6],[7,8,9]], dtype=self.dtype) y = ifftn(x) assert_equal(y.dtype, self.cdtype) assert_array_almost_equal_nulp(y,direct_idftn(x),self.maxnlp) x = random((20,26)) assert_array_almost_equal_nulp(ifftn(x),direct_idftn(x),self.maxnlp) x = random((5,4,3,20)) assert_array_almost_equal_nulp(ifftn(x),direct_idftn(x),self.maxnlp) def test_random_complex(self): for size in [1,2,51,32,64,92]: x = random([size,size]) + 1j*random([size,size]) assert_array_almost_equal_nulp(ifftn(fftn(x)),x,self.maxnlp) assert_array_almost_equal_nulp(fftn(ifftn(x)),x,self.maxnlp) def test_invalid_sizes(self): assert_raises(ValueError, ifftn, [[]]) assert_raises(ValueError, ifftn, 
[[1,1],[2,2]], (4, -3)) class TestIfftnDouble(_TestIfftn): dtype = np.float64 cdtype = np.complex128 maxnlp = 2000 class TestIfftnSingle(_TestIfftn): dtype = np.float32 cdtype = np.complex64 maxnlp = 3500 class TestLongDoubleFailure(object): def setup_method(self): np.random.seed(1234) def test_complex(self): if np.dtype(np.longcomplex).itemsize == np.dtype(complex).itemsize: # longdouble == double; so fft is supported return x = np.random.randn(10).astype(np.longdouble) + \ 1j * np.random.randn(10).astype(np.longdouble) for f in [fft, ifft]: try: f(x) raise AssertionError("Type %r not supported but does not fail" % np.longcomplex) except ValueError: pass def test_real(self): if np.dtype(np.longdouble).itemsize == np.dtype(np.double).itemsize: # longdouble == double; so fft is supported return x = np.random.randn(10).astype(np.longcomplex) for f in [fft, ifft]: try: f(x) raise AssertionError("Type %r not supported but does not fail" % np.longcomplex) except ValueError: pass class FakeArray(object): def __init__(self, data): self._data = data self.__array_interface__ = data.__array_interface__ class FakeArray2(object): def __init__(self, data): self._data = data def __array__(self): return self._data class TestOverwrite(object): """Check input overwrite behavior of the FFT functions """ real_dtypes = [np.float32, np.float64] dtypes = real_dtypes + [np.complex64, np.complex128] def _check(self, x, routine, fftsize, axis, overwrite_x, should_overwrite): x2 = x.copy() for fake in [lambda x: x, FakeArray, FakeArray2]: routine(fake(x2), fftsize, axis, overwrite_x=overwrite_x) sig = "%s(%s%r, %r, axis=%r, overwrite_x=%r)" % ( routine.__name__, x.dtype, x.shape, fftsize, axis, overwrite_x) if not should_overwrite: assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig) def _check_1d(self, routine, dtype, shape, axis, overwritable_dtypes): np.random.seed(1234) if np.issubdtype(dtype, np.complexfloating): data = np.random.randn(*shape) + 1j*np.random.randn(*shape) 
else: data = np.random.randn(*shape) data = data.astype(dtype) for fftsize in [8, 16, 32]: for overwrite_x in [True, False]: should_overwrite = (overwrite_x and dtype in overwritable_dtypes and fftsize <= shape[axis] and (len(shape) == 1 or (axis % len(shape) == len(shape)-1 and fftsize == shape[axis]))) self._check(data, routine, fftsize, axis, overwrite_x=overwrite_x, should_overwrite=should_overwrite) def test_fft(self): overwritable = (np.complex128, np.complex64) for dtype in self.dtypes: self._check_1d(fft, dtype, (16,), -1, overwritable) self._check_1d(fft, dtype, (16, 2), 0, overwritable) self._check_1d(fft, dtype, (2, 16), 1, overwritable) def test_ifft(self): overwritable = (np.complex128, np.complex64) for dtype in self.dtypes: self._check_1d(ifft, dtype, (16,), -1, overwritable) self._check_1d(ifft, dtype, (16, 2), 0, overwritable) self._check_1d(ifft, dtype, (2, 16), 1, overwritable) def test_rfft(self): overwritable = self.real_dtypes for dtype in self.real_dtypes: self._check_1d(rfft, dtype, (16,), -1, overwritable) self._check_1d(rfft, dtype, (16, 2), 0, overwritable) self._check_1d(rfft, dtype, (2, 16), 1, overwritable) def test_irfft(self): overwritable = self.real_dtypes for dtype in self.real_dtypes: self._check_1d(irfft, dtype, (16,), -1, overwritable) self._check_1d(irfft, dtype, (16, 2), 0, overwritable) self._check_1d(irfft, dtype, (2, 16), 1, overwritable) def _check_nd_one(self, routine, dtype, shape, axes, overwritable_dtypes): np.random.seed(1234) if np.issubdtype(dtype, np.complexfloating): data = np.random.randn(*shape) + 1j*np.random.randn(*shape) else: data = np.random.randn(*shape) data = data.astype(dtype) def fftshape_iter(shp): if len(shp) <= 0: yield () else: for j in (shp[0]//2, shp[0], shp[0]*2): for rest in fftshape_iter(shp[1:]): yield (j,) + rest if axes is None: part_shape = shape else: part_shape = tuple(np.take(shape, axes)) for overwrite_x in [True, False]: for fftshape in fftshape_iter(part_shape): should_overwrite = 
(overwrite_x and data.ndim == 1 and np.all([x < y for x, y in zip(fftshape, part_shape)]) and dtype in overwritable_dtypes) self._check(data, routine, fftshape, axes, overwrite_x=overwrite_x, should_overwrite=should_overwrite) if data.ndim > 1: # check fortran order: it never overwrites self._check(data.T, routine, fftshape, axes, overwrite_x=overwrite_x, should_overwrite=False) def _check_nd(self, routine, dtype, overwritable): self._check_nd_one(routine, dtype, (16,), None, overwritable) self._check_nd_one(routine, dtype, (16,), (0,), overwritable) self._check_nd_one(routine, dtype, (16, 2), (0,), overwritable) self._check_nd_one(routine, dtype, (2, 16), (1,), overwritable) self._check_nd_one(routine, dtype, (8, 16), None, overwritable) self._check_nd_one(routine, dtype, (8, 16), (0, 1), overwritable) self._check_nd_one(routine, dtype, (8, 16, 2), (0, 1), overwritable) self._check_nd_one(routine, dtype, (8, 16, 2), (1, 2), overwritable) self._check_nd_one(routine, dtype, (8, 16, 2), (0,), overwritable) self._check_nd_one(routine, dtype, (8, 16, 2), (1,), overwritable) self._check_nd_one(routine, dtype, (8, 16, 2), (2,), overwritable) self._check_nd_one(routine, dtype, (8, 16, 2), None, overwritable) self._check_nd_one(routine, dtype, (8, 16, 2), (0,1,2), overwritable) def test_fftn(self): overwritable = (np.complex128, np.complex64) for dtype in self.dtypes: self._check_nd(fftn, dtype, overwritable) def test_ifftn(self): overwritable = (np.complex128, np.complex64) for dtype in self.dtypes: self._check_nd(ifftn, dtype, overwritable)
32,319
34.322404
161
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/fftpack/tests/test_import.py
"""Test possibility of patching fftpack with pyfftw. No module source outside of scipy.fftpack should contain an import of the form `from scipy.fftpack import ...`, so that a simple replacement of scipy.fftpack by the corresponding fftw interface completely swaps the two FFT implementations. Because this simply inspects source files, we only need to run the test on one version of Python. """ import sys if sys.version_info >= (3, 4): from pathlib import Path import re import tokenize from numpy.testing import assert_ import scipy class TestFFTPackImport(object): def test_fftpack_import(self): base = Path(scipy.__file__).parent regexp = r"\s*from.+\.fftpack import .*\n" for path in base.rglob("*.py"): if base / "fftpack" in path.parents: continue # use tokenize to auto-detect encoding on systems where no # default encoding is defined (e.g. LANG='C') with tokenize.open(str(path)) as file: assert_(all(not re.fullmatch(regexp, line) for line in file), "{0} contains an import from fftpack".format(path))
1,250
35.794118
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/fftpack/tests/gendata.py
from __future__ import division, print_function, absolute_import import numpy as np from scipy.io import loadmat m = loadmat('test.mat', squeeze_me=True, struct_as_record=True, mat_dtype=True) np.savez('test.npz', **m)
229
24.555556
64
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/fftpack/tests/test_pseudo_diffs.py
# Created by Pearu Peterson, September 2002 from __future__ import division, print_function, absolute_import __usage__ = """ Build fftpack: python setup_fftpack.py build Run tests if scipy is installed: python -c 'import scipy;scipy.fftpack.test(<level>)' Run tests if fftpack is not installed: python tests/test_pseudo_diffs.py [<level>] """ from numpy.testing import (assert_equal, assert_almost_equal, assert_array_almost_equal) from scipy.fftpack import (diff, fft, ifft, tilbert, itilbert, hilbert, ihilbert, shift, fftfreq, cs_diff, sc_diff, ss_diff, cc_diff) import numpy as np from numpy import arange, sin, cos, pi, exp, tanh, sum, sign from numpy.random import random def direct_diff(x,k=1,period=None): fx = fft(x) n = len(fx) if period is None: period = 2*pi w = fftfreq(n)*2j*pi/period*n if k < 0: w = 1 / w**k w[0] = 0.0 else: w = w**k if n > 2000: w[250:n-250] = 0.0 return ifft(w*fx).real def direct_tilbert(x,h=1,period=None): fx = fft(x) n = len(fx) if period is None: period = 2*pi w = fftfreq(n)*h*2*pi/period*n w[0] = 1 w = 1j/tanh(w) w[0] = 0j return ifft(w*fx) def direct_itilbert(x,h=1,period=None): fx = fft(x) n = len(fx) if period is None: period = 2*pi w = fftfreq(n)*h*2*pi/period*n w = -1j*tanh(w) return ifft(w*fx) def direct_hilbert(x): fx = fft(x) n = len(fx) w = fftfreq(n)*n w = 1j*sign(w) return ifft(w*fx) def direct_ihilbert(x): return -direct_hilbert(x) def direct_shift(x,a,period=None): n = len(x) if period is None: k = fftfreq(n)*1j*n else: k = fftfreq(n)*2j*pi/period*n return ifft(fft(x)*exp(k*a)).real class TestDiff(object): def test_definition(self): for n in [16,17,64,127,32]: x = arange(n)*2*pi/n assert_array_almost_equal(diff(sin(x)),direct_diff(sin(x))) assert_array_almost_equal(diff(sin(x),2),direct_diff(sin(x),2)) assert_array_almost_equal(diff(sin(x),3),direct_diff(sin(x),3)) assert_array_almost_equal(diff(sin(x),4),direct_diff(sin(x),4)) assert_array_almost_equal(diff(sin(x),5),direct_diff(sin(x),5)) 
assert_array_almost_equal(diff(sin(2*x),3),direct_diff(sin(2*x),3)) assert_array_almost_equal(diff(sin(2*x),4),direct_diff(sin(2*x),4)) assert_array_almost_equal(diff(cos(x)),direct_diff(cos(x))) assert_array_almost_equal(diff(cos(x),2),direct_diff(cos(x),2)) assert_array_almost_equal(diff(cos(x),3),direct_diff(cos(x),3)) assert_array_almost_equal(diff(cos(x),4),direct_diff(cos(x),4)) assert_array_almost_equal(diff(cos(2*x)),direct_diff(cos(2*x))) assert_array_almost_equal(diff(sin(x*n/8)),direct_diff(sin(x*n/8))) assert_array_almost_equal(diff(cos(x*n/8)),direct_diff(cos(x*n/8))) for k in range(5): assert_array_almost_equal(diff(sin(4*x),k),direct_diff(sin(4*x),k)) assert_array_almost_equal(diff(cos(4*x),k),direct_diff(cos(4*x),k)) def test_period(self): for n in [17,64]: x = arange(n)/float(n) assert_array_almost_equal(diff(sin(2*pi*x),period=1), 2*pi*cos(2*pi*x)) assert_array_almost_equal(diff(sin(2*pi*x),3,period=1), -(2*pi)**3*cos(2*pi*x)) def test_sin(self): for n in [32,64,77]: x = arange(n)*2*pi/n assert_array_almost_equal(diff(sin(x)),cos(x)) assert_array_almost_equal(diff(cos(x)),-sin(x)) assert_array_almost_equal(diff(sin(x),2),-sin(x)) assert_array_almost_equal(diff(sin(x),4),sin(x)) assert_array_almost_equal(diff(sin(4*x)),4*cos(4*x)) assert_array_almost_equal(diff(sin(sin(x))),cos(x)*cos(sin(x))) def test_expr(self): for n in [64,77,100,128,256,512,1024,2048,4096,8192][:5]: x = arange(n)*2*pi/n f = sin(x)*cos(4*x)+exp(sin(3*x)) df = cos(x)*cos(4*x)-4*sin(x)*sin(4*x)+3*cos(3*x)*exp(sin(3*x)) ddf = -17*sin(x)*cos(4*x)-8*cos(x)*sin(4*x)\ - 9*sin(3*x)*exp(sin(3*x))+9*cos(3*x)**2*exp(sin(3*x)) d1 = diff(f) assert_array_almost_equal(d1,df) assert_array_almost_equal(diff(df),ddf) assert_array_almost_equal(diff(f,2),ddf) assert_array_almost_equal(diff(ddf,-1),df) def test_expr_large(self): for n in [2048,4096]: x = arange(n)*2*pi/n f = sin(x)*cos(4*x)+exp(sin(3*x)) df = cos(x)*cos(4*x)-4*sin(x)*sin(4*x)+3*cos(3*x)*exp(sin(3*x)) ddf = 
-17*sin(x)*cos(4*x)-8*cos(x)*sin(4*x)\ - 9*sin(3*x)*exp(sin(3*x))+9*cos(3*x)**2*exp(sin(3*x)) assert_array_almost_equal(diff(f),df) assert_array_almost_equal(diff(df),ddf) assert_array_almost_equal(diff(ddf,-1),df) assert_array_almost_equal(diff(f,2),ddf) def test_int(self): n = 64 x = arange(n)*2*pi/n assert_array_almost_equal(diff(sin(x),-1),-cos(x)) assert_array_almost_equal(diff(sin(x),-2),-sin(x)) assert_array_almost_equal(diff(sin(x),-4),sin(x)) assert_array_almost_equal(diff(2*cos(2*x),-1),sin(2*x)) def test_random_even(self): for k in [0,2,4,6]: for n in [60,32,64,56,55]: f = random((n,)) af = sum(f,axis=0)/n f = f-af # zeroing Nyquist mode: f = diff(diff(f,1),-1) assert_almost_equal(sum(f,axis=0),0.0) assert_array_almost_equal(diff(diff(f,k),-k),f) assert_array_almost_equal(diff(diff(f,-k),k),f) def test_random_odd(self): for k in [0,1,2,3,4,5,6]: for n in [33,65,55]: f = random((n,)) af = sum(f,axis=0)/n f = f-af assert_almost_equal(sum(f,axis=0),0.0) assert_array_almost_equal(diff(diff(f,k),-k),f) assert_array_almost_equal(diff(diff(f,-k),k),f) def test_zero_nyquist(self): for k in [0,1,2,3,4,5,6]: for n in [32,33,64,56,55]: f = random((n,)) af = sum(f,axis=0)/n f = f-af # zeroing Nyquist mode: f = diff(diff(f,1),-1) assert_almost_equal(sum(f,axis=0),0.0) assert_array_almost_equal(diff(diff(f,k),-k),f) assert_array_almost_equal(diff(diff(f,-k),k),f) class TestTilbert(object): def test_definition(self): for h in [0.1,0.5,1,5.5,10]: for n in [16,17,64,127]: x = arange(n)*2*pi/n y = tilbert(sin(x),h) y1 = direct_tilbert(sin(x),h) assert_array_almost_equal(y,y1) assert_array_almost_equal(tilbert(sin(x),h), direct_tilbert(sin(x),h)) assert_array_almost_equal(tilbert(sin(2*x),h), direct_tilbert(sin(2*x),h)) def test_random_even(self): for h in [0.1,0.5,1,5.5,10]: for n in [32,64,56]: f = random((n,)) af = sum(f,axis=0)/n f = f-af assert_almost_equal(sum(f,axis=0),0.0) assert_array_almost_equal(direct_tilbert(direct_itilbert(f,h),h),f) def 
test_random_odd(self): for h in [0.1,0.5,1,5.5,10]: for n in [33,65,55]: f = random((n,)) af = sum(f,axis=0)/n f = f-af assert_almost_equal(sum(f,axis=0),0.0) assert_array_almost_equal(itilbert(tilbert(f,h),h),f) assert_array_almost_equal(tilbert(itilbert(f,h),h),f) class TestITilbert(object): def test_definition(self): for h in [0.1,0.5,1,5.5,10]: for n in [16,17,64,127]: x = arange(n)*2*pi/n y = itilbert(sin(x),h) y1 = direct_itilbert(sin(x),h) assert_array_almost_equal(y,y1) assert_array_almost_equal(itilbert(sin(x),h), direct_itilbert(sin(x),h)) assert_array_almost_equal(itilbert(sin(2*x),h), direct_itilbert(sin(2*x),h)) class TestHilbert(object): def test_definition(self): for n in [16,17,64,127]: x = arange(n)*2*pi/n y = hilbert(sin(x)) y1 = direct_hilbert(sin(x)) assert_array_almost_equal(y,y1) assert_array_almost_equal(hilbert(sin(2*x)), direct_hilbert(sin(2*x))) def test_tilbert_relation(self): for n in [16,17,64,127]: x = arange(n)*2*pi/n f = sin(x)+cos(2*x)*sin(x) y = hilbert(f) y1 = direct_hilbert(f) assert_array_almost_equal(y,y1) y2 = tilbert(f,h=10) assert_array_almost_equal(y,y2) def test_random_odd(self): for n in [33,65,55]: f = random((n,)) af = sum(f,axis=0)/n f = f-af assert_almost_equal(sum(f,axis=0),0.0) assert_array_almost_equal(ihilbert(hilbert(f)),f) assert_array_almost_equal(hilbert(ihilbert(f)),f) def test_random_even(self): for n in [32,64,56]: f = random((n,)) af = sum(f,axis=0)/n f = f-af # zeroing Nyquist mode: f = diff(diff(f,1),-1) assert_almost_equal(sum(f,axis=0),0.0) assert_array_almost_equal(direct_hilbert(direct_ihilbert(f)),f) assert_array_almost_equal(hilbert(ihilbert(f)),f) class TestIHilbert(object): def test_definition(self): for n in [16,17,64,127]: x = arange(n)*2*pi/n y = ihilbert(sin(x)) y1 = direct_ihilbert(sin(x)) assert_array_almost_equal(y,y1) assert_array_almost_equal(ihilbert(sin(2*x)), direct_ihilbert(sin(2*x))) def test_itilbert_relation(self): for n in [16,17,64,127]: x = arange(n)*2*pi/n f = 
sin(x)+cos(2*x)*sin(x) y = ihilbert(f) y1 = direct_ihilbert(f) assert_array_almost_equal(y,y1) y2 = itilbert(f,h=10) assert_array_almost_equal(y,y2) class TestShift(object): def test_definition(self): for n in [18,17,64,127,32,2048,256]: x = arange(n)*2*pi/n for a in [0.1,3]: assert_array_almost_equal(shift(sin(x),a),direct_shift(sin(x),a)) assert_array_almost_equal(shift(sin(x),a),sin(x+a)) assert_array_almost_equal(shift(cos(x),a),cos(x+a)) assert_array_almost_equal(shift(cos(2*x)+sin(x),a), cos(2*(x+a))+sin(x+a)) assert_array_almost_equal(shift(exp(sin(x)),a),exp(sin(x+a))) assert_array_almost_equal(shift(sin(x),2*pi),sin(x)) assert_array_almost_equal(shift(sin(x),pi),-sin(x)) assert_array_almost_equal(shift(sin(x),pi/2),cos(x)) class TestOverwrite(object): """Check input overwrite behavior """ real_dtypes = [np.float32, np.float64] dtypes = real_dtypes + [np.complex64, np.complex128] def _check(self, x, routine, *args, **kwargs): x2 = x.copy() routine(x2, *args, **kwargs) sig = routine.__name__ if args: sig += repr(args) if kwargs: sig += repr(kwargs) assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig) def _check_1d(self, routine, dtype, shape, *args, **kwargs): np.random.seed(1234) if np.issubdtype(dtype, np.complexfloating): data = np.random.randn(*shape) + 1j*np.random.randn(*shape) else: data = np.random.randn(*shape) data = data.astype(dtype) self._check(data, routine, *args, **kwargs) def test_diff(self): for dtype in self.dtypes: self._check_1d(diff, dtype, (16,)) def test_tilbert(self): for dtype in self.dtypes: self._check_1d(tilbert, dtype, (16,), 1.6) def test_itilbert(self): for dtype in self.dtypes: self._check_1d(itilbert, dtype, (16,), 1.6) def test_hilbert(self): for dtype in self.dtypes: self._check_1d(hilbert, dtype, (16,)) def test_cs_diff(self): for dtype in self.dtypes: self._check_1d(cs_diff, dtype, (16,), 1.0, 4.0) def test_sc_diff(self): for dtype in self.dtypes: self._check_1d(sc_diff, dtype, (16,), 1.0, 4.0) def 
test_ss_diff(self): for dtype in self.dtypes: self._check_1d(ss_diff, dtype, (16,), 1.0, 4.0) def test_cc_diff(self): for dtype in self.dtypes: self._check_1d(cc_diff, dtype, (16,), 1.0, 4.0) def test_shift(self): for dtype in self.dtypes: self._check_1d(shift, dtype, (16,), 1.0)
13,511
34.279373
83
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/fftpack/tests/test_helper.py
# Created by Pearu Peterson, September 2002 from __future__ import division, print_function, absolute_import __usage__ = """ Build fftpack: python setup_fftpack.py build Run tests if scipy is installed: python -c 'import scipy;scipy.fftpack.test(<level>)' Run tests if fftpack is not installed: python tests/test_helper.py [<level>] """ from numpy.testing import (assert_array_almost_equal, assert_equal, assert_) from scipy.fftpack import fftshift,ifftshift,fftfreq,rfftfreq from scipy.fftpack.helper import next_fast_len from numpy import pi, random class TestFFTShift(object): def test_definition(self): x = [0,1,2,3,4,-4,-3,-2,-1] y = [-4,-3,-2,-1,0,1,2,3,4] assert_array_almost_equal(fftshift(x),y) assert_array_almost_equal(ifftshift(y),x) x = [0,1,2,3,4,-5,-4,-3,-2,-1] y = [-5,-4,-3,-2,-1,0,1,2,3,4] assert_array_almost_equal(fftshift(x),y) assert_array_almost_equal(ifftshift(y),x) def test_inverse(self): for n in [1,4,9,100,211]: x = random.random((n,)) assert_array_almost_equal(ifftshift(fftshift(x)),x) class TestFFTFreq(object): def test_definition(self): x = [0,1,2,3,4,-4,-3,-2,-1] assert_array_almost_equal(9*fftfreq(9),x) assert_array_almost_equal(9*pi*fftfreq(9,pi),x) x = [0,1,2,3,4,-5,-4,-3,-2,-1] assert_array_almost_equal(10*fftfreq(10),x) assert_array_almost_equal(10*pi*fftfreq(10,pi),x) class TestRFFTFreq(object): def test_definition(self): x = [0,1,1,2,2,3,3,4,4] assert_array_almost_equal(9*rfftfreq(9),x) assert_array_almost_equal(9*pi*rfftfreq(9,pi),x) x = [0,1,1,2,2,3,3,4,4,5] assert_array_almost_equal(10*rfftfreq(10),x) assert_array_almost_equal(10*pi*rfftfreq(10,pi),x) class TestNextOptLen(object): def test_next_opt_len(self): random.seed(1234) def nums(): for j in range(1, 1000): yield j yield 2**5 * 3**5 * 4**5 + 1 for n in nums(): m = next_fast_len(n) msg = "n=%d, m=%d" % (n, m) assert_(m >= n, msg) # check regularity k = m for d in [2, 3, 5]: while True: a, b = divmod(k, d) if b == 0: k = a else: break assert_equal(k, 1, err_msg=msg) def 
test_next_opt_len_strict(self): hams = { 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 8, 8: 8, 14: 15, 15: 15, 16: 16, 17: 18, 1021: 1024, 1536: 1536, 51200000: 51200000, 510183360: 510183360, 510183360 + 1: 512000000, 511000000: 512000000, 854296875: 854296875, 854296875 + 1: 859963392, 196608000000: 196608000000, 196608000000 + 1: 196830000000, 8789062500000: 8789062500000, 8789062500000 + 1: 8796093022208, 206391214080000: 206391214080000, 206391214080000 + 1: 206624260800000, 470184984576000: 470184984576000, 470184984576000 + 1: 470715894135000, 7222041363087360: 7222041363087360, 7222041363087360 + 1: 7230196133913600, # power of 5 5**23 11920928955078125: 11920928955078125, 11920928955078125 - 1: 11920928955078125, # power of 3 3**34 16677181699666569: 16677181699666569, 16677181699666569 - 1: 16677181699666569, # power of 2 2**54 18014398509481984: 18014398509481984, 18014398509481984 - 1: 18014398509481984, # above this, int(ceil(n)) == int(ceil(n+1)) 19200000000000000: 19200000000000000, 19200000000000000 + 1: 19221679687500000, 288230376151711744: 288230376151711744, 288230376151711744 + 1: 288325195312500000, 288325195312500000 - 1: 288325195312500000, 288325195312500000: 288325195312500000, 288325195312500000 + 1: 288555831593533440, # power of 3 3**83 3990838394187339929534246675572349035227 - 1: 3990838394187339929534246675572349035227, 3990838394187339929534246675572349035227: 3990838394187339929534246675572349035227, # power of 2 2**135 43556142965880123323311949751266331066368 - 1: 43556142965880123323311949751266331066368, 43556142965880123323311949751266331066368: 43556142965880123323311949751266331066368, # power of 5 5**57 6938893903907228377647697925567626953125 - 1: 6938893903907228377647697925567626953125, 6938893903907228377647697925567626953125: 6938893903907228377647697925567626953125, # http://www.drdobbs.com/228700538 # 2**96 * 3**1 * 5**13 290142196707511001929482240000000000000 - 1: 290142196707511001929482240000000000000, 
290142196707511001929482240000000000000: 290142196707511001929482240000000000000, 290142196707511001929482240000000000000 + 1: 290237644800000000000000000000000000000, # 2**36 * 3**69 * 5**7 4479571262811807241115438439905203543080960000000 - 1: 4479571262811807241115438439905203543080960000000, 4479571262811807241115438439905203543080960000000: 4479571262811807241115438439905203543080960000000, 4479571262811807241115438439905203543080960000000 + 1: 4480327901140333639941336854183943340032000000000, # 2**37 * 3**44 * 5**42 30774090693237851027531250000000000000000000000000000000000000 - 1: 30774090693237851027531250000000000000000000000000000000000000, 30774090693237851027531250000000000000000000000000000000000000: 30774090693237851027531250000000000000000000000000000000000000, 30774090693237851027531250000000000000000000000000000000000000 + 1: 30778180617309082445871527002041377406962596539492679680000000, } for x, y in hams.items(): assert_equal(next_fast_len(x), y)
6,417
38.617284
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/fftpack/tests/__init__.py
0
0
0
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/fftpack/tests/fftw_dct.c
#include <stdlib.h> #include <stdio.h> #include <fftw3.h> #ifdef DCT_TEST_USE_SINGLE typedef float float_prec; #define PF "%.7f" #define FFTW_PLAN fftwf_plan #define FFTW_MALLOC fftwf_malloc #define FFTW_FREE fftwf_free #define FFTW_PLAN_CREATE fftwf_plan_r2r_1d #define FFTW_EXECUTE fftwf_execute #define FFTW_DESTROY_PLAN fftwf_destroy_plan #define FFTW_CLEANUP fftwf_cleanup #else typedef double float_prec; #define PF "%.18f" #define FFTW_PLAN fftw_plan #define FFTW_MALLOC fftw_malloc #define FFTW_FREE fftw_free #define FFTW_PLAN_CREATE fftw_plan_r2r_1d #define FFTW_EXECUTE fftw_execute #define FFTW_DESTROY_PLAN fftw_destroy_plan #define FFTW_CLEANUP fftw_cleanup #endif enum type { DCT_I = 1, DCT_II = 2, DCT_III = 3, DCT_IV = 4, DST_I = 5, DST_II = 6, DST_III = 7, DST_IV = 8, }; int gen(int type, int sz) { float_prec *a, *b; FFTW_PLAN p; int i, tp; a = FFTW_MALLOC(sizeof(*a) * sz); if (a == NULL) { fprintf(stderr, "failure\n"); exit(EXIT_FAILURE); } b = FFTW_MALLOC(sizeof(*b) * sz); if (b == NULL) { fprintf(stderr, "failure\n"); exit(EXIT_FAILURE); } switch(type) { case DCT_I: tp = FFTW_REDFT00; break; case DCT_II: tp = FFTW_REDFT10; break; case DCT_III: tp = FFTW_REDFT01; break; case DCT_IV: tp = FFTW_REDFT11; break; case DST_I: tp = FFTW_RODFT00; break; case DST_II: tp = FFTW_RODFT10; break; case DST_III: tp = FFTW_RODFT01; break; case DST_IV: tp = FFTW_RODFT11; break; default: fprintf(stderr, "unknown type\n"); exit(EXIT_FAILURE); } switch(type) { case DCT_I: case DCT_II: case DCT_III: case DCT_IV: for(i=0; i < sz; ++i) { a[i] = i; } break; case DST_I: case DST_II: case DST_III: case DST_IV: /* TODO: what should we do for dst's?*/ for(i=0; i < sz; ++i) { a[i] = i; } break; default: fprintf(stderr, "unknown type\n"); exit(EXIT_FAILURE); } p = FFTW_PLAN_CREATE(sz, a, b, tp, FFTW_ESTIMATE); FFTW_EXECUTE(p); FFTW_DESTROY_PLAN(p); for(i=0; i < sz; ++i) { printf(PF"\n", b[i]); } FFTW_FREE(b); FFTW_FREE(a); return 0; } int main(int argc, char* argv[]) { int n, tp; if 
(argc < 3) { fprintf(stderr, "missing argument: program type n\n"); exit(EXIT_FAILURE); } tp = atoi(argv[1]); n = atoi(argv[2]); gen(tp, n); FFTW_CLEANUP(); return 0; }
3,454
23.856115
70
c
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/fftpack/tests/gendata.m
x0 = linspace(0, 10, 11); x1 = linspace(0, 10, 15); x2 = linspace(0, 10, 16); x3 = linspace(0, 10, 17); x4 = randn(32, 1); x5 = randn(64, 1); x6 = randn(128, 1); x7 = randn(256, 1); y0 = dct(x0); y1 = dct(x1); y2 = dct(x2); y3 = dct(x3); y4 = dct(x4); y5 = dct(x5); y6 = dct(x6); y7 = dct(x7); save('test.mat', 'x0', 'x1', 'x2', 'x3', 'x4', 'x5', 'x6', 'x7', ... 'y0', 'y1', 'y2', 'y3', 'y4', 'y5', 'y6', 'y7');
432
18.681818
68
m
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/misc/setup.py
from __future__ import division, print_function, absolute_import def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('misc',parent_package,top_path) config.add_data_files('*.dat') config.add_data_dir('tests') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict())
441
28.466667
64
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/misc/doccer.py
''' Utilities to allow inserting docstring fragments for common parameters into function and method docstrings''' from __future__ import division, print_function, absolute_import import sys __all__ = ['docformat', 'inherit_docstring_from', 'indentcount_lines', 'filldoc', 'unindent_dict', 'unindent_string'] def docformat(docstring, docdict=None): ''' Fill a function docstring from variables in dictionary Adapt the indent of the inserted docs Parameters ---------- docstring : string docstring from function, possibly with dict formatting strings docdict : dict, optional dictionary with keys that match the dict formatting strings and values that are docstring fragments to be inserted. The indentation of the inserted docstrings is set to match the minimum indentation of the ``docstring`` by adding this indentation to all lines of the inserted string, except the first Returns ------- outstring : string string with requested ``docdict`` strings inserted Examples -------- >>> docformat(' Test string with %(value)s', {'value':'inserted value'}) ' Test string with inserted value' >>> docstring = 'First line\\n Second line\\n %(value)s' >>> inserted_string = "indented\\nstring" >>> docdict = {'value': inserted_string} >>> docformat(docstring, docdict) 'First line\\n Second line\\n indented\\n string' ''' if not docstring: return docstring if docdict is None: docdict = {} if not docdict: return docstring lines = docstring.expandtabs().splitlines() # Find the minimum indent of the main docstring, after first line if len(lines) < 2: icount = 0 else: icount = indentcount_lines(lines[1:]) indent = ' ' * icount # Insert this indent to dictionary docstrings indented = {} for name, dstr in docdict.items(): lines = dstr.expandtabs().splitlines() try: newlines = [lines[0]] for line in lines[1:]: newlines.append(indent+line) indented[name] = '\n'.join(newlines) except IndexError: indented[name] = dstr return docstring % indented def inherit_docstring_from(cls): """ This decorator 
modifies the decorated function's docstring by replacing occurrences of '%(super)s' with the docstring of the method of the same name from the class `cls`. If the decorated method has no docstring, it is simply given the docstring of `cls`s method. Parameters ---------- cls : Python class or instance A class with a method with the same name as the decorated method. The docstring of the method in this class replaces '%(super)s' in the docstring of the decorated method. Returns ------- f : function The decorator function that modifies the __doc__ attribute of its argument. Examples -------- In the following, the docstring for Bar.func created using the docstring of `Foo.func`. >>> class Foo(object): ... def func(self): ... '''Do something useful.''' ... return ... >>> class Bar(Foo): ... @inherit_docstring_from(Foo) ... def func(self): ... '''%(super)s ... Do it fast. ... ''' ... return ... >>> b = Bar() >>> b.func.__doc__ 'Do something useful.\n Do it fast.\n ' """ def _doc(func): cls_docstring = getattr(cls, func.__name__).__doc__ func_docstring = func.__doc__ if func_docstring is None: func.__doc__ = cls_docstring else: new_docstring = func_docstring % dict(super=cls_docstring) func.__doc__ = new_docstring return func return _doc def extend_notes_in_docstring(cls, notes): """ This decorator replaces the decorated function's docstring with the docstring from corresponding method in `cls`. It extends the 'Notes' section of that docstring to include the given `notes`. 
""" def _doc(func): cls_docstring = getattr(cls, func.__name__).__doc__ end_of_notes = cls_docstring.find(' References\n') if end_of_notes == -1: end_of_notes = cls_docstring.find(' Examples\n') if end_of_notes == -1: end_of_notes = len(cls_docstring) func.__doc__ = (cls_docstring[:end_of_notes] + notes + cls_docstring[end_of_notes:]) return func return _doc def replace_notes_in_docstring(cls, notes): """ This decorator replaces the decorated function's docstring with the docstring from corresponding method in `cls`. It replaces the 'Notes' section of that docstring with the given `notes`. """ def _doc(func): cls_docstring = getattr(cls, func.__name__).__doc__ notes_header = ' Notes\n -----\n' # XXX The following assumes that there is a Notes section. start_of_notes = cls_docstring.find(notes_header) end_of_notes = cls_docstring.find(' References\n') if end_of_notes == -1: end_of_notes = cls_docstring.find(' Examples\n') if end_of_notes == -1: end_of_notes = len(cls_docstring) func.__doc__ = (cls_docstring[:start_of_notes + len(notes_header)] + notes + cls_docstring[end_of_notes:]) return func return _doc def indentcount_lines(lines): ''' Minimum indent for all lines in line list >>> lines = [' one', ' two', ' three'] >>> indentcount_lines(lines) 1 >>> lines = [] >>> indentcount_lines(lines) 0 >>> lines = [' one'] >>> indentcount_lines(lines) 1 >>> indentcount_lines([' ']) 0 ''' indentno = sys.maxsize for line in lines: stripped = line.lstrip() if stripped: indentno = min(indentno, len(line) - len(stripped)) if indentno == sys.maxsize: return 0 return indentno def filldoc(docdict, unindent_params=True): ''' Return docstring decorator using docdict variable dictionary Parameters ---------- docdict : dictionary dictionary containing name, docstring fragment pairs unindent_params : {False, True}, boolean, optional If True, strip common indentation from all parameters in docdict Returns ------- decfunc : function decorator that applies dictionary to input function 
docstring ''' if unindent_params: docdict = unindent_dict(docdict) def decorate(f): f.__doc__ = docformat(f.__doc__, docdict) return f return decorate def unindent_dict(docdict): ''' Unindent all strings in a docdict ''' can_dict = {} for name, dstr in docdict.items(): can_dict[name] = unindent_string(dstr) return can_dict def unindent_string(docstring): ''' Set docstring to minimum indent for all lines, including first >>> unindent_string(' two') 'two' >>> unindent_string(' two\\n three') 'two\\n three' ''' lines = docstring.expandtabs().splitlines() icount = indentcount_lines(lines) if icount == 0: return docstring return '\n'.join([line[icount:] for line in lines])
7,515
29.803279
77
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/misc/pilutil.py
""" A collection of image utilities using the Python Imaging Library (PIL). Note that PIL is not a dependency of SciPy and this module is not available on systems that don't have PIL installed. """ from __future__ import division, print_function, absolute_import # Functions which need the PIL import numpy import tempfile from numpy import (amin, amax, ravel, asarray, arange, ones, newaxis, transpose, iscomplexobj, uint8, issubdtype, array) try: from PIL import Image, ImageFilter except ImportError: import Image import ImageFilter if not hasattr(Image, 'frombytes'): Image.frombytes = Image.fromstring __all__ = ['fromimage', 'toimage', 'imsave', 'imread', 'bytescale', 'imrotate', 'imresize', 'imshow', 'imfilter'] @numpy.deprecate(message="`bytescale` is deprecated in SciPy 1.0.0, " "and will be removed in 1.2.0.") def bytescale(data, cmin=None, cmax=None, high=255, low=0): """ Byte scales an array (image). Byte scaling means converting the input image to uint8 dtype and scaling the range to ``(low, high)`` (default 0-255). If the input image already has dtype uint8, no scaling is done. This function is only available if Python Imaging Library (PIL) is installed. Parameters ---------- data : ndarray PIL image data array. cmin : scalar, optional Bias scaling of small values. Default is ``data.min()``. cmax : scalar, optional Bias scaling of large values. Default is ``data.max()``. high : scalar, optional Scale max value to `high`. Default is 255. low : scalar, optional Scale min value to `low`. Default is 0. Returns ------- img_array : uint8 ndarray The byte-scaled array. Examples -------- >>> from scipy.misc import bytescale >>> img = np.array([[ 91.06794177, 3.39058326, 84.4221549 ], ... [ 73.88003259, 80.91433048, 4.88878881], ... 
[ 51.53875334, 34.45808177, 27.5873488 ]]) >>> bytescale(img) array([[255, 0, 236], [205, 225, 4], [140, 90, 70]], dtype=uint8) >>> bytescale(img, high=200, low=100) array([[200, 100, 192], [180, 188, 102], [155, 135, 128]], dtype=uint8) >>> bytescale(img, cmin=0, cmax=255) array([[91, 3, 84], [74, 81, 5], [52, 34, 28]], dtype=uint8) """ if data.dtype == uint8: return data if high > 255: raise ValueError("`high` should be less than or equal to 255.") if low < 0: raise ValueError("`low` should be greater than or equal to 0.") if high < low: raise ValueError("`high` should be greater than or equal to `low`.") if cmin is None: cmin = data.min() if cmax is None: cmax = data.max() cscale = cmax - cmin if cscale < 0: raise ValueError("`cmax` should be larger than `cmin`.") elif cscale == 0: cscale = 1 scale = float(high - low) / cscale bytedata = (data - cmin) * scale + low return (bytedata.clip(low, high) + 0.5).astype(uint8) @numpy.deprecate(message="`imread` is deprecated in SciPy 1.0.0, " "and will be removed in 1.2.0.\n" "Use ``imageio.imread`` instead.") def imread(name, flatten=False, mode=None): """ Read an image from a file as an array. This function is only available if Python Imaging Library (PIL) is installed. Parameters ---------- name : str or file object The file name or file object to be read. flatten : bool, optional If True, flattens the color layers into a single gray-scale layer. mode : str, optional Mode to convert image to, e.g. ``'RGB'``. See the Notes for more details. Returns ------- imread : ndarray The array obtained by reading the image. Notes ----- `imread` uses the Python Imaging Library (PIL) to read an image. The following notes are from the PIL documentation. 
`mode` can be one of the following strings: * 'L' (8-bit pixels, black and white) * 'P' (8-bit pixels, mapped to any other mode using a color palette) * 'RGB' (3x8-bit pixels, true color) * 'RGBA' (4x8-bit pixels, true color with transparency mask) * 'CMYK' (4x8-bit pixels, color separation) * 'YCbCr' (3x8-bit pixels, color video format) * 'I' (32-bit signed integer pixels) * 'F' (32-bit floating point pixels) PIL also provides limited support for a few special modes, including 'LA' ('L' with alpha), 'RGBX' (true color with padding) and 'RGBa' (true color with premultiplied alpha). When translating a color image to black and white (mode 'L', 'I' or 'F'), the library uses the ITU-R 601-2 luma transform:: L = R * 299/1000 + G * 587/1000 + B * 114/1000 When `flatten` is True, the image is converted using mode 'F'. When `mode` is not None and `flatten` is True, the image is first converted according to `mode`, and the result is then flattened using mode 'F'. """ im = Image.open(name) return fromimage(im, flatten=flatten, mode=mode) @numpy.deprecate(message="`imsave` is deprecated in SciPy 1.0.0, " "and will be removed in 1.2.0.\n" "Use ``imageio.imwrite`` instead.") def imsave(name, arr, format=None): """ Save an array as an image. This function is only available if Python Imaging Library (PIL) is installed. .. warning:: This function uses `bytescale` under the hood to rescale images to use the full (0, 255) range if ``mode`` is one of ``None, 'L', 'P', 'l'``. It will also cast data for 2-D images to ``uint32`` for ``mode=None`` (which is the default). Parameters ---------- name : str or file object Output file name or file object. arr : ndarray, MxN or MxNx3 or MxNx4 Array containing image values. If the shape is ``MxN``, the array represents a grey-level image. Shape ``MxNx3`` stores the red, green and blue bands along the last dimension. An alpha layer may be included, specified as the last colour band of an ``MxNx4`` array. format : str Image format. 
If omitted, the format to use is determined from the file name extension. If a file object was used instead of a file name, this parameter should always be used. Examples -------- Construct an array of gradient intensity values and save to file: >>> from scipy.misc import imsave >>> x = np.zeros((255, 255)) >>> x = np.zeros((255, 255), dtype=np.uint8) >>> x[:] = np.arange(255) >>> imsave('gradient.png', x) Construct an array with three colour bands (R, G, B) and store to file: >>> rgb = np.zeros((255, 255, 3), dtype=np.uint8) >>> rgb[..., 0] = np.arange(255) >>> rgb[..., 1] = 55 >>> rgb[..., 2] = 1 - np.arange(255) >>> imsave('rgb_gradient.png', rgb) """ im = toimage(arr, channel_axis=2) if format is None: im.save(name) else: im.save(name, format) return @numpy.deprecate(message="`fromimage` is deprecated in SciPy 1.0.0. " "and will be removed in 1.2.0.\n" "Use ``np.asarray(im)`` instead.") def fromimage(im, flatten=False, mode=None): """ Return a copy of a PIL image as a numpy array. This function is only available if Python Imaging Library (PIL) is installed. Parameters ---------- im : PIL image Input image. flatten : bool If true, convert the output to grey-scale. mode : str, optional Mode to convert image to, e.g. ``'RGB'``. See the Notes of the `imread` docstring for more details. Returns ------- fromimage : ndarray The different colour bands/channels are stored in the third dimension, such that a grey-image is MxN, an RGB-image MxNx3 and an RGBA-image MxNx4. """ if not Image.isImageType(im): raise TypeError("Input is not a PIL image.") if mode is not None: if mode != im.mode: im = im.convert(mode) elif im.mode == 'P': # Mode 'P' means there is an indexed "palette". If we leave the mode # as 'P', then when we do `a = array(im)` below, `a` will be a 2-D # containing the indices into the palette, and not a 3-D array # containing the RGB or RGBA values. 
if 'transparency' in im.info: im = im.convert('RGBA') else: im = im.convert('RGB') if flatten: im = im.convert('F') elif im.mode == '1': # Workaround for crash in PIL. When im is 1-bit, the call array(im) # can cause a seg. fault, or generate garbage. See # https://github.com/scipy/scipy/issues/2138 and # https://github.com/python-pillow/Pillow/issues/350. # # This converts im from a 1-bit image to an 8-bit image. im = im.convert('L') a = array(im) return a _errstr = "Mode is unknown or incompatible with input array shape." @numpy.deprecate(message="`toimage` is deprecated in SciPy 1.0.0, " "and will be removed in 1.2.0.\n" "Use Pillow's ``Image.fromarray`` directly instead.") def toimage(arr, high=255, low=0, cmin=None, cmax=None, pal=None, mode=None, channel_axis=None): """Takes a numpy array and returns a PIL image. This function is only available if Python Imaging Library (PIL) is installed. The mode of the PIL image depends on the array shape and the `pal` and `mode` keywords. For 2-D arrays, if `pal` is a valid (N,3) byte-array giving the RGB values (from 0 to 255) then ``mode='P'``, otherwise ``mode='L'``, unless mode is given as 'F' or 'I' in which case a float and/or integer array is made. .. warning:: This function uses `bytescale` under the hood to rescale images to use the full (0, 255) range if ``mode`` is one of ``None, 'L', 'P', 'l'``. It will also cast data for 2-D images to ``uint32`` for ``mode=None`` (which is the default). Notes ----- For 3-D arrays, the `channel_axis` argument tells which dimension of the array holds the channel data. For 3-D arrays if one of the dimensions is 3, the mode is 'RGB' by default or 'YCbCr' if selected. The numpy array must be either 2 dimensional or 3 dimensional. 
""" data = asarray(arr) if iscomplexobj(data): raise ValueError("Cannot convert a complex-valued array.") shape = list(data.shape) valid = len(shape) == 2 or ((len(shape) == 3) and ((3 in shape) or (4 in shape))) if not valid: raise ValueError("'arr' does not have a suitable array shape for " "any mode.") if len(shape) == 2: shape = (shape[1], shape[0]) # columns show up first if mode == 'F': data32 = data.astype(numpy.float32) image = Image.frombytes(mode, shape, data32.tostring()) return image if mode in [None, 'L', 'P']: bytedata = bytescale(data, high=high, low=low, cmin=cmin, cmax=cmax) image = Image.frombytes('L', shape, bytedata.tostring()) if pal is not None: image.putpalette(asarray(pal, dtype=uint8).tostring()) # Becomes a mode='P' automagically. elif mode == 'P': # default gray-scale pal = (arange(0, 256, 1, dtype=uint8)[:, newaxis] * ones((3,), dtype=uint8)[newaxis, :]) image.putpalette(asarray(pal, dtype=uint8).tostring()) return image if mode == '1': # high input gives threshold for 1 bytedata = (data > high) image = Image.frombytes('1', shape, bytedata.tostring()) return image if cmin is None: cmin = amin(ravel(data)) if cmax is None: cmax = amax(ravel(data)) data = (data*1.0 - cmin)*(high - low)/(cmax - cmin) + low if mode == 'I': data32 = data.astype(numpy.uint32) image = Image.frombytes(mode, shape, data32.tostring()) else: raise ValueError(_errstr) return image # if here then 3-d array with a 3 or a 4 in the shape length. 
# Check for 3 in datacube shape --- 'RGB' or 'YCbCr' if channel_axis is None: if (3 in shape): ca = numpy.flatnonzero(asarray(shape) == 3)[0] else: ca = numpy.flatnonzero(asarray(shape) == 4) if len(ca): ca = ca[0] else: raise ValueError("Could not find channel dimension.") else: ca = channel_axis numch = shape[ca] if numch not in [3, 4]: raise ValueError("Channel axis dimension is not valid.") bytedata = bytescale(data, high=high, low=low, cmin=cmin, cmax=cmax) if ca == 2: strdata = bytedata.tostring() shape = (shape[1], shape[0]) elif ca == 1: strdata = transpose(bytedata, (0, 2, 1)).tostring() shape = (shape[2], shape[0]) elif ca == 0: strdata = transpose(bytedata, (1, 2, 0)).tostring() shape = (shape[2], shape[1]) if mode is None: if numch == 3: mode = 'RGB' else: mode = 'RGBA' if mode not in ['RGB', 'RGBA', 'YCbCr', 'CMYK']: raise ValueError(_errstr) if mode in ['RGB', 'YCbCr']: if numch != 3: raise ValueError("Invalid array shape for mode.") if mode in ['RGBA', 'CMYK']: if numch != 4: raise ValueError("Invalid array shape for mode.") # Here we know data and mode is correct image = Image.frombytes(mode, shape, strdata) return image @numpy.deprecate(message="`imrotate` is deprecated in SciPy 1.0.0, " "and will be removed in 1.2.0.\n" "Use ``skimage.transform.rotate`` instead.") def imrotate(arr, angle, interp='bilinear'): """ Rotate an image counter-clockwise by angle degrees. This function is only available if Python Imaging Library (PIL) is installed. .. warning:: This function uses `bytescale` under the hood to rescale images to use the full (0, 255) range if ``mode`` is one of ``None, 'L', 'P', 'l'``. It will also cast data for 2-D images to ``uint32`` for ``mode=None`` (which is the default). Parameters ---------- arr : ndarray Input array of image to be rotated. angle : float The angle of rotation. 
interp : str, optional Interpolation - 'nearest' : for nearest neighbor - 'bilinear' : for bilinear - 'lanczos' : for lanczos - 'cubic' : for bicubic - 'bicubic' : for bicubic Returns ------- imrotate : ndarray The rotated array of image. """ arr = asarray(arr) func = {'nearest': 0, 'lanczos': 1, 'bilinear': 2, 'bicubic': 3, 'cubic': 3} im = toimage(arr) im = im.rotate(angle, resample=func[interp]) return fromimage(im) @numpy.deprecate(message="`imshow` is deprecated in SciPy 1.0.0, " "and will be removed in 1.2.0.\n" "Use ``matplotlib.pyplot.imshow`` instead.") def imshow(arr): """ Simple showing of an image through an external viewer. This function is only available if Python Imaging Library (PIL) is installed. Uses the image viewer specified by the environment variable SCIPY_PIL_IMAGE_VIEWER, or if that is not defined then `see`, to view a temporary file generated from array data. .. warning:: This function uses `bytescale` under the hood to rescale images to use the full (0, 255) range if ``mode`` is one of ``None, 'L', 'P', 'l'``. It will also cast data for 2-D images to ``uint32`` for ``mode=None`` (which is the default). Parameters ---------- arr : ndarray Array of image data to show. Returns ------- None Examples -------- >>> a = np.tile(np.arange(255), (255,1)) >>> from scipy import misc >>> misc.imshow(a) """ im = toimage(arr) fnum, fname = tempfile.mkstemp('.png') try: im.save(fname) except: raise RuntimeError("Error saving temporary image data.") import os os.close(fnum) cmd = os.environ.get('SCIPY_PIL_IMAGE_VIEWER', 'see') status = os.system("%s %s" % (cmd, fname)) os.unlink(fname) if status != 0: raise RuntimeError('Could not execute image viewer.') @numpy.deprecate(message="`imresize` is deprecated in SciPy 1.0.0, " "and will be removed in 1.2.0.\n" "Use ``skimage.transform.resize`` instead.") def imresize(arr, size, interp='bilinear', mode=None): """ Resize an image. This function is only available if Python Imaging Library (PIL) is installed. .. 
warning:: This function uses `bytescale` under the hood to rescale images to use the full (0, 255) range if ``mode`` is one of ``None, 'L', 'P', 'l'``. It will also cast data for 2-D images to ``uint32`` for ``mode=None`` (which is the default). Parameters ---------- arr : ndarray The array of image to be resized. size : int, float or tuple * int - Percentage of current size. * float - Fraction of current size. * tuple - Size of the output image (height, width). interp : str, optional Interpolation to use for re-sizing ('nearest', 'lanczos', 'bilinear', 'bicubic' or 'cubic'). mode : str, optional The PIL image mode ('P', 'L', etc.) to convert `arr` before resizing. If ``mode=None`` (the default), 2-D images will be treated like ``mode='L'``, i.e. casting to long integer. For 3-D and 4-D arrays, `mode` will be set to ``'RGB'`` and ``'RGBA'`` respectively. Returns ------- imresize : ndarray The resized array of image. See Also -------- toimage : Implicitly used to convert `arr` according to `mode`. scipy.ndimage.zoom : More generic implementation that does not use PIL. """ im = toimage(arr, mode=mode) ts = type(size) if issubdtype(ts, numpy.signedinteger): percent = size / 100.0 size = tuple((array(im.size)*percent).astype(int)) elif issubdtype(type(size), numpy.floating): size = tuple((array(im.size)*size).astype(int)) else: size = (size[1], size[0]) func = {'nearest': 0, 'lanczos': 1, 'bilinear': 2, 'bicubic': 3, 'cubic': 3} imnew = im.resize(size, resample=func[interp]) return fromimage(imnew) @numpy.deprecate(message="`imfilter` is deprecated in SciPy 1.0.0, " "and will be removed in 1.2.0.\n" "Use Pillow filtering functionality directly.") def imfilter(arr, ftype): """ Simple filtering of an image. This function is only available if Python Imaging Library (PIL) is installed. .. warning:: This function uses `bytescale` under the hood to rescale images to use the full (0, 255) range if ``mode`` is one of ``None, 'L', 'P', 'l'``. 
It will also cast data for 2-D images to ``uint32`` for ``mode=None`` (which is the default). Parameters ---------- arr : ndarray The array of Image in which the filter is to be applied. ftype : str The filter that has to be applied. Legal values are: 'blur', 'contour', 'detail', 'edge_enhance', 'edge_enhance_more', 'emboss', 'find_edges', 'smooth', 'smooth_more', 'sharpen'. Returns ------- imfilter : ndarray The array with filter applied. Raises ------ ValueError *Unknown filter type.* If the filter you are trying to apply is unsupported. """ _tdict = {'blur': ImageFilter.BLUR, 'contour': ImageFilter.CONTOUR, 'detail': ImageFilter.DETAIL, 'edge_enhance': ImageFilter.EDGE_ENHANCE, 'edge_enhance_more': ImageFilter.EDGE_ENHANCE_MORE, 'emboss': ImageFilter.EMBOSS, 'find_edges': ImageFilter.FIND_EDGES, 'smooth': ImageFilter.SMOOTH, 'smooth_more': ImageFilter.SMOOTH_MORE, 'sharpen': ImageFilter.SHARPEN } im = toimage(arr) if ftype not in _tdict: raise ValueError("Unknown filter type.") return fromimage(im.filter(_tdict[ftype]))
20,926
32.644695
81
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/misc/common.py
""" Functions which are common and require SciPy Base and Level 1 SciPy (special, linalg) """ from __future__ import division, print_function, absolute_import from numpy import arange, newaxis, hstack, product, array, frombuffer, load __all__ = ['central_diff_weights', 'derivative', 'ascent', 'face', 'electrocardiogram'] def central_diff_weights(Np, ndiv=1): """ Return weights for an Np-point central derivative. Assumes equally-spaced function points. If weights are in the vector w, then derivative is w[0] * f(x-ho*dx) + ... + w[-1] * f(x+h0*dx) Parameters ---------- Np : int Number of points for the central derivative. ndiv : int, optional Number of divisions. Default is 1. Notes ----- Can be inaccurate for large number of points. """ if Np < ndiv + 1: raise ValueError("Number of points must be at least the derivative order + 1.") if Np % 2 == 0: raise ValueError("The number of points must be odd.") from scipy import linalg ho = Np >> 1 x = arange(-ho,ho+1.0) x = x[:,newaxis] X = x**0.0 for k in range(1,Np): X = hstack([X,x**k]) w = product(arange(1,ndiv+1),axis=0)*linalg.inv(X)[ndiv] return w def derivative(func, x0, dx=1.0, n=1, args=(), order=3): """ Find the n-th derivative of a function at a point. Given a function, use a central difference formula with spacing `dx` to compute the `n`-th derivative at `x0`. Parameters ---------- func : function Input function. x0 : float The point at which `n`-th derivative is found. dx : float, optional Spacing. n : int, optional Order of the derivative. Default is 1. args : tuple, optional Arguments order : int, optional Number of points to use, must be odd. Notes ----- Decreasing the step size too small can result in round-off error. Examples -------- >>> from scipy.misc import derivative >>> def f(x): ... 
return x**3 + x**2 >>> derivative(f, 1.0, dx=1e-6) 4.9999999999217337 """ if order < n + 1: raise ValueError("'order' (the number of points used to compute the derivative), " "must be at least the derivative order 'n' + 1.") if order % 2 == 0: raise ValueError("'order' (the number of points used to compute the derivative) " "must be odd.") # pre-computed for n=1 and 2 and low-order for speed. if n == 1: if order == 3: weights = array([-1,0,1])/2.0 elif order == 5: weights = array([1,-8,0,8,-1])/12.0 elif order == 7: weights = array([-1,9,-45,0,45,-9,1])/60.0 elif order == 9: weights = array([3,-32,168,-672,0,672,-168,32,-3])/840.0 else: weights = central_diff_weights(order,1) elif n == 2: if order == 3: weights = array([1,-2.0,1]) elif order == 5: weights = array([-1,16,-30,16,-1])/12.0 elif order == 7: weights = array([2,-27,270,-490,270,-27,2])/180.0 elif order == 9: weights = array([-9,128,-1008,8064,-14350,8064,-1008,128,-9])/5040.0 else: weights = central_diff_weights(order,2) else: weights = central_diff_weights(order, n) val = 0.0 ho = order >> 1 for k in range(order): val += weights[k]*func(x0+(k-ho)*dx,*args) return val / product((dx,)*n,axis=0) def ascent(): """ Get an 8-bit grayscale bit-depth, 512 x 512 derived image for easy use in demos The image is derived from accent-to-the-top.jpg at http://www.public-domain-image.com/people-public-domain-images-pictures/ Parameters ---------- None Returns ------- ascent : ndarray convenient image to use for testing and demonstration Examples -------- >>> import scipy.misc >>> ascent = scipy.misc.ascent() >>> ascent.shape (512, 512) >>> ascent.max() 255 >>> import matplotlib.pyplot as plt >>> plt.gray() >>> plt.imshow(ascent) >>> plt.show() """ import pickle import os fname = os.path.join(os.path.dirname(__file__),'ascent.dat') with open(fname, 'rb') as f: ascent = array(pickle.load(f)) return ascent def face(gray=False): """ Get a 1024 x 768, color image of a raccoon face. 
raccoon-procyon-lotor.jpg at http://www.public-domain-image.com Parameters ---------- gray : bool, optional If True return 8-bit grey-scale image, otherwise return a color image Returns ------- face : ndarray image of a racoon face Examples -------- >>> import scipy.misc >>> face = scipy.misc.face() >>> face.shape (768, 1024, 3) >>> face.max() 255 >>> face.dtype dtype('uint8') >>> import matplotlib.pyplot as plt >>> plt.gray() >>> plt.imshow(face) >>> plt.show() """ import bz2 import os with open(os.path.join(os.path.dirname(__file__), 'face.dat'), 'rb') as f: rawdata = f.read() data = bz2.decompress(rawdata) face = frombuffer(data, dtype='uint8') face.shape = (768, 1024, 3) if gray is True: face = (0.21 * face[:,:,0] + 0.71 * face[:,:,1] + 0.07 * face[:,:,2]).astype('uint8') return face def electrocardiogram(): """ Load an electrocardiogram as an example for a one-dimensional signal. The returned signal is a 5 minute long electrocardiogram (ECG), a medical recording of the heart's electrical activity, sampled at 360 Hz. Returns ------- ecg : ndarray The electrocardiogram in millivolt (mV) sampled at 360 Hz. Notes ----- The provided signal is an excerpt (19:35 to 24:35) from the `record 208`_ (lead MLII) provided by the MIT-BIH Arrhythmia Database [1]_ on PhysioNet [2]_. The excerpt includes noise induced artifacts, typical heartbeats as well as pathological changes. .. _record 208: https://physionet.org/physiobank/database/html/mitdbdir/records.htm#208 .. versionadded:: 1.1.0 References ---------- .. [1] Moody GB, Mark RG. The impact of the MIT-BIH Arrhythmia Database. IEEE Eng in Med and Biol 20(3):45-50 (May-June 2001). (PMID: 11446209); https://doi.org/10.13026/C2F305 .. [2] Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh, Mark RG, Mietus JE, Moody GB, Peng C-K, Stanley HE. PhysioBank, PhysioToolkit, and PhysioNet: Components of a New Research Resource for Complex Physiologic Signals. 
Circulation 101(23):e215-e220; https://doi.org/10.1161/01.CIR.101.23.e215 Examples -------- >>> from scipy.misc import electrocardiogram >>> ecg = electrocardiogram() >>> ecg array([-0.245, -0.215, -0.185, ..., -0.405, -0.395, -0.385]) >>> ecg.shape, ecg.mean(), ecg.std() ((108000,), -0.16510875, 0.5992473991177294) As stated the signal features several areas with a different morphology. E.g. the first few seconds show the electrical activity of a heart in normal sinus rhythm as seen below. >>> import matplotlib.pyplot as plt >>> fs = 360 >>> time = np.arange(ecg.size) / fs >>> plt.plot(time, ecg) >>> plt.xlabel("time in s") >>> plt.ylabel("ECG in mV") >>> plt.xlim(9, 10.2) >>> plt.ylim(-1, 1.5) >>> plt.show() After second 16 however, the first premature ventricular contractions, also called extrasystoles, appear. These have a different morphology compared to typical heartbeats. The difference can easily be observed in the following plot. >>> plt.plot(time, ecg) >>> plt.xlabel("time in s") >>> plt.ylabel("ECG in mV") >>> plt.xlim(46.5, 50) >>> plt.ylim(-2, 1.5) >>> plt.show() At several points large artifacts disturb the recording, e.g.: >>> plt.plot(time, ecg) >>> plt.xlabel("time in s") >>> plt.ylabel("ECG in mV") >>> plt.xlim(207, 215) >>> plt.ylim(-2, 3.5) >>> plt.show() Finally, examining the power spectrum reveals that most of the biosignal is made up of lower frequencies. At 60 Hz the noise induced by the mains electricity can be clearly observed. >>> from scipy.signal import welch >>> f, Pxx = welch(ecg, fs=fs, nperseg=2048, scaling="spectrum") >>> plt.semilogy(f, Pxx) >>> plt.xlabel("Frequency in Hz") >>> plt.ylabel("Power spectrum of the ECG in mV**2") >>> plt.xlim(f[[0, -1]]) >>> plt.show() """ import os file_path = os.path.join(os.path.dirname(__file__), "ecg.dat") with load(file_path) as file: ecg = file["ecg"].astype(int) # np.uint16 -> int # Convert raw output of ADC to mV: (ecg - adc_zero) / adc_gain ecg = (ecg - 1024) / 200.0 return ecg
8,982
28.549342
93
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/misc/__init__.py
""" ========================================== Miscellaneous routines (:mod:`scipy.misc`) ========================================== .. currentmodule:: scipy.misc Various utilities that don't have another home. Note that Pillow (https://python-pillow.org/) is not a dependency of SciPy, but the image manipulation functions indicated in the list below are not available without it. .. autosummary:: :toctree: generated/ ascent - Get example image for processing central_diff_weights - Weights for an n-point central m-th derivative derivative - Find the n-th derivative of a function at a point face - Get example image for processing electrocardiogram - Load an example of a one-dimensional signal. Deprecated functions: .. autosummary:: :toctree: generated/ bytescale - Byte scales an array (image) [requires Pillow] fromimage - Return a copy of a PIL image as a numpy array [requires Pillow] imfilter - Simple filtering of an image [requires Pillow] imread - Read an image file from a filename [requires Pillow] imresize - Resize an image [requires Pillow] imrotate - Rotate an image counter-clockwise [requires Pillow] imsave - Save an array to an image file [requires Pillow] imshow - Simple showing of an image through an external viewer [requires Pillow] toimage - Takes a numpy array and returns a PIL image [requires Pillow] Deprecated aliases: .. autosummary:: :toctree: generated/ comb - Combinations of N things taken k at a time, "N choose k" (imported from `scipy.special`) factorial - The factorial function, ``n! = special.gamma(n+1)`` (imported from `scipy.special`) factorial2 - Double factorial, ``(n!)!`` (imported from `scipy.special`) factorialk - ``(...((n!)!)!...)!`` where there are k '!' (imported from `scipy.special`) logsumexp - Compute the log of the sum of exponentials of input elements (imported from `scipy.special`) pade - Pade approximation to function as the ratio of two polynomials. 
(imported from `scipy.interpolate`) info - Get help information for a function, class, or module. (imported from `numpy`) source - Print function source code. (imported from `numpy`) who - Print the Numpy arrays in the given dictionary. (imported from `numpy`) """ from __future__ import division, print_function, absolute_import __all__ = ['who', 'source', 'info', 'doccer', 'pade', 'comb', 'factorial', 'factorial2', 'factorialk', 'logsumexp'] from . import doccer from .common import * from numpy import who as _who, source as _source, info as _info import numpy as np from scipy.interpolate._pade import pade as _pade from scipy.special import (comb as _comb, logsumexp as _lsm, factorial as _fact, factorial2 as _fact2, factorialk as _factk) import sys _msg = ("Importing `%(name)s` from scipy.misc is deprecated in scipy 1.0.0. Use " "`scipy.special.%(name)s` instead.") comb = np.deprecate(_comb, message=_msg % {"name": _comb.__name__}) logsumexp = np.deprecate(_lsm, message=_msg % {"name": _lsm.__name__}) factorial = np.deprecate(_fact, message=_msg % {"name": _fact.__name__}) factorial2 = np.deprecate(_fact2, message=_msg % {"name": _fact2.__name__}) factorialk = np.deprecate(_factk, message=_msg % {"name": _factk.__name__}) _msg = ("Importing `pade` from scipy.misc is deprecated in scipy 1.0.0. Use " "`scipy.interpolate.pade` instead.") pade = np.deprecate(_pade, message=_msg) _msg = ("Importing `%(name)s` from scipy.misc is deprecated in scipy 1.0.0. Use " "`numpy.%(name)s` instead.") who = np.deprecate(_who, message=_msg % {"name": "who"}) source = np.deprecate(_source, message=_msg % {"name": "source"}) @np.deprecate(message=_msg % {"name": "info.(..., toplevel='scipy')"}) def info(object=None,maxwidth=76,output=sys.stdout,toplevel='scipy'): return _info(object, maxwidth, output, toplevel) info.__doc__ = _info.__doc__ del sys try: from .pilutil import * from . import pilutil __all__ += pilutil.__all__ del pilutil except ImportError: pass from . 
import common __all__ += common.__all__ del common from scipy._lib._testutils import PytestTester test = PytestTester(__name__) del PytestTester
4,259
36.368421
98
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/misc/tests/test_common.py
from __future__ import division, print_function, absolute_import import pytest from numpy.testing import assert_equal, assert_allclose, assert_almost_equal from scipy._lib._numpy_compat import suppress_warnings from scipy.misc import pade, logsumexp, face, ascent, electrocardiogram from scipy.special import logsumexp as sc_logsumexp def test_logsumexp(): # make sure logsumexp can be imported from either scipy.misc or # scipy.special with suppress_warnings() as sup: sup.filter(DeprecationWarning, "`logsumexp` is deprecated") assert_allclose(logsumexp([0, 1]), sc_logsumexp([0, 1]), atol=1e-16) def test_pade(): # make sure scipy.misc.pade exists with suppress_warnings() as sup: sup.filter(DeprecationWarning, "`pade` is deprecated") pade([1, 2], 1) def test_face(): assert_equal(face().shape, (768, 1024, 3)) def test_ascent(): assert_equal(ascent().shape, (512, 512)) def test_electrocardiogram(): # Test shape, dtype and stats of signal ecg = electrocardiogram() assert ecg.dtype == float assert_equal(ecg.shape, (108000,)) assert_almost_equal(ecg.mean(), -0.16510875) assert_almost_equal(ecg.std(), 0.5992473991177294)
1,224
28.878049
76
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/misc/tests/test_doccer.py
''' Some tests for the documenting decorator and support functions ''' from __future__ import division, print_function, absolute_import import sys import pytest from numpy.testing import assert_equal from scipy.misc import doccer # python -OO strips docstrings DOCSTRINGS_STRIPPED = sys.flags.optimize > 1 docstring = \ """Docstring %(strtest1)s %(strtest2)s %(strtest3)s """ param_doc1 = \ """Another test with some indent""" param_doc2 = \ """Another test, one line""" param_doc3 = \ """ Another test with some indent""" doc_dict = {'strtest1':param_doc1, 'strtest2':param_doc2, 'strtest3':param_doc3} filled_docstring = \ """Docstring Another test with some indent Another test, one line Another test with some indent """ def test_unindent(): assert_equal(doccer.unindent_string(param_doc1), param_doc1) assert_equal(doccer.unindent_string(param_doc2), param_doc2) assert_equal(doccer.unindent_string(param_doc3), param_doc1) def test_unindent_dict(): d2 = doccer.unindent_dict(doc_dict) assert_equal(d2['strtest1'], doc_dict['strtest1']) assert_equal(d2['strtest2'], doc_dict['strtest2']) assert_equal(d2['strtest3'], doc_dict['strtest1']) def test_docformat(): udd = doccer.unindent_dict(doc_dict) formatted = doccer.docformat(docstring, udd) assert_equal(formatted, filled_docstring) single_doc = 'Single line doc %(strtest1)s' formatted = doccer.docformat(single_doc, doc_dict) # Note - initial indent of format string does not # affect subsequent indent of inserted parameter assert_equal(formatted, """Single line doc Another test with some indent""") @pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstrings stripped") def test_decorator(): # with unindentation of parameters decorator = doccer.filldoc(doc_dict, True) @decorator def func(): """ Docstring %(strtest3)s """ assert_equal(func.__doc__, """ Docstring Another test with some indent """) # without unindentation of parameters decorator = doccer.filldoc(doc_dict, False) @decorator def func(): """ Docstring %(strtest3)s """ 
assert_equal(func.__doc__, """ Docstring Another test with some indent """) @pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstrings stripped") def test_inherit_docstring_from(): class Foo(object): def func(self): '''Do something useful.''' return def func2(self): '''Something else.''' class Bar(Foo): @doccer.inherit_docstring_from(Foo) def func(self): '''%(super)sABC''' return @doccer.inherit_docstring_from(Foo) def func2(self): # No docstring. return assert_equal(Bar.func.__doc__, Foo.func.__doc__ + 'ABC') assert_equal(Bar.func2.__doc__, Foo.func2.__doc__) bar = Bar() assert_equal(bar.func.__doc__, Foo.func.__doc__ + 'ABC') assert_equal(bar.func2.__doc__, Foo.func2.__doc__)
3,171
24.174603
70
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/misc/tests/test_pilutil.py
from __future__ import division, print_function, absolute_import import os.path import tempfile import shutil import numpy as np import glob import pytest from pytest import raises as assert_raises from numpy.testing import (assert_equal, assert_allclose, assert_array_equal, assert_) from scipy._lib._numpy_compat import suppress_warnings from scipy import misc from numpy.ma.testutils import assert_mask_equal try: import PIL.Image except ImportError: _have_PIL = False else: _have_PIL = True # Function / method decorator for skipping PIL tests on import failure _pilskip = pytest.mark.skipif(not _have_PIL, reason='Need to import PIL for this test') datapath = os.path.dirname(__file__) @_pilskip class TestPILUtil(object): def test_imresize(self): im = np.random.random((10, 20)) for T in np.sctypes['float'] + [float]: # 1.1 rounds to below 1.1 for float16, 1.101 works with suppress_warnings() as sup: sup.filter(DeprecationWarning) im1 = misc.imresize(im, T(1.101)) assert_equal(im1.shape, (11, 22)) def test_imresize2(self): im = np.random.random((20, 30)) with suppress_warnings() as sup: sup.filter(DeprecationWarning) im2 = misc.imresize(im, (30, 40), interp='bicubic') assert_equal(im2.shape, (30, 40)) def test_imresize3(self): im = np.random.random((15, 30)) with suppress_warnings() as sup: sup.filter(DeprecationWarning) im2 = misc.imresize(im, (30, 60), interp='nearest') assert_equal(im2.shape, (30, 60)) def test_imresize4(self): im = np.array([[1, 2], [3, 4]]) # Check that resizing by target size, float and int are the same with suppress_warnings() as sup: sup.filter(DeprecationWarning) im2 = misc.imresize(im, (4, 4), mode='F') # output size im3 = misc.imresize(im, 2., mode='F') # fraction im4 = misc.imresize(im, 200, mode='F') # percentage assert_equal(im2, im3) assert_equal(im2, im4) def test_imresize5(self): im = np.random.random((25, 15)) with suppress_warnings() as sup: sup.filter(DeprecationWarning) im2 = misc.imresize(im, (30, 60), interp='lanczos') 
assert_equal(im2.shape, (30, 60)) def test_bytescale(self): x = np.array([0, 1, 2], np.uint8) y = np.array([0, 1, 2]) with suppress_warnings() as sup: sup.filter(DeprecationWarning) assert_equal(misc.bytescale(x), x) assert_equal(misc.bytescale(y), [0, 128, 255]) def test_bytescale_keywords(self): x = np.array([40, 60, 120, 200, 300, 500]) with suppress_warnings() as sup: sup.filter(DeprecationWarning) res_lowhigh = misc.bytescale(x, low=10, high=143) assert_equal(res_lowhigh, [10, 16, 33, 56, 85, 143]) res_cmincmax = misc.bytescale(x, cmin=60, cmax=300) assert_equal(res_cmincmax, [0, 0, 64, 149, 255, 255]) assert_equal(misc.bytescale(np.array([3, 3, 3]), low=4), [4, 4, 4]) def test_bytescale_cscale_lowhigh(self): a = np.arange(10) with suppress_warnings() as sup: sup.filter(DeprecationWarning) actual = misc.bytescale(a, cmin=3, cmax=6, low=100, high=200) expected = [100, 100, 100, 100, 133, 167, 200, 200, 200, 200] assert_equal(actual, expected) def test_bytescale_mask(self): a = np.ma.MaskedArray(data=[1, 2, 3], mask=[False, False, True]) with suppress_warnings() as sup: sup.filter(DeprecationWarning) actual = misc.bytescale(a) expected = [0, 255, 3] assert_equal(expected, actual) assert_mask_equal(a.mask, actual.mask) assert_(isinstance(actual, np.ma.MaskedArray)) def test_bytescale_rounding(self): a = np.array([-0.5, 0.5, 1.5, 2.5, 3.5]) with suppress_warnings() as sup: sup.filter(DeprecationWarning) actual = misc.bytescale(a, cmin=0, cmax=10, low=0, high=10) expected = [0, 1, 2, 3, 4] assert_equal(actual, expected) def test_bytescale_low_greaterthan_high(self): with assert_raises(ValueError): with suppress_warnings() as sup: sup.filter(DeprecationWarning) misc.bytescale(np.arange(3), low=10, high=5) def test_bytescale_low_lessthan_0(self): with assert_raises(ValueError): with suppress_warnings() as sup: sup.filter(DeprecationWarning) misc.bytescale(np.arange(3), low=-1) def test_bytescale_high_greaterthan_255(self): with assert_raises(ValueError): with 
suppress_warnings() as sup: sup.filter(DeprecationWarning) misc.bytescale(np.arange(3), high=256) def test_bytescale_low_equals_high(self): a = np.arange(3) with suppress_warnings() as sup: sup.filter(DeprecationWarning) actual = misc.bytescale(a, low=10, high=10) expected = [10, 10, 10] assert_equal(actual, expected) def test_imsave(self): picdir = os.path.join(datapath, "data") for png in glob.iglob(picdir + "/*.png"): with suppress_warnings() as sup: # PIL causes a Py3k ResourceWarning sup.filter(message="unclosed file") sup.filter(DeprecationWarning) img = misc.imread(png) tmpdir = tempfile.mkdtemp() try: fn1 = os.path.join(tmpdir, 'test.png') fn2 = os.path.join(tmpdir, 'testimg') with suppress_warnings() as sup: # PIL causes a Py3k ResourceWarning sup.filter(message="unclosed file") sup.filter(DeprecationWarning) misc.imsave(fn1, img) misc.imsave(fn2, img, 'PNG') with suppress_warnings() as sup: # PIL causes a Py3k ResourceWarning sup.filter(message="unclosed file") sup.filter(DeprecationWarning) data1 = misc.imread(fn1) data2 = misc.imread(fn2) assert_allclose(data1, img) assert_allclose(data2, img) assert_equal(data1.shape, img.shape) assert_equal(data2.shape, img.shape) finally: shutil.rmtree(tmpdir) def check_fromimage(filename, irange, shape): fp = open(filename, "rb") with suppress_warnings() as sup: sup.filter(DeprecationWarning) img = misc.fromimage(PIL.Image.open(fp)) fp.close() imin, imax = irange assert_equal(img.min(), imin) assert_equal(img.max(), imax) assert_equal(img.shape, shape) @_pilskip def test_fromimage(): # Test generator for parametric tests # Tuples in the list are (filename, (datamin, datamax), shape). 
files = [('icon.png', (0, 255), (48, 48, 4)), ('icon_mono.png', (0, 255), (48, 48, 4)), ('icon_mono_flat.png', (0, 255), (48, 48, 3))] for fn, irange, shape in files: with suppress_warnings() as sup: sup.filter(DeprecationWarning) check_fromimage(os.path.join(datapath, 'data', fn), irange, shape) @_pilskip def test_imread_indexed_png(): # The file `foo3x5x4indexed.png` was created with this array # (3x5 is (height)x(width)): data = np.array([[[127, 0, 255, 255], [127, 0, 255, 255], [127, 0, 255, 255], [127, 0, 255, 255], [127, 0, 255, 255]], [[192, 192, 255, 0], [192, 192, 255, 0], [0, 0, 255, 0], [0, 0, 255, 0], [0, 0, 255, 0]], [[0, 31, 255, 255], [0, 31, 255, 255], [0, 31, 255, 255], [0, 31, 255, 255], [0, 31, 255, 255]]], dtype=np.uint8) filename = os.path.join(datapath, 'data', 'foo3x5x4indexed.png') with open(filename, 'rb') as f: with suppress_warnings() as sup: sup.filter(DeprecationWarning) im = misc.imread(f) assert_array_equal(im, data) @_pilskip def test_imread_1bit(): # box1.png is a 48x48 grayscale image with bit depth 1. # The border pixels are 1 and the rest are 0. filename = os.path.join(datapath, 'data', 'box1.png') with open(filename, 'rb') as f: with suppress_warnings() as sup: sup.filter(DeprecationWarning) im = misc.imread(f) assert_equal(im.dtype, np.uint8) expected = np.zeros((48, 48), dtype=np.uint8) # When scaled up from 1 bit to 8 bits, 1 becomes 255. expected[:, 0] = 255 expected[:, -1] = 255 expected[0, :] = 255 expected[-1, :] = 255 assert_equal(im, expected) @_pilskip def test_imread_2bit(): # blocks2bit.png is a 12x12 grayscale image with bit depth 2. # The pattern is 4 square subblocks of size 6x6. Upper left # is all 0, upper right is all 1, lower left is all 2, lower # right is all 3. # When scaled up to 8 bits, the values become [0, 85, 170, 255]. 
filename = os.path.join(datapath, 'data', 'blocks2bit.png') with open(filename, 'rb') as f: with suppress_warnings() as sup: sup.filter(DeprecationWarning) im = misc.imread(f) assert_equal(im.dtype, np.uint8) expected = np.zeros((12, 12), dtype=np.uint8) expected[:6, 6:] = 85 expected[6:, :6] = 170 expected[6:, 6:] = 255 assert_equal(im, expected) @_pilskip def test_imread_4bit(): # pattern4bit.png is a 12(h) x 31(w) grayscale image with bit depth 4. # The value in row j and column i is maximum(j, i) % 16. # When scaled up to 8 bits, the values become [0, 17, 34, ..., 255]. filename = os.path.join(datapath, 'data', 'pattern4bit.png') with open(filename, 'rb') as f: with suppress_warnings() as sup: sup.filter(DeprecationWarning) im = misc.imread(f) assert_equal(im.dtype, np.uint8) j, i = np.meshgrid(np.arange(12), np.arange(31), indexing='ij') expected = 17*(np.maximum(j, i) % 16).astype(np.uint8) assert_equal(im, expected)
10,598
36.320423
87
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/misc/tests/__init__.py
0
0
0
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/_testutils.py
from __future__ import division, print_function, absolute_import import os from distutils.version import LooseVersion import functools import numpy as np from numpy.testing import assert_ import pytest import scipy.special as sc __all__ = ['with_special_errors', 'assert_func_equal', 'FuncData'] #------------------------------------------------------------------------------ # Check if a module is present to be used in tests #------------------------------------------------------------------------------ class MissingModule(object): def __init__(self, name): self.name = name def check_version(module, min_ver): if type(module) == MissingModule: return pytest.mark.skip(reason="{} is not installed".format(module.name)) return pytest.mark.skipif(LooseVersion(module.__version__) < LooseVersion(min_ver), reason="{} version >= {} required".format(module.__name__, min_ver)) #------------------------------------------------------------------------------ # Enable convergence and loss of precision warnings -- turn off one by one #------------------------------------------------------------------------------ def with_special_errors(func): """ Enable special function errors (such as underflow, overflow, loss of precision, etc.) 
""" @functools.wraps(func) def wrapper(*a, **kw): with sc.errstate(all='raise'): res = func(*a, **kw) return res return wrapper #------------------------------------------------------------------------------ # Comparing function values at many data points at once, with helpful # error reports #------------------------------------------------------------------------------ def assert_func_equal(func, results, points, rtol=None, atol=None, param_filter=None, knownfailure=None, vectorized=True, dtype=None, nan_ok=False, ignore_inf_sign=False, distinguish_nan_and_inf=True): if hasattr(points, 'next'): # it's a generator points = list(points) points = np.asarray(points) if points.ndim == 1: points = points[:,None] nparams = points.shape[1] if hasattr(results, '__name__'): # function data = points result_columns = None result_func = results else: # dataset data = np.c_[points, results] result_columns = list(range(nparams, data.shape[1])) result_func = None fdata = FuncData(func, data, list(range(nparams)), result_columns=result_columns, result_func=result_func, rtol=rtol, atol=atol, param_filter=param_filter, knownfailure=knownfailure, nan_ok=nan_ok, vectorized=vectorized, ignore_inf_sign=ignore_inf_sign, distinguish_nan_and_inf=distinguish_nan_and_inf) fdata.check() class FuncData(object): """ Data set for checking a special function. Parameters ---------- func : function Function to test filename : str Input file name param_columns : int or tuple of ints Columns indices in which the parameters to `func` lie. Can be imaginary integers to indicate that the parameter should be cast to complex. result_columns : int or tuple of ints, optional Column indices for expected results from `func`. result_func : callable, optional Function to call to obtain results. rtol : float, optional Required relative tolerance. Default is 5*eps. atol : float, optional Required absolute tolerance. Default is 5*tiny. 
param_filter : function, or tuple of functions/Nones, optional Filter functions to exclude some parameter ranges. If omitted, no filtering is done. knownfailure : str, optional Known failure error message to raise when the test is run. If omitted, no exception is raised. nan_ok : bool, optional If nan is always an accepted result. vectorized : bool, optional Whether all functions passed in are vectorized. ignore_inf_sign : bool, optional Whether to ignore signs of infinities. (Doesn't matter for complex-valued functions.) distinguish_nan_and_inf : bool, optional If True, treat numbers which contain nans or infs as as equal. Sets ignore_inf_sign to be True. """ def __init__(self, func, data, param_columns, result_columns=None, result_func=None, rtol=None, atol=None, param_filter=None, knownfailure=None, dataname=None, nan_ok=False, vectorized=True, ignore_inf_sign=False, distinguish_nan_and_inf=True): self.func = func self.data = data self.dataname = dataname if not hasattr(param_columns, '__len__'): param_columns = (param_columns,) self.param_columns = tuple(param_columns) if result_columns is not None: if not hasattr(result_columns, '__len__'): result_columns = (result_columns,) self.result_columns = tuple(result_columns) if result_func is not None: raise ValueError("Only result_func or result_columns should be provided") elif result_func is not None: self.result_columns = None else: raise ValueError("Either result_func or result_columns should be provided") self.result_func = result_func self.rtol = rtol self.atol = atol if not hasattr(param_filter, '__len__'): param_filter = (param_filter,) self.param_filter = param_filter self.knownfailure = knownfailure self.nan_ok = nan_ok self.vectorized = vectorized self.ignore_inf_sign = ignore_inf_sign self.distinguish_nan_and_inf = distinguish_nan_and_inf if not self.distinguish_nan_and_inf: self.ignore_inf_sign = True def get_tolerances(self, dtype): if not np.issubdtype(dtype, np.inexact): dtype = np.dtype(float) info 
= np.finfo(dtype) rtol, atol = self.rtol, self.atol if rtol is None: rtol = 5*info.eps if atol is None: atol = 5*info.tiny return rtol, atol def check(self, data=None, dtype=None): """Check the special function against the data.""" if self.knownfailure: pytest.xfail(reason=self.knownfailure) if data is None: data = self.data if dtype is None: dtype = data.dtype else: data = data.astype(dtype) rtol, atol = self.get_tolerances(dtype) # Apply given filter functions if self.param_filter: param_mask = np.ones((data.shape[0],), np.bool_) for j, filter in zip(self.param_columns, self.param_filter): if filter: param_mask &= list(filter(data[:,j])) data = data[param_mask] # Pick parameters from the correct columns params = [] for j in self.param_columns: if np.iscomplexobj(j): j = int(j.imag) params.append(data[:,j].astype(complex)) else: params.append(data[:,j]) # Helper for evaluating results def eval_func_at_params(func, skip_mask=None): if self.vectorized: got = func(*params) else: got = [] for j in range(len(params[0])): if skip_mask is not None and skip_mask[j]: got.append(np.nan) continue got.append(func(*tuple([params[i][j] for i in range(len(params))]))) got = np.asarray(got) if not isinstance(got, tuple): got = (got,) return got # Evaluate function to be tested got = eval_func_at_params(self.func) # Grab the correct results if self.result_columns is not None: # Correct results passed in with the data wanted = tuple([data[:,icol] for icol in self.result_columns]) else: # Function producing correct results passed in skip_mask = None if self.nan_ok and len(got) == 1: # Don't spend time evaluating what doesn't need to be evaluated skip_mask = np.isnan(got[0]) wanted = eval_func_at_params(self.result_func, skip_mask=skip_mask) # Check the validity of each output returned assert_(len(got) == len(wanted)) for output_num, (x, y) in enumerate(zip(got, wanted)): if np.issubdtype(x.dtype, np.complexfloating) or self.ignore_inf_sign: pinf_x = np.isinf(x) pinf_y = np.isinf(y) 
minf_x = np.isinf(x) minf_y = np.isinf(y) else: pinf_x = np.isposinf(x) pinf_y = np.isposinf(y) minf_x = np.isneginf(x) minf_y = np.isneginf(y) nan_x = np.isnan(x) nan_y = np.isnan(y) olderr = np.seterr(all='ignore') try: abs_y = np.absolute(y) abs_y[~np.isfinite(abs_y)] = 0 diff = np.absolute(x - y) diff[~np.isfinite(diff)] = 0 rdiff = diff / np.absolute(y) rdiff[~np.isfinite(rdiff)] = 0 finally: np.seterr(**olderr) tol_mask = (diff <= atol + rtol*abs_y) pinf_mask = (pinf_x == pinf_y) minf_mask = (minf_x == minf_y) nan_mask = (nan_x == nan_y) bad_j = ~(tol_mask & pinf_mask & minf_mask & nan_mask) point_count = bad_j.size if self.nan_ok: bad_j &= ~nan_x bad_j &= ~nan_y point_count -= (nan_x | nan_y).sum() if not self.distinguish_nan_and_inf and not self.nan_ok: # If nan's are okay we've already covered all these cases inf_x = np.isinf(x) inf_y = np.isinf(y) both_nonfinite = (inf_x & nan_y) | (nan_x & inf_y) bad_j &= ~both_nonfinite point_count -= both_nonfinite.sum() if np.any(bad_j): # Some bad results: inform what, where, and how bad msg = [""] msg.append("Max |adiff|: %g" % diff.max()) msg.append("Max |rdiff|: %g" % rdiff.max()) msg.append("Bad results (%d out of %d) for the following points (in output %d):" % (np.sum(bad_j), point_count, output_num,)) for j in np.where(bad_j)[0]: j = int(j) fmt = lambda x: "%30s" % np.array2string(x[j], precision=18) a = " ".join(map(fmt, params)) b = " ".join(map(fmt, got)) c = " ".join(map(fmt, wanted)) d = fmt(rdiff) msg.append("%s => %s != %s (rdiff %s)" % (a, b, c, d)) assert_(False, "\n".join(msg)) def __repr__(self): """Pretty-printing, esp. for Nose output""" if np.any(list(map(np.iscomplexobj, self.param_columns))): is_complex = " (complex)" else: is_complex = "" if self.dataname: return "<Data for %s%s: %s>" % (self.func.__name__, is_complex, os.path.basename(self.dataname)) else: return "<Data for %s%s>" % (self.func.__name__, is_complex)
11,848
36.261006
98
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/_generate_pyx.py
""" python _generate_pyx.py Generate Ufunc definition source files for scipy.special. Produces files '_ufuncs.c' and '_ufuncs_cxx.c' by first producing Cython. This will generate both calls to PyUFunc_FromFuncAndData and the required ufunc inner loops. The functions signatures are contained in 'functions.json', the syntax for a function signature is <function>: <name> ':' <input> '*' <output> '->' <retval> '*' <ignored_retval> <input>: <typecode>* <output>: <typecode>* <retval>: <typecode>? <ignored_retval>: <typecode>? <headers>: <header_name> [',' <header_name>]* The input parameter types are denoted by single character type codes, according to 'f': 'float' 'd': 'double' 'g': 'long double' 'F': 'float complex' 'D': 'double complex' 'G': 'long double complex' 'i': 'int' 'l': 'long' 'v': 'void' If multiple kernel functions are given for a single ufunc, the one which is used is determined by the standard ufunc mechanism. Kernel functions that are listed first are also matched first against the ufunc input types, so functions listed earlier take precedence. In addition, versions with casted variables, such as d->f,D->F and i->d are automatically generated. There should be either a single header that contains all of the kernel functions listed, or there should be one header for each kernel function. Cython pxd files are allowed in addition to .h files. Cython functions may use fused types, but the names in the list should be the specialized ones, such as 'somefunc[float]'. Function coming from C++ should have ``++`` appended to the name of the header. Floating-point exceptions inside these Ufuncs are converted to special function errors --- which are separately controlled by the user, and off by default, as they are usually not especially useful for the user. The C++ module -------------- In addition to ``_ufuncs`` module, a second module ``_ufuncs_cxx`` is generated. 
This module only exports function pointers that are to be used when constructing some of the ufuncs in ``_ufuncs``. The function pointers are exported via Cython's standard mechanism. This mainly avoids build issues --- Python distutils has no way to figure out what to do if you want to link both C++ and Fortran code in the same shared library. """ from __future__ import division, print_function, absolute_import #--------------------------------------------------------------------------------- # Extra code #--------------------------------------------------------------------------------- UFUNCS_EXTRA_CODE_COMMON = """\ # This file is automatically generated by _generate_pyx.py. # Do not edit manually! from __future__ import absolute_import include "_ufuncs_extra_code_common.pxi" """ UFUNCS_EXTRA_CODE = """\ include "_ufuncs_extra_code.pxi" """ UFUNCS_EXTRA_CODE_BOTTOM = """\ # # Aliases # jn = jv """ CYTHON_SPECIAL_PXD = """\ # This file is automatically generated by _generate_pyx.py. # Do not edit manually! """ CYTHON_SPECIAL_PYX = """\ # This file is automatically generated by _generate_pyx.py. # Do not edit manually! \"\"\" .. highlight:: cython ================================ Cython API for Special Functions ================================ Scalar, typed versions of many of the functions in ``scipy.special`` can be accessed directly from Cython; the complete list is given below. Functions are overloaded using Cython fused types so their names match their ufunc counterpart. The module follows the following conventions: - If a function's ufunc counterpart returns multiple values, then the function returns its outputs via pointers in the final arguments - If a function's ufunc counterpart returns a single value, then the function's output is returned directly. 
The module is usable from Cython via:: cimport scipy.special.cython_special Error Handling ============== Functions can indicate an error by returning ``nan``; however they cannot emit warnings like their counterparts in ``scipy.special``. Available Functions =================== FUNCLIST \"\"\" from __future__ import absolute_import include "_cython_special.pxi" """ #--------------------------------------------------------------------------------- # Code generation #--------------------------------------------------------------------------------- import os import optparse import re import textwrap import itertools import numpy import json BASE_DIR = os.path.abspath(os.path.dirname(__file__)) add_newdocs = __import__('add_newdocs') CY_TYPES = { 'f': 'float', 'd': 'double', 'g': 'long double', 'F': 'float complex', 'D': 'double complex', 'G': 'long double complex', 'i': 'int', 'l': 'long', 'v': 'void', } C_TYPES = { 'f': 'npy_float', 'd': 'npy_double', 'g': 'npy_longdouble', 'F': 'npy_cfloat', 'D': 'npy_cdouble', 'G': 'npy_clongdouble', 'i': 'npy_int', 'l': 'npy_long', 'v': 'void', } TYPE_NAMES = { 'f': 'NPY_FLOAT', 'd': 'NPY_DOUBLE', 'g': 'NPY_LONGDOUBLE', 'F': 'NPY_CFLOAT', 'D': 'NPY_CDOUBLE', 'G': 'NPY_CLONGDOUBLE', 'i': 'NPY_INT', 'l': 'NPY_LONG', } CYTHON_SPECIAL_BENCHFUNCS = { 'airy': ['d*dddd', 'D*DDDD'], 'beta': ['dd'], 'erf': ['d', 'D'], 'exprel': ['d'], 'gamma': ['d', 'D'], 'jv': ['dd', 'dD'], 'loggamma': ['D'], 'logit': ['d'], 'psi': ['d', 'D'], } def underscore(arg): return arg.replace(" ", "_") def cast_order(c): return ['ilfdgFDG'.index(x) for x in c] # These downcasts will cause the function to return NaNs, unless the # values happen to coincide exactly. 
DANGEROUS_DOWNCAST = set([ ('F', 'i'), ('F', 'l'), ('F', 'f'), ('F', 'd'), ('F', 'g'), ('D', 'i'), ('D', 'l'), ('D', 'f'), ('D', 'd'), ('D', 'g'), ('G', 'i'), ('G', 'l'), ('G', 'f'), ('G', 'd'), ('G', 'g'), ('f', 'i'), ('f', 'l'), ('d', 'i'), ('d', 'l'), ('g', 'i'), ('g', 'l'), ('l', 'i'), ]) NAN_VALUE = { 'f': 'NPY_NAN', 'd': 'NPY_NAN', 'g': 'NPY_NAN', 'F': 'NPY_NAN', 'D': 'NPY_NAN', 'G': 'NPY_NAN', 'i': '0xbad0bad0', 'l': '0xbad0bad0', } def generate_loop(func_inputs, func_outputs, func_retval, ufunc_inputs, ufunc_outputs): """ Generate a UFunc loop function that calls a function given as its data parameter with the specified input and output arguments and return value. This function can be passed to PyUFunc_FromFuncAndData. Parameters ---------- func_inputs, func_outputs, func_retval : str Signature of the function to call, given as type codes of the input, output and return value arguments. These 1-character codes are given according to the CY_TYPES and TYPE_NAMES lists above. The corresponding C function signature to be called is: retval func(intype1 iv1, intype2 iv2, ..., outtype1 *ov1, ...); If len(ufunc_outputs) == len(func_outputs)+1, the return value is treated as the first output argument. Otherwise, the return value is ignored. ufunc_inputs, ufunc_outputs : str Ufunc input and output signature. This does not have to exactly match the function signature, as long as the type casts work out on the C level. Returns ------- loop_name Name of the generated loop function. loop_body Generated C code for the loop. 
""" if len(func_inputs) != len(ufunc_inputs): raise ValueError("Function and ufunc have different number of inputs") if len(func_outputs) != len(ufunc_outputs) and not ( func_retval != "v" and len(func_outputs)+1 == len(ufunc_outputs)): raise ValueError("Function retval and ufunc outputs don't match") name = "loop_%s_%s_%s_As_%s_%s" % ( func_retval, func_inputs, func_outputs, ufunc_inputs, ufunc_outputs ) body = "cdef void %s(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:\n" % name body += " cdef np.npy_intp i, n = dims[0]\n" body += " cdef void *func = (<void**>data)[0]\n" body += " cdef char *func_name = <char*>(<void**>data)[1]\n" for j in range(len(ufunc_inputs)): body += " cdef char *ip%d = args[%d]\n" % (j, j) for j in range(len(ufunc_outputs)): body += " cdef char *op%d = args[%d]\n" % (j, j + len(ufunc_inputs)) ftypes = [] fvars = [] outtypecodes = [] for j in range(len(func_inputs)): ftypes.append(CY_TYPES[func_inputs[j]]) fvars.append("<%s>(<%s*>ip%d)[0]" % ( CY_TYPES[func_inputs[j]], CY_TYPES[ufunc_inputs[j]], j)) if len(func_outputs)+1 == len(ufunc_outputs): func_joff = 1 outtypecodes.append(func_retval) body += " cdef %s ov0\n" % (CY_TYPES[func_retval],) else: func_joff = 0 for j, outtype in enumerate(func_outputs): body += " cdef %s ov%d\n" % (CY_TYPES[outtype], j+func_joff) ftypes.append("%s *" % CY_TYPES[outtype]) fvars.append("&ov%d" % (j+func_joff)) outtypecodes.append(outtype) body += " for i in range(n):\n" if len(func_outputs)+1 == len(ufunc_outputs): rv = "ov0 = " else: rv = "" funcall = " %s(<%s(*)(%s) nogil>func)(%s)\n" % ( rv, CY_TYPES[func_retval], ", ".join(ftypes), ", ".join(fvars)) # Cast-check inputs and call function input_checks = [] for j in range(len(func_inputs)): if (ufunc_inputs[j], func_inputs[j]) in DANGEROUS_DOWNCAST: chk = "<%s>(<%s*>ip%d)[0] == (<%s*>ip%d)[0]" % ( CY_TYPES[func_inputs[j]], CY_TYPES[ufunc_inputs[j]], j, CY_TYPES[ufunc_inputs[j]], j) input_checks.append(chk) if input_checks: body += " 
if %s:\n" % (" and ".join(input_checks)) body += " " + funcall body += " else:\n" body += " sf_error.error(func_name, sf_error.DOMAIN, \"invalid input argument\")\n" for j, outtype in enumerate(outtypecodes): body += " ov%d = <%s>%s\n" % ( j, CY_TYPES[outtype], NAN_VALUE[outtype]) else: body += funcall # Assign and cast-check output values for j, (outtype, fouttype) in enumerate(zip(ufunc_outputs, outtypecodes)): if (fouttype, outtype) in DANGEROUS_DOWNCAST: body += " if ov%d == <%s>ov%d:\n" % (j, CY_TYPES[outtype], j) body += " (<%s *>op%d)[0] = <%s>ov%d\n" % ( CY_TYPES[outtype], j, CY_TYPES[outtype], j) body += " else:\n" body += " sf_error.error(func_name, sf_error.DOMAIN, \"invalid output\")\n" body += " (<%s *>op%d)[0] = <%s>%s\n" % ( CY_TYPES[outtype], j, CY_TYPES[outtype], NAN_VALUE[outtype]) else: body += " (<%s *>op%d)[0] = <%s>ov%d\n" % ( CY_TYPES[outtype], j, CY_TYPES[outtype], j) for j in range(len(ufunc_inputs)): body += " ip%d += steps[%d]\n" % (j, j) for j in range(len(ufunc_outputs)): body += " op%d += steps[%d]\n" % (j, j + len(ufunc_inputs)) body += " sf_error.check_fpe(func_name)\n" return name, body def generate_fused_type(codes): """ Generate name of and cython code for a fused type. Parameters ---------- typecodes : str Valid inputs to CY_TYPES (i.e. f, d, g, ...). 
""" cytypes = map(lambda x: CY_TYPES[x], codes) name = codes + "_number_t" declaration = ["ctypedef fused " + name + ":"] for cytype in cytypes: declaration.append(" " + cytype) declaration = "\n".join(declaration) return name, declaration def generate_bench(name, codes): tab = " "*4 top, middle, end = [], [], [] tmp = codes.split("*") if len(tmp) > 1: incodes = tmp[0] outcodes = tmp[1] else: incodes = tmp[0] outcodes = "" inargs, inargs_and_types = [], [] for n, code in enumerate(incodes): arg = "x{}".format(n) inargs.append(arg) inargs_and_types.append("{} {}".format(CY_TYPES[code], arg)) line = "def {{}}(int N, {}):".format(", ".join(inargs_and_types)) top.append(line) top.append(tab + "cdef int n") outargs = [] for n, code in enumerate(outcodes): arg = "y{}".format(n) outargs.append("&{}".format(arg)) line = "cdef {} {}".format(CY_TYPES[code], arg) middle.append(tab + line) end.append(tab + "for n in range(N):") end.append(2*tab + "{}({})") pyfunc = "_bench_{}_{}_{}".format(name, incodes, "py") cyfunc = "_bench_{}_{}_{}".format(name, incodes, "cy") pytemplate = "\n".join(top + end) cytemplate = "\n".join(top + middle + end) pybench = pytemplate.format(pyfunc, "_ufuncs." 
+ name, ", ".join(inargs)) cybench = cytemplate.format(cyfunc, name, ", ".join(inargs + outargs)) return pybench, cybench def generate_doc(name, specs): tab = " "*4 doc = ["- :py:func:`~scipy.special.{}`::\n".format(name)] for spec in specs: incodes, outcodes = spec.split("->") incodes = incodes.split("*") intypes = list(map(lambda x: CY_TYPES[x], incodes[0])) if len(incodes) > 1: types = map(lambda x: "{} *".format(CY_TYPES[x]), incodes[1]) intypes.extend(types) outtype = CY_TYPES[outcodes] line = "{} {}({})".format(outtype, name, ", ".join(intypes)) doc.append(2*tab + line) doc[-1] = "{}\n".format(doc[-1]) doc = "\n".join(doc) return doc def npy_cdouble_from_double_complex(var): """Cast a cython double complex to a numpy cdouble.""" res = "_complexstuff.npy_cdouble_from_double_complex({})".format(var) return res def double_complex_from_npy_cdouble(var): """Cast a numpy cdouble to a cython double complex.""" res = "_complexstuff.double_complex_from_npy_cdouble({})".format(var) return res def iter_variants(inputs, outputs): """ Generate variants of UFunc signatures, by changing variable types, within the limitation that the corresponding C types casts still work out. This does not generate all possibilities, just the ones required for the ufunc to work properly with the most common data types. Parameters ---------- inputs, outputs : str UFunc input and output signature strings Yields ------ new_input, new_output : str Modified input and output strings. Also the original input/output pair is yielded. """ maps = [ # always use long instead of int (more common type on 64-bit) ('i', 'l'), ] # float32-preserving signatures if not ('i' in inputs or 'l' in inputs): # Don't add float32 versions of ufuncs with integer arguments, as this # can lead to incorrect dtype selection if the integer arguments are # arrays, but float arguments are scalars. # For instance sph_harm(0,[0],0,0).dtype == complex64 # This may be a Numpy bug, but we need to work around it. # cf. 
gh-4895, https://github.com/numpy/numpy/issues/5895 maps = maps + [(a + 'dD', b + 'fF') for a, b in maps] # do the replacements for src, dst in maps: new_inputs = inputs new_outputs = outputs for a, b in zip(src, dst): new_inputs = new_inputs.replace(a, b) new_outputs = new_outputs.replace(a, b) yield new_inputs, new_outputs class Func(object): """ Base class for Ufunc and FusedFunc. """ def __init__(self, name, signatures): self.name = name self.signatures = [] self.function_name_overrides = {} for header in signatures.keys(): for name, sig in signatures[header].items(): inarg, outarg, ret = self._parse_signature(sig) self.signatures.append((name, inarg, outarg, ret, header)) def _parse_signature(self, sig): m = re.match(r"\s*([fdgFDGil]*)\s*\*\s*([fdgFDGil]*)\s*->\s*([*fdgFDGil]*)\s*$", sig) if m: inarg, outarg, ret = [x.strip() for x in m.groups()] if ret.count('*') > 1: raise ValueError("{}: Invalid signature: {}".format(self.name, sig)) return inarg, outarg, ret m = re.match(r"\s*([fdgFDGil]*)\s*->\s*([fdgFDGil]?)\s*$", sig) if m: inarg, ret = [x.strip() for x in m.groups()] return inarg, "", ret raise ValueError("{}: Invalid signature: {}".format(self.name, sig)) def get_prototypes(self, nptypes_for_h=False): prototypes = [] for func_name, inarg, outarg, ret, header in self.signatures: ret = ret.replace('*', '') c_args = ([C_TYPES[x] for x in inarg] + [C_TYPES[x] + ' *' for x in outarg]) cy_args = ([CY_TYPES[x] for x in inarg] + [CY_TYPES[x] + ' *' for x in outarg]) c_proto = "%s (*)(%s)" % (C_TYPES[ret], ", ".join(c_args)) if header.endswith("h") and nptypes_for_h: cy_proto = c_proto + "nogil" else: cy_proto = "%s (*)(%s) nogil" % (CY_TYPES[ret], ", ".join(cy_args)) prototypes.append((func_name, c_proto, cy_proto, header)) return prototypes def cython_func_name(self, c_name, specialized=False, prefix="_func_", override=True): # act on function name overrides if override and c_name in self.function_name_overrides: c_name = self.function_name_overrides[c_name] 
prefix = "" # support fused types m = re.match(r'^(.*?)(\[.*\])$', c_name) if m: c_base_name, fused_part = m.groups() else: c_base_name, fused_part = c_name, "" if specialized: return "%s%s%s" % (prefix, c_base_name, fused_part.replace(' ', '_')) else: return "%s%s" % (prefix, c_base_name,) class Ufunc(Func): """ Ufunc signature, restricted format suitable for special functions. Parameters ---------- name Name of the ufunc to create signature String of form 'func: fff*ff->f, func2: ddd->*i' describing the C-level functions and types of their input arguments and return values. The syntax is 'function_name: inputparams*outputparams->output_retval*ignored_retval' Attributes ---------- name : str Python name for the Ufunc signatures : list of (func_name, inarg_spec, outarg_spec, ret_spec, header_name) List of parsed signatures doc : str Docstring, obtained from add_newdocs function_name_overrides : dict of str->str Overrides for the function names in signatures """ def __init__(self, name, signatures): super(Ufunc, self).__init__(name, signatures) self.doc = add_newdocs.get("scipy.special." 
+ name) if self.doc is None: raise ValueError("No docstring for ufunc %r" % name) self.doc = textwrap.dedent(self.doc).strip() def _get_signatures_and_loops(self, all_loops): inarg_num = None outarg_num = None seen = set() variants = [] def add_variant(func_name, inarg, outarg, ret, inp, outp): if inp in seen: return seen.add(inp) sig = (func_name, inp, outp) if "v" in outp: raise ValueError("%s: void signature %r" % (self.name, sig)) if len(inp) != inarg_num or len(outp) != outarg_num: raise ValueError("%s: signature %r does not have %d/%d input/output args" % ( self.name, sig, inarg_num, outarg_num)) loop_name, loop = generate_loop(inarg, outarg, ret, inp, outp) all_loops[loop_name] = loop variants.append((func_name, loop_name, inp, outp)) # First add base variants for func_name, inarg, outarg, ret, header in self.signatures: outp = re.sub(r'\*.*', '', ret) + outarg ret = ret.replace('*', '') if inarg_num is None: inarg_num = len(inarg) outarg_num = len(outp) inp, outp = list(iter_variants(inarg, outp))[0] add_variant(func_name, inarg, outarg, ret, inp, outp) # Then the supplementary ones for func_name, inarg, outarg, ret, header in self.signatures: outp = re.sub(r'\*.*', '', ret) + outarg ret = ret.replace('*', '') for inp, outp in iter_variants(inarg, outp): add_variant(func_name, inarg, outarg, ret, inp, outp) # Then sort variants to input argument cast order # -- the sort is stable, so functions earlier in the signature list # are still preferred variants.sort(key=lambda v: cast_order(v[2])) return variants, inarg_num, outarg_num def generate(self, all_loops): toplevel = "" variants, inarg_num, outarg_num = self._get_signatures_and_loops(all_loops) loops = [] funcs = [] types = [] for func_name, loop_name, inputs, outputs in variants: for x in inputs: types.append(TYPE_NAMES[x]) for x in outputs: types.append(TYPE_NAMES[x]) loops.append(loop_name) funcs.append(func_name) toplevel += "cdef np.PyUFuncGenericFunction ufunc_%s_loops[%d]\n" % (self.name, 
len(loops)) toplevel += "cdef void *ufunc_%s_ptr[%d]\n" % (self.name, 2*len(funcs)) toplevel += "cdef void *ufunc_%s_data[%d]\n" % (self.name, len(funcs)) toplevel += "cdef char ufunc_%s_types[%d]\n" % (self.name, len(types)) toplevel += 'cdef char *ufunc_%s_doc = (\n "%s")\n' % ( self.name, self.doc.replace("\\", "\\\\").replace('"', '\\"').replace('\n', '\\n\"\n "') ) for j, function in enumerate(loops): toplevel += "ufunc_%s_loops[%d] = <np.PyUFuncGenericFunction>%s\n" % (self.name, j, function) for j, type in enumerate(types): toplevel += "ufunc_%s_types[%d] = <char>%s\n" % (self.name, j, type) for j, func in enumerate(funcs): toplevel += "ufunc_%s_ptr[2*%d] = <void*>%s\n" % (self.name, j, self.cython_func_name(func, specialized=True)) toplevel += "ufunc_%s_ptr[2*%d+1] = <void*>(<char*>\"%s\")\n" % (self.name, j, self.name) for j, func in enumerate(funcs): toplevel += "ufunc_%s_data[%d] = &ufunc_%s_ptr[2*%d]\n" % ( self.name, j, self.name, j) toplevel += ('@ = np.PyUFunc_FromFuncAndData(ufunc_@_loops, ' 'ufunc_@_data, ufunc_@_types, %d, %d, %d, 0, ' '"@", ufunc_@_doc, 0)\n' % (len(types)/(inarg_num+outarg_num), inarg_num, outarg_num) ).replace('@', self.name) return toplevel class FusedFunc(Func): """ Generate code for a fused-type special function that can be cimported in cython. """ def __init__(self, name, signatures): super(FusedFunc, self).__init__(name, signatures) self.doc = "See the documentation for scipy.special." 
+ self.name # "codes" are the keys for CY_TYPES self.incodes, self.outcodes = self._get_codes() self.fused_types = set() self.intypes, infused_types = self._get_types(self.incodes) self.fused_types.update(infused_types) self.outtypes, outfused_types = self._get_types(self.outcodes) self.fused_types.update(outfused_types) self.invars, self.outvars = self._get_vars() def _get_codes(self): inarg_num, outarg_num = None, None all_inp, all_outp = [], [] for _, inarg, outarg, ret, _ in self.signatures: outp = re.sub(r'\*.*', '', ret) + outarg if inarg_num is None: inarg_num = len(inarg) outarg_num = len(outp) inp, outp = list(iter_variants(inarg, outp))[0] all_inp.append(inp) all_outp.append(outp) incodes = [] for n in range(inarg_num): codes = unique(map(lambda x: x[n], all_inp)) codes.sort() incodes.append(''.join(codes)) outcodes = [] for n in range(outarg_num): codes = unique(map(lambda x: x[n], all_outp)) codes.sort() outcodes.append(''.join(codes)) return tuple(incodes), tuple(outcodes) def _get_types(self, codes): all_types = [] fused_types = set() for code in codes: if len(code) == 1: # It's not a fused type all_types.append((CY_TYPES[code], code)) else: # It's a fused type fused_type, dec = generate_fused_type(code) fused_types.add(dec) all_types.append((fused_type, code)) return all_types, fused_types def _get_vars(self): invars = [] for n in range(len(self.intypes)): invars.append("x{}".format(n)) outvars = [] for n in range(len(self.outtypes)): outvars.append("y{}".format(n)) return invars, outvars def _get_conditional(self, types, codes, adverb): """Generate an if/elif/else clause that selects a specialization of fused types. 
""" clauses = [] seen = set() for (typ, typcode), code in zip(types, codes): if len(typcode) == 1: continue if typ not in seen: clauses.append("{} is {}".format(typ, underscore(CY_TYPES[code]))) seen.add(typ) if clauses and adverb != "else": line = "{} {}:".format(adverb, " and ".join(clauses)) elif clauses and adverb == "else": line = "else:" else: line = None return line def _get_incallvars(self, intypes, c): """Generate pure input variables to a specialization, i.e. variables that aren't used to return a value. """ incallvars = [] for n, intype in enumerate(intypes): var = self.invars[n] if c and intype == "double complex": var = npy_cdouble_from_double_complex(var) incallvars.append(var) return incallvars def _get_outcallvars(self, outtypes, c): """Generate output variables to a specialization, i.e. pointers that are used to return values. """ outcallvars, tmpvars, casts = [], [], [] # If there are more out variables than out types, we want the # tail of the out variables start = len(self.outvars) - len(outtypes) outvars = self.outvars[start:] for n, (var, outtype) in enumerate(zip(outvars, outtypes)): if c and outtype == "double complex": tmp = "tmp{}".format(n) tmpvars.append(tmp) outcallvars.append("&{}".format(tmp)) tmpcast = double_complex_from_npy_cdouble(tmp) casts.append("{}[0] = {}".format(var, tmpcast)) else: outcallvars.append("{}".format(var)) return outcallvars, tmpvars, casts def _get_nan_decs(self): """Set all variables to nan for specializations of fused types for which don't have signatures. 
""" # Set non fused-type variables to nan tab = " "*4 fused_types, lines = [], [tab + "else:"] seen = set() for outvar, outtype, code in zip(self.outvars, self.outtypes, self.outcodes): if len(code) == 1: line = "{}[0] = {}".format(outvar, NAN_VALUE[code]) lines.append(2*tab + line) else: fused_type = outtype name, _ = fused_type if name not in seen: fused_types.append(fused_type) seen.add(name) if not fused_types: return lines # Set fused-type variables to nan all_codes = [] for fused_type in fused_types: _, codes = fused_type all_codes.append(codes) all_codes = tuple(all_codes) codelens = list(map(lambda x: len(x), all_codes)) last = numpy.product(codelens) - 1 for m, codes in enumerate(itertools.product(*all_codes)): fused_codes, decs = [], [] for n, fused_type in enumerate(fused_types): code = codes[n] fused_codes.append(underscore(CY_TYPES[code])) for nn, outvar in enumerate(self.outvars): if self.outtypes[nn] == fused_type: line = "{}[0] = {}".format(outvar, NAN_VALUE[code]) decs.append(line) if m == 0: adverb = "if" elif m == last: adverb = "else" else: adverb = "elif" cond = self._get_conditional(fused_types, codes, adverb) lines.append(2*tab + cond) lines.extend(map(lambda x: 3*tab + x, decs)) return lines def _get_tmp_decs(self, all_tmpvars): """Generate the declarations of any necessary temporary variables. """ tab = " "*4 tmpvars = list(all_tmpvars) tmpvars.sort() tmpdecs = [] for tmpvar in tmpvars: line = "cdef npy_cdouble {}".format(tmpvar) tmpdecs.append(tab + line) return tmpdecs def _get_python_wrap(self): """Generate a python wrapper for functions which pass their arguments as pointers. 
""" tab = " "*4 body, callvars = [], [] for (intype, _), invar in zip(self.intypes, self.invars): callvars.append("{} {}".format(intype, invar)) line = "def _{}_pywrap({}):".format(self.name, ", ".join(callvars)) body.append(line) for (outtype, _), outvar in zip(self.outtypes, self.outvars): line = "cdef {} {}".format(outtype, outvar) body.append(tab + line) addr_outvars = map(lambda x: "&{}".format(x), self.outvars) line = "{}({}, {})".format(self.name, ", ".join(self.invars), ", ".join(addr_outvars)) body.append(tab + line) line = "return {}".format(", ".join(self.outvars)) body.append(tab + line) body = "\n".join(body) return body def _get_common(self, signum, sig): """Generate code common to all the _generate_* methods.""" tab = " "*4 func_name, incodes, outcodes, retcode, header = sig # Convert ints to longs; cf. iter_variants() incodes = incodes.replace('i', 'l') outcodes = outcodes.replace('i', 'l') retcode = retcode.replace('i', 'l') if header.endswith("h"): c = True else: c = False if header.endswith("++"): cpp = True else: cpp = False intypes = list(map(lambda x: CY_TYPES[x], incodes)) outtypes = list(map(lambda x: CY_TYPES[x], outcodes)) retcode = re.sub(r'\*.*', '', retcode) if not retcode: retcode = 'v' rettype = CY_TYPES[retcode] if cpp: # Functions from _ufuncs_cxx are exported as a void* # pointers; cast them to the correct types func_name = "scipy.special._ufuncs_cxx._export_{}".format(func_name) func_name = "(<{}(*)({}) nogil>{})"\ .format(rettype, ", ".join(intypes + outtypes), func_name) else: func_name = self.cython_func_name(func_name, specialized=True) if signum == 0: adverb = "if" else: adverb = "elif" cond = self._get_conditional(self.intypes, incodes, adverb) if cond: lines = [tab + cond] sp = 2*tab else: lines = [] sp = tab return func_name, incodes, outcodes, retcode, \ intypes, outtypes, rettype, c, lines, sp def _generate_from_return_and_no_outargs(self): tab = " "*4 specs, body = [], [] for signum, sig in enumerate(self.signatures): 
func_name, incodes, outcodes, retcode, intypes, outtypes, \ rettype, c, lines, sp = self._get_common(signum, sig) body.extend(lines) # Generate the call to the specialized function callvars = self._get_incallvars(intypes, c) call = "{}({})".format(func_name, ", ".join(callvars)) if c and rettype == "double complex": call = double_complex_from_npy_cdouble(call) line = sp + "return {}".format(call) body.append(line) sig = "{}->{}".format(incodes, retcode) specs.append(sig) if len(specs) > 1: # Return nan for signatures without a specialization body.append(tab + "else:") outtype, outcodes = self.outtypes[0] last = len(outcodes) - 1 if len(outcodes) == 1: line = "return {}".format(NAN_VALUE[outcodes]) body.append(2*tab + line) else: for n, code in enumerate(outcodes): if n == 0: adverb = "if" elif n == last: adverb = "else" else: adverb = "elif" cond = self._get_conditional(self.outtypes, code, adverb) body.append(2*tab + cond) line = "return {}".format(NAN_VALUE[code]) body.append(3*tab + line) # Generate the head of the function callvars, head = [], [] for n, (intype, _) in enumerate(self.intypes): callvars.append("{} {}".format(intype, self.invars[n])) (outtype, _) = self.outtypes[0] dec = "cpdef {} {}({}) nogil".format(outtype, self.name, ", ".join(callvars)) head.append(dec + ":") head.append(tab + '"""{}"""'.format(self.doc)) src = "\n".join(head + body) return dec, src, specs def _generate_from_outargs_and_no_return(self): tab = " "*4 all_tmpvars = set() specs, body = [], [] for signum, sig in enumerate(self.signatures): func_name, incodes, outcodes, retcode, intypes, outtypes, \ rettype, c, lines, sp = self._get_common(signum, sig) body.extend(lines) # Generate the call to the specialized function callvars = self._get_incallvars(intypes, c) outcallvars, tmpvars, casts = self._get_outcallvars(outtypes, c) callvars.extend(outcallvars) all_tmpvars.update(tmpvars) call = "{}({})".format(func_name, ", ".join(callvars)) body.append(sp + call) body.extend(map(lambda 
x: sp + x, casts)) if len(outcodes) == 1: sig = "{}->{}".format(incodes, outcodes) specs.append(sig) else: sig = "{}*{}->v".format(incodes, outcodes) specs.append(sig) if len(specs) > 1: lines = self._get_nan_decs() body.extend(lines) if len(self.outvars) == 1: line = "return {}[0]".format(self.outvars[0]) body.append(tab + line) # Generate the head of the function callvars, head = [], [] for invar, (intype, _) in zip(self.invars, self.intypes): callvars.append("{} {}".format(intype, invar)) if len(self.outvars) > 1: for outvar, (outtype, _) in zip(self.outvars, self.outtypes): callvars.append("{} *{}".format(outtype, outvar)) if len(self.outvars) == 1: outtype, _ = self.outtypes[0] dec = "cpdef {} {}({}) nogil".format(outtype, self.name, ", ".join(callvars)) else: dec = "cdef void {}({}) nogil".format(self.name, ", ".join(callvars)) head.append(dec + ":") head.append(tab + '"""{}"""'.format(self.doc)) if len(self.outvars) == 1: outvar = self.outvars[0] outtype, _ = self.outtypes[0] line = "cdef {} {}".format(outtype, outvar) head.append(tab + line) head.extend(self._get_tmp_decs(all_tmpvars)) src = "\n".join(head + body) return dec, src, specs def _generate_from_outargs_and_return(self): tab = " "*4 all_tmpvars = set() specs, body = [], [] for signum, sig in enumerate(self.signatures): func_name, incodes, outcodes, retcode, intypes, outtypes, \ rettype, c, lines, sp = self._get_common(signum, sig) body.extend(lines) # Generate the call to the specialized function callvars = self._get_incallvars(intypes, c) outcallvars, tmpvars, casts = self._get_outcallvars(outtypes, c) callvars.extend(outcallvars) all_tmpvars.update(tmpvars) call = "{}({})".format(func_name, ", ".join(callvars)) if c and rettype == "double complex": call = double_complex_from_npy_cdouble(call) call = "{}[0] = {}".format(self.outvars[0], call) body.append(sp + call) body.extend(map(lambda x: sp + x, casts)) sig = "{}*{}->v".format(incodes, outcodes + retcode) specs.append(sig) if len(specs) > 1: 
lines = self._get_nan_decs() body.extend(lines) # Generate the head of the function callvars, head = [], [] for invar, (intype, _) in zip(self.invars, self.intypes): callvars.append("{} {}".format(intype, invar)) for outvar, (outtype, _) in zip(self.outvars, self.outtypes): callvars.append("{} *{}".format(outtype, outvar)) dec = "cdef void {}({}) nogil".format(self.name, ", ".join(callvars)) head.append(dec + ":") head.append(tab + '"""{}"""'.format(self.doc)) head.extend(self._get_tmp_decs(all_tmpvars)) src = "\n".join(head + body) return dec, src, specs def generate(self): _, _, outcodes, retcode, _ = self.signatures[0] retcode = re.sub(r'\*.*', '', retcode) if not retcode: retcode = 'v' if len(outcodes) == 0 and retcode != 'v': dec, src, specs = self._generate_from_return_and_no_outargs() elif len(outcodes) > 0 and retcode == 'v': dec, src, specs = self._generate_from_outargs_and_no_return() elif len(outcodes) > 0 and retcode != 'v': dec, src, specs = self._generate_from_outargs_and_return() else: raise ValueError("Invalid signature") if len(self.outvars) > 1: wrap = self._get_python_wrap() else: wrap = None return dec, src, specs, self.fused_types, wrap def get_declaration(ufunc, c_name, c_proto, cy_proto, header, proto_h_filename): """ Construct a Cython declaration of a function coming either from a pxd or a header file. Do sufficient tricks to enable compile-time type checking against the signature expected by the ufunc. 
""" defs = [] defs_h = [] var_name = c_name.replace('[', '_').replace(']', '_').replace(' ', '_') if header.endswith('.pxd'): defs.append("from .%s cimport %s as %s" % ( header[:-4], ufunc.cython_func_name(c_name, prefix=""), ufunc.cython_func_name(c_name))) # check function signature at compile time proto_name = '_proto_%s_t' % var_name defs.append("ctypedef %s" % (cy_proto.replace('(*)', proto_name))) defs.append("cdef %s *%s_var = &%s" % ( proto_name, proto_name, ufunc.cython_func_name(c_name, specialized=True))) else: # redeclare the function, so that the assumed # signature is checked at compile time new_name = "%s \"%s\"" % (ufunc.cython_func_name(c_name), c_name) defs.append("cdef extern from \"%s\":" % proto_h_filename) defs.append(" cdef %s" % (cy_proto.replace('(*)', new_name))) defs_h.append("#include \"%s\"" % header) defs_h.append("%s;" % (c_proto.replace('(*)', c_name))) return defs, defs_h, var_name def generate_ufuncs(fn_prefix, cxx_fn_prefix, ufuncs): filename = fn_prefix + ".pyx" proto_h_filename = fn_prefix + '_defs.h' cxx_proto_h_filename = cxx_fn_prefix + '_defs.h' cxx_pyx_filename = cxx_fn_prefix + ".pyx" cxx_pxd_filename = cxx_fn_prefix + ".pxd" toplevel = "" # for _ufuncs* defs = [] defs_h = [] all_loops = {} # for _ufuncs_cxx* cxx_defs = [] cxx_pxd_defs = [ "from . 
cimport sf_error", "cdef void _set_action(sf_error.sf_error_t, sf_error.sf_action_t) nogil" ] cxx_defs_h = [] ufuncs.sort(key=lambda u: u.name) for ufunc in ufuncs: # generate function declaration and type checking snippets cfuncs = ufunc.get_prototypes() for c_name, c_proto, cy_proto, header in cfuncs: if header.endswith('++'): header = header[:-2] # for the CXX module item_defs, item_defs_h, var_name = get_declaration(ufunc, c_name, c_proto, cy_proto, header, cxx_proto_h_filename) cxx_defs.extend(item_defs) cxx_defs_h.extend(item_defs_h) cxx_defs.append("cdef void *_export_%s = <void*>%s" % ( var_name, ufunc.cython_func_name(c_name, specialized=True, override=False))) cxx_pxd_defs.append("cdef void *_export_%s" % (var_name,)) # let cython grab the function pointer from the c++ shared library ufunc.function_name_overrides[c_name] = "scipy.special._ufuncs_cxx._export_" + var_name else: # usual case item_defs, item_defs_h, _ = get_declaration(ufunc, c_name, c_proto, cy_proto, header, proto_h_filename) defs.extend(item_defs) defs_h.extend(item_defs_h) # ufunc creation code snippet t = ufunc.generate(all_loops) toplevel += t + "\n" # Produce output toplevel = "\n".join(sorted(all_loops.values()) + defs + [toplevel]) with open(filename, 'w') as f: f.write(UFUNCS_EXTRA_CODE_COMMON) f.write(UFUNCS_EXTRA_CODE) f.write("\n") f.write(toplevel) f.write(UFUNCS_EXTRA_CODE_BOTTOM) defs_h = unique(defs_h) with open(proto_h_filename, 'w') as f: f.write("#ifndef UFUNCS_PROTO_H\n#define UFUNCS_PROTO_H 1\n") f.write("\n".join(defs_h)) f.write("\n#endif\n") cxx_defs_h = unique(cxx_defs_h) with open(cxx_proto_h_filename, 'w') as f: f.write("#ifndef UFUNCS_PROTO_H\n#define UFUNCS_PROTO_H 1\n") f.write("\n".join(cxx_defs_h)) f.write("\n#endif\n") with open(cxx_pyx_filename, 'w') as f: f.write(UFUNCS_EXTRA_CODE_COMMON) f.write("\n") f.write("\n".join(cxx_defs)) f.write("\n# distutils: language = c++\n") with open(cxx_pxd_filename, 'w') as f: f.write("\n".join(cxx_pxd_defs)) def 
generate_fused_funcs(modname, ufunc_fn_prefix, fused_funcs): pxdfile = modname + ".pxd" pyxfile = modname + ".pyx" proto_h_filename = ufunc_fn_prefix + '_defs.h' sources = [] declarations = [] # Code for benchmarks bench_aux = [] fused_types = set() # Parameters for the tests doc = [] defs = [] for func in fused_funcs: if func.name.startswith("_"): # Don't try to deal with functions that have extra layers # of wrappers. continue # Get the function declaration for the .pxd and the source # code for the .pyx dec, src, specs, func_fused_types, wrap = func.generate() declarations.append(dec) sources.append(src) if wrap: sources.append(wrap) fused_types.update(func_fused_types) # Declare the specializations cfuncs = func.get_prototypes(nptypes_for_h=True) for c_name, c_proto, cy_proto, header in cfuncs: if header.endswith('++'): # We grab the c++ functions from the c++ module continue item_defs, _, _ = get_declaration(func, c_name, c_proto, cy_proto, header, proto_h_filename) defs.extend(item_defs) # Add a line to the documentation doc.append(generate_doc(func.name, specs)) # Generate code for benchmarks if func.name in CYTHON_SPECIAL_BENCHFUNCS: for codes in CYTHON_SPECIAL_BENCHFUNCS[func.name]: pybench, cybench = generate_bench(func.name, codes) bench_aux.extend([pybench, cybench]) fused_types = list(fused_types) fused_types.sort() with open(pxdfile, 'w') as f: f.write(CYTHON_SPECIAL_PXD) f.write("\n") f.write("\n\n".join(fused_types)) f.write("\n\n") f.write("\n".join(declarations)) with open(pyxfile, 'w') as f: header = CYTHON_SPECIAL_PYX header = header.replace("FUNCLIST", "\n".join(doc)) f.write(header) f.write("\n") f.write("\n".join(defs)) f.write("\n\n") f.write("\n\n".join(sources)) f.write("\n\n") f.write("\n\n".join(bench_aux)) def unique(lst): """ Return a list without repeated entries (first occurrence is kept), preserving order. 
""" seen = set() new_lst = [] for item in lst: if item in seen: continue seen.add(item) new_lst.append(item) return new_lst def all_newer(src_files, dst_files): from distutils.dep_util import newer return all(os.path.exists(dst) and newer(dst, src) for dst in dst_files for src in src_files) def main(): p = optparse.OptionParser(usage=__doc__.strip()) options, args = p.parse_args() if len(args) != 0: p.error('invalid number of arguments') pwd = os.path.dirname(__file__) src_files = (os.path.abspath(__file__), os.path.abspath(os.path.join(pwd, 'functions.json')), os.path.abspath(os.path.join(pwd, 'add_newdocs.py'))) dst_files = ('_ufuncs.pyx', '_ufuncs_defs.h', '_ufuncs_cxx.pyx', '_ufuncs_cxx.pxd', '_ufuncs_cxx_defs.h', 'cython_special.pyx', 'cython_special.pxd') os.chdir(BASE_DIR) if all_newer(src_files, dst_files): print("scipy/special/_generate_pyx.py: all files up-to-date") return ufuncs, fused_funcs = [], [] with open('functions.json') as data: functions = json.load(data) for f, sig in functions.items(): ufuncs.append(Ufunc(f, sig)) fused_funcs.append(FusedFunc(f, sig)) generate_ufuncs("_ufuncs", "_ufuncs_cxx", ufuncs) generate_fused_funcs("cython_special", "_ufuncs", fused_funcs) if __name__ == "__main__": main()
48,288
34.068264
108
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/_mptestutils.py
from __future__ import division, print_function, absolute_import import os import sys import time import numpy as np from numpy.testing import assert_ import pytest from scipy._lib.six import reraise from scipy.special._testutils import assert_func_equal try: import mpmath except ImportError: pass # ------------------------------------------------------------------------------ # Machinery for systematic tests with mpmath # ------------------------------------------------------------------------------ class Arg(object): """Generate a set of numbers on the real axis, concentrating on 'interesting' regions and covering all orders of magnitude. """ def __init__(self, a=-np.inf, b=np.inf, inclusive_a=True, inclusive_b=True): if a > b: raise ValueError("a should be less than or equal to b") if a == -np.inf: a = -0.5*np.finfo(float).max if b == np.inf: b = 0.5*np.finfo(float).max self.a, self.b = a, b self.inclusive_a, self.inclusive_b = inclusive_a, inclusive_b def _positive_values(self, a, b, n): if a < 0: raise ValueError("a should be positive") # Try to put half of the points into a linspace between a and # 10 the other half in a logspace. if n % 2 == 0: nlogpts = n//2 nlinpts = nlogpts else: nlogpts = n//2 nlinpts = nlogpts + 1 if a >= 10: # Outside of linspace range; just return a logspace. pts = np.logspace(np.log10(a), np.log10(b), n) elif a > 0 and b < 10: # Outside of logspace range; just return a linspace pts = np.linspace(a, b, n) elif a > 0: # Linspace between a and 10 and a logspace between 10 and # b. 
linpts = np.linspace(a, 10, nlinpts, endpoint=False) logpts = np.logspace(1, np.log10(b), nlogpts) pts = np.hstack((linpts, logpts)) elif a == 0 and b <= 10: # Linspace between 0 and b and a logspace between 0 and # the smallest positive point of the linspace linpts = np.linspace(0, b, nlinpts) if linpts.size > 1: right = np.log10(linpts[1]) else: right = -30 logpts = np.logspace(-30, right, nlogpts, endpoint=False) pts = np.hstack((logpts, linpts)) else: # Linspace between 0 and 10, logspace between 0 and the # smallest positive point of the linspace, and a logspace # between 10 and b. if nlogpts % 2 == 0: nlogpts1 = nlogpts//2 nlogpts2 = nlogpts1 else: nlogpts1 = nlogpts//2 nlogpts2 = nlogpts1 + 1 linpts = np.linspace(0, 10, nlinpts, endpoint=False) if linpts.size > 1: right = np.log10(linpts[1]) else: right = -30 logpts1 = np.logspace(-30, right, nlogpts1, endpoint=False) logpts2 = np.logspace(1, np.log10(b), nlogpts2) pts = np.hstack((logpts1, linpts, logpts2)) return np.sort(pts) def values(self, n): """Return an array containing n numbers.""" a, b = self.a, self.b if a == b: return np.zeros(n) if not self.inclusive_a: n += 1 if not self.inclusive_b: n += 1 if n % 2 == 0: n1 = n//2 n2 = n1 else: n1 = n//2 n2 = n1 + 1 if a >= 0: pospts = self._positive_values(a, b, n) negpts = [] elif b <= 0: pospts = [] negpts = -self._positive_values(-b, -a, n) else: pospts = self._positive_values(0, b, n1) negpts = -self._positive_values(0, -a, n2 + 1) # Don't want to get zero twice negpts = negpts[1:] pts = np.hstack((negpts[::-1], pospts)) if not self.inclusive_a: pts = pts[1:] if not self.inclusive_b: pts = pts[:-1] return pts class FixedArg(object): def __init__(self, values): self._values = np.asarray(values) def values(self, n): return self._values class ComplexArg(object): def __init__(self, a=complex(-np.inf, -np.inf), b=complex(np.inf, np.inf)): self.real = Arg(a.real, b.real) self.imag = Arg(a.imag, b.imag) def values(self, n): m = int(np.floor(np.sqrt(n))) x = 
self.real.values(m) y = self.imag.values(m + 1) return (x[:,None] + 1j*y[None,:]).ravel() class IntArg(object): def __init__(self, a=-1000, b=1000): self.a = a self.b = b def values(self, n): v1 = Arg(self.a, self.b).values(max(1 + n//2, n-5)).astype(int) v2 = np.arange(-5, 5) v = np.unique(np.r_[v1, v2]) v = v[(v >= self.a) & (v < self.b)] return v def get_args(argspec, n): if isinstance(argspec, np.ndarray): args = argspec.copy() else: nargs = len(argspec) ms = np.asarray([1.5 if isinstance(spec, ComplexArg) else 1.0 for spec in argspec]) ms = (n**(ms/sum(ms))).astype(int) + 1 args = [] for spec, m in zip(argspec, ms): args.append(spec.values(m)) args = np.array(np.broadcast_arrays(*np.ix_(*args))).reshape(nargs, -1).T return args class MpmathData(object): def __init__(self, scipy_func, mpmath_func, arg_spec, name=None, dps=None, prec=None, n=None, rtol=1e-7, atol=1e-300, ignore_inf_sign=False, distinguish_nan_and_inf=True, nan_ok=True, param_filter=None): # mpmath tests are really slow (see gh-6989). 
Use a small number of # points by default, increase back to 5000 (old default) if XSLOW is # set if n is None: try: is_xslow = int(os.environ.get('SCIPY_XSLOW', '0')) except ValueError: is_xslow = False n = 5000 if is_xslow else 500 self.scipy_func = scipy_func self.mpmath_func = mpmath_func self.arg_spec = arg_spec self.dps = dps self.prec = prec self.n = n self.rtol = rtol self.atol = atol self.ignore_inf_sign = ignore_inf_sign self.nan_ok = nan_ok if isinstance(self.arg_spec, np.ndarray): self.is_complex = np.issubdtype(self.arg_spec.dtype, np.complexfloating) else: self.is_complex = any([isinstance(arg, ComplexArg) for arg in self.arg_spec]) self.ignore_inf_sign = ignore_inf_sign self.distinguish_nan_and_inf = distinguish_nan_and_inf if not name or name == '<lambda>': name = getattr(scipy_func, '__name__', None) if not name or name == '<lambda>': name = getattr(mpmath_func, '__name__', None) self.name = name self.param_filter = param_filter def check(self): np.random.seed(1234) # Generate values for the arguments argarr = get_args(self.arg_spec, self.n) # Check old_dps, old_prec = mpmath.mp.dps, mpmath.mp.prec try: if self.dps is not None: dps_list = [self.dps] else: dps_list = [20] if self.prec is not None: mpmath.mp.prec = self.prec # Proper casting of mpmath input and output types. Using # native mpmath types as inputs gives improved precision # in some cases. 
if np.issubdtype(argarr.dtype, np.complexfloating): pytype = mpc2complex def mptype(x): return mpmath.mpc(complex(x)) else: def mptype(x): return mpmath.mpf(float(x)) def pytype(x): if abs(x.imag) > 1e-16*(1 + abs(x.real)): return np.nan else: return mpf2float(x.real) # Try out different dps until one (or none) works for j, dps in enumerate(dps_list): mpmath.mp.dps = dps try: assert_func_equal(self.scipy_func, lambda *a: pytype(self.mpmath_func(*map(mptype, a))), argarr, vectorized=False, rtol=self.rtol, atol=self.atol, ignore_inf_sign=self.ignore_inf_sign, distinguish_nan_and_inf=self.distinguish_nan_and_inf, nan_ok=self.nan_ok, param_filter=self.param_filter) break except AssertionError: if j >= len(dps_list)-1: reraise(*sys.exc_info()) finally: mpmath.mp.dps, mpmath.mp.prec = old_dps, old_prec def __repr__(self): if self.is_complex: return "<MpmathData: %s (complex)>" % (self.name,) else: return "<MpmathData: %s>" % (self.name,) def assert_mpmath_equal(*a, **kw): d = MpmathData(*a, **kw) d.check() def nonfunctional_tooslow(func): return pytest.mark.skip(reason=" Test not yet functional (too slow), needs more work.")(func) # ------------------------------------------------------------------------------ # Tools for dealing with mpmath quirks # ------------------------------------------------------------------------------ def mpf2float(x): """ Convert an mpf to the nearest floating point number. 
Just using float directly doesn't work because of results like this: with mp.workdps(50): float(mpf("0.99999999999999999")) = 0.9999999999999999 """ return float(mpmath.nstr(x, 17, min_fixed=0, max_fixed=0)) def mpc2complex(x): return complex(mpf2float(x.real), mpf2float(x.imag)) def trace_args(func): def tofloat(x): if isinstance(x, mpmath.mpc): return complex(x) else: return float(x) def wrap(*a, **kw): sys.stderr.write("%r: " % (tuple(map(tofloat, a)),)) sys.stderr.flush() try: r = func(*a, **kw) sys.stderr.write("-> %r" % r) finally: sys.stderr.write("\n") sys.stderr.flush() return r return wrap try: import posix import signal POSIX = ('setitimer' in dir(signal)) except ImportError: POSIX = False class TimeoutError(Exception): pass def time_limited(timeout=0.5, return_val=np.nan, use_sigalrm=True): """ Decorator for setting a timeout for pure-Python functions. If the function does not return within `timeout` seconds, the value `return_val` is returned instead. On POSIX this uses SIGALRM by default. On non-POSIX, settrace is used. Do not use this with threads: the SIGALRM implementation does probably not work well. The settrace implementation only traces the current thread. The settrace implementation slows down execution speed. Slowdown by a factor around 10 is probably typical. 
""" if POSIX and use_sigalrm: def sigalrm_handler(signum, frame): raise TimeoutError() def deco(func): def wrap(*a, **kw): old_handler = signal.signal(signal.SIGALRM, sigalrm_handler) signal.setitimer(signal.ITIMER_REAL, timeout) try: return func(*a, **kw) except TimeoutError: return return_val finally: signal.setitimer(signal.ITIMER_REAL, 0) signal.signal(signal.SIGALRM, old_handler) return wrap else: def deco(func): def wrap(*a, **kw): start_time = time.time() def trace(frame, event, arg): if time.time() - start_time > timeout: raise TimeoutError() return trace sys.settrace(trace) try: return func(*a, **kw) except TimeoutError: sys.settrace(None) return return_val finally: sys.settrace(None) return wrap return deco def exception_to_nan(func): """Decorate function to return nan if it raises an exception""" def wrap(*a, **kw): try: return func(*a, **kw) except Exception: return np.nan return wrap def inf_to_nan(func): """Decorate function to return nan if it returns inf""" def wrap(*a, **kw): v = func(*a, **kw) if not np.isfinite(v): return np.nan return v return wrap def mp_assert_allclose(res, std, atol=0, rtol=1e-17): """ Compare lists of mpmath.mpf's or mpmath.mpc's directly so that it can be done to higher precision than double. 
""" try: len(res) except TypeError: res = list(res) n = len(std) if len(res) != n: raise AssertionError("Lengths of inputs not equal.") failures = [] for k in range(n): try: assert_(mpmath.fabs(res[k] - std[k]) <= atol + rtol*mpmath.fabs(std[k])) except AssertionError: failures.append(k) ndigits = int(abs(np.log10(rtol))) msg = [""] msg.append("Bad results ({} out of {}) for the following points:" .format(len(failures), n)) for k in failures: resrep = mpmath.nstr(res[k], ndigits, min_fixed=0, max_fixed=0) stdrep = mpmath.nstr(std[k], ndigits, min_fixed=0, max_fixed=0) if std[k] == 0: rdiff = "inf" else: rdiff = mpmath.fabs((res[k] - std[k])/std[k]) rdiff = mpmath.nstr(rdiff, 3) msg.append("{}: {} != {} (rdiff {})".format(k, resrep, stdrep, rdiff)) if failures: assert_(False, "\n".join(msg))
14,382
30.541667
100
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/setup.py
from __future__ import division, print_function, absolute_import import os import sys from os.path import join, dirname from distutils.sysconfig import get_python_inc import subprocess import numpy from numpy.distutils.misc_util import get_numpy_include_dirs try: from numpy.distutils.misc_util import get_info except ImportError: raise ValueError("numpy >= 1.4 is required (detected %s from %s)" % (numpy.__version__, numpy.__file__)) def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration from numpy.distutils.system_info import get_info as get_system_info config = Configuration('special', parent_package, top_path) define_macros = [] if sys.platform == 'win32': # define_macros.append(('NOINFINITIES',None)) # define_macros.append(('NONANS',None)) define_macros.append(('_USE_MATH_DEFINES',None)) curdir = os.path.abspath(os.path.dirname(__file__)) inc_dirs = [get_python_inc(), os.path.join(curdir, "c_misc")] if inc_dirs[0] != get_python_inc(plat_specific=1): inc_dirs.append(get_python_inc(plat_specific=1)) inc_dirs.insert(0, get_numpy_include_dirs()) inc_dirs.append(join(dirname(dirname(__file__)), '_lib')) # C libraries c_misc_src = [join('c_misc','*.c')] c_misc_hdr = [join('c_misc','*.h')] cephes_src = [join('cephes','*.c')] cephes_hdr = [join('cephes', '*.h')] config.add_library('sc_c_misc',sources=c_misc_src, include_dirs=[curdir] + inc_dirs, depends=(cephes_hdr + cephes_src + c_misc_hdr + cephes_hdr + ['*.h']), macros=define_macros) config.add_library('sc_cephes',sources=cephes_src, include_dirs=[curdir] + inc_dirs, depends=(cephes_hdr + ['*.h']), macros=define_macros) # Fortran/C++ libraries mach_src = [join('mach','*.f')] amos_src = [join('amos','*.f')] cdf_src = [join('cdflib','*.f')] specfun_src = [join('specfun','*.f')] config.add_library('sc_mach',sources=mach_src, config_fc={'noopt':(__file__,1)}) config.add_library('sc_amos',sources=amos_src) config.add_library('sc_cdf',sources=cdf_src) 
config.add_library('sc_specfun',sources=specfun_src) # Extension specfun config.add_extension('specfun', sources=['specfun.pyf'], f2py_options=['--no-wrap-functions'], depends=specfun_src, define_macros=[], libraries=['sc_specfun']) # Extension _ufuncs headers = ['*.h', join('c_misc', '*.h'), join('cephes', '*.h')] ufuncs_src = ['_ufuncs.c', 'sf_error.c', '_logit.c.src', "amos_wrappers.c", "cdf_wrappers.c", "specfun_wrappers.c"] ufuncs_dep = (headers + ufuncs_src + amos_src + c_misc_src + cephes_src + mach_src + cdf_src + specfun_src) cfg = dict(get_system_info('lapack_opt')) cfg.setdefault('include_dirs', []).extend([curdir] + inc_dirs + [numpy.get_include()]) cfg.setdefault('libraries', []).extend(['sc_amos','sc_c_misc','sc_cephes','sc_mach', 'sc_cdf', 'sc_specfun']) cfg.setdefault('define_macros', []).extend(define_macros) config.add_extension('_ufuncs', depends=ufuncs_dep, sources=ufuncs_src, extra_info=get_info("npymath"), **cfg) # Extension _ufuncs_cxx ufuncs_cxx_src = ['_ufuncs_cxx.cxx', 'sf_error.c', '_faddeeva.cxx', 'Faddeeva.cc', '_wright.cxx', 'wright.cc'] ufuncs_cxx_dep = (headers + ufuncs_cxx_src + cephes_src + ['*.hh']) config.add_extension('_ufuncs_cxx', sources=ufuncs_cxx_src, depends=ufuncs_cxx_dep, include_dirs=[curdir] + inc_dirs, define_macros=define_macros, extra_info=get_info("npymath")) cfg = dict(get_system_info('lapack_opt')) config.add_extension('_ellip_harm_2', sources=['_ellip_harm_2.c', 'sf_error.c',], **cfg ) # Cython API config.add_data_files('cython_special.pxd') cython_special_src = ['cython_special.c', 'sf_error.c', '_logit.c.src', "amos_wrappers.c", "cdf_wrappers.c", "specfun_wrappers.c"] cython_special_dep = (headers + ufuncs_src + ufuncs_cxx_src + amos_src + c_misc_src + cephes_src + mach_src + cdf_src + specfun_src) cfg = dict(get_system_info('lapack_opt')) cfg.setdefault('include_dirs', []).extend([curdir] + inc_dirs + [numpy.get_include()]) cfg.setdefault('libraries', []).extend(['sc_amos','sc_c_misc','sc_cephes','sc_mach', 
'sc_cdf', 'sc_specfun']) cfg.setdefault('define_macros', []).extend(define_macros) config.add_extension('cython_special', depends=cython_special_dep, sources=cython_special_src, extra_info=get_info("npymath"), **cfg) # combinatorics config.add_extension('_comb', sources=['_comb.c']) # testing for _round.h config.add_extension('_test_round', sources=['_test_round.c'], depends=['_round.h', 'cephes/dd_idefs.h'], include_dirs=[numpy.get_include()] + inc_dirs, extra_info=get_info('npymath')) config.add_data_files('tests/*.py') config.add_data_files('tests/data/README') # regenerate npz data files makenpz = os.path.join(os.path.dirname(__file__), 'utils', 'makenpz.py') data_dir = os.path.join(os.path.dirname(__file__), 'tests', 'data') for name in ['boost', 'gsl', 'local']: subprocess.check_call([sys.executable, makenpz, '--use-timestamp', os.path.join(data_dir, name)]) config.add_data_files('tests/data/*.npz') config.add_subpackage('_precompute') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict())
6,679
40.490683
90
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/spfun_stats.py
# Last Change: Sat Mar 21 02:00 PM 2009 J # Copyright (c) 2001, 2002 Enthought, Inc. # # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # a. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # b. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # c. Neither the name of the Enthought nor the names of its contributors # may be used to endorse or promote products derived from this software # without specific prior written permission. # # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH # DAMAGE. """Some more special functions which may be useful for multivariate statistical analysis.""" from __future__ import division, print_function, absolute_import import numpy as np from scipy.special import gammaln as loggam __all__ = ['multigammaln'] def multigammaln(a, d): r"""Returns the log of multivariate gamma, also sometimes called the generalized gamma. 
Parameters ---------- a : ndarray The multivariate gamma is computed for each item of `a`. d : int The dimension of the space of integration. Returns ------- res : ndarray The values of the log multivariate gamma at the given points `a`. Notes ----- The formal definition of the multivariate gamma of dimension d for a real `a` is .. math:: \Gamma_d(a) = \int_{A>0} e^{-tr(A)} |A|^{a - (d+1)/2} dA with the condition :math:`a > (d-1)/2`, and :math:`A > 0` being the set of all the positive definite matrices of dimension `d`. Note that `a` is a scalar: the integrand only is multivariate, the argument is not (the function is defined over a subset of the real set). This can be proven to be equal to the much friendlier equation .. math:: \Gamma_d(a) = \pi^{d(d-1)/4} \prod_{i=1}^{d} \Gamma(a - (i-1)/2). References ---------- R. J. Muirhead, Aspects of multivariate statistical theory (Wiley Series in probability and mathematical statistics). """ a = np.asarray(a) if not np.isscalar(d) or (np.floor(d) != d): raise ValueError("d should be a positive integer (dimension)") if np.any(a <= 0.5 * (d - 1)): raise ValueError("condition a (%f) > 0.5 * (d-1) (%f) not met" % (a, 0.5 * (d-1))) res = (d * (d-1) * 0.25) * np.log(np.pi) res += np.sum(loggam([(a - (j - 1.)/2) for j in range(1, d+1)]), axis=0) return res
3,499
35.458333
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/add_newdocs.py
# Docstrings for generated ufuncs # # The syntax is designed to look like the function add_newdoc is being # called from numpy.lib, but in this file add_newdoc puts the # docstrings in a dictionary. This dictionary is used in # _generate_pyx.py to generate the docstrings for the ufuncs in # scipy.special at the C level when the ufuncs are created at compile # time. from __future__ import division, print_function, absolute_import docdict = {} def get(name): return docdict.get(name) def add_newdoc(place, name, doc): docdict['.'.join((place, name))] = doc add_newdoc("scipy.special", "_sf_error_test_function", """ Private function; do not use. """) add_newdoc("scipy.special", "sph_harm", r""" sph_harm(m, n, theta, phi) Compute spherical harmonics. The spherical harmonics are defined as .. math:: Y^m_n(\theta,\phi) = \sqrt{\frac{2n+1}{4\pi} \frac{(n-m)!}{(n+m)!}} e^{i m \theta} P^m_n(\cos(\phi)) where :math:`P_n^m` are the associated Legendre functions; see `lpmv`. Parameters ---------- m : array_like Order of the harmonic (int); must have ``|m| <= n``. n : array_like Degree of the harmonic (int); must have ``n >= 0``. This is often denoted by ``l`` (lower case L) in descriptions of spherical harmonics. theta : array_like Azimuthal (longitudinal) coordinate; must be in ``[0, 2*pi]``. phi : array_like Polar (colatitudinal) coordinate; must be in ``[0, pi]``. Returns ------- y_mn : complex float The harmonic :math:`Y^m_n` sampled at ``theta`` and ``phi``. Notes ----- There are different conventions for the meanings of the input arguments ``theta`` and ``phi``. In SciPy ``theta`` is the azimuthal angle and ``phi`` is the polar angle. It is common to see the opposite convention, that is, ``theta`` as the polar angle and ``phi`` as the azimuthal angle. Note that SciPy's spherical harmonics include the Condon-Shortley phase [2]_ because it is part of `lpmv`. With SciPy's conventions, the first several spherical harmonics are .. 
math:: Y_0^0(\theta, \phi) &= \frac{1}{2} \sqrt{\frac{1}{\pi}} \\ Y_1^{-1}(\theta, \phi) &= \frac{1}{2} \sqrt{\frac{3}{2\pi}} e^{-i\theta} \sin(\phi) \\ Y_1^0(\theta, \phi) &= \frac{1}{2} \sqrt{\frac{3}{\pi}} \cos(\phi) \\ Y_1^1(\theta, \phi) &= -\frac{1}{2} \sqrt{\frac{3}{2\pi}} e^{i\theta} \sin(\phi). References ---------- .. [1] Digital Library of Mathematical Functions, 14.30. http://dlmf.nist.gov/14.30 .. [2] https://en.wikipedia.org/wiki/Spherical_harmonics#Condon.E2.80.93Shortley_phase """) add_newdoc("scipy.special", "_ellip_harm", """ Internal function, use `ellip_harm` instead. """) add_newdoc("scipy.special", "_ellip_norm", """ Internal function, use `ellip_norm` instead. """) add_newdoc("scipy.special", "_lambertw", """ Internal function, use `lambertw` instead. """) add_newdoc("scipy.special", "wrightomega", r""" wrightomega(z, out=None) Wright Omega function. Defined as the solution to .. math:: \omega + \log(\omega) = z where :math:`\log` is the principal branch of the complex logarithm. Parameters ---------- z : array_like Points at which to evaluate the Wright Omega function Returns ------- omega : ndarray Values of the Wright Omega function Notes ----- .. versionadded:: 0.19.0 The function can also be defined as .. math:: \omega(z) = W_{K(z)}(e^z) where :math:`K(z) = \lceil (\Im(z) - \pi)/(2\pi) \rceil` is the unwinding number and :math:`W` is the Lambert W function. The implementation here is taken from [1]_. See Also -------- lambertw : The Lambert W function References ---------- .. [1] Lawrence, Corless, and Jeffrey, "Algorithm 917: Complex Double-Precision Evaluation of the Wright :math:`\omega` Function." ACM Transactions on Mathematical Software, 2012. :doi:`10.1145/2168773.2168779`. """) add_newdoc("scipy.special", "agm", """ agm(a, b) Compute the arithmetic-geometric mean of `a` and `b`. 
Start with a_0 = a and b_0 = b and iteratively compute:: a_{n+1} = (a_n + b_n)/2 b_{n+1} = sqrt(a_n*b_n) a_n and b_n converge to the same limit as n increases; their common limit is agm(a, b). Parameters ---------- a, b : array_like Real values only. If the values are both negative, the result is negative. If one value is negative and the other is positive, `nan` is returned. Returns ------- float The arithmetic-geometric mean of `a` and `b`. Examples -------- >>> from scipy.special import agm >>> a, b = 24.0, 6.0 >>> agm(a, b) 13.458171481725614 Compare that result to the iteration: >>> while a != b: ... a, b = (a + b)/2, np.sqrt(a*b) ... print("a = %19.16f b=%19.16f" % (a, b)) ... a = 15.0000000000000000 b=12.0000000000000000 a = 13.5000000000000000 b=13.4164078649987388 a = 13.4582039324993694 b=13.4581390309909850 a = 13.4581714817451772 b=13.4581714817060547 a = 13.4581714817256159 b=13.4581714817256159 When array-like arguments are given, broadcasting applies: >>> a = np.array([[1.5], [3], [6]]) # a has shape (3, 1). >>> b = np.array([6, 12, 24, 48]) # b has shape (4,). >>> agm(a, b) array([[ 3.36454287, 5.42363427, 9.05798751, 15.53650756], [ 4.37037309, 6.72908574, 10.84726853, 18.11597502], [ 6. , 8.74074619, 13.45817148, 21.69453707]]) """) add_newdoc("scipy.special", "airy", r""" airy(z) Airy functions and their derivatives. Parameters ---------- z : array_like Real or complex argument. Returns ------- Ai, Aip, Bi, Bip : ndarrays Airy functions Ai and Bi, and their derivatives Aip and Bip. Notes ----- The Airy functions Ai and Bi are two independent solutions of .. math:: y''(x) = x y(x). For real `z` in [-10, 10], the computation is carried out by calling the Cephes [1]_ `airy` routine, which uses power series summation for small `z` and rational minimax approximations for large `z`. Outside this range, the AMOS [2]_ `zairy` and `zbiry` routines are employed. 
They are computed using power series for :math:`|z| < 1` and the following relations to modified Bessel functions for larger `z` (where :math:`t \equiv 2 z^{3/2}/3`): .. math:: Ai(z) = \frac{1}{\pi \sqrt{3}} K_{1/3}(t) Ai'(z) = -\frac{z}{\pi \sqrt{3}} K_{2/3}(t) Bi(z) = \sqrt{\frac{z}{3}} \left(I_{-1/3}(t) + I_{1/3}(t) \right) Bi'(z) = \frac{z}{\sqrt{3}} \left(I_{-2/3}(t) + I_{2/3}(t)\right) See also -------- airye : exponentially scaled Airy functions. References ---------- .. [1] Cephes Mathematical Functions Library, http://www.netlib.org/cephes/index.html .. [2] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions of a Complex Argument and Nonnegative Order", http://netlib.org/amos/ Examples -------- Compute the Airy functions on the interval [-15, 5]. >>> from scipy import special >>> x = np.linspace(-15, 5, 201) >>> ai, aip, bi, bip = special.airy(x) Plot Ai(x) and Bi(x). >>> import matplotlib.pyplot as plt >>> plt.plot(x, ai, 'r', label='Ai(x)') >>> plt.plot(x, bi, 'b--', label='Bi(x)') >>> plt.ylim(-0.5, 1.0) >>> plt.grid() >>> plt.legend(loc='upper left') >>> plt.show() """) add_newdoc("scipy.special", "airye", """ airye(z) Exponentially scaled Airy functions and their derivatives. Scaling:: eAi = Ai * exp(2.0/3.0*z*sqrt(z)) eAip = Aip * exp(2.0/3.0*z*sqrt(z)) eBi = Bi * exp(-abs(2.0/3.0*(z*sqrt(z)).real)) eBip = Bip * exp(-abs(2.0/3.0*(z*sqrt(z)).real)) Parameters ---------- z : array_like Real or complex argument. Returns ------- eAi, eAip, eBi, eBip : array_like Airy functions Ai and Bi, and their derivatives Aip and Bip Notes ----- Wrapper for the AMOS [1]_ routines `zairy` and `zbiry`. See also -------- airy References ---------- .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions of a Complex Argument and Nonnegative Order", http://netlib.org/amos/ """) add_newdoc("scipy.special", "bdtr", r""" bdtr(k, n, p) Binomial distribution cumulative distribution function. 
Sum of the terms 0 through `k` of the Binomial probability density. .. math:: \mathrm{bdtr}(k, n, p) = \sum_{j=0}^k {{n}\choose{j}} p^j (1-p)^{n-j} Parameters ---------- k : array_like Number of successes (int). n : array_like Number of events (int). p : array_like Probability of success in a single event (float). Returns ------- y : ndarray Probability of `k` or fewer successes in `n` independent events with success probabilities of `p`. Notes ----- The terms are not summed directly; instead the regularized incomplete beta function is employed, according to the formula, .. math:: \mathrm{bdtr}(k, n, p) = I_{1 - p}(n - k, k + 1). Wrapper for the Cephes [1]_ routine `bdtr`. References ---------- .. [1] Cephes Mathematical Functions Library, http://www.netlib.org/cephes/index.html """) add_newdoc("scipy.special", "bdtrc", r""" bdtrc(k, n, p) Binomial distribution survival function. Sum of the terms `k + 1` through `n` of the binomial probability density, .. math:: \mathrm{bdtrc}(k, n, p) = \sum_{j=k+1}^n {{n}\choose{j}} p^j (1-p)^{n-j} Parameters ---------- k : array_like Number of successes (int). n : array_like Number of events (int) p : array_like Probability of success in a single event. Returns ------- y : ndarray Probability of `k + 1` or more successes in `n` independent events with success probabilities of `p`. See also -------- bdtr betainc Notes ----- The terms are not summed directly; instead the regularized incomplete beta function is employed, according to the formula, .. math:: \mathrm{bdtrc}(k, n, p) = I_{p}(k + 1, n - k). Wrapper for the Cephes [1]_ routine `bdtrc`. References ---------- .. [1] Cephes Mathematical Functions Library, http://www.netlib.org/cephes/index.html """) add_newdoc("scipy.special", "bdtri", """ bdtri(k, n, y) Inverse function to `bdtr` with respect to `p`. Finds the event probability `p` such that the sum of the terms 0 through `k` of the binomial probability density is equal to the given cumulative probability `y`. 
Parameters ---------- k : array_like Number of successes (float). n : array_like Number of events (float) y : array_like Cumulative probability (probability of `k` or fewer successes in `n` events). Returns ------- p : ndarray The event probability such that `bdtr(k, n, p) = y`. See also -------- bdtr betaincinv Notes ----- The computation is carried out using the inverse beta integral function and the relation,:: 1 - p = betaincinv(n - k, k + 1, y). Wrapper for the Cephes [1]_ routine `bdtri`. References ---------- .. [1] Cephes Mathematical Functions Library, http://www.netlib.org/cephes/index.html """) add_newdoc("scipy.special", "bdtrik", """ bdtrik(y, n, p) Inverse function to `bdtr` with respect to `k`. Finds the number of successes `k` such that the sum of the terms 0 through `k` of the Binomial probability density for `n` events with probability `p` is equal to the given cumulative probability `y`. Parameters ---------- y : array_like Cumulative probability (probability of `k` or fewer successes in `n` events). n : array_like Number of events (float). p : array_like Success probability (float). Returns ------- k : ndarray The number of successes `k` such that `bdtr(k, n, p) = y`. See also -------- bdtr Notes ----- Formula 26.5.24 of [1]_ is used to reduce the binomial distribution to the cumulative incomplete beta distribution. Computation of `k` involves a search for a value that produces the desired value of `y`. The search relies on the monotonicity of `y` with `k`. Wrapper for the CDFLIB [2]_ Fortran routine `cdfbin`. References ---------- .. [1] Milton Abramowitz and Irene A. Stegun, eds. Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables. New York: Dover, 1972. .. [2] Barry Brown, James Lovato, and Kathy Russell, CDFLIB: Library of Fortran Routines for Cumulative Distribution Functions, Inverses, and Other Parameters. 
""") add_newdoc("scipy.special", "bdtrin", """ bdtrin(k, y, p) Inverse function to `bdtr` with respect to `n`. Finds the number of events `n` such that the sum of the terms 0 through `k` of the Binomial probability density for events with probability `p` is equal to the given cumulative probability `y`. Parameters ---------- k : array_like Number of successes (float). y : array_like Cumulative probability (probability of `k` or fewer successes in `n` events). p : array_like Success probability (float). Returns ------- n : ndarray The number of events `n` such that `bdtr(k, n, p) = y`. See also -------- bdtr Notes ----- Formula 26.5.24 of [1]_ is used to reduce the binomial distribution to the cumulative incomplete beta distribution. Computation of `n` involves a search for a value that produces the desired value of `y`. The search relies on the monotonicity of `y` with `n`. Wrapper for the CDFLIB [2]_ Fortran routine `cdfbin`. References ---------- .. [1] Milton Abramowitz and Irene A. Stegun, eds. Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables. New York: Dover, 1972. .. [2] Barry Brown, James Lovato, and Kathy Russell, CDFLIB: Library of Fortran Routines for Cumulative Distribution Functions, Inverses, and Other Parameters. """) add_newdoc("scipy.special", "binom", """ binom(n, k) Binomial coefficient See Also -------- comb : The number of combinations of N things taken k at a time. """) add_newdoc("scipy.special", "btdtria", r""" btdtria(p, b, x) Inverse of `btdtr` with respect to `a`. This is the inverse of the beta cumulative distribution function, `btdtr`, considered as a function of `a`, returning the value of `a` for which `btdtr(a, b, x) = p`, or .. math:: p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt Parameters ---------- p : array_like Cumulative probability, in [0, 1]. b : array_like Shape parameter (`b` > 0). x : array_like The quantile, in [0, 1]. 
Returns ------- a : ndarray The value of the shape parameter `a` such that `btdtr(a, b, x) = p`. See Also -------- btdtr : Cumulative density function of the beta distribution. btdtri : Inverse with respect to `x`. btdtrib : Inverse with respect to `b`. Notes ----- Wrapper for the CDFLIB [1]_ Fortran routine `cdfbet`. The cumulative distribution function `p` is computed using a routine by DiDinato and Morris [2]_. Computation of `a` involves a search for a value that produces the desired value of `p`. The search relies on the monotonicity of `p` with `a`. References ---------- .. [1] Barry Brown, James Lovato, and Kathy Russell, CDFLIB: Library of Fortran Routines for Cumulative Distribution Functions, Inverses, and Other Parameters. .. [2] DiDinato, A. R. and Morris, A. H., Algorithm 708: Significant Digit Computation of the Incomplete Beta Function Ratios. ACM Trans. Math. Softw. 18 (1993), 360-373. """) add_newdoc("scipy.special", "btdtrib", r""" btdtria(a, p, x) Inverse of `btdtr` with respect to `b`. This is the inverse of the beta cumulative distribution function, `btdtr`, considered as a function of `b`, returning the value of `b` for which `btdtr(a, b, x) = p`, or .. math:: p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt Parameters ---------- a : array_like Shape parameter (`a` > 0). p : array_like Cumulative probability, in [0, 1]. x : array_like The quantile, in [0, 1]. Returns ------- b : ndarray The value of the shape parameter `b` such that `btdtr(a, b, x) = p`. See Also -------- btdtr : Cumulative density function of the beta distribution. btdtri : Inverse with respect to `x`. btdtria : Inverse with respect to `a`. Notes ----- Wrapper for the CDFLIB [1]_ Fortran routine `cdfbet`. The cumulative distribution function `p` is computed using a routine by DiDinato and Morris [2]_. Computation of `b` involves a search for a value that produces the desired value of `p`. The search relies on the monotonicity of `p` with `b`. 
References ---------- .. [1] Barry Brown, James Lovato, and Kathy Russell, CDFLIB: Library of Fortran Routines for Cumulative Distribution Functions, Inverses, and Other Parameters. .. [2] DiDinato, A. R. and Morris, A. H., Algorithm 708: Significant Digit Computation of the Incomplete Beta Function Ratios. ACM Trans. Math. Softw. 18 (1993), 360-373. """) add_newdoc("scipy.special", "bei", """ bei(x) Kelvin function bei """) add_newdoc("scipy.special", "beip", """ beip(x) Derivative of the Kelvin function `bei` """) add_newdoc("scipy.special", "ber", """ ber(x) Kelvin function ber. """) add_newdoc("scipy.special", "berp", """ berp(x) Derivative of the Kelvin function `ber` """) add_newdoc("scipy.special", "besselpoly", r""" besselpoly(a, lmb, nu) Weighted integral of a Bessel function. .. math:: \int_0^1 x^\lambda J_\nu(2 a x) \, dx where :math:`J_\nu` is a Bessel function and :math:`\lambda=lmb`, :math:`\nu=nu`. """) add_newdoc("scipy.special", "beta", """ beta(a, b) Beta function. :: beta(a, b) = gamma(a) * gamma(b) / gamma(a+b) """) add_newdoc("scipy.special", "betainc", """ betainc(a, b, x) Incomplete beta integral. Compute the incomplete beta integral of the arguments, evaluated from zero to `x`:: gamma(a+b) / (gamma(a)*gamma(b)) * integral(t**(a-1) (1-t)**(b-1), t=0..x). Notes ----- The incomplete beta is also sometimes defined without the terms in gamma, in which case the above definition is the so-called regularized incomplete beta. Under this definition, you can get the incomplete beta by multiplying the result of the scipy function by beta(a, b). """) add_newdoc("scipy.special", "betaincinv", """ betaincinv(a, b, y) Inverse function to beta integral. Compute `x` such that betainc(a, b, x) = y. """) add_newdoc("scipy.special", "betaln", """ betaln(a, b) Natural logarithm of absolute value of beta function. Computes ``ln(abs(beta(a, b)))``. """) add_newdoc("scipy.special", "boxcox", """ boxcox(x, lmbda) Compute the Box-Cox transformation. 
The Box-Cox transformation is:: y = (x**lmbda - 1) / lmbda if lmbda != 0 log(x) if lmbda == 0 Returns `nan` if ``x < 0``. Returns `-inf` if ``x == 0`` and ``lmbda < 0``. Parameters ---------- x : array_like Data to be transformed. lmbda : array_like Power parameter of the Box-Cox transform. Returns ------- y : array Transformed data. Notes ----- .. versionadded:: 0.14.0 Examples -------- >>> from scipy.special import boxcox >>> boxcox([1, 4, 10], 2.5) array([ 0. , 12.4 , 126.09110641]) >>> boxcox(2, [0, 1, 2]) array([ 0.69314718, 1. , 1.5 ]) """) add_newdoc("scipy.special", "boxcox1p", """ boxcox1p(x, lmbda) Compute the Box-Cox transformation of 1 + `x`. The Box-Cox transformation computed by `boxcox1p` is:: y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0 log(1+x) if lmbda == 0 Returns `nan` if ``x < -1``. Returns `-inf` if ``x == -1`` and ``lmbda < 0``. Parameters ---------- x : array_like Data to be transformed. lmbda : array_like Power parameter of the Box-Cox transform. Returns ------- y : array Transformed data. Notes ----- .. versionadded:: 0.14.0 Examples -------- >>> from scipy.special import boxcox1p >>> boxcox1p(1e-4, [0, 0.5, 1]) array([ 9.99950003e-05, 9.99975001e-05, 1.00000000e-04]) >>> boxcox1p([0.01, 0.1], 0.25) array([ 0.00996272, 0.09645476]) """) add_newdoc("scipy.special", "inv_boxcox", """ inv_boxcox(y, lmbda) Compute the inverse of the Box-Cox transformation. Find ``x`` such that:: y = (x**lmbda - 1) / lmbda if lmbda != 0 log(x) if lmbda == 0 Parameters ---------- y : array_like Data to be transformed. lmbda : array_like Power parameter of the Box-Cox transform. Returns ------- x : array Transformed data. Notes ----- .. versionadded:: 0.16.0 Examples -------- >>> from scipy.special import boxcox, inv_boxcox >>> y = boxcox([1, 4, 10], 2.5) >>> inv_boxcox(y, 2.5) array([1., 4., 10.]) """) add_newdoc("scipy.special", "inv_boxcox1p", """ inv_boxcox1p(y, lmbda) Compute the inverse of the Box-Cox transformation. 
Find ``x`` such that:: y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0 log(1+x) if lmbda == 0 Parameters ---------- y : array_like Data to be transformed. lmbda : array_like Power parameter of the Box-Cox transform. Returns ------- x : array Transformed data. Notes ----- .. versionadded:: 0.16.0 Examples -------- >>> from scipy.special import boxcox1p, inv_boxcox1p >>> y = boxcox1p([1, 4, 10], 2.5) >>> inv_boxcox1p(y, 2.5) array([1., 4., 10.]) """) add_newdoc("scipy.special", "btdtr", r""" btdtr(a, b, x) Cumulative density function of the beta distribution. Returns the integral from zero to `x` of the beta probability density function, .. math:: I = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt where :math:`\Gamma` is the gamma function. Parameters ---------- a : array_like Shape parameter (a > 0). b : array_like Shape parameter (b > 0). x : array_like Upper limit of integration, in [0, 1]. Returns ------- I : ndarray Cumulative density function of the beta distribution with parameters `a` and `b` at `x`. See Also -------- betainc Notes ----- This function is identical to the incomplete beta integral function `betainc`. Wrapper for the Cephes [1]_ routine `btdtr`. References ---------- .. [1] Cephes Mathematical Functions Library, http://www.netlib.org/cephes/index.html """) add_newdoc("scipy.special", "btdtri", r""" btdtri(a, b, p) The `p`-th quantile of the beta distribution. This function is the inverse of the beta cumulative distribution function, `btdtr`, returning the value of `x` for which `btdtr(a, b, x) = p`, or .. math:: p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt Parameters ---------- a : array_like Shape parameter (`a` > 0). b : array_like Shape parameter (`b` > 0). p : array_like Cumulative probability, in [0, 1]. Returns ------- x : ndarray The quantile corresponding to `p`. See Also -------- betaincinv btdtr Notes ----- The value of `x` is found by interval halving or Newton iterations. 
Wrapper for the Cephes [1]_ routine `incbi`, which solves the equivalent problem of finding the inverse of the incomplete beta integral. References ---------- .. [1] Cephes Mathematical Functions Library, http://www.netlib.org/cephes/index.html """) add_newdoc("scipy.special", "cbrt", """ cbrt(x) Element-wise cube root of `x`. Parameters ---------- x : array_like `x` must contain real numbers. Returns ------- float The cube root of each value in `x`. Examples -------- >>> from scipy.special import cbrt >>> cbrt(8) 2.0 >>> cbrt([-8, -3, 0.125, 1.331]) array([-2. , -1.44224957, 0.5 , 1.1 ]) """) add_newdoc("scipy.special", "chdtr", """ chdtr(v, x) Chi square cumulative distribution function Returns the area under the left hand tail (from 0 to `x`) of the Chi square probability density function with `v` degrees of freedom:: 1/(2**(v/2) * gamma(v/2)) * integral(t**(v/2-1) * exp(-t/2), t=0..x) """) add_newdoc("scipy.special", "chdtrc", """ chdtrc(v, x) Chi square survival function Returns the area under the right hand tail (from `x` to infinity) of the Chi square probability density function with `v` degrees of freedom:: 1/(2**(v/2) * gamma(v/2)) * integral(t**(v/2-1) * exp(-t/2), t=x..inf) """) add_newdoc("scipy.special", "chdtri", """ chdtri(v, p) Inverse to `chdtrc` Returns the argument x such that ``chdtrc(v, x) == p``. """) add_newdoc("scipy.special", "chdtriv", """ chdtriv(p, x) Inverse to `chdtr` vs `v` Returns the argument v such that ``chdtr(v, x) == p``. """) add_newdoc("scipy.special", "chndtr", """ chndtr(x, df, nc) Non-central chi square cumulative distribution function """) add_newdoc("scipy.special", "chndtrix", """ chndtrix(p, df, nc) Inverse to `chndtr` vs `x` """) add_newdoc("scipy.special", "chndtridf", """ chndtridf(x, p, nc) Inverse to `chndtr` vs `df` """) add_newdoc("scipy.special", "chndtrinc", """ chndtrinc(x, df, p) Inverse to `chndtr` vs `nc` """) add_newdoc("scipy.special", "cosdg", """ cosdg(x) Cosine of the angle `x` given in degrees. 
""") add_newdoc("scipy.special", "cosm1", """ cosm1(x) cos(x) - 1 for use when `x` is near zero. """) add_newdoc("scipy.special", "cotdg", """ cotdg(x) Cotangent of the angle `x` given in degrees. """) add_newdoc("scipy.special", "dawsn", """ dawsn(x) Dawson's integral. Computes:: exp(-x**2) * integral(exp(t**2), t=0..x). See Also -------- wofz, erf, erfc, erfcx, erfi References ---------- .. [1] Steven G. Johnson, Faddeeva W function implementation. http://ab-initio.mit.edu/Faddeeva Examples -------- >>> from scipy import special >>> import matplotlib.pyplot as plt >>> x = np.linspace(-15, 15, num=1000) >>> plt.plot(x, special.dawsn(x)) >>> plt.xlabel('$x$') >>> plt.ylabel('$dawsn(x)$') >>> plt.show() """) add_newdoc("scipy.special", "ellipe", r""" ellipe(m) Complete elliptic integral of the second kind This function is defined as .. math:: E(m) = \int_0^{\pi/2} [1 - m \sin(t)^2]^{1/2} dt Parameters ---------- m : array_like Defines the parameter of the elliptic integral. Returns ------- E : ndarray Value of the elliptic integral. Notes ----- Wrapper for the Cephes [1]_ routine `ellpe`. For `m > 0` the computation uses the approximation, .. math:: E(m) \approx P(1-m) - (1-m) \log(1-m) Q(1-m), where :math:`P` and :math:`Q` are tenth-order polynomials. For `m < 0`, the relation .. math:: E(m) = E(m/(m - 1)) \sqrt(1-m) is used. The parameterization in terms of :math:`m` follows that of section 17.2 in [2]_. Other parameterizations in terms of the complementary parameter :math:`1 - m`, modular angle :math:`\sin^2(\alpha) = m`, or modulus :math:`k^2 = m` are also used, so be careful that you choose the correct parameter. See Also -------- ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1 ellipk : Complete elliptic integral of the first kind ellipkinc : Incomplete elliptic integral of the first kind ellipeinc : Incomplete elliptic integral of the second kind References ---------- .. 
[1] Cephes Mathematical Functions Library, http://www.netlib.org/cephes/index.html .. [2] Milton Abramowitz and Irene A. Stegun, eds. Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables. New York: Dover, 1972. """) add_newdoc("scipy.special", "ellipeinc", r""" ellipeinc(phi, m) Incomplete elliptic integral of the second kind This function is defined as .. math:: E(\phi, m) = \int_0^{\phi} [1 - m \sin(t)^2]^{1/2} dt Parameters ---------- phi : array_like amplitude of the elliptic integral. m : array_like parameter of the elliptic integral. Returns ------- E : ndarray Value of the elliptic integral. Notes ----- Wrapper for the Cephes [1]_ routine `ellie`. Computation uses arithmetic-geometric means algorithm. The parameterization in terms of :math:`m` follows that of section 17.2 in [2]_. Other parameterizations in terms of the complementary parameter :math:`1 - m`, modular angle :math:`\sin^2(\alpha) = m`, or modulus :math:`k^2 = m` are also used, so be careful that you choose the correct parameter. See Also -------- ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1 ellipk : Complete elliptic integral of the first kind ellipkinc : Incomplete elliptic integral of the first kind ellipe : Complete elliptic integral of the second kind References ---------- .. [1] Cephes Mathematical Functions Library, http://www.netlib.org/cephes/index.html .. [2] Milton Abramowitz and Irene A. Stegun, eds. Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables. New York: Dover, 1972. """) add_newdoc("scipy.special", "ellipj", """ ellipj(u, m) Jacobian elliptic functions Calculates the Jacobian elliptic functions of parameter `m` between 0 and 1, and real argument `u`. Parameters ---------- m : array_like Parameter. u : array_like Argument. 
Returns ------- sn, cn, dn, ph : ndarrays The returned functions:: sn(u|m), cn(u|m), dn(u|m) The value `ph` is such that if `u = ellipk(ph, m)`, then `sn(u|m) = sin(ph)` and `cn(u|m) = cos(ph)`. Notes ----- Wrapper for the Cephes [1]_ routine `ellpj`. These functions are periodic, with quarter-period on the real axis equal to the complete elliptic integral `ellipk(m)`. Relation to incomplete elliptic integral: If `u = ellipk(phi,m)`, then `sn(u|m) = sin(phi)`, and `cn(u|m) = cos(phi)`. The `phi` is called the amplitude of `u`. Computation is by means of the arithmetic-geometric mean algorithm, except when `m` is within 1e-9 of 0 or 1. In the latter case with `m` close to 1, the approximation applies only for `phi < pi/2`. See also -------- ellipk : Complete elliptic integral of the first kind. References ---------- .. [1] Cephes Mathematical Functions Library, http://www.netlib.org/cephes/index.html """) add_newdoc("scipy.special", "ellipkm1", """ ellipkm1(p) Complete elliptic integral of the first kind around `m` = 1 This function is defined as .. math:: K(p) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{-1/2} dt where `m = 1 - p`. Parameters ---------- p : array_like Defines the parameter of the elliptic integral as `m = 1 - p`. Returns ------- K : ndarray Value of the elliptic integral. Notes ----- Wrapper for the Cephes [1]_ routine `ellpk`. For `p <= 1`, computation uses the approximation, .. math:: K(p) \\approx P(p) - \\log(p) Q(p), where :math:`P` and :math:`Q` are tenth-order polynomials. The argument `p` is used internally rather than `m` so that the logarithmic singularity at `m = 1` will be shifted to the origin; this preserves maximum accuracy. For `p > 1`, the identity .. math:: K(p) = K(1/p)/\\sqrt(p) is used. 
See Also -------- ellipk : Complete elliptic integral of the first kind ellipkinc : Incomplete elliptic integral of the first kind ellipe : Complete elliptic integral of the second kind ellipeinc : Incomplete elliptic integral of the second kind References ---------- .. [1] Cephes Mathematical Functions Library, http://www.netlib.org/cephes/index.html """) add_newdoc("scipy.special", "ellipkinc", r""" ellipkinc(phi, m) Incomplete elliptic integral of the first kind This function is defined as .. math:: K(\phi, m) = \int_0^{\phi} [1 - m \sin(t)^2]^{-1/2} dt This function is also called `F(phi, m)`. Parameters ---------- phi : array_like amplitude of the elliptic integral m : array_like parameter of the elliptic integral Returns ------- K : ndarray Value of the elliptic integral Notes ----- Wrapper for the Cephes [1]_ routine `ellik`. The computation is carried out using the arithmetic-geometric mean algorithm. The parameterization in terms of :math:`m` follows that of section 17.2 in [2]_. Other parameterizations in terms of the complementary parameter :math:`1 - m`, modular angle :math:`\sin^2(\alpha) = m`, or modulus :math:`k^2 = m` are also used, so be careful that you choose the correct parameter. See Also -------- ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1 ellipk : Complete elliptic integral of the first kind ellipe : Complete elliptic integral of the second kind ellipeinc : Incomplete elliptic integral of the second kind References ---------- .. [1] Cephes Mathematical Functions Library, http://www.netlib.org/cephes/index.html .. [2] Milton Abramowitz and Irene A. Stegun, eds. Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables. New York: Dover, 1972. """) add_newdoc("scipy.special", "entr", r""" entr(x) Elementwise function for computing entropy. .. 
math:: \text{entr}(x) = \begin{cases} - x \log(x) & x > 0 \\ 0 & x = 0 \\ -\infty & \text{otherwise} \end{cases} Parameters ---------- x : ndarray Input array. Returns ------- res : ndarray The value of the elementwise entropy function at the given points `x`. See Also -------- kl_div, rel_entr Notes ----- This function is concave. .. versionadded:: 0.15.0 """) add_newdoc("scipy.special", "erf", """ erf(z) Returns the error function of complex argument. It is defined as ``2/sqrt(pi)*integral(exp(-t**2), t=0..z)``. Parameters ---------- x : ndarray Input array. Returns ------- res : ndarray The values of the error function at the given points `x`. See Also -------- erfc, erfinv, erfcinv, wofz, erfcx, erfi Notes ----- The cumulative of the unit normal distribution is given by ``Phi(z) = 1/2[1 + erf(z/sqrt(2))]``. References ---------- .. [1] http://en.wikipedia.org/wiki/Error_function .. [2] Milton Abramowitz and Irene A. Stegun, eds. Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables. New York: Dover, 1972. http://www.math.sfu.ca/~cbm/aands/page_297.htm .. [3] Steven G. Johnson, Faddeeva W function implementation. http://ab-initio.mit.edu/Faddeeva Examples -------- >>> from scipy import special >>> import matplotlib.pyplot as plt >>> x = np.linspace(-3, 3) >>> plt.plot(x, special.erf(x)) >>> plt.xlabel('$x$') >>> plt.ylabel('$erf(x)$') >>> plt.show() """) add_newdoc("scipy.special", "erfc", """ erfc(x) Complementary error function, ``1 - erf(x)``. See Also -------- erf, erfi, erfcx, dawsn, wofz References ---------- .. [1] Steven G. Johnson, Faddeeva W function implementation. http://ab-initio.mit.edu/Faddeeva Examples -------- >>> from scipy import special >>> import matplotlib.pyplot as plt >>> x = np.linspace(-3, 3) >>> plt.plot(x, special.erfc(x)) >>> plt.xlabel('$x$') >>> plt.ylabel('$erfc(x)$') >>> plt.show() """) add_newdoc("scipy.special", "erfi", """ erfi(z) Imaginary error function, ``-i erf(i z)``. 
See Also -------- erf, erfc, erfcx, dawsn, wofz Notes ----- .. versionadded:: 0.12.0 References ---------- .. [1] Steven G. Johnson, Faddeeva W function implementation. http://ab-initio.mit.edu/Faddeeva Examples -------- >>> from scipy import special >>> import matplotlib.pyplot as plt >>> x = np.linspace(-3, 3) >>> plt.plot(x, special.erfi(x)) >>> plt.xlabel('$x$') >>> plt.ylabel('$erfi(x)$') >>> plt.show() """) add_newdoc("scipy.special", "erfcx", """ erfcx(x) Scaled complementary error function, ``exp(x**2) * erfc(x)``. See Also -------- erf, erfc, erfi, dawsn, wofz Notes ----- .. versionadded:: 0.12.0 References ---------- .. [1] Steven G. Johnson, Faddeeva W function implementation. http://ab-initio.mit.edu/Faddeeva Examples -------- >>> from scipy import special >>> import matplotlib.pyplot as plt >>> x = np.linspace(-3, 3) >>> plt.plot(x, special.erfcx(x)) >>> plt.xlabel('$x$') >>> plt.ylabel('$erfcx(x)$') >>> plt.show() """) add_newdoc("scipy.special", "eval_jacobi", r""" eval_jacobi(n, alpha, beta, x, out=None) Evaluate Jacobi polynomial at a point. The Jacobi polynomials can be defined via the Gauss hypergeometric function :math:`{}_2F_1` as .. math:: P_n^{(\alpha, \beta)}(x) = \frac{(\alpha + 1)_n}{\Gamma(n + 1)} {}_2F_1(-n, 1 + \alpha + \beta + n; \alpha + 1; (1 - z)/2) where :math:`(\cdot)_n` is the Pochhammer symbol; see `poch`. When :math:`n` is an integer the result is a polynomial of degree :math:`n`. Parameters ---------- n : array_like Degree of the polynomial. If not an integer the result is determined via the relation to the Gauss hypergeometric function. 
alpha : array_like Parameter beta : array_like Parameter x : array_like Points at which to evaluate the polynomial Returns ------- P : ndarray Values of the Jacobi polynomial See Also -------- roots_jacobi : roots and quadrature weights of Jacobi polynomials jacobi : Jacobi polynomial object hyp2f1 : Gauss hypergeometric function """) add_newdoc("scipy.special", "eval_sh_jacobi", r""" eval_sh_jacobi(n, p, q, x, out=None) Evaluate shifted Jacobi polynomial at a point. Defined by .. math:: G_n^{(p, q)}(x) = \binom{2n + p - 1}{n}^{-1} P_n^{(p - q, q - 1)}(2x - 1), where :math:`P_n^{(\cdot, \cdot)}` is the n-th Jacobi polynomial. Parameters ---------- n : int Degree of the polynomial. If not an integer, the result is determined via the relation to `binom` and `eval_jacobi`. p : float Parameter q : float Parameter Returns ------- G : ndarray Values of the shifted Jacobi polynomial. See Also -------- roots_sh_jacobi : roots and quadrature weights of shifted Jacobi polynomials sh_jacobi : shifted Jacobi polynomial object eval_jacobi : evaluate Jacobi polynomials """) add_newdoc("scipy.special", "eval_gegenbauer", r""" eval_gegenbauer(n, alpha, x, out=None) Evaluate Gegenbauer polynomial at a point. The Gegenbauer polynomials can be defined via the Gauss hypergeometric function :math:`{}_2F_1` as .. math:: C_n^{(\alpha)} = \frac{(2\alpha)_n}{\Gamma(n + 1)} {}_2F_1(-n, 2\alpha + n; \alpha + 1/2; (1 - z)/2). When :math:`n` is an integer the result is a polynomial of degree :math:`n`. Parameters ---------- n : array_like Degree of the polynomial. If not an integer, the result is determined via the relation to the Gauss hypergeometric function. 
alpha : array_like Parameter x : array_like Points at which to evaluate the Gegenbauer polynomial Returns ------- C : ndarray Values of the Gegenbauer polynomial See Also -------- roots_gegenbauer : roots and quadrature weights of Gegenbauer polynomials gegenbauer : Gegenbauer polynomial object hyp2f1 : Gauss hypergeometric function """) add_newdoc("scipy.special", "eval_chebyt", r""" eval_chebyt(n, x, out=None) Evaluate Chebyshev polynomial of the first kind at a point. The Chebyshev polynomials of the first kind can be defined via the Gauss hypergeometric function :math:`{}_2F_1` as .. math:: T_n(x) = {}_2F_1(n, -n; 1/2; (1 - x)/2). When :math:`n` is an integer the result is a polynomial of degree :math:`n`. Parameters ---------- n : array_like Degree of the polynomial. If not an integer, the result is determined via the relation to the Gauss hypergeometric function. x : array_like Points at which to evaluate the Chebyshev polynomial Returns ------- T : ndarray Values of the Chebyshev polynomial See Also -------- roots_chebyt : roots and quadrature weights of Chebyshev polynomials of the first kind chebyu : Chebychev polynomial object eval_chebyu : evaluate Chebyshev polynomials of the second kind hyp2f1 : Gauss hypergeometric function numpy.polynomial.chebyshev.Chebyshev : Chebyshev series Notes ----- This routine is numerically stable for `x` in ``[-1, 1]`` at least up to order ``10000``. """) add_newdoc("scipy.special", "eval_chebyu", r""" eval_chebyu(n, x, out=None) Evaluate Chebyshev polynomial of the second kind at a point. The Chebyshev polynomials of the second kind can be defined via the Gauss hypergeometric function :math:`{}_2F_1` as .. math:: U_n(x) = (n + 1) {}_2F_1(-n, n + 2; 3/2; (1 - x)/2). When :math:`n` is an integer the result is a polynomial of degree :math:`n`. Parameters ---------- n : array_like Degree of the polynomial. If not an integer, the result is determined via the relation to the Gauss hypergeometric function. 
x : array_like Points at which to evaluate the Chebyshev polynomial Returns ------- U : ndarray Values of the Chebyshev polynomial See Also -------- roots_chebyu : roots and quadrature weights of Chebyshev polynomials of the second kind chebyu : Chebyshev polynomial object eval_chebyt : evaluate Chebyshev polynomials of the first kind hyp2f1 : Gauss hypergeometric function """) add_newdoc("scipy.special", "eval_chebys", r""" eval_chebys(n, x, out=None) Evaluate Chebyshev polynomial of the second kind on [-2, 2] at a point. These polynomials are defined as .. math:: S_n(x) = U_n(x/2) where :math:`U_n` is a Chebyshev polynomial of the second kind. Parameters ---------- n : array_like Degree of the polynomial. If not an integer, the result is determined via the relation to `eval_chebyu`. x : array_like Points at which to evaluate the Chebyshev polynomial Returns ------- S : ndarray Values of the Chebyshev polynomial See Also -------- roots_chebys : roots and quadrature weights of Chebyshev polynomials of the second kind on [-2, 2] chebys : Chebyshev polynomial object eval_chebyu : evaluate Chebyshev polynomials of the second kind """) add_newdoc("scipy.special", "eval_chebyc", r""" eval_chebyc(n, x, out=None) Evaluate Chebyshev polynomial of the first kind on [-2, 2] at a point. These polynomials are defined as .. math:: S_n(x) = T_n(x/2) where :math:`T_n` is a Chebyshev polynomial of the first kind. Parameters ---------- n : array_like Degree of the polynomial. If not an integer, the result is determined via the relation to `eval_chebyt`. 
x : array_like Points at which to evaluate the Chebyshev polynomial Returns ------- C : ndarray Values of the Chebyshev polynomial See Also -------- roots_chebyc : roots and quadrature weights of Chebyshev polynomials of the first kind on [-2, 2] chebyc : Chebyshev polynomial object numpy.polynomial.chebyshev.Chebyshev : Chebyshev series eval_chebyt : evaluate Chebycshev polynomials of the first kind """) add_newdoc("scipy.special", "eval_sh_chebyt", r""" eval_sh_chebyt(n, x, out=None) Evaluate shifted Chebyshev polynomial of the first kind at a point. These polynomials are defined as .. math:: T_n^*(x) = T_n(2x - 1) where :math:`T_n` is a Chebyshev polynomial of the first kind. Parameters ---------- n : array_like Degree of the polynomial. If not an integer, the result is determined via the relation to `eval_chebyt`. x : array_like Points at which to evaluate the shifted Chebyshev polynomial Returns ------- T : ndarray Values of the shifted Chebyshev polynomial See Also -------- roots_sh_chebyt : roots and quadrature weights of shifted Chebyshev polynomials of the first kind sh_chebyt : shifted Chebyshev polynomial object eval_chebyt : evaluate Chebyshev polynomials of the first kind numpy.polynomial.chebyshev.Chebyshev : Chebyshev series """) add_newdoc("scipy.special", "eval_sh_chebyu", r""" eval_sh_chebyu(n, x, out=None) Evaluate shifted Chebyshev polynomial of the second kind at a point. These polynomials are defined as .. math:: U_n^*(x) = U_n(2x - 1) where :math:`U_n` is a Chebyshev polynomial of the first kind. Parameters ---------- n : array_like Degree of the polynomial. If not an integer, the result is determined via the relation to `eval_chebyu`. 
x : array_like Points at which to evaluate the shifted Chebyshev polynomial Returns ------- U : ndarray Values of the shifted Chebyshev polynomial See Also -------- roots_sh_chebyu : roots and quadrature weights of shifted Chebychev polynomials of the second kind sh_chebyu : shifted Chebyshev polynomial object eval_chebyu : evaluate Chebyshev polynomials of the second kind """) add_newdoc("scipy.special", "eval_legendre", r""" eval_legendre(n, x, out=None) Evaluate Legendre polynomial at a point. The Legendre polynomials can be defined via the Gauss hypergeometric function :math:`{}_2F_1` as .. math:: P_n(x) = {}_2F_1(-n, n + 1; 1; (1 - x)/2). When :math:`n` is an integer the result is a polynomial of degree :math:`n`. Parameters ---------- n : array_like Degree of the polynomial. If not an integer, the result is determined via the relation to the Gauss hypergeometric function. x : array_like Points at which to evaluate the Legendre polynomial Returns ------- P : ndarray Values of the Legendre polynomial See Also -------- roots_legendre : roots and quadrature weights of Legendre polynomials legendre : Legendre polynomial object hyp2f1 : Gauss hypergeometric function numpy.polynomial.legendre.Legendre : Legendre series """) add_newdoc("scipy.special", "eval_sh_legendre", r""" eval_sh_legendre(n, x, out=None) Evaluate shifted Legendre polynomial at a point. These polynomials are defined as .. math:: P_n^*(x) = P_n(2x - 1) where :math:`P_n` is a Legendre polynomial. Parameters ---------- n : array_like Degree of the polynomial. If not an integer, the value is determined via the relation to `eval_legendre`. 
x : array_like Points at which to evaluate the shifted Legendre polynomial Returns ------- P : ndarray Values of the shifted Legendre polynomial See Also -------- roots_sh_legendre : roots and quadrature weights of shifted Legendre polynomials sh_legendre : shifted Legendre polynomial object eval_legendre : evaluate Legendre polynomials numpy.polynomial.legendre.Legendre : Legendre series """) add_newdoc("scipy.special", "eval_genlaguerre", r""" eval_genlaguerre(n, alpha, x, out=None) Evaluate generalized Laguerre polynomial at a point. The generalized Laguerre polynomials can be defined via the confluent hypergeometric function :math:`{}_1F_1` as .. math:: L_n^{(\alpha)}(x) = \binom{n + \alpha}{n} {}_1F_1(-n, \alpha + 1, x). When :math:`n` is an integer the result is a polynomial of degree :math:`n`. The Laguerre polynomials are the special case where :math:`\alpha = 0`. Parameters ---------- n : array_like Degree of the polynomial. If not an integer the result is determined via the relation to the confluent hypergeometric function. alpha : array_like Parameter; must have ``alpha > -1`` x : array_like Points at which to evaluate the generalized Laguerre polynomial Returns ------- L : ndarray Values of the generalized Laguerre polynomial See Also -------- roots_genlaguerre : roots and quadrature weights of generalized Laguerre polynomials genlaguerre : generalized Laguerre polynomial object hyp1f1 : confluent hypergeometric function eval_laguerre : evaluate Laguerre polynomials """) add_newdoc("scipy.special", "eval_laguerre", r""" eval_laguerre(n, x, out=None) Evaluate Laguerre polynomial at a point. The Laguerre polynomials can be defined via the confluent hypergeometric function :math:`{}_1F_1` as .. math:: L_n(x) = {}_1F_1(-n, 1, x). When :math:`n` is an integer the result is a polynomial of degree :math:`n`. Parameters ---------- n : array_like Degree of the polynomial. 
If not an integer the result is determined via the relation to the confluent hypergeometric function. x : array_like Points at which to evaluate the Laguerre polynomial Returns ------- L : ndarray Values of the Laguerre polynomial See Also -------- roots_laguerre : roots and quadrature weights of Laguerre polynomials laguerre : Laguerre polynomial object numpy.polynomial.laguerre.Laguerre : Laguerre series eval_genlaguerre : evaluate generalized Laguerre polynomials """) add_newdoc("scipy.special", "eval_hermite", r""" eval_hermite(n, x, out=None) Evaluate physicist's Hermite polynomial at a point. Defined by .. math:: H_n(x) = (-1)^n e^{x^2} \frac{d^n}{dx^n} e^{-x^2}; :math:`H_n` is a polynomial of degree :math:`n`. Parameters ---------- n : array_like Degree of the polynomial x : array_like Points at which to evaluate the Hermite polynomial Returns ------- H : ndarray Values of the Hermite polynomial See Also -------- roots_hermite : roots and quadrature weights of physicist's Hermite polynomials hermite : physicist's Hermite polynomial object numpy.polynomial.hermite.Hermite : Physicist's Hermite series eval_hermitenorm : evaluate Probabilist's Hermite polynomials """) add_newdoc("scipy.special", "eval_hermitenorm", r""" eval_hermitenorm(n, x, out=None) Evaluate probabilist's (normalized) Hermite polynomial at a point. Defined by .. math:: He_n(x) = (-1)^n e^{x^2/2} \frac{d^n}{dx^n} e^{-x^2/2}; :math:`He_n` is a polynomial of degree :math:`n`. 
Parameters ---------- n : array_like Degree of the polynomial x : array_like Points at which to evaluate the Hermite polynomial Returns ------- He : ndarray Values of the Hermite polynomial See Also -------- roots_hermitenorm : roots and quadrature weights of probabilist's Hermite polynomials hermitenorm : probabilist's Hermite polynomial object numpy.polynomial.hermite_e.HermiteE : Probabilist's Hermite series eval_hermite : evaluate physicist's Hermite polynomials """) add_newdoc("scipy.special", "exp1", """ exp1(z) Exponential integral E_1 of complex argument z :: integral(exp(-z*t)/t, t=1..inf). """) add_newdoc("scipy.special", "exp10", """ exp10(x) Compute ``10**x`` element-wise. Parameters ---------- x : array_like `x` must contain real numbers. Returns ------- float ``10**x``, computed element-wise. Examples -------- >>> from scipy.special import exp10 >>> exp10(3) 1000.0 >>> x = np.array([[-1, -0.5, 0], [0.5, 1, 1.5]]) >>> exp10(x) array([[ 0.1 , 0.31622777, 1. ], [ 3.16227766, 10. , 31.6227766 ]]) """) add_newdoc("scipy.special", "exp2", """ exp2(x) Compute ``2**x`` element-wise. Parameters ---------- x : array_like `x` must contain real numbers. Returns ------- float ``2**x``, computed element-wise. Examples -------- >>> from scipy.special import exp2 >>> exp2(3) 8.0 >>> x = np.array([[-1, -0.5, 0], [0.5, 1, 1.5]]) >>> exp2(x) array([[ 0.5 , 0.70710678, 1. ], [ 1.41421356, 2. , 2.82842712]]) """) add_newdoc("scipy.special", "expi", """ expi(x) Exponential integral Ei Defined as:: integral(exp(t)/t, t=-inf..x) See `expn` for a different exponential integral. """) add_newdoc('scipy.special', 'expit', """ expit(x) Expit ufunc for ndarrays. The expit function, also known as the logistic function, is defined as expit(x) = 1/(1+exp(-x)). It is the inverse of the logit function. Parameters ---------- x : ndarray The ndarray to apply expit to element-wise. Returns ------- out : ndarray An ndarray of the same shape as x. 
Its entries are expit of the corresponding entry of x. See Also -------- logit Notes ----- As a ufunc expit takes a number of optional keyword arguments. For more information see `ufuncs <https://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_ .. versionadded:: 0.10.0 Examples -------- >>> from scipy.special import expit, logit >>> expit([-np.inf, -1.5, 0, 1.5, np.inf]) array([ 0. , 0.18242552, 0.5 , 0.81757448, 1. ]) `logit` is the inverse of `expit`: >>> logit(expit([-2.5, 0, 3.1, 5.0])) array([-2.5, 0. , 3.1, 5. ]) Plot expit(x) for x in [-6, 6]: >>> import matplotlib.pyplot as plt >>> x = np.linspace(-6, 6, 121) >>> y = expit(x) >>> plt.plot(x, y) >>> plt.grid() >>> plt.xlim(-6, 6) >>> plt.xlabel('x') >>> plt.title('expit(x)') >>> plt.show() """) add_newdoc("scipy.special", "expm1", """ expm1(x) Compute ``exp(x) - 1``. When `x` is near zero, ``exp(x)`` is near 1, so the numerical calculation of ``exp(x) - 1`` can suffer from catastrophic loss of precision. ``expm1(x)`` is implemented to avoid the loss of precision that occurs when `x` is near zero. Parameters ---------- x : array_like `x` must contain real numbers. Returns ------- float ``exp(x) - 1`` computed element-wise. Examples -------- >>> from scipy.special import expm1 >>> expm1(1.0) 1.7182818284590451 >>> expm1([-0.2, -0.1, 0, 0.1, 0.2]) array([-0.18126925, -0.09516258, 0. , 0.10517092, 0.22140276]) The exact value of ``exp(7.5e-13) - 1`` is:: 7.5000000000028125000000007031250000001318...*10**-13. Here is what ``expm1(7.5e-13)`` gives: >>> expm1(7.5e-13) 7.5000000000028135e-13 Compare that to ``exp(7.5e-13) - 1``, where the subtraction results in a "catastrophic" loss of precision: >>> np.exp(7.5e-13) - 1 7.5006667543675576e-13 """) add_newdoc("scipy.special", "expn", """ expn(n, x) Exponential integral E_n Returns the exponential integral for integer `n` and non-negative `x` and `n`:: integral(exp(-x*t) / t**n, t=1..inf). 
""") add_newdoc("scipy.special", "exprel", r""" exprel(x) Relative error exponential, ``(exp(x) - 1)/x``. When `x` is near zero, ``exp(x)`` is near 1, so the numerical calculation of ``exp(x) - 1`` can suffer from catastrophic loss of precision. ``exprel(x)`` is implemented to avoid the loss of precision that occurs when `x` is near zero. Parameters ---------- x : ndarray Input array. `x` must contain real numbers. Returns ------- float ``(exp(x) - 1)/x``, computed element-wise. See Also -------- expm1 Notes ----- .. versionadded:: 0.17.0 Examples -------- >>> from scipy.special import exprel >>> exprel(0.01) 1.0050167084168056 >>> exprel([-0.25, -0.1, 0, 0.1, 0.25]) array([ 0.88479687, 0.95162582, 1. , 1.05170918, 1.13610167]) Compare ``exprel(5e-9)`` to the naive calculation. The exact value is ``1.00000000250000000416...``. >>> exprel(5e-9) 1.0000000025 >>> (np.exp(5e-9) - 1)/5e-9 0.99999999392252903 """) add_newdoc("scipy.special", "fdtr", r""" fdtr(dfn, dfd, x) F cumulative distribution function. Returns the value of the cumulative density function of the F-distribution, also known as Snedecor's F-distribution or the Fisher-Snedecor distribution. The F-distribution with parameters :math:`d_n` and :math:`d_d` is the distribution of the random variable, .. math:: X = \frac{U_n/d_n}{U_d/d_d}, where :math:`U_n` and :math:`U_d` are random variables distributed :math:`\chi^2`, with :math:`d_n` and :math:`d_d` degrees of freedom, respectively. Parameters ---------- dfn : array_like First parameter (positive float). dfd : array_like Second parameter (positive float). x : array_like Argument (nonnegative float). Returns ------- y : ndarray The CDF of the F-distribution with parameters `dfn` and `dfd` at `x`. Notes ----- The regularized incomplete beta function is used, according to the formula, .. math:: F(d_n, d_d; x) = I_{xd_n/(d_d + xd_n)}(d_n/2, d_d/2). Wrapper for the Cephes [1]_ routine `fdtr`. References ---------- .. 
[1] Cephes Mathematical Functions Library, http://www.netlib.org/cephes/index.html """) add_newdoc("scipy.special", "fdtrc", r""" fdtrc(dfn, dfd, x) F survival function. Returns the complemented F-distribution function (the integral of the density from `x` to infinity). Parameters ---------- dfn : array_like First parameter (positive float). dfd : array_like Second parameter (positive float). x : array_like Argument (nonnegative float). Returns ------- y : ndarray The complemented F-distribution function with parameters `dfn` and `dfd` at `x`. See also -------- fdtr Notes ----- The regularized incomplete beta function is used, according to the formula, .. math:: F(d_n, d_d; x) = I_{d_d/(d_d + xd_n)}(d_d/2, d_n/2). Wrapper for the Cephes [1]_ routine `fdtrc`. References ---------- .. [1] Cephes Mathematical Functions Library, http://www.netlib.org/cephes/index.html """) add_newdoc("scipy.special", "fdtri", r""" fdtri(dfn, dfd, p) The `p`-th quantile of the F-distribution. This function is the inverse of the F-distribution CDF, `fdtr`, returning the `x` such that `fdtr(dfn, dfd, x) = p`. Parameters ---------- dfn : array_like First parameter (positive float). dfd : array_like Second parameter (positive float). p : array_like Cumulative probability, in [0, 1]. Returns ------- x : ndarray The quantile corresponding to `p`. Notes ----- The computation is carried out using the relation to the inverse regularized beta function, :math:`I^{-1}_x(a, b)`. Let :math:`z = I^{-1}_p(d_d/2, d_n/2).` Then, .. math:: x = \frac{d_d (1 - z)}{d_n z}. If `p` is such that :math:`x < 0.5`, the following relation is used instead for improved stability: let :math:`z' = I^{-1}_{1 - p}(d_n/2, d_d/2).` Then, .. math:: x = \frac{d_d z'}{d_n (1 - z')}. Wrapper for the Cephes [1]_ routine `fdtri`. References ---------- .. 
[1] Cephes Mathematical Functions Library, http://www.netlib.org/cephes/index.html """) add_newdoc("scipy.special", "fdtridfd", """ fdtridfd(dfn, p, x) Inverse to `fdtr` vs dfd Finds the F density argument dfd such that ``fdtr(dfn, dfd, x) == p``. """) add_newdoc("scipy.special", "fdtridfn", """ fdtridfn(p, dfd, x) Inverse to `fdtr` vs dfn finds the F density argument dfn such that ``fdtr(dfn, dfd, x) == p``. """) add_newdoc("scipy.special", "fresnel", """ fresnel(z) Fresnel sin and cos integrals Defined as:: ssa = integral(sin(pi/2 * t**2), t=0..z) csa = integral(cos(pi/2 * t**2), t=0..z) Parameters ---------- z : float or complex array_like Argument Returns ------- ssa, csa Fresnel sin and cos integral values """) add_newdoc("scipy.special", "gamma", r""" gamma(z) Gamma function. .. math:: \Gamma(z) = \int_0^\infty x^{z-1} e^{-x} dx = (z - 1)! The gamma function is often referred to as the generalized factorial since ``z*gamma(z) = gamma(z+1)`` and ``gamma(n+1) = n!`` for natural number *n*. Parameters ---------- z : float or complex array_like Returns ------- float or complex The value(s) of gamma(z) Examples -------- >>> from scipy.special import gamma, factorial >>> gamma([0, 0.5, 1, 5]) array([ inf, 1.77245385, 1. , 24. ]) >>> z = 2.5 + 1j >>> gamma(z) (0.77476210455108352+0.70763120437959293j) >>> gamma(z+1), z*gamma(z) # Recurrence property ((1.2292740569981171+2.5438401155000685j), (1.2292740569981158+2.5438401155000658j)) >>> gamma(0.5)**2 # gamma(0.5) = sqrt(pi) 3.1415926535897927 Plot gamma(x) for real x >>> x = np.linspace(-3.5, 5.5, 2251) >>> y = gamma(x) >>> import matplotlib.pyplot as plt >>> plt.plot(x, y, 'b', alpha=0.6, label='gamma(x)') >>> k = np.arange(1, 7) >>> plt.plot(k, factorial(k-1), 'k*', alpha=0.6, ... 
label='(x-1)!, x = 1, 2, ...') >>> plt.xlim(-3.5, 5.5) >>> plt.ylim(-10, 25) >>> plt.grid() >>> plt.xlabel('x') >>> plt.legend(loc='lower right') >>> plt.show() """) add_newdoc("scipy.special", "gammainc", r""" gammainc(a, x) Regularized lower incomplete gamma function. Defined as .. math:: \frac{1}{\Gamma(a)} \int_0^x t^{a - 1}e^{-t} dt for :math:`a > 0` and :math:`x \geq 0`. The function satisfies the relation ``gammainc(a, x) + gammaincc(a, x) = 1`` where `gammaincc` is the regularized upper incomplete gamma function. Notes ----- The implementation largely follows that of [1]_. See also -------- gammaincc : regularized upper incomplete gamma function gammaincinv : inverse to ``gammainc`` versus ``x`` gammainccinv : inverse to ``gammaincc`` versus ``x`` References ---------- .. [1] Maddock et. al., "Incomplete Gamma Functions", http://www.boost.org/doc/libs/1_61_0/libs/math/doc/html/math_toolkit/sf_gamma/igamma.html """) add_newdoc("scipy.special", "gammaincc", r""" gammaincc(a, x) Regularized upper incomplete gamma function. Defined as .. math:: \frac{1}{\Gamma(a)} \int_x^\infty t^{a - 1}e^{-t} dt for :math:`a > 0` and :math:`x \geq 0`. The function satisfies the relation ``gammainc(a, x) + gammaincc(a, x) = 1`` where `gammainc` is the regularized lower incomplete gamma function. Notes ----- The implementation largely follows that of [1]_. See also -------- gammainc : regularized lower incomplete gamma function gammaincinv : inverse to ``gammainc`` versus ``x`` gammainccinv : inverse to ``gammaincc`` versus ``x`` References ---------- .. [1] Maddock et. al., "Incomplete Gamma Functions", http://www.boost.org/doc/libs/1_61_0/libs/math/doc/html/math_toolkit/sf_gamma/igamma.html """) add_newdoc("scipy.special", "gammainccinv", """ gammainccinv(a, y) Inverse to `gammaincc` Returns `x` such that ``gammaincc(a, x) == y``. """) add_newdoc("scipy.special", "gammaincinv", """ gammaincinv(a, y) Inverse to `gammainc` Returns `x` such that ``gammainc(a, x) = y``. 
""") add_newdoc("scipy.special", "gammaln", """ Logarithm of the absolute value of the Gamma function. Parameters ---------- x : array-like Values on the real line at which to compute ``gammaln`` Returns ------- gammaln : ndarray Values of ``gammaln`` at x. See Also -------- gammasgn : sign of the gamma function loggamma : principal branch of the logarithm of the gamma function Notes ----- When used in conjunction with `gammasgn`, this function is useful for working in logspace on the real axis without having to deal with complex numbers, via the relation ``exp(gammaln(x)) = gammasgn(x)*gamma(x)``. For complex-valued log-gamma, use `loggamma` instead of `gammaln`. """) add_newdoc("scipy.special", "gammasgn", """ gammasgn(x) Sign of the gamma function. See Also -------- gammaln loggamma """) add_newdoc("scipy.special", "gdtr", r""" gdtr(a, b, x) Gamma distribution cumulative density function. Returns the integral from zero to `x` of the gamma probability density function, .. math:: F = \int_0^x \frac{a^b}{\Gamma(b)} t^{b-1} e^{-at}\,dt, where :math:`\Gamma` is the gamma function. Parameters ---------- a : array_like The rate parameter of the gamma distribution, sometimes denoted :math:`\beta` (float). It is also the reciprocal of the scale parameter :math:`\theta`. b : array_like The shape parameter of the gamma distribution, sometimes denoted :math:`\alpha` (float). x : array_like The quantile (upper limit of integration; float). See also -------- gdtrc : 1 - CDF of the gamma distribution. Returns ------- F : ndarray The CDF of the gamma distribution with parameters `a` and `b` evaluated at `x`. Notes ----- The evaluation is carried out using the relation to the incomplete gamma integral (regularized gamma function). Wrapper for the Cephes [1]_ routine `gdtr`. References ---------- .. [1] Cephes Mathematical Functions Library, http://www.netlib.org/cephes/index.html """) add_newdoc("scipy.special", "gdtrc", r""" gdtrc(a, b, x) Gamma distribution survival function. 
Integral from `x` to infinity of the gamma probability density function, .. math:: F = \int_x^\infty \frac{a^b}{\Gamma(b)} t^{b-1} e^{-at}\,dt, where :math:`\Gamma` is the gamma function. Parameters ---------- a : array_like The rate parameter of the gamma distribution, sometimes denoted :math:`\beta` (float). It is also the reciprocal of the scale parameter :math:`\theta`. b : array_like The shape parameter of the gamma distribution, sometimes denoted :math:`\alpha` (float). x : array_like The quantile (lower limit of integration; float). Returns ------- F : ndarray The survival function of the gamma distribution with parameters `a` and `b` evaluated at `x`. See Also -------- gdtr, gdtri Notes ----- The evaluation is carried out using the relation to the incomplete gamma integral (regularized gamma function). Wrapper for the Cephes [1]_ routine `gdtrc`. References ---------- .. [1] Cephes Mathematical Functions Library, http://www.netlib.org/cephes/index.html """) add_newdoc("scipy.special", "gdtria", """ gdtria(p, b, x, out=None) Inverse of `gdtr` vs a. Returns the inverse with respect to the parameter `a` of ``p = gdtr(a, b, x)``, the cumulative distribution function of the gamma distribution. Parameters ---------- p : array_like Probability values. b : array_like `b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter of the gamma distribution. x : array_like Nonnegative real values, from the domain of the gamma distribution. out : ndarray, optional If a fourth argument is given, it must be a numpy.ndarray whose size matches the broadcast result of `a`, `b` and `x`. `out` is then the array returned by the function. Returns ------- a : ndarray Values of the `a` parameter such that `p = gdtr(a, b, x)`. `1/a` is the "scale" parameter of the gamma distribution. See Also -------- gdtr : CDF of the gamma distribution. gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`. gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`. 
Notes ----- Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`. The cumulative distribution function `p` is computed using a routine by DiDinato and Morris [2]_. Computation of `a` involves a search for a value that produces the desired value of `p`. The search relies on the monotonicity of `p` with `a`. References ---------- .. [1] Barry Brown, James Lovato, and Kathy Russell, CDFLIB: Library of Fortran Routines for Cumulative Distribution Functions, Inverses, and Other Parameters. .. [2] DiDinato, A. R. and Morris, A. H., Computation of the incomplete gamma function ratios and their inverse. ACM Trans. Math. Softw. 12 (1986), 377-393. Examples -------- First evaluate `gdtr`. >>> from scipy.special import gdtr, gdtria >>> p = gdtr(1.2, 3.4, 5.6) >>> print(p) 0.94378087442 Verify the inverse. >>> gdtria(p, 3.4, 5.6) 1.2 """) add_newdoc("scipy.special", "gdtrib", """ gdtrib(a, p, x, out=None) Inverse of `gdtr` vs b. Returns the inverse with respect to the parameter `b` of ``p = gdtr(a, b, x)``, the cumulative distribution function of the gamma distribution. Parameters ---------- a : array_like `a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale" parameter of the gamma distribution. p : array_like Probability values. x : array_like Nonnegative real values, from the domain of the gamma distribution. out : ndarray, optional If a fourth argument is given, it must be a numpy.ndarray whose size matches the broadcast result of `a`, `b` and `x`. `out` is then the array returned by the function. Returns ------- b : ndarray Values of the `b` parameter such that `p = gdtr(a, b, x)`. `b` is the "shape" parameter of the gamma distribution. See Also -------- gdtr : CDF of the gamma distribution. gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`. gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`. Notes ----- Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`. The cumulative distribution function `p` is computed using a routine by DiDinato and Morris [2]_. 
Computation of `b` involves a search for a value that produces the desired value of `p`. The search relies on the monotonicity of `p` with `b`. References ---------- .. [1] Barry Brown, James Lovato, and Kathy Russell, CDFLIB: Library of Fortran Routines for Cumulative Distribution Functions, Inverses, and Other Parameters. .. [2] DiDinato, A. R. and Morris, A. H., Computation of the incomplete gamma function ratios and their inverse. ACM Trans. Math. Softw. 12 (1986), 377-393. Examples -------- First evaluate `gdtr`. >>> from scipy.special import gdtr, gdtrib >>> p = gdtr(1.2, 3.4, 5.6) >>> print(p) 0.94378087442 Verify the inverse. >>> gdtrib(1.2, p, 5.6) 3.3999999999723882 """) add_newdoc("scipy.special", "gdtrix", """ gdtrix(a, b, p, out=None) Inverse of `gdtr` vs x. Returns the inverse with respect to the parameter `x` of ``p = gdtr(a, b, x)``, the cumulative distribution function of the gamma distribution. This is also known as the p'th quantile of the distribution. Parameters ---------- a : array_like `a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale" parameter of the gamma distribution. b : array_like `b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter of the gamma distribution. p : array_like Probability values. out : ndarray, optional If a fourth argument is given, it must be a numpy.ndarray whose size matches the broadcast result of `a`, `b` and `x`. `out` is then the array returned by the function. Returns ------- x : ndarray Values of the `x` parameter such that `p = gdtr(a, b, x)`. See Also -------- gdtr : CDF of the gamma distribution. gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`. gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`. Notes ----- Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`. The cumulative distribution function `p` is computed using a routine by DiDinato and Morris [2]_. Computation of `x` involves a search for a value that produces the desired value of `p`. 
The search relies on the monotonicity of `p` with `x`. References ---------- .. [1] Barry Brown, James Lovato, and Kathy Russell, CDFLIB: Library of Fortran Routines for Cumulative Distribution Functions, Inverses, and Other Parameters. .. [2] DiDinato, A. R. and Morris, A. H., Computation of the incomplete gamma function ratios and their inverse. ACM Trans. Math. Softw. 12 (1986), 377-393. Examples -------- First evaluate `gdtr`. >>> from scipy.special import gdtr, gdtrix >>> p = gdtr(1.2, 3.4, 5.6) >>> print(p) 0.94378087442 Verify the inverse. >>> gdtrix(1.2, 3.4, p) 5.5999999999999996 """) add_newdoc("scipy.special", "hankel1", r""" hankel1(v, z) Hankel function of the first kind Parameters ---------- v : array_like Order (float). z : array_like Argument (float or complex). Returns ------- out : Values of the Hankel function of the first kind. Notes ----- A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the computation using the relation, .. math:: H^{(1)}_v(z) = \frac{2}{\imath\pi} \exp(-\imath \pi v/2) K_v(z \exp(-\imath\pi/2)) where :math:`K_v` is the modified Bessel function of the second kind. For negative orders, the relation .. math:: H^{(1)}_{-v}(z) = H^{(1)}_v(z) \exp(\imath\pi v) is used. See also -------- hankel1e : this function with leading exponential behavior stripped off. References ---------- .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions of a Complex Argument and Nonnegative Order", http://netlib.org/amos/ """) add_newdoc("scipy.special", "hankel1e", r""" hankel1e(v, z) Exponentially scaled Hankel function of the first kind Defined as:: hankel1e(v, z) = hankel1(v, z) * exp(-1j * z) Parameters ---------- v : array_like Order (float). z : array_like Argument (float or complex). Returns ------- out : Values of the exponentially scaled Hankel function. Notes ----- A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the computation using the relation, .. 
math:: H^{(1)}_v(z) = \frac{2}{\imath\pi} \exp(-\imath \pi v/2) K_v(z \exp(-\imath\pi/2)) where :math:`K_v` is the modified Bessel function of the second kind. For negative orders, the relation .. math:: H^{(1)}_{-v}(z) = H^{(1)}_v(z) \exp(\imath\pi v) is used. References ---------- .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions of a Complex Argument and Nonnegative Order", http://netlib.org/amos/ """) add_newdoc("scipy.special", "hankel2", r""" hankel2(v, z) Hankel function of the second kind Parameters ---------- v : array_like Order (float). z : array_like Argument (float or complex). Returns ------- out : Values of the Hankel function of the second kind. Notes ----- A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the computation using the relation, .. math:: H^{(2)}_v(z) = -\frac{2}{\imath\pi} \exp(\imath \pi v/2) K_v(z \exp(\imath\pi/2)) where :math:`K_v` is the modified Bessel function of the second kind. For negative orders, the relation .. math:: H^{(2)}_{-v}(z) = H^{(2)}_v(z) \exp(-\imath\pi v) is used. See also -------- hankel2e : this function with leading exponential behavior stripped off. References ---------- .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions of a Complex Argument and Nonnegative Order", http://netlib.org/amos/ """) add_newdoc("scipy.special", "hankel2e", r""" hankel2e(v, z) Exponentially scaled Hankel function of the second kind Defined as:: hankel2e(v, z) = hankel2(v, z) * exp(1j * z) Parameters ---------- v : array_like Order (float). z : array_like Argument (float or complex). Returns ------- out : Values of the exponentially scaled Hankel function of the second kind. Notes ----- A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the computation using the relation, .. math:: H^{(2)}_v(z) = -\frac{2}{\imath\pi} \exp(\frac{\imath \pi v}{2}) K_v(z \exp(\frac{\imath\pi}{2})) where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation .. math:: H^{(2)}_{-v}(z) = H^{(2)}_v(z) \exp(-\imath\pi v) is used. References ---------- .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions of a Complex Argument and Nonnegative Order", http://netlib.org/amos/ """) add_newdoc("scipy.special", "huber", r""" huber(delta, r) Huber loss function. .. math:: \text{huber}(\delta, r) = \begin{cases} \infty & \delta < 0 \\ \frac{1}{2}r^2 & 0 \le \delta, | r | \le \delta \\ \delta ( |r| - \frac{1}{2}\delta ) & \text{otherwise} \end{cases} Parameters ---------- delta : ndarray Input array, indicating the quadratic vs. linear loss changepoint. r : ndarray Input array, possibly representing residuals. Returns ------- res : ndarray The computed Huber loss function values. Notes ----- This function is convex in r. .. versionadded:: 0.15.0 """) add_newdoc("scipy.special", "hyp0f1", r""" hyp0f1(v, x) Confluent hypergeometric limit function 0F1. Parameters ---------- v, z : array_like Input values. Returns ------- hyp0f1 : ndarray The confluent hypergeometric limit function. Notes ----- This function is defined as: .. math:: _0F_1(v, z) = \sum_{k=0}^{\infty}\frac{z^k}{(v)_k k!}. It's also the limit as :math:`q \to \infty` of :math:`_1F_1(q; v; z/q)`, and satisfies the differential equation :math:`f''(z) + vf'(z) = f(z)`. """) add_newdoc("scipy.special", "hyp1f1", """ hyp1f1(a, b, x) Confluent hypergeometric function 1F1(a, b; x) """) add_newdoc("scipy.special", "hyp1f2", """ hyp1f2(a, b, c, x) Hypergeometric function 1F2 and error estimate Returns ------- y Value of the function err Error estimate """) add_newdoc("scipy.special", "hyp2f0", """ hyp2f0(a, b, x, type) Hypergeometric function 2F0 in y and an error estimate The parameter `type` determines a convergence factor and can be either 1 or 2. 
Returns ------- y Value of the function err Error estimate """) add_newdoc("scipy.special", "hyp2f1", r""" hyp2f1(a, b, c, z) Gauss hypergeometric function 2F1(a, b; c; z) Parameters ---------- a, b, c : array_like Arguments, should be real-valued. z : array_like Argument, real or complex. Returns ------- hyp2f1 : scalar or ndarray The values of the gaussian hypergeometric function. See also -------- hyp0f1 : confluent hypergeometric limit function. hyp1f1 : Kummer's (confluent hypergeometric) function. Notes ----- This function is defined for :math:`|z| < 1` as .. math:: \mathrm{hyp2f1}(a, b, c, z) = \sum_{n=0}^\infty \frac{(a)_n (b)_n}{(c)_n}\frac{z^n}{n!}, and defined on the rest of the complex z-plane by analytic continuation. Here :math:`(\cdot)_n` is the Pochhammer symbol; see `poch`. If :math:`a` or :math:`b` is a nonpositive integer, the series terminates and the result is a polynomial in :math:`z`. The implementation for complex values of ``z`` is described in [1]_. References ---------- .. [1] S. Zhang and J.M. Jin, "Computation of Special Functions", Wiley 1996 .. [2] Cephes Mathematical Functions Library, http://www.netlib.org/cephes/index.html .. [3] NIST Digital Library of Mathematical Functions http://dlmf.nist.gov/ """) add_newdoc("scipy.special", "hyp3f0", """ hyp3f0(a, b, c, x) Hypergeometric function 3F0 in y and an error estimate Returns ------- y Value of the function err Error estimate """) add_newdoc("scipy.special", "hyperu", """ hyperu(a, b, x) Confluent hypergeometric function U(a, b, x) of the second kind """) add_newdoc("scipy.special", "i0", r""" i0(x) Modified Bessel function of order 0. Defined as, .. math:: I_0(x) = \sum_{k=0}^\infty \frac{(x^2/4)^k}{(k!)^2} = J_0(\imath x), where :math:`J_0` is the Bessel function of the first kind of order 0. Parameters ---------- x : array_like Argument (float) Returns ------- I : ndarray Value of the modified Bessel function of order 0 at `x`. Notes ----- The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval. This function is a wrapper for the Cephes [1]_ routine `i0`. See also -------- iv i0e References ---------- .. [1] Cephes Mathematical Functions Library, http://www.netlib.org/cephes/index.html """) add_newdoc("scipy.special", "i0e", """ i0e(x) Exponentially scaled modified Bessel function of order 0. Defined as:: i0e(x) = exp(-abs(x)) * i0(x). Parameters ---------- x : array_like Argument (float) Returns ------- I : ndarray Value of the exponentially scaled modified Bessel function of order 0 at `x`. Notes ----- The range is partitioned into the two intervals [0, 8] and (8, infinity). Chebyshev polynomial expansions are employed in each interval. The polynomial expansions used are the same as those in `i0`, but they are not multiplied by the dominant exponential factor. This function is a wrapper for the Cephes [1]_ routine `i0e`. See also -------- iv i0 References ---------- .. [1] Cephes Mathematical Functions Library, http://www.netlib.org/cephes/index.html """) add_newdoc("scipy.special", "i1", r""" i1(x) Modified Bessel function of order 1. Defined as, .. math:: I_1(x) = \frac{1}{2}x \sum_{k=0}^\infty \frac{(x^2/4)^k}{k! (k + 1)!} = -\imath J_1(\imath x), where :math:`J_1` is the Bessel function of the first kind of order 1. Parameters ---------- x : array_like Argument (float) Returns ------- I : ndarray Value of the modified Bessel function of order 1 at `x`. Notes ----- The range is partitioned into the two intervals [0, 8] and (8, infinity). Chebyshev polynomial expansions are employed in each interval. This function is a wrapper for the Cephes [1]_ routine `i1`. See also -------- iv i1e References ---------- .. [1] Cephes Mathematical Functions Library, http://www.netlib.org/cephes/index.html """) add_newdoc("scipy.special", "i1e", """ i1e(x) Exponentially scaled modified Bessel function of order 1. 
Defined as:: i1e(x) = exp(-abs(x)) * i1(x) Parameters ---------- x : array_like Argument (float) Returns ------- I : ndarray Value of the exponentially scaled modified Bessel function of order 1 at `x`. Notes ----- The range is partitioned into the two intervals [0, 8] and (8, infinity). Chebyshev polynomial expansions are employed in each interval. The polynomial expansions used are the same as those in `i1`, but they are not multiplied by the dominant exponential factor. This function is a wrapper for the Cephes [1]_ routine `i1e`. See also -------- iv i1 References ---------- .. [1] Cephes Mathematical Functions Library, http://www.netlib.org/cephes/index.html """) add_newdoc("scipy.special", "_igam_fac", """ Internal function, do not use. """) add_newdoc("scipy.special", "it2i0k0", """ it2i0k0(x) Integrals related to modified Bessel functions of order 0 Returns ------- ii0 ``integral((i0(t)-1)/t, t=0..x)`` ik0 ``int(k0(t)/t, t=x..inf)`` """) add_newdoc("scipy.special", "it2j0y0", """ it2j0y0(x) Integrals related to Bessel functions of order 0 Returns ------- ij0 ``integral((1-j0(t))/t, t=0..x)`` iy0 ``integral(y0(t)/t, t=x..inf)`` """) add_newdoc("scipy.special", "it2struve0", r""" it2struve0(x) Integral related to the Struve function of order 0. Returns the integral, .. math:: \int_x^\infty \frac{H_0(t)}{t}\,dt where :math:`H_0` is the Struve function of order 0. Parameters ---------- x : array_like Lower limit of integration. Returns ------- I : ndarray The value of the integral. See also -------- struve Notes ----- Wrapper for a Fortran routine created by Shanjie Zhang and Jianming Jin [1]_. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html """) add_newdoc("scipy.special", "itairy", """ itairy(x) Integrals of Airy functions Calculates the integrals of Airy functions from 0 to `x`. 
Parameters ---------- x: array_like Upper limit of integration (float). Returns ------- Apt Integral of Ai(t) from 0 to x. Bpt Integral of Bi(t) from 0 to x. Ant Integral of Ai(-t) from 0 to x. Bnt Integral of Bi(-t) from 0 to x. Notes ----- Wrapper for a Fortran routine created by Shanjie Zhang and Jianming Jin [1]_. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html """) add_newdoc("scipy.special", "iti0k0", """ iti0k0(x) Integrals of modified Bessel functions of order 0 Returns simple integrals from 0 to `x` of the zeroth order modified Bessel functions `i0` and `k0`. Returns ------- ii0, ik0 """) add_newdoc("scipy.special", "itj0y0", """ itj0y0(x) Integrals of Bessel functions of order 0 Returns simple integrals from 0 to `x` of the zeroth order Bessel functions `j0` and `y0`. Returns ------- ij0, iy0 """) add_newdoc("scipy.special", "itmodstruve0", r""" itmodstruve0(x) Integral of the modified Struve function of order 0. .. math:: I = \int_0^x L_0(t)\,dt Parameters ---------- x : array_like Upper limit of integration (float). Returns ------- I : ndarray The integral of :math:`L_0` from 0 to `x`. Notes ----- Wrapper for a Fortran routine created by Shanjie Zhang and Jianming Jin [1]_. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html """) add_newdoc("scipy.special", "itstruve0", r""" itstruve0(x) Integral of the Struve function of order 0. .. math:: I = \int_0^x H_0(t)\,dt Parameters ---------- x : array_like Upper limit of integration (float). Returns ------- I : ndarray The integral of :math:`H_0` from 0 to `x`. See also -------- struve Notes ----- Wrapper for a Fortran routine created by Shanjie Zhang and Jianming Jin [1]_. 
References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html """) add_newdoc("scipy.special", "iv", r""" iv(v, z) Modified Bessel function of the first kind of real order. Parameters ---------- v : array_like Order. If `z` is of real type and negative, `v` must be integer valued. z : array_like of float or complex Argument. Returns ------- out : ndarray Values of the modified Bessel function. Notes ----- For real `z` and :math:`v \in [-50, 50]`, the evaluation is carried out using Temme's method [1]_. For larger orders, uniform asymptotic expansions are applied. For complex `z` and positive `v`, the AMOS [2]_ `zbesi` routine is called. It uses a power series for small `z`, the asymptotic expansion for large `abs(z)`, the Miller algorithm normalized by the Wronskian and a Neumann series for intermediate magnitudes, and the uniform asymptotic expansions for :math:`I_v(z)` and :math:`J_v(z)` for large orders. Backward recurrence is used to generate sequences or reduce orders when necessary. The calculations above are done in the right half plane and continued into the left half plane by the formula, .. math:: I_v(z \exp(\pm\imath\pi)) = \exp(\pm\pi v) I_v(z) (valid when the real part of `z` is positive). For negative `v`, the formula .. math:: I_{-v}(z) = I_v(z) + \frac{2}{\pi} \sin(\pi v) K_v(z) is used, where :math:`K_v(z)` is the modified Bessel function of the second kind, evaluated using the AMOS routine `zbesk`. See also -------- kve : This function with leading exponential behavior stripped off. References ---------- .. [1] Temme, Journal of Computational Physics, vol 21, 343 (1976) .. [2] Donald E. 
Amos, "AMOS, A Portable Package for Bessel Functions of a Complex Argument and Nonnegative Order", http://netlib.org/amos/ """) add_newdoc("scipy.special", "ive", r""" ive(v, z) Exponentially scaled modified Bessel function of the first kind Defined as:: ive(v, z) = iv(v, z) * exp(-abs(z.real)) Parameters ---------- v : array_like of float Order. z : array_like of float or complex Argument. Returns ------- out : ndarray Values of the exponentially scaled modified Bessel function. Notes ----- For positive `v`, the AMOS [1]_ `zbesi` routine is called. It uses a power series for small `z`, the asymptotic expansion for large `abs(z)`, the Miller algorithm normalized by the Wronskian and a Neumann series for intermediate magnitudes, and the uniform asymptotic expansions for :math:`I_v(z)` and :math:`J_v(z)` for large orders. Backward recurrence is used to generate sequences or reduce orders when necessary. The calculations above are done in the right half plane and continued into the left half plane by the formula, .. math:: I_v(z \exp(\pm\imath\pi)) = \exp(\pm\pi v) I_v(z) (valid when the real part of `z` is positive). For negative `v`, the formula .. math:: I_{-v}(z) = I_v(z) + \frac{2}{\pi} \sin(\pi v) K_v(z) is used, where :math:`K_v(z)` is the modified Bessel function of the second kind, evaluated using the AMOS routine `zbesk`. References ---------- .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions of a Complex Argument and Nonnegative Order", http://netlib.org/amos/ """) add_newdoc("scipy.special", "j0", r""" j0(x) Bessel function of the first kind of order 0. Parameters ---------- x : array_like Argument (float). Returns ------- J : ndarray Value of the Bessel function of the first kind of order 0 at `x`. Notes ----- The domain is divided into the intervals [0, 5] and (5, infinity). In the first interval the following rational approximation is used: .. 
math:: J_0(x) \approx (w - r_1^2)(w - r_2^2) \frac{P_3(w)}{Q_8(w)}, where :math:`w = x^2` and :math:`r_1`, :math:`r_2` are the zeros of :math:`J_0`, and :math:`P_3` and :math:`Q_8` are polynomials of degrees 3 and 8, respectively. In the second interval, the Hankel asymptotic expansion is employed with two rational functions of degree 6/6 and 7/7. This function is a wrapper for the Cephes [1]_ routine `j0`. It should not be confused with the spherical Bessel functions (see `spherical_jn`). See also -------- jv : Bessel function of real order and complex argument. spherical_jn : spherical Bessel functions. References ---------- .. [1] Cephes Mathematical Functions Library, http://www.netlib.org/cephes/index.html """) add_newdoc("scipy.special", "j1", """ j1(x) Bessel function of the first kind of order 1. Parameters ---------- x : array_like Argument (float). Returns ------- J : ndarray Value of the Bessel function of the first kind of order 1 at `x`. Notes ----- The domain is divided into the intervals [0, 8] and (8, infinity). In the first interval a 24 term Chebyshev expansion is used. In the second, the asymptotic trigonometric representation is employed using two rational functions of degree 5/5. This function is a wrapper for the Cephes [1]_ routine `j1`. It should not be confused with the spherical Bessel functions (see `spherical_jn`). See also -------- jv spherical_jn : spherical Bessel functions. References ---------- .. [1] Cephes Mathematical Functions Library, http://www.netlib.org/cephes/index.html """) add_newdoc("scipy.special", "jn", """ jn(n, x) Bessel function of the first kind of integer order and real argument. Notes ----- `jn` is an alias of `jv`. Not to be confused with the spherical Bessel functions (see `spherical_jn`). See also -------- jv spherical_jn : spherical Bessel functions. """) add_newdoc("scipy.special", "jv", r""" jv(v, z) Bessel function of the first kind of real order and complex argument. 
Parameters ---------- v : array_like Order (float). z : array_like Argument (float or complex). Returns ------- J : ndarray Value of the Bessel function, :math:`J_v(z)`. Notes ----- For positive `v` values, the computation is carried out using the AMOS [1]_ `zbesj` routine, which exploits the connection to the modified Bessel function :math:`I_v`, .. math:: J_v(z) = \exp(v\pi\imath/2) I_v(-\imath z)\qquad (\Im z > 0) J_v(z) = \exp(-v\pi\imath/2) I_v(\imath z)\qquad (\Im z < 0) For negative `v` values the formula, .. math:: J_{-v}(z) = J_v(z) \cos(\pi v) - Y_v(z) \sin(\pi v) is used, where :math:`Y_v(z)` is the Bessel function of the second kind, computed using the AMOS routine `zbesy`. Note that the second term is exactly zero for integer `v`; to improve accuracy the second term is explicitly omitted for `v` values such that `v = floor(v)`. Not to be confused with the spherical Bessel functions (see `spherical_jn`). See also -------- jve : :math:`J_v` with leading exponential behavior stripped off. spherical_jn : spherical Bessel functions. References ---------- .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions of a Complex Argument and Nonnegative Order", http://netlib.org/amos/ """) add_newdoc("scipy.special", "jve", r""" jve(v, z) Exponentially scaled Bessel function of order `v`. Defined as:: jve(v, z) = jv(v, z) * exp(-abs(z.imag)) Parameters ---------- v : array_like Order (float). z : array_like Argument (float or complex). Returns ------- J : ndarray Value of the exponentially scaled Bessel function. Notes ----- For positive `v` values, the computation is carried out using the AMOS [1]_ `zbesj` routine, which exploits the connection to the modified Bessel function :math:`I_v`, .. math:: J_v(z) = \exp(v\pi\imath/2) I_v(-\imath z)\qquad (\Im z > 0) J_v(z) = \exp(-v\pi\imath/2) I_v(\imath z)\qquad (\Im z < 0) For negative `v` values the formula, .. 
math:: J_{-v}(z) = J_v(z) \cos(\pi v) - Y_v(z) \sin(\pi v) is used, where :math:`Y_v(z)` is the Bessel function of the second kind, computed using the AMOS routine `zbesy`. Note that the second term is exactly zero for integer `v`; to improve accuracy the second term is explicitly omitted for `v` values such that `v = floor(v)`. References ---------- .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions of a Complex Argument and Nonnegative Order", http://netlib.org/amos/ """) add_newdoc("scipy.special", "k0", r""" k0(x) Modified Bessel function of the second kind of order 0, :math:`K_0`. This function is also sometimes referred to as the modified Bessel function of the third kind of order 0. Parameters ---------- x : array_like Argument (float). Returns ------- K : ndarray Value of the modified Bessel function :math:`K_0` at `x`. Notes ----- The range is partitioned into the two intervals [0, 2] and (2, infinity). Chebyshev polynomial expansions are employed in each interval. This function is a wrapper for the Cephes [1]_ routine `k0`. See also -------- kv k0e References ---------- .. [1] Cephes Mathematical Functions Library, http://www.netlib.org/cephes/index.html """) add_newdoc("scipy.special", "k0e", """ k0e(x) Exponentially scaled modified Bessel function K of order 0 Defined as:: k0e(x) = exp(x) * k0(x). Parameters ---------- x : array_like Argument (float) Returns ------- K : ndarray Value of the exponentially scaled modified Bessel function K of order 0 at `x`. Notes ----- The range is partitioned into the two intervals [0, 2] and (2, infinity). Chebyshev polynomial expansions are employed in each interval. This function is a wrapper for the Cephes [1]_ routine `k0e`. See also -------- kv k0 References ---------- .. [1] Cephes Mathematical Functions Library, http://www.netlib.org/cephes/index.html """) add_newdoc("scipy.special", "k1", """ k1(x) Modified Bessel function of the second kind of order 1, :math:`K_1(x)`. 
Parameters ---------- x : array_like Argument (float) Returns ------- K : ndarray Value of the modified Bessel function K of order 1 at `x`. Notes ----- The range is partitioned into the two intervals [0, 2] and (2, infinity). Chebyshev polynomial expansions are employed in each interval. This function is a wrapper for the Cephes [1]_ routine `k1`. See also -------- kv k1e References ---------- .. [1] Cephes Mathematical Functions Library, http://www.netlib.org/cephes/index.html """) add_newdoc("scipy.special", "k1e", """ k1e(x) Exponentially scaled modified Bessel function K of order 1 Defined as:: k1e(x) = exp(x) * k1(x) Parameters ---------- x : array_like Argument (float) Returns ------- K : ndarray Value of the exponentially scaled modified Bessel function K of order 1 at `x`. Notes ----- The range is partitioned into the two intervals [0, 2] and (2, infinity). Chebyshev polynomial expansions are employed in each interval. This function is a wrapper for the Cephes [1]_ routine `k1e`. See also -------- kv k1 References ---------- .. [1] Cephes Mathematical Functions Library, http://www.netlib.org/cephes/index.html """) add_newdoc("scipy.special", "kei", """ kei(x) Kelvin function kei """) add_newdoc("scipy.special", "keip", """ keip(x) Derivative of the Kelvin function kei """) add_newdoc("scipy.special", "kelvin", """ kelvin(x) Kelvin functions as complex numbers Returns ------- Be, Ke, Bep, Kep The tuple (Be, Ke, Bep, Kep) contains complex numbers representing the real and imaginary Kelvin functions and their derivatives evaluated at `x`. For example, kelvin(x)[0].real = ber x and kelvin(x)[0].imag = bei x with similar relationships for ker and kei. """) add_newdoc("scipy.special", "ker", """ ker(x) Kelvin function ker """) add_newdoc("scipy.special", "kerp", """ kerp(x) Derivative of the Kelvin function ker """) add_newdoc("scipy.special", "kl_div", r""" kl_div(x, y) Elementwise function for computing Kullback-Leibler divergence. ..
math:: \mathrm{kl\_div}(x, y) = \begin{cases} x \log(x / y) - x + y & x > 0, y > 0 \\ y & x = 0, y \ge 0 \\ \infty & \text{otherwise} \end{cases} Parameters ---------- x : ndarray First input array. y : ndarray Second input array. Returns ------- res : ndarray Output array. See Also -------- entr, rel_entr Notes ----- This function is non-negative and is jointly convex in `x` and `y`. .. versionadded:: 0.15.0 """) add_newdoc("scipy.special", "kn", r""" kn(n, x) Modified Bessel function of the second kind of integer order `n` Returns the modified Bessel function of the second kind for integer order `n` at real `z`. These are also sometimes called functions of the third kind, Basset functions, or Macdonald functions. Parameters ---------- n : array_like of int Order of Bessel functions (floats will truncate with a warning) z : array_like of float Argument at which to evaluate the Bessel functions Returns ------- out : ndarray The results Notes ----- Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the algorithm used, see [2]_ and the references therein. See Also -------- kv : Same function, but accepts real order and complex argument kvp : Derivative of this function References ---------- .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions of a Complex Argument and Nonnegative Order", http://netlib.org/amos/ .. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel functions of a complex argument and nonnegative order", ACM TOMS Vol. 12 Issue 3, Sept. 1986, p. 265 Examples -------- Plot the function of several orders for real input: >>> from scipy.special import kn >>> import matplotlib.pyplot as plt >>> x = np.linspace(0, 5, 1000) >>> for N in range(6): ... 
plt.plot(x, kn(N, x), label='$K_{}(x)$'.format(N)) >>> plt.ylim(0, 10) >>> plt.legend() >>> plt.title(r'Modified Bessel function of the second kind $K_n(x)$') >>> plt.show() Calculate for a single value at multiple orders: >>> kn([4, 5, 6], 1) array([ 44.23241585, 360.9605896 , 3653.83831186]) """) add_newdoc("scipy.special", "kolmogi", """ kolmogi(p) Inverse function to kolmogorov Returns y such that ``kolmogorov(y) == p``. """) add_newdoc("scipy.special", "kolmogorov", """ kolmogorov(y) Complementary cumulative distribution function of Kolmogorov distribution Returns the complementary cumulative distribution function of Kolmogorov's limiting distribution (Kn* for large n) of a two-sided test for equality between an empirical and a theoretical distribution. It is equal to the (limit as n->infinity of the) probability that sqrt(n) * max absolute deviation > y. """) add_newdoc("scipy.special", "kv", r""" kv(v, z) Modified Bessel function of the second kind of real order `v` Returns the modified Bessel function of the second kind for real order `v` at complex `z`. These are also sometimes called functions of the third kind, Basset functions, or Macdonald functions. They are defined as those solutions of the modified Bessel equation for which, .. math:: K_v(x) \sim \sqrt{\pi/(2x)} \exp(-x) as :math:`x \to \infty` [3]_. Parameters ---------- v : array_like of float Order of Bessel functions z : array_like of complex Argument at which to evaluate the Bessel functions Returns ------- out : ndarray The results. Note that input must be of complex type to get complex output, e.g. ``kv(3, -2+0j)`` instead of ``kv(3, -2)``. Notes ----- Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the algorithm used, see [2]_ and the references therein. See Also -------- kve : This function with leading exponential behavior stripped off. kvp : Derivative of this function References ---------- .. [1] Donald E. 
Amos, "AMOS, A Portable Package for Bessel Functions of a Complex Argument and Nonnegative Order", http://netlib.org/amos/ .. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel functions of a complex argument and nonnegative order", ACM TOMS Vol. 12 Issue 3, Sept. 1986, p. 265 .. [3] NIST Digital Library of Mathematical Functions, Eq. 10.25.E3. http://dlmf.nist.gov/10.25.E3 Examples -------- Plot the function of several orders for real input: >>> from scipy.special import kv >>> import matplotlib.pyplot as plt >>> x = np.linspace(0, 5, 1000) >>> for N in np.linspace(0, 6, 5): ... plt.plot(x, kv(N, x), label='$K_{{{}}}(x)$'.format(N)) >>> plt.ylim(0, 10) >>> plt.legend() >>> plt.title(r'Modified Bessel function of the second kind $K_\nu(x)$') >>> plt.show() Calculate for a single value at multiple orders: >>> kv([4, 4.5, 5], 1+2j) array([ 0.1992+2.3892j, 2.3493+3.6j , 7.2827+3.8104j]) """) add_newdoc("scipy.special", "kve", r""" kve(v, z) Exponentially scaled modified Bessel function of the second kind. Returns the exponentially scaled, modified Bessel function of the second kind (sometimes called the third kind) for real order `v` at complex `z`:: kve(v, z) = kv(v, z) * exp(z) Parameters ---------- v : array_like of float Order of Bessel functions z : array_like of complex Argument at which to evaluate the Bessel functions Returns ------- out : ndarray The exponentially scaled modified Bessel function of the second kind. Notes ----- Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the algorithm used, see [2]_ and the references therein. References ---------- .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions of a Complex Argument and Nonnegative Order", http://netlib.org/amos/ .. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel functions of a complex argument and nonnegative order", ACM TOMS Vol. 12 Issue 3, Sept. 1986, p. 
265 """) add_newdoc("scipy.special", "_lanczos_sum_expg_scaled", """ Internal function, do not use. """) add_newdoc("scipy.special", "_lgam1p", """ Internal function, do not use. """) add_newdoc("scipy.special", "log1p", """ log1p(x) Calculates log(1+x) for use when `x` is near zero """) add_newdoc("scipy.special", "_log1pmx", """ Internal function, do not use. """) add_newdoc('scipy.special', 'logit', """ logit(x) Logit ufunc for ndarrays. The logit function is defined as logit(p) = log(p/(1-p)). Note that logit(0) = -inf, logit(1) = inf, and logit(p) for p<0 or p>1 yields nan. Parameters ---------- x : ndarray The ndarray to apply logit to element-wise. Returns ------- out : ndarray An ndarray of the same shape as x. Its entries are logit of the corresponding entry of x. See Also -------- expit Notes ----- As a ufunc logit takes a number of optional keyword arguments. For more information see `ufuncs <https://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_ .. versionadded:: 0.10.0 Examples -------- >>> from scipy.special import logit, expit >>> logit([0, 0.25, 0.5, 0.75, 1]) array([ -inf, -1.09861229, 0. , 1.09861229, inf]) `expit` is the inverse of `logit`: >>> expit(logit([0.1, 0.75, 0.999])) array([ 0.1 , 0.75 , 0.999]) Plot logit(x) for x in [0, 1]: >>> import matplotlib.pyplot as plt >>> x = np.linspace(0, 1, 501) >>> y = logit(x) >>> plt.plot(x, y) >>> plt.grid() >>> plt.ylim(-6, 6) >>> plt.xlabel('x') >>> plt.title('logit(x)') >>> plt.show() """) add_newdoc("scipy.special", "lpmv", r""" lpmv(m, v, x) Associated Legendre function of integer order and real degree. Defined as .. math:: P_v^m = (-1)^m (1 - x^2)^{m/2} \frac{d^m}{dx^m} P_v(x) where .. math:: P_v = \sum_{k = 0}^\infty \frac{(-v)_k (v + 1)_k}{(k!)^2} \left(\frac{1 - x}{2}\right)^k is the Legendre function of the first kind. Here :math:`(\cdot)_k` is the Pochhammer symbol; see `poch`. Parameters ---------- m : array_like Order (int or float). 
If passed a float not equal to an integer the function returns NaN. v : array_like Degree (float). x : array_like Argument (float). Must have ``|x| <= 1``. Returns ------- pmv : ndarray Value of the associated Legendre function. See Also -------- lpmn : Compute the associated Legendre function for all orders ``0, ..., m`` and degrees ``0, ..., n``. clpmn : Compute the associated Legendre function at complex arguments. Notes ----- Note that this implementation includes the Condon-Shortley phase. References ---------- .. [1] Zhang, Jin, "Computation of Special Functions", John Wiley and Sons, Inc, 1996. """) add_newdoc("scipy.special", "mathieu_a", """ mathieu_a(m, q) Characteristic value of even Mathieu functions Returns the characteristic value for the even solution, ``ce_m(z, q)``, of Mathieu's equation. """) add_newdoc("scipy.special", "mathieu_b", """ mathieu_b(m, q) Characteristic value of odd Mathieu functions Returns the characteristic value for the odd solution, ``se_m(z, q)``, of Mathieu's equation. """) add_newdoc("scipy.special", "mathieu_cem", """ mathieu_cem(m, q, x) Even Mathieu function and its derivative Returns the even Mathieu function, ``ce_m(x, q)``, of order `m` and parameter `q` evaluated at `x` (given in degrees). Also returns the derivative with respect to `x` of ce_m(x, q) Parameters ---------- m Order of the function q Parameter of the function x Argument of the function, *given in degrees, not radians* Returns ------- y Value of the function yp Value of the derivative vs x """) add_newdoc("scipy.special", "mathieu_modcem1", """ mathieu_modcem1(m, q, x) Even modified Mathieu function of the first kind and its derivative Evaluates the even modified Mathieu function of the first kind, ``Mc1m(x, q)``, and its derivative at `x` for order `m` and parameter `q`. 
Returns ------- y Value of the function yp Value of the derivative vs x """) add_newdoc("scipy.special", "mathieu_modcem2", """ mathieu_modcem2(m, q, x) Even modified Mathieu function of the second kind and its derivative Evaluates the even modified Mathieu function of the second kind, Mc2m(x, q), and its derivative at `x` (given in degrees) for order `m` and parameter `q`. Returns ------- y Value of the function yp Value of the derivative vs x """) add_newdoc("scipy.special", "mathieu_modsem1", """ mathieu_modsem1(m, q, x) Odd modified Mathieu function of the first kind and its derivative Evaluates the odd modified Mathieu function of the first kind, Ms1m(x, q), and its derivative at `x` (given in degrees) for order `m` and parameter `q`. Returns ------- y Value of the function yp Value of the derivative vs x """) add_newdoc("scipy.special", "mathieu_modsem2", """ mathieu_modsem2(m, q, x) Odd modified Mathieu function of the second kind and its derivative Evaluates the odd modified Mathieu function of the second kind, Ms2m(x, q), and its derivative at `x` (given in degrees) for order `m` and parameter q. Returns ------- y Value of the function yp Value of the derivative vs x """) add_newdoc("scipy.special", "mathieu_sem", """ mathieu_sem(m, q, x) Odd Mathieu function and its derivative Returns the odd Mathieu function, se_m(x, q), of order `m` and parameter `q` evaluated at `x` (given in degrees). Also returns the derivative with respect to `x` of se_m(x, q). Parameters ---------- m Order of the function q Parameter of the function x Argument of the function, *given in degrees, not radians*. 
Returns ------- y Value of the function yp Value of the derivative vs x """) add_newdoc("scipy.special", "modfresnelm", """ modfresnelm(x) Modified Fresnel negative integrals Returns ------- fm Integral ``F_-(x)``: ``integral(exp(-1j*t*t), t=x..inf)`` km Integral ``K_-(x)``: ``1/sqrt(pi)*exp(1j*(x*x+pi/4))*fm`` """) add_newdoc("scipy.special", "modfresnelp", """ modfresnelp(x) Modified Fresnel positive integrals Returns ------- fp Integral ``F_+(x)``: ``integral(exp(1j*t*t), t=x..inf)`` kp Integral ``K_+(x)``: ``1/sqrt(pi)*exp(-1j*(x*x+pi/4))*fp`` """) add_newdoc("scipy.special", "modstruve", r""" modstruve(v, x) Modified Struve function. Return the value of the modified Struve function of order `v` at `x`. The modified Struve function is defined as, .. math:: L_v(x) = -\imath \exp(-\pi\imath v/2) H_v(x), where :math:`H_v` is the Struve function. Parameters ---------- v : array_like Order of the modified Struve function (float). x : array_like Argument of the Struve function (float; must be positive unless `v` is an integer). Returns ------- L : ndarray Value of the modified Struve function of order `v` at `x`. Notes ----- Three methods discussed in [1]_ are used to evaluate the function: - power series - expansion in Bessel functions (if :math:`|z| < |v| + 20`) - asymptotic large-z expansion (if :math:`z \geq 0.7v + 12`) Rounding errors are estimated based on the largest terms in the sums, and the result associated with the smallest error is returned. See also -------- struve References ---------- .. [1] NIST Digital Library of Mathematical Functions http://dlmf.nist.gov/11 """) add_newdoc("scipy.special", "nbdtr", r""" nbdtr(k, n, p) Negative binomial cumulative distribution function. Returns the sum of the terms 0 through `k` of the negative binomial distribution probability mass function, .. math:: F = \sum_{j=0}^k {{n + j - 1}\choose{j}} p^n (1 - p)^j. 
In a sequence of Bernoulli trials with individual success probabilities `p`, this is the probability that `k` or fewer failures precede the nth success. Parameters ---------- k : array_like The maximum number of allowed failures (nonnegative int). n : array_like The target number of successes (positive int). p : array_like Probability of success in a single event (float). Returns ------- F : ndarray The probability of `k` or fewer failures before `n` successes in a sequence of events with individual success probability `p`. See also -------- nbdtrc Notes ----- If floating point values are passed for `k` or `n`, they will be truncated to integers. The terms are not summed directly; instead the regularized incomplete beta function is employed, according to the formula, .. math:: \mathrm{nbdtr}(k, n, p) = I_{p}(n, k + 1). Wrapper for the Cephes [1]_ routine `nbdtr`. References ---------- .. [1] Cephes Mathematical Functions Library, http://www.netlib.org/cephes/index.html """) add_newdoc("scipy.special", "nbdtrc", r""" nbdtrc(k, n, p) Negative binomial survival function. Returns the sum of the terms `k + 1` to infinity of the negative binomial distribution probability mass function, .. math:: F = \sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j. In a sequence of Bernoulli trials with individual success probabilities `p`, this is the probability that more than `k` failures precede the nth success. Parameters ---------- k : array_like The maximum number of allowed failures (nonnegative int). n : array_like The target number of successes (positive int). p : array_like Probability of success in a single event (float). Returns ------- F : ndarray The probability of `k + 1` or more failures before `n` successes in a sequence of events with individual success probability `p`. Notes ----- If floating point values are passed for `k` or `n`, they will be truncated to integers. 
The terms are not summed directly; instead the regularized incomplete beta function is employed, according to the formula, .. math:: \mathrm{nbdtrc}(k, n, p) = I_{1 - p}(k + 1, n). Wrapper for the Cephes [1]_ routine `nbdtrc`. References ---------- .. [1] Cephes Mathematical Functions Library, http://www.netlib.org/cephes/index.html """) add_newdoc("scipy.special", "nbdtri", """ nbdtri(k, n, y) Inverse of `nbdtr` vs `p`. Returns the inverse with respect to the parameter `p` of `y = nbdtr(k, n, p)`, the negative binomial cumulative distribution function. Parameters ---------- k : array_like The maximum number of allowed failures (nonnegative int). n : array_like The target number of successes (positive int). y : array_like The probability of `k` or fewer failures before `n` successes (float). Returns ------- p : ndarray Probability of success in a single event (float) such that `nbdtr(k, n, p) = y`. See also -------- nbdtr : Cumulative distribution function of the negative binomial. nbdtrik : Inverse with respect to `k` of `nbdtr(k, n, p)`. nbdtrin : Inverse with respect to `n` of `nbdtr(k, n, p)`. Notes ----- Wrapper for the Cephes [1]_ routine `nbdtri`. References ---------- .. [1] Cephes Mathematical Functions Library, http://www.netlib.org/cephes/index.html """) add_newdoc("scipy.special", "nbdtrik", r""" nbdtrik(y, n, p) Inverse of `nbdtr` vs `k`. Returns the inverse with respect to the parameter `k` of `y = nbdtr(k, n, p)`, the negative binomial cumulative distribution function. Parameters ---------- y : array_like The probability of `k` or fewer failures before `n` successes (float). n : array_like The target number of successes (positive int). p : array_like Probability of success in a single event (float). Returns ------- k : ndarray The maximum number of allowed failures such that `nbdtr(k, n, p) = y`. See also -------- nbdtr : Cumulative distribution function of the negative binomial. nbdtri : Inverse with respect to `p` of `nbdtr(k, n, p)`. 
nbdtrin : Inverse with respect to `n` of `nbdtr(k, n, p)`. Notes ----- Wrapper for the CDFLIB [1]_ Fortran routine `cdfnbn`. Formula 26.5.26 of [2]_, .. math:: \sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j = I_{1 - p}(k + 1, n), is used to reduce calculation of the cumulative distribution function to that of a regularized incomplete beta :math:`I`. Computation of `k` involves a search for a value that produces the desired value of `y`. The search relies on the monotonicity of `y` with `k`. References ---------- .. [1] Barry Brown, James Lovato, and Kathy Russell, CDFLIB: Library of Fortran Routines for Cumulative Distribution Functions, Inverses, and Other Parameters. .. [2] Milton Abramowitz and Irene A. Stegun, eds. Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables. New York: Dover, 1972. """) add_newdoc("scipy.special", "nbdtrin", r""" nbdtrin(k, y, p) Inverse of `nbdtr` vs `n`. Returns the inverse with respect to the parameter `n` of `y = nbdtr(k, n, p)`, the negative binomial cumulative distribution function. Parameters ---------- k : array_like The maximum number of allowed failures (nonnegative int). y : array_like The probability of `k` or fewer failures before `n` successes (float). p : array_like Probability of success in a single event (float). Returns ------- n : ndarray The number of successes `n` such that `nbdtr(k, n, p) = y`. See also -------- nbdtr : Cumulative distribution function of the negative binomial. nbdtri : Inverse with respect to `p` of `nbdtr(k, n, p)`. nbdtrik : Inverse with respect to `k` of `nbdtr(k, n, p)`. Notes ----- Wrapper for the CDFLIB [1]_ Fortran routine `cdfnbn`. Formula 26.5.26 of [2]_, .. math:: \sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j = I_{1 - p}(k + 1, n), is used to reduce calculation of the cumulative distribution function to that of a regularized incomplete beta :math:`I`. 
Computation of `n` involves a search for a value that produces the desired value of `y`. The search relies on the monotonicity of `y` with `n`. References ---------- .. [1] Barry Brown, James Lovato, and Kathy Russell, CDFLIB: Library of Fortran Routines for Cumulative Distribution Functions, Inverses, and Other Parameters. .. [2] Milton Abramowitz and Irene A. Stegun, eds. Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables. New York: Dover, 1972. """) add_newdoc("scipy.special", "ncfdtr", r""" ncfdtr(dfn, dfd, nc, f) Cumulative distribution function of the non-central F distribution. The non-central F describes the distribution of, .. math:: Z = \frac{X/d_n}{Y/d_d} where :math:`X` and :math:`Y` are independently distributed, with :math:`X` distributed non-central :math:`\chi^2` with noncentrality parameter `nc` and :math:`d_n` degrees of freedom, and :math:`Y` distributed :math:`\chi^2` with :math:`d_d` degrees of freedom. Parameters ---------- dfn : array_like Degrees of freedom of the numerator sum of squares. Range (0, inf). dfd : array_like Degrees of freedom of the denominator sum of squares. Range (0, inf). nc : array_like Noncentrality parameter. Should be in range (0, 1e4). f : array_like Quantiles, i.e. the upper limit of integration. Returns ------- cdf : float or ndarray The calculated CDF. If all inputs are scalar, the return will be a float. Otherwise it will be an array. See Also -------- ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`. ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`. ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`. ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`. Notes ----- Wrapper for the CDFLIB [1]_ Fortran routine `cdffnc`. The cumulative distribution function is computed using Formula 26.6.20 of [2]_: .. 
math:: F(d_n, d_d, n_c, f) = \sum_{j=0}^\infty e^{-n_c/2} \frac{(n_c/2)^j}{j!} I_{x}(\frac{d_n}{2} + j, \frac{d_d}{2}), where :math:`I` is the regularized incomplete beta function, and :math:`x = f d_n/(f d_n + d_d)`. The computation time required for this routine is proportional to the noncentrality parameter `nc`. Very large values of this parameter can consume immense computer resources. This is why the search range is bounded by 10,000. References ---------- .. [1] Barry Brown, James Lovato, and Kathy Russell, CDFLIB: Library of Fortran Routines for Cumulative Distribution Functions, Inverses, and Other Parameters. .. [2] Milton Abramowitz and Irene A. Stegun, eds. Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables. New York: Dover, 1972. Examples -------- >>> from scipy import special >>> from scipy import stats >>> import matplotlib.pyplot as plt Plot the CDF of the non-central F distribution, for nc=0. Compare with the F-distribution from scipy.stats: >>> x = np.linspace(-1, 8, num=500) >>> dfn = 3 >>> dfd = 2 >>> ncf_stats = stats.f.cdf(x, dfn, dfd) >>> ncf_special = special.ncfdtr(dfn, dfd, 0, x) >>> fig = plt.figure() >>> ax = fig.add_subplot(111) >>> ax.plot(x, ncf_stats, 'b-', lw=3) >>> ax.plot(x, ncf_special, 'r-') >>> plt.show() """) add_newdoc("scipy.special", "ncfdtri", """ ncfdtri(dfn, dfd, nc, p) Inverse with respect to `f` of the CDF of the non-central F distribution. See `ncfdtr` for more details. Parameters ---------- dfn : array_like Degrees of freedom of the numerator sum of squares. Range (0, inf). dfd : array_like Degrees of freedom of the denominator sum of squares. Range (0, inf). nc : array_like Noncentrality parameter. Should be in range (0, 1e4). p : array_like Value of the cumulative distribution function. Must be in the range [0, 1]. Returns ------- f : float Quantiles, i.e. the upper limit of integration. See Also -------- ncfdtr : CDF of the non-central F distribution. 
ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`. ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`. ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`. Examples -------- >>> from scipy.special import ncfdtr, ncfdtri Compute the CDF for several values of `f`: >>> f = [0.5, 1, 1.5] >>> p = ncfdtr(2, 3, 1.5, f) >>> p array([ 0.20782291, 0.36107392, 0.47345752]) Compute the inverse. We recover the values of `f`, as expected: >>> ncfdtri(2, 3, 1.5, p) array([ 0.5, 1. , 1.5]) """) add_newdoc("scipy.special", "ncfdtridfd", """ ncfdtridfd(dfn, p, nc, f) Calculate degrees of freedom (denominator) for the noncentral F-distribution. This is the inverse with respect to `dfd` of `ncfdtr`. See `ncfdtr` for more details. Parameters ---------- dfn : array_like Degrees of freedom of the numerator sum of squares. Range (0, inf). p : array_like Value of the cumulative distribution function. Must be in the range [0, 1]. nc : array_like Noncentrality parameter. Should be in range (0, 1e4). f : array_like Quantiles, i.e. the upper limit of integration. Returns ------- dfd : float Degrees of freedom of the denominator sum of squares. See Also -------- ncfdtr : CDF of the non-central F distribution. ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`. ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`. ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`. Notes ----- The value of the cumulative noncentral F distribution is not necessarily monotone in either degrees of freedom. There thus may be two values that provide a given CDF value. This routine assumes monotonicity and will find an arbitrary one of the two values. Examples -------- >>> from scipy.special import ncfdtr, ncfdtridfd Compute the CDF for several values of `dfd`: >>> dfd = [1, 2, 3] >>> p = ncfdtr(2, dfd, 0.25, 15) >>> p array([ 0.8097138 , 0.93020416, 0.96787852]) Compute the inverse. 
We recover the values of `dfd`, as expected: >>> ncfdtridfd(2, p, 0.25, 15) array([ 1., 2., 3.]) """) add_newdoc("scipy.special", "ncfdtridfn", """ ncfdtridfn(p, dfd, nc, f) Calculate degrees of freedom (numerator) for the noncentral F-distribution. This is the inverse with respect to `dfn` of `ncfdtr`. See `ncfdtr` for more details. Parameters ---------- p : array_like Value of the cumulative distribution function. Must be in the range [0, 1]. dfd : array_like Degrees of freedom of the denominator sum of squares. Range (0, inf). nc : array_like Noncentrality parameter. Should be in range (0, 1e4). f : float Quantiles, i.e. the upper limit of integration. Returns ------- dfn : float Degrees of freedom of the numerator sum of squares. See Also -------- ncfdtr : CDF of the non-central F distribution. ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`. ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`. ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`. Notes ----- The value of the cumulative noncentral F distribution is not necessarily monotone in either degrees of freedom. There thus may be two values that provide a given CDF value. This routine assumes monotonicity and will find an arbitrary one of the two values. Examples -------- >>> from scipy.special import ncfdtr, ncfdtridfn Compute the CDF for several values of `dfn`: >>> dfn = [1, 2, 3] >>> p = ncfdtr(dfn, 2, 0.25, 15) >>> p array([ 0.92562363, 0.93020416, 0.93188394]) Compute the inverse. We recover the values of `dfn`, as expected: >>> ncfdtridfn(p, 2, 0.25, 15) array([ 1., 2., 3.]) """) add_newdoc("scipy.special", "ncfdtrinc", """ ncfdtrinc(dfn, dfd, p, f) Calculate non-centrality parameter for non-central F distribution. This is the inverse with respect to `nc` of `ncfdtr`. See `ncfdtr` for more details. Parameters ---------- dfn : array_like Degrees of freedom of the numerator sum of squares. Range (0, inf). dfd : array_like Degrees of freedom of the denominator sum of squares. 
Range (0, inf). p : array_like Value of the cumulative distribution function. Must be in the range [0, 1]. f : array_like Quantiles, i.e. the upper limit of integration. Returns ------- nc : float Noncentrality parameter. See Also -------- ncfdtr : CDF of the non-central F distribution. ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`. ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`. ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`. Examples -------- >>> from scipy.special import ncfdtr, ncfdtrinc Compute the CDF for several values of `nc`: >>> nc = [0.5, 1.5, 2.0] >>> p = ncfdtr(2, 3, nc, 15) >>> p array([ 0.96309246, 0.94327955, 0.93304098]) Compute the inverse. We recover the values of `nc`, as expected: >>> ncfdtrinc(2, 3, p, 15) array([ 0.5, 1.5, 2. ]) """) add_newdoc("scipy.special", "nctdtr", """ nctdtr(df, nc, t) Cumulative distribution function of the non-central `t` distribution. Parameters ---------- df : array_like Degrees of freedom of the distribution. Should be in range (0, inf). nc : array_like Noncentrality parameter. Should be in range (-1e6, 1e6). t : array_like Quantiles, i.e. the upper limit of integration. Returns ------- cdf : float or ndarray The calculated CDF. If all inputs are scalar, the return will be a float. Otherwise it will be an array. See Also -------- nctdtrit : Inverse CDF (iCDF) of the non-central t distribution. nctdtridf : Calculate degrees of freedom, given CDF and iCDF values. nctdtrinc : Calculate non-centrality parameter, given CDF iCDF values. Examples -------- >>> from scipy import special >>> from scipy import stats >>> import matplotlib.pyplot as plt Plot the CDF of the non-central t distribution, for nc=0. 
Compare with the t-distribution from scipy.stats: >>> x = np.linspace(-5, 5, num=500) >>> df = 3 >>> nct_stats = stats.t.cdf(x, df) >>> nct_special = special.nctdtr(df, 0, x) >>> fig = plt.figure() >>> ax = fig.add_subplot(111) >>> ax.plot(x, nct_stats, 'b-', lw=3) >>> ax.plot(x, nct_special, 'r-') >>> plt.show() """) add_newdoc("scipy.special", "nctdtridf", """ nctdtridf(p, nc, t) Calculate degrees of freedom for non-central t distribution. See `nctdtr` for more details. Parameters ---------- p : array_like CDF values, in range (0, 1]. nc : array_like Noncentrality parameter. Should be in range (-1e6, 1e6). t : array_like Quantiles, i.e. the upper limit of integration. """) add_newdoc("scipy.special", "nctdtrinc", """ nctdtrinc(df, p, t) Calculate non-centrality parameter for non-central t distribution. See `nctdtr` for more details. Parameters ---------- df : array_like Degrees of freedom of the distribution. Should be in range (0, inf). p : array_like CDF values, in range (0, 1]. t : array_like Quantiles, i.e. the upper limit of integration. """) add_newdoc("scipy.special", "nctdtrit", """ nctdtrit(df, nc, p) Inverse cumulative distribution function of the non-central t distribution. See `nctdtr` for more details. Parameters ---------- df : array_like Degrees of freedom of the distribution. Should be in range (0, inf). nc : array_like Noncentrality parameter. Should be in range (-1e6, 1e6). p : array_like CDF values, in range (0, 1]. """) add_newdoc("scipy.special", "ndtr", r""" ndtr(x) Gaussian cumulative distribution function. Returns the area under the standard Gaussian probability density function, integrated from minus infinity to `x` .. 
math::

       \frac{1}{\sqrt{2\pi}} \int_{-\infty}^x \exp(-t^2/2) dt

    Parameters
    ----------
    x : array_like, real or complex
        Argument

    Returns
    -------
    ndarray
        The value of the normal CDF evaluated at `x`

    See Also
    --------
    erf
    erfc
    scipy.stats.norm
    log_ndtr

    """)

add_newdoc("scipy.special", "nrdtrimn",
    """
    nrdtrimn(p, x, std)

    Calculate mean of normal distribution given other params.

    Parameters
    ----------
    p : array_like
        CDF values, in range (0, 1].
    x : array_like
        Quantiles, i.e. the upper limit of integration.
    std : array_like
        Standard deviation.

    Returns
    -------
    mn : float or ndarray
        The mean of the normal distribution.

    See Also
    --------
    nrdtrisd, ndtr

    """)

add_newdoc("scipy.special", "nrdtrisd",
    """
    nrdtrisd(p, x, mn)

    Calculate standard deviation of normal distribution given other params.

    Parameters
    ----------
    p : array_like
        CDF values, in range (0, 1].
    x : array_like
        Quantiles, i.e. the upper limit of integration.
    mn : float or ndarray
        The mean of the normal distribution.

    Returns
    -------
    std : array_like
        Standard deviation.

    See Also
    --------
    nrdtrimn, ndtr

    """)

add_newdoc("scipy.special", "log_ndtr",
    """
    log_ndtr(x)

    Logarithm of Gaussian cumulative distribution function.

    Returns the log of the area under the standard Gaussian probability
    density function, integrated from minus infinity to `x`::

        log(1/sqrt(2*pi) * integral(exp(-t**2 / 2), t=-inf..x))

    Parameters
    ----------
    x : array_like, real or complex
        Argument

    Returns
    -------
    ndarray
        The value of the log of the normal CDF evaluated at `x`

    See Also
    --------
    erf
    erfc
    scipy.stats.norm
    ndtr

    """)

add_newdoc("scipy.special", "ndtri",
    """
    ndtri(y)

    Inverse of `ndtr` vs x

    Returns the argument x for which the area under the Gaussian
    probability density function (integrated from minus infinity to `x`)
    is equal to y.
""") add_newdoc("scipy.special", "obl_ang1", """ obl_ang1(m, n, c, x) Oblate spheroidal angular function of the first kind and its derivative Computes the oblate spheroidal angular function of the first kind and its derivative (with respect to `x`) for mode parameters m>=0 and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Returns ------- s Value of the function sp Value of the derivative vs x """) add_newdoc("scipy.special", "obl_ang1_cv", """ obl_ang1_cv(m, n, c, cv, x) Oblate spheroidal angular function obl_ang1 for precomputed characteristic value Computes the oblate spheroidal angular function of the first kind and its derivative (with respect to `x`) for mode parameters m>=0 and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires pre-computed characteristic value. Returns ------- s Value of the function sp Value of the derivative vs x """) add_newdoc("scipy.special", "obl_cv", """ obl_cv(m, n, c) Characteristic value of oblate spheroidal function Computes the characteristic value of oblate spheroidal wave functions of order `m`, `n` (n>=m) and spheroidal parameter `c`. """) add_newdoc("scipy.special", "obl_rad1", """ obl_rad1(m, n, c, x) Oblate spheroidal radial function of the first kind and its derivative Computes the oblate spheroidal radial function of the first kind and its derivative (with respect to `x`) for mode parameters m>=0 and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Returns ------- s Value of the function sp Value of the derivative vs x """) add_newdoc("scipy.special", "obl_rad1_cv", """ obl_rad1_cv(m, n, c, cv, x) Oblate spheroidal radial function obl_rad1 for precomputed characteristic value Computes the oblate spheroidal radial function of the first kind and its derivative (with respect to `x`) for mode parameters m>=0 and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires pre-computed characteristic value. 
Returns ------- s Value of the function sp Value of the derivative vs x """) add_newdoc("scipy.special", "obl_rad2", """ obl_rad2(m, n, c, x) Oblate spheroidal radial function of the second kind and its derivative. Computes the oblate spheroidal radial function of the second kind and its derivative (with respect to `x`) for mode parameters m>=0 and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Returns ------- s Value of the function sp Value of the derivative vs x """) add_newdoc("scipy.special", "obl_rad2_cv", """ obl_rad2_cv(m, n, c, cv, x) Oblate spheroidal radial function obl_rad2 for precomputed characteristic value Computes the oblate spheroidal radial function of the second kind and its derivative (with respect to `x`) for mode parameters m>=0 and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires pre-computed characteristic value. Returns ------- s Value of the function sp Value of the derivative vs x """) add_newdoc("scipy.special", "pbdv", """ pbdv(v, x) Parabolic cylinder function D Returns (d, dp) the parabolic cylinder function Dv(x) in d and the derivative, Dv'(x) in dp. Returns ------- d Value of the function dp Value of the derivative vs x """) add_newdoc("scipy.special", "pbvv", """ pbvv(v, x) Parabolic cylinder function V Returns the parabolic cylinder function Vv(x) in v and the derivative, Vv'(x) in vp. Returns ------- v Value of the function vp Value of the derivative vs x """) add_newdoc("scipy.special", "pbwa", r""" pbwa(a, x) Parabolic cylinder function W. The function is a particular solution to the differential equation .. math:: y'' + \left(\frac{1}{4}x^2 - a\right)y = 0, for a full definition see section 12.14 in [1]_. Parameters ---------- a : array_like Real parameter x : array_like Real argument Returns ------- w : scalar or ndarray Value of the function wp : scalar or ndarray Value of the derivative in x Notes ----- The function is a wrapper for a Fortran routine by Zhang and Jin [2]_. 
The implementation is accurate only for ``|a|, |x| < 5`` and returns NaN
    outside that range.

    References
    ----------
    .. [1] Digital Library of Mathematical Functions, 12.14.
           http://dlmf.nist.gov/12.14
    .. [2] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
    """)

add_newdoc("scipy.special", "pdtr",
    """
    pdtr(k, m)

    Poisson cumulative distribution function

    Returns the sum of the first `k` terms of the Poisson distribution:
    sum(exp(-m) * m**j / j!, j=0..k) = gammaincc( k+1, m).  Arguments
    must both be positive and `k` an integer.

    """)

add_newdoc("scipy.special", "pdtrc",
    """
    pdtrc(k, m)

    Poisson survival function

    Returns the sum of the terms from k+1 to infinity of the Poisson
    distribution: sum(exp(-m) * m**j / j!, j=k+1..inf) = gammainc(
    k+1, m).  Arguments must both be positive and `k` an integer.

    """)

add_newdoc("scipy.special", "pdtri",
    """
    pdtri(k, y)

    Inverse to `pdtr` vs m

    Returns the Poisson variable `m` such that the sum from 0 to `k` of
    the Poisson density is equal to the given probability `y`:
    calculated by gammaincinv(k+1, y).  `k` must be a nonnegative
    integer and `y` between 0 and 1.

    """)

add_newdoc("scipy.special", "pdtrik",
    """
    pdtrik(p, m)

    Inverse to `pdtr` vs k

    Returns the quantile k such that ``pdtr(k, m) = p``

    """)

add_newdoc("scipy.special", "poch",
    r"""
    poch(z, m)

    Rising factorial (z)_m

    The Pochhammer symbol (rising factorial), is defined as

    .. math::

        (z)_m = \frac{\Gamma(z + m)}{\Gamma(z)}

    For positive integer `m` it reads

    .. math::

        (z)_m = z (z + 1) ... (z + m - 1)

    Parameters
    ----------
    z : array_like
        (int or float)
    m : array_like
        (int or float)

    Returns
    -------
    poch : ndarray
        The value of the function.
""") add_newdoc("scipy.special", "pro_ang1", """ pro_ang1(m, n, c, x) Prolate spheroidal angular function of the first kind and its derivative Computes the prolate spheroidal angular function of the first kind and its derivative (with respect to `x`) for mode parameters m>=0 and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Returns ------- s Value of the function sp Value of the derivative vs x """) add_newdoc("scipy.special", "pro_ang1_cv", """ pro_ang1_cv(m, n, c, cv, x) Prolate spheroidal angular function pro_ang1 for precomputed characteristic value Computes the prolate spheroidal angular function of the first kind and its derivative (with respect to `x`) for mode parameters m>=0 and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires pre-computed characteristic value. Returns ------- s Value of the function sp Value of the derivative vs x """) add_newdoc("scipy.special", "pro_cv", """ pro_cv(m, n, c) Characteristic value of prolate spheroidal function Computes the characteristic value of prolate spheroidal wave functions of order `m`, `n` (n>=m) and spheroidal parameter `c`. """) add_newdoc("scipy.special", "pro_rad1", """ pro_rad1(m, n, c, x) Prolate spheroidal radial function of the first kind and its derivative Computes the prolate spheroidal radial function of the first kind and its derivative (with respect to `x`) for mode parameters m>=0 and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Returns ------- s Value of the function sp Value of the derivative vs x """) add_newdoc("scipy.special", "pro_rad1_cv", """ pro_rad1_cv(m, n, c, cv, x) Prolate spheroidal radial function pro_rad1 for precomputed characteristic value Computes the prolate spheroidal radial function of the first kind and its derivative (with respect to `x`) for mode parameters m>=0 and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires pre-computed characteristic value. 
Returns ------- s Value of the function sp Value of the derivative vs x """) add_newdoc("scipy.special", "pro_rad2", """ pro_rad2(m, n, c, x) Prolate spheroidal radial function of the second kind and its derivative Computes the prolate spheroidal radial function of the second kind and its derivative (with respect to `x`) for mode parameters m>=0 and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Returns ------- s Value of the function sp Value of the derivative vs x """) add_newdoc("scipy.special", "pro_rad2_cv", """ pro_rad2_cv(m, n, c, cv, x) Prolate spheroidal radial function pro_rad2 for precomputed characteristic value Computes the prolate spheroidal radial function of the second kind and its derivative (with respect to `x`) for mode parameters m>=0 and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires pre-computed characteristic value. Returns ------- s Value of the function sp Value of the derivative vs x """) add_newdoc("scipy.special", "pseudo_huber", r""" pseudo_huber(delta, r) Pseudo-Huber loss function. .. math:: \mathrm{pseudo\_huber}(\delta, r) = \delta^2 \left( \sqrt{ 1 + \left( \frac{r}{\delta} \right)^2 } - 1 \right) Parameters ---------- delta : ndarray Input array, indicating the soft quadratic vs. linear loss changepoint. r : ndarray Input array, possibly representing residuals. Returns ------- res : ndarray The computed Pseudo-Huber loss function values. Notes ----- This function is convex in :math:`r`. .. versionadded:: 0.15.0 """) add_newdoc("scipy.special", "psi", """ psi(z, out=None) The digamma function. The logarithmic derivative of the gamma function evaluated at ``z``. Parameters ---------- z : array_like Real or complex argument. out : ndarray, optional Array for the computed values of ``psi``. Returns ------- digamma : ndarray Computed values of ``psi``. Notes ----- For large values not close to the negative real axis ``psi`` is computed using the asymptotic series (5.11.2) from [1]_. 
For small arguments not close to the negative real axis the recurrence relation (5.5.2) from [1]_ is used until the argument is large enough to use the asymptotic series. For values close to the negative real axis the reflection formula (5.5.4) from [1]_ is used first. Note that ``psi`` has a family of zeros on the negative real axis which occur between the poles at nonpositive integers. Around the zeros the reflection formula suffers from cancellation and the implementation loses precision. The sole positive zero and the first negative zero, however, are handled separately by precomputing series expansions using [2]_, so the function should maintain full accuracy around the origin. References ---------- .. [1] NIST Digital Library of Mathematical Functions http://dlmf.nist.gov/5 .. [2] Fredrik Johansson and others. "mpmath: a Python library for arbitrary-precision floating-point arithmetic" (Version 0.19) http://mpmath.org/ """) add_newdoc("scipy.special", "radian", """ radian(d, m, s) Convert from degrees to radians Returns the angle given in (d)egrees, (m)inutes, and (s)econds in radians. """) add_newdoc("scipy.special", "rel_entr", r""" rel_entr(x, y) Elementwise function for computing relative entropy. .. math:: \mathrm{rel\_entr}(x, y) = \begin{cases} x \log(x / y) & x > 0, y > 0 \\ 0 & x = 0, y \ge 0 \\ \infty & \text{otherwise} \end{cases} Parameters ---------- x : ndarray First input array. y : ndarray Second input array. Returns ------- res : ndarray Output array. See Also -------- entr, kl_div Notes ----- This function is jointly convex in x and y. .. versionadded:: 0.15.0 """) add_newdoc("scipy.special", "rgamma", """ rgamma(z) Gamma function inverted Returns ``1/gamma(x)`` """) add_newdoc("scipy.special", "round", """ round(x) Round to nearest integer Returns the nearest integer to `x` as a double precision floating point result. If `x` ends in 0.5 exactly, the nearest even integer is chosen. 
""") add_newdoc("scipy.special", "shichi", r""" shichi(x, out=None) Hyperbolic sine and cosine integrals. The hyperbolic sine integral is .. math:: \int_0^x \frac{\sinh{t}}{t}dt and the hyperbolic cosine integral is .. math:: \gamma + \log(x) + \int_0^x \frac{\cosh{t} - 1}{t} dt where :math:`\gamma` is Euler's constant and :math:`\log` is the principle branch of the logarithm. Parameters ---------- x : array_like Real or complex points at which to compute the hyperbolic sine and cosine integrals. Returns ------- si : ndarray Hyperbolic sine integral at ``x`` ci : ndarray Hyperbolic cosine integral at ``x`` Notes ----- For real arguments with ``x < 0``, ``chi`` is the real part of the hyperbolic cosine integral. For such points ``chi(x)`` and ``chi(x + 0j)`` differ by a factor of ``1j*pi``. For real arguments the function is computed by calling Cephes' [1]_ *shichi* routine. For complex arguments the algorithm is based on Mpmath's [2]_ *shi* and *chi* routines. References ---------- .. [1] Cephes Mathematical Functions Library, http://www.netlib.org/cephes/index.html .. [2] Fredrik Johansson and others. "mpmath: a Python library for arbitrary-precision floating-point arithmetic" (Version 0.19) http://mpmath.org/ """) add_newdoc("scipy.special", "sici", r""" sici(x, out=None) Sine and cosine integrals. The sine integral is .. math:: \int_0^x \frac{\sin{t}}{t}dt and the cosine integral is .. math:: \gamma + \log(x) + \int_0^x \frac{\cos{t} - 1}{t}dt where :math:`\gamma` is Euler's constant and :math:`\log` is the principle branch of the logarithm. Parameters ---------- x : array_like Real or complex points at which to compute the sine and cosine integrals. Returns ------- si : ndarray Sine integral at ``x`` ci : ndarray Cosine integral at ``x`` Notes ----- For real arguments with ``x < 0``, ``ci`` is the real part of the cosine integral. For such points ``ci(x)`` and ``ci(x + 0j)`` differ by a factor of ``1j*pi``. 
For real arguments the function is computed by calling Cephes'
    [1]_ *sici* routine. For complex arguments the algorithm is based on
    Mpmath's [2]_ *si* and *ci* routines.

    References
    ----------
    .. [1] Cephes Mathematical Functions Library,
           http://www.netlib.org/cephes/index.html
    .. [2] Fredrik Johansson and others.
           "mpmath: a Python library for arbitrary-precision floating-point
           arithmetic" (Version 0.19) http://mpmath.org/
    """)

add_newdoc("scipy.special", "sindg",
    """
    sindg(x)

    Sine of angle given in degrees
    """)

add_newdoc("scipy.special", "smirnov",
    """
    smirnov(n, e)

    Kolmogorov-Smirnov complementary cumulative distribution function

    Returns the exact Kolmogorov-Smirnov complementary cumulative
    distribution function (Dn+ or Dn-) for a one-sided test of
    equality between an empirical and a theoretical distribution. It
    is equal to the probability that the maximum difference between a
    theoretical distribution and an empirical one based on `n` samples
    is greater than e.
    """)

add_newdoc("scipy.special", "smirnovi",
    """
    smirnovi(n, y)

    Inverse to `smirnov`

    Returns ``e`` such that ``smirnov(n, e) = y``.
    """)

add_newdoc("scipy.special", "spence",
    r"""
    spence(z, out=None)

    Spence's function, also known as the dilogarithm. It is defined to be

    .. math::
      \int_1^z \frac{\log(t)}{1 - t}dt

    for complex :math:`z`, where the contour of integration is taken
    to avoid the branch cut of the logarithm. Spence's function is
    analytic everywhere except the negative real axis where it has a
    branch cut.

    Parameters
    ----------
    z : array_like
        Points at which to evaluate Spence's function

    Returns
    -------
    s : ndarray
        Computed values of Spence's function

    Notes
    -----
    There is a different convention which defines Spence's function by
    the integral

    .. math::
      -\int_0^z \frac{\log(1 - t)}{t}dt;

    this is our ``spence(1 - z)``.
""") add_newdoc("scipy.special", "stdtr", """ stdtr(df, t) Student t distribution cumulative density function Returns the integral from minus infinity to t of the Student t distribution with df > 0 degrees of freedom:: gamma((df+1)/2)/(sqrt(df*pi)*gamma(df/2)) * integral((1+x**2/df)**(-df/2-1/2), x=-inf..t) """) add_newdoc("scipy.special", "stdtridf", """ stdtridf(p, t) Inverse of `stdtr` vs df Returns the argument df such that stdtr(df, t) is equal to `p`. """) add_newdoc("scipy.special", "stdtrit", """ stdtrit(df, p) Inverse of `stdtr` vs `t` Returns the argument `t` such that stdtr(df, t) is equal to `p`. """) add_newdoc("scipy.special", "struve", r""" struve(v, x) Struve function. Return the value of the Struve function of order `v` at `x`. The Struve function is defined as, .. math:: H_v(x) = (z/2)^{v + 1} \sum_{n=0}^\infty \frac{(-1)^n (z/2)^{2n}}{\Gamma(n + \frac{3}{2}) \Gamma(n + v + \frac{3}{2})}, where :math:`\Gamma` is the gamma function. Parameters ---------- v : array_like Order of the Struve function (float). x : array_like Argument of the Struve function (float; must be positive unless `v` is an integer). Returns ------- H : ndarray Value of the Struve function of order `v` at `x`. Notes ----- Three methods discussed in [1]_ are used to evaluate the Struve function: - power series - expansion in Bessel functions (if :math:`|z| < |v| + 20`) - asymptotic large-z expansion (if :math:`z \geq 0.7v + 12`) Rounding errors are estimated based on the largest terms in the sums, and the result associated with the smallest error is returned. See also -------- modstruve References ---------- .. [1] NIST Digital Library of Mathematical Functions http://dlmf.nist.gov/11 """) add_newdoc("scipy.special", "tandg", """ tandg(x) Tangent of angle x given in degrees. 
""") add_newdoc("scipy.special", "tklmbda", """ tklmbda(x, lmbda) Tukey-Lambda cumulative distribution function """) add_newdoc("scipy.special", "wofz", """ wofz(z) Faddeeva function Returns the value of the Faddeeva function for complex argument:: exp(-z**2) * erfc(-i*z) See Also -------- dawsn, erf, erfc, erfcx, erfi References ---------- .. [1] Steven G. Johnson, Faddeeva W function implementation. http://ab-initio.mit.edu/Faddeeva Examples -------- >>> from scipy import special >>> import matplotlib.pyplot as plt >>> x = np.linspace(-3, 3) >>> z = special.wofz(x) >>> plt.plot(x, z.real, label='wofz(x).real') >>> plt.plot(x, z.imag, label='wofz(x).imag') >>> plt.xlabel('$x$') >>> plt.legend(framealpha=1, shadow=True) >>> plt.grid(alpha=0.25) >>> plt.show() """) add_newdoc("scipy.special", "xlogy", """ xlogy(x, y) Compute ``x*log(y)`` so that the result is 0 if ``x = 0``. Parameters ---------- x : array_like Multiplier y : array_like Argument Returns ------- z : array_like Computed x*log(y) Notes ----- .. versionadded:: 0.13.0 """) add_newdoc("scipy.special", "xlog1py", """ xlog1py(x, y) Compute ``x*log1p(y)`` so that the result is 0 if ``x = 0``. Parameters ---------- x : array_like Multiplier y : array_like Argument Returns ------- z : array_like Computed x*log1p(y) Notes ----- .. versionadded:: 0.13.0 """) add_newdoc("scipy.special", "y0", r""" y0(x) Bessel function of the second kind of order 0. Parameters ---------- x : array_like Argument (float). Returns ------- Y : ndarray Value of the Bessel function of the second kind of order 0 at `x`. Notes ----- The domain is divided into the intervals [0, 5] and (5, infinity). In the first interval a rational approximation :math:`R(x)` is employed to compute, .. math:: Y_0(x) = R(x) + \frac{2 \log(x) J_0(x)}{\pi}, where :math:`J_0` is the Bessel function of the first kind of order 0. In the second interval, the Hankel asymptotic expansion is employed with two rational functions of degree 6/6 and 7/7. 
This function is a wrapper for the Cephes [1]_ routine `y0`. See also -------- j0 yv References ---------- .. [1] Cephes Mathematical Functions Library, http://www.netlib.org/cephes/index.html """) add_newdoc("scipy.special", "y1", """ y1(x) Bessel function of the second kind of order 1. Parameters ---------- x : array_like Argument (float). Returns ------- Y : ndarray Value of the Bessel function of the second kind of order 1 at `x`. Notes ----- The domain is divided into the intervals [0, 8] and (8, infinity). In the first interval a 25 term Chebyshev expansion is used, and computing :math:`J_1` (the Bessel function of the first kind) is required. In the second, the asymptotic trigonometric representation is employed using two rational functions of degree 5/5. This function is a wrapper for the Cephes [1]_ routine `y1`. See also -------- j1 yn yv References ---------- .. [1] Cephes Mathematical Functions Library, http://www.netlib.org/cephes/index.html """) add_newdoc("scipy.special", "yn", r""" yn(n, x) Bessel function of the second kind of integer order and real argument. Parameters ---------- n : array_like Order (integer). z : array_like Argument (float). Returns ------- Y : ndarray Value of the Bessel function, :math:`Y_n(x)`. Notes ----- Wrapper for the Cephes [1]_ routine `yn`. The function is evaluated by forward recurrence on `n`, starting with values computed by the Cephes routines `y0` and `y1`. If `n = 0` or 1, the routine for `y0` or `y1` is called directly. See also -------- yv : For real order and real or complex argument. References ---------- .. [1] Cephes Mathematical Functions Library, http://www.netlib.org/cephes/index.html """) add_newdoc("scipy.special", "yv", r""" yv(v, z) Bessel function of the second kind of real order and complex argument. Parameters ---------- v : array_like Order (float). z : array_like Argument (float or complex). Returns ------- Y : ndarray Value of the Bessel function of the second kind, :math:`Y_v(x)`. 
Notes ----- For positive `v` values, the computation is carried out using the AMOS [1]_ `zbesy` routine, which exploits the connection to the Hankel Bessel functions :math:`H_v^{(1)}` and :math:`H_v^{(2)}`, .. math:: Y_v(z) = \frac{1}{2\imath} (H_v^{(1)} - H_v^{(2)}). For negative `v` values the formula, .. math:: Y_{-v}(z) = Y_v(z) \cos(\pi v) + J_v(z) \sin(\pi v) is used, where :math:`J_v(z)` is the Bessel function of the first kind, computed using the AMOS routine `zbesj`. Note that the second term is exactly zero for integer `v`; to improve accuracy the second term is explicitly omitted for `v` values such that `v = floor(v)`. See also -------- yve : :math:`Y_v` with leading exponential behavior stripped off. References ---------- .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions of a Complex Argument and Nonnegative Order", http://netlib.org/amos/ """) add_newdoc("scipy.special", "yve", r""" yve(v, z) Exponentially scaled Bessel function of the second kind of real order. Returns the exponentially scaled Bessel function of the second kind of real order `v` at complex `z`:: yve(v, z) = yv(v, z) * exp(-abs(z.imag)) Parameters ---------- v : array_like Order (float). z : array_like Argument (float or complex). Returns ------- Y : ndarray Value of the exponentially scaled Bessel function. Notes ----- For positive `v` values, the computation is carried out using the AMOS [1]_ `zbesy` routine, which exploits the connection to the Hankel Bessel functions :math:`H_v^{(1)}` and :math:`H_v^{(2)}`, .. math:: Y_v(z) = \frac{1}{2\imath} (H_v^{(1)} - H_v^{(2)}). For negative `v` values the formula, .. math:: Y_{-v}(z) = Y_v(z) \cos(\pi v) + J_v(z) \sin(\pi v) is used, where :math:`J_v(z)` is the Bessel function of the first kind, computed using the AMOS routine `zbesj`. Note that the second term is exactly zero for integer `v`; to improve accuracy the second term is explicitly omitted for `v` values such that `v = floor(v)`. References ---------- .. 
[1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions of a Complex Argument and Nonnegative Order", http://netlib.org/amos/ """) add_newdoc("scipy.special", "_zeta", """ _zeta(x, q) Internal function, Hurwitz zeta. """) add_newdoc("scipy.special", "zetac", """ zetac(x) Riemann zeta function minus 1. This function is defined as .. math:: \\zeta(x) = \\sum_{k=2}^{\\infty} 1 / k^x, where ``x > 1``. For ``x < 1``, the analytic continuation is computed. Because of limitations of the numerical algorithm, ``zetac(x)`` returns `nan` for `x` less than -30.8148. Parameters ---------- x : array_like of float Values at which to compute zeta(x) - 1 (must be real). Returns ------- out : array_like Values of zeta(x) - 1. See Also -------- zeta Examples -------- >>> from scipy.special import zetac, zeta Some special values: >>> zetac(2), np.pi**2/6 - 1 (0.64493406684822641, 0.6449340668482264) >>> zetac(-1), -1.0/12 - 1 (-1.0833333333333333, -1.0833333333333333) Compare ``zetac(x)`` to ``zeta(x) - 1`` for large `x`: >>> zetac(60), zeta(60) - 1 (8.673617380119933e-19, 0.0) """) add_newdoc("scipy.special", "_struve_asymp_large_z", """ _struve_asymp_large_z(v, z, is_h) Internal function for testing `struve` & `modstruve` Evaluates using asymptotic expansion Returns ------- v, err """) add_newdoc("scipy.special", "_struve_power_series", """ _struve_power_series(v, z, is_h) Internal function for testing `struve` & `modstruve` Evaluates using power series Returns ------- v, err """) add_newdoc("scipy.special", "_struve_bessel_series", """ _struve_bessel_series(v, z, is_h) Internal function for testing `struve` & `modstruve` Evaluates using Bessel function series Returns ------- v, err """) add_newdoc("scipy.special", "_spherical_jn", """ Internal function, use `spherical_jn` instead. """) add_newdoc("scipy.special", "_spherical_jn_d", """ Internal function, use `spherical_jn` instead. 
""") add_newdoc("scipy.special", "_spherical_yn", """ Internal function, use `spherical_yn` instead. """) add_newdoc("scipy.special", "_spherical_yn_d", """ Internal function, use `spherical_yn` instead. """) add_newdoc("scipy.special", "_spherical_in", """ Internal function, use `spherical_in` instead. """) add_newdoc("scipy.special", "_spherical_in_d", """ Internal function, use `spherical_in` instead. """) add_newdoc("scipy.special", "_spherical_kn", """ Internal function, use `spherical_kn` instead. """) add_newdoc("scipy.special", "_spherical_kn_d", """ Internal function, use `spherical_kn` instead. """) add_newdoc("scipy.special", "loggamma", r""" loggamma(z, out=None) Principal branch of the logarithm of the Gamma function. Defined to be :math:`\log(\Gamma(x))` for :math:`x > 0` and extended to the complex plane by analytic continuation. The function has a single branch cut on the negative real axis. .. versionadded:: 0.18.0 Parameters ---------- z : array-like Values in the complex plain at which to compute ``loggamma`` out : ndarray, optional Output array for computed values of ``loggamma`` Returns ------- loggamma : ndarray Values of ``loggamma`` at z. Notes ----- It is not generally true that :math:`\log\Gamma(z) = \log(\Gamma(z))`, though the real parts of the functions do agree. The benefit of not defining ``loggamma`` as :math:`\log(\Gamma(z))` is that the latter function has a complicated branch cut structure whereas ``loggamma`` is analytic except for on the negative real axis. The identities .. math:: \exp(\log\Gamma(z)) &= \Gamma(z) \\ \log\Gamma(z + 1) &= \log(z) + \log\Gamma(z) make ``loggama`` useful for working in complex logspace. However, ``loggamma`` necessarily returns complex outputs for real inputs, so if you want to work only with real numbers use `gammaln`. 
On the real line the two functions are related by ``exp(loggamma(x)) = gammasgn(x)*exp(gammaln(x))``, though in practice rounding errors will introduce small spurious imaginary components in ``exp(loggamma(x))``. The implementation here is based on [hare1997]_. See also -------- gammaln : logarithm of the absolute value of the Gamma function gammasgn : sign of the gamma function References ---------- .. [hare1997] D.E.G. Hare, *Computing the Principal Branch of log-Gamma*, Journal of Algorithms, Volume 25, Issue 2, November 1997, pages 221-236. """) add_newdoc("scipy.special", "_sinpi", """ Internal function, do not use. """) add_newdoc("scipy.special", "_cospi", """ Internal function, do not use. """) add_newdoc("scipy.special", "owens_t", """ owens_t(h, a) Owen's T Function. The function T(h, a) gives the probability of the event (X > h and 0 < Y < a * X) where X and Y are independent standard normal random variables. Parameters ---------- h: array_like Input value. a: array_like Input value. Returns ------- t: scalar or ndarray Probability of the event (X > h and 0 < Y < a * X), where X and Y are independent standard normal random variables. Examples -------- >>> from scipy import special >>> a = 3.5 >>> h = 0.78 >>> special.owens_t(h, a) 0.10877216734852274 References ---------- .. [1] M. Patefield and D. Tandy, "Fast and accurate calculation of Owen's T Function", Statistical Software vol. 5, pp. 1-25, 2000. """)
176,767
24.116226
194
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/_ellip_harm.py
from __future__ import division, print_function, absolute_import import threading import numpy as np from ._ufuncs import _ellip_harm from ._ellip_harm_2 import _ellipsoid, _ellipsoid_norm def ellip_harm(h2, k2, n, p, s, signm=1, signn=1): r""" Ellipsoidal harmonic functions E^p_n(l) These are also known as Lame functions of the first kind, and are solutions to the Lame equation: .. math:: (s^2 - h^2)(s^2 - k^2)E''(s) + s(2s^2 - h^2 - k^2)E'(s) + (a - q s^2)E(s) = 0 where :math:`q = (n+1)n` and :math:`a` is the eigenvalue (not returned) corresponding to the solutions. Parameters ---------- h2 : float ``h**2`` k2 : float ``k**2``; should be larger than ``h**2`` n : int Degree s : float Coordinate p : int Order, can range between [1,2n+1] signm : {1, -1}, optional Sign of prefactor of functions. Can be +/-1. See Notes. signn : {1, -1}, optional Sign of prefactor of functions. Can be +/-1. See Notes. Returns ------- E : float the harmonic :math:`E^p_n(s)` See Also -------- ellip_harm_2, ellip_normal Notes ----- The geometric interpretation of the ellipsoidal functions is explained in [2]_, [3]_, [4]_. The `signm` and `signn` arguments control the sign of prefactors for functions according to their type:: K : +1 L : signm M : signn N : signm*signn .. versionadded:: 0.15.0 References ---------- .. [1] Digital Library of Mathematical Functions 29.12 http://dlmf.nist.gov/29.12 .. [2] Bardhan and Knepley, "Computational science and re-discovery: open-source implementations of ellipsoidal harmonics for problems in potential theory", Comput. Sci. Disc. 5, 014006 (2012) :doi:`10.1088/1749-4699/5/1/014006`. .. [3] David J.and Dechambre P, "Computation of Ellipsoidal Gravity Field Harmonics for small solar system bodies" pp. 30-36, 2000 .. [4] George Dassios, "Ellipsoidal Harmonics: Theory and Applications" pp. 
418, 2012 Examples -------- >>> from scipy.special import ellip_harm >>> w = ellip_harm(5,8,1,1,2.5) >>> w 2.5 Check that the functions indeed are solutions to the Lame equation: >>> from scipy.interpolate import UnivariateSpline >>> def eigenvalue(f, df, ddf): ... r = ((s**2 - h**2)*(s**2 - k**2)*ddf + s*(2*s**2 - h**2 - k**2)*df - n*(n+1)*s**2*f)/f ... return -r.mean(), r.std() >>> s = np.linspace(0.1, 10, 200) >>> k, h, n, p = 8.0, 2.2, 3, 2 >>> E = ellip_harm(h**2, k**2, n, p, s) >>> E_spl = UnivariateSpline(s, E) >>> a, a_err = eigenvalue(E_spl(s), E_spl(s,1), E_spl(s,2)) >>> a, a_err (583.44366156701483, 6.4580890640310646e-11) """ return _ellip_harm(h2, k2, n, p, s, signm, signn) _ellip_harm_2_vec = np.vectorize(_ellipsoid, otypes='d') def ellip_harm_2(h2, k2, n, p, s): r""" Ellipsoidal harmonic functions F^p_n(l) These are also known as Lame functions of the second kind, and are solutions to the Lame equation: .. math:: (s^2 - h^2)(s^2 - k^2)F''(s) + s(2s^2 - h^2 - k^2)F'(s) + (a - q s^2)F(s) = 0 where :math:`q = (n+1)n` and :math:`a` is the eigenvalue (not returned) corresponding to the solutions. Parameters ---------- h2 : float ``h**2`` k2 : float ``k**2``; should be larger than ``h**2`` n : int Degree. p : int Order, can range between [1,2n+1]. s : float Coordinate Returns ------- F : float The harmonic :math:`F^p_n(s)` Notes ----- Lame functions of the second kind are related to the functions of the first kind: .. math:: F^p_n(s)=(2n + 1)E^p_n(s)\int_{0}^{1/s}\frac{du}{(E^p_n(1/u))^2\sqrt{(1-u^2k^2)(1-u^2h^2)}} .. 
versionadded:: 0.15.0 See Also -------- ellip_harm, ellip_normal Examples -------- >>> from scipy.special import ellip_harm_2 >>> w = ellip_harm_2(5,8,2,1,10) >>> w 0.00108056853382 """ with np.errstate(all='ignore'): return _ellip_harm_2_vec(h2, k2, n, p, s) def _ellip_normal_vec(h2, k2, n, p): return _ellipsoid_norm(h2, k2, n, p) _ellip_normal_vec = np.vectorize(_ellip_normal_vec, otypes='d') def ellip_normal(h2, k2, n, p): r""" Ellipsoidal harmonic normalization constants gamma^p_n The normalization constant is defined as .. math:: \gamma^p_n=8\int_{0}^{h}dx\int_{h}^{k}dy\frac{(y^2-x^2)(E^p_n(y)E^p_n(x))^2}{\sqrt((k^2-y^2)(y^2-h^2)(h^2-x^2)(k^2-x^2)} Parameters ---------- h2 : float ``h**2`` k2 : float ``k**2``; should be larger than ``h**2`` n : int Degree. p : int Order, can range between [1,2n+1]. Returns ------- gamma : float The normalization constant :math:`\gamma^p_n` See Also -------- ellip_harm, ellip_harm_2 Notes ----- .. versionadded:: 0.15.0 Examples -------- >>> from scipy.special import ellip_normal >>> w = ellip_normal(5,8,3,7) >>> w 1723.38796997 """ with np.errstate(all='ignore'): return _ellip_normal_vec(h2, k2, n, p)
5,328
24.255924
127
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/orthogonal.py
""" A collection of functions to find the weights and abscissas for Gaussian Quadrature. These calculations are done by finding the eigenvalues of a tridiagonal matrix whose entries are dependent on the coefficients in the recursion formula for the orthogonal polynomials with the corresponding weighting function over the interval. Many recursion relations for orthogonal polynomials are given: .. math:: a1n f_{n+1} (x) = (a2n + a3n x ) f_n (x) - a4n f_{n-1} (x) The recursion relation of interest is .. math:: P_{n+1} (x) = (x - A_n) P_n (x) - B_n P_{n-1} (x) where :math:`P` has a different normalization than :math:`f`. The coefficients can be found as: .. math:: A_n = -a2n / a3n \\qquad B_n = ( a4n / a3n \\sqrt{h_n-1 / h_n})^2 where .. math:: h_n = \\int_a^b w(x) f_n(x)^2 assume: .. math:: P_0 (x) = 1 \\qquad P_{-1} (x) == 0 For the mathematical background, see [golub.welsch-1969-mathcomp]_ and [abramowitz.stegun-1965]_. References ---------- .. [golub.welsch-1969-mathcomp] Golub, Gene H, and John H Welsch. 1969. Calculation of Gauss Quadrature Rules. *Mathematics of Computation* 23, 221-230+s1--s10. .. [abramowitz.stegun-1965] Abramowitz, Milton, and Irene A Stegun. (1965) *Handbook of Mathematical Functions: with Formulas, Graphs, and Mathematical Tables*. Gaithersburg, MD: National Bureau of Standards. http://www.math.sfu.ca/~cbm/aands/ .. [townsend.trogdon.olver-2014] Townsend, A. and Trogdon, T. and Olver, S. (2014) *Fast computation of Gauss quadrature nodes and weights on the whole real line*. :arXiv:`1410.5286`. .. [townsend.trogdon.olver-2015] Townsend, A. and Trogdon, T. and Olver, S. (2015) *Fast computation of Gauss quadrature nodes and weights on the whole real line*. IMA Journal of Numerical Analysis :doi:`10.1093/imanum/drv002`. """ # # Author: Travis Oliphant 2000 # Updated Sep. 2003 (fixed bugs --- tested to be accurate) from __future__ import division, print_function, absolute_import # Scipy imports. 
import numpy as np from numpy import (exp, inf, pi, sqrt, floor, sin, cos, around, int, hstack, arccos, arange) from scipy import linalg from scipy.special import airy # Local imports. from . import _ufuncs from . import _ufuncs as cephes _gam = cephes.gamma from . import specfun _polyfuns = ['legendre', 'chebyt', 'chebyu', 'chebyc', 'chebys', 'jacobi', 'laguerre', 'genlaguerre', 'hermite', 'hermitenorm', 'gegenbauer', 'sh_legendre', 'sh_chebyt', 'sh_chebyu', 'sh_jacobi'] # Correspondence between new and old names of root functions _rootfuns_map = {'roots_legendre': 'p_roots', 'roots_chebyt': 't_roots', 'roots_chebyu': 'u_roots', 'roots_chebyc': 'c_roots', 'roots_chebys': 's_roots', 'roots_jacobi': 'j_roots', 'roots_laguerre': 'l_roots', 'roots_genlaguerre': 'la_roots', 'roots_hermite': 'h_roots', 'roots_hermitenorm': 'he_roots', 'roots_gegenbauer': 'cg_roots', 'roots_sh_legendre': 'ps_roots', 'roots_sh_chebyt': 'ts_roots', 'roots_sh_chebyu': 'us_roots', 'roots_sh_jacobi': 'js_roots'} _evalfuns = ['eval_legendre', 'eval_chebyt', 'eval_chebyu', 'eval_chebyc', 'eval_chebys', 'eval_jacobi', 'eval_laguerre', 'eval_genlaguerre', 'eval_hermite', 'eval_hermitenorm', 'eval_gegenbauer', 'eval_sh_legendre', 'eval_sh_chebyt', 'eval_sh_chebyu', 'eval_sh_jacobi'] __all__ = _polyfuns + list(_rootfuns_map.keys()) + _evalfuns + ['poch', 'binom'] class orthopoly1d(np.poly1d): def __init__(self, roots, weights=None, hn=1.0, kn=1.0, wfunc=None, limits=None, monic=False, eval_func=None): equiv_weights = [weights[k] / wfunc(roots[k]) for k in range(len(roots))] mu = sqrt(hn) if monic: evf = eval_func if evf: knn = kn eval_func = lambda x: evf(x) / knn mu = mu / abs(kn) kn = 1.0 # compute coefficients from roots, then scale poly = np.poly1d(roots, r=True) np.poly1d.__init__(self, poly.coeffs * float(kn)) # TODO: In numpy 1.13, there is no need to use __dict__ to access attributes self.__dict__['weights'] = np.array(list(zip(roots, weights, equiv_weights))) self.__dict__['weight_func'] = 
wfunc self.__dict__['limits'] = limits self.__dict__['normcoef'] = mu # Note: eval_func will be discarded on arithmetic self.__dict__['_eval_func'] = eval_func def __call__(self, v): if self._eval_func and not isinstance(v, np.poly1d): return self._eval_func(v) else: return np.poly1d.__call__(self, v) def _scale(self, p): if p == 1.0: return try: self._coeffs except AttributeError: self.__dict__['coeffs'] *= p else: # the coeffs attr is be made private in future versions of numpy self._coeffs *= p evf = self._eval_func if evf: self.__dict__['_eval_func'] = lambda x: evf(x) * p self.__dict__['normcoef'] *= p def _gen_roots_and_weights(n, mu0, an_func, bn_func, f, df, symmetrize, mu): """[x,w] = gen_roots_and_weights(n,an_func,sqrt_bn_func,mu) Returns the roots (x) of an nth order orthogonal polynomial, and weights (w) to use in appropriate Gaussian quadrature with that orthogonal polynomial. The polynomials have the recurrence relation P_n+1(x) = (x - A_n) P_n(x) - B_n P_n-1(x) an_func(n) should return A_n sqrt_bn_func(n) should return sqrt(B_n) mu ( = h_0 ) is the integral of the weight over the orthogonal interval """ k = np.arange(n, dtype='d') c = np.zeros((2, n)) c[0,1:] = bn_func(k[1:]) c[1,:] = an_func(k) x = linalg.eigvals_banded(c, overwrite_a_band=True) # improve roots by one application of Newton's method y = f(n, x) dy = df(n, x) x -= y/dy fm = f(n-1, x) fm /= np.abs(fm).max() dy /= np.abs(dy).max() w = 1.0 / (fm * dy) if symmetrize: w = (w + w[::-1]) / 2 x = (x - x[::-1]) / 2 w *= mu0 / w.sum() if mu: return x, w, mu0 else: return x, w # Jacobi Polynomials 1 P^(alpha,beta)_n(x) def roots_jacobi(n, alpha, beta, mu=False): r"""Gauss-Jacobi quadrature. Computes the sample points and weights for Gauss-Jacobi quadrature. The sample points are the roots of the n-th degree Jacobi polynomial, :math:`P^{\alpha, \beta}_n(x)`. 
These sample points and weights correctly integrate polynomials of degree :math:`2n - 1` or less over the interval :math:`[-1, 1]` with weight function :math:`f(x) = (1 - x)^{\alpha} (1 + x)^{\beta}`. Parameters ---------- n : int quadrature order alpha : float alpha must be > -1 beta : float beta must be > -1 mu : bool, optional If True, return the sum of the weights, optional. Returns ------- x : ndarray Sample points w : ndarray Weights mu : float Sum of the weights See Also -------- scipy.integrate.quadrature scipy.integrate.fixed_quad """ m = int(n) if n < 1 or n != m: raise ValueError("n must be a positive integer.") if alpha <= -1 or beta <= -1: raise ValueError("alpha and beta must be greater than -1.") if alpha == 0.0 and beta == 0.0: return roots_legendre(m, mu) if alpha == beta: return roots_gegenbauer(m, alpha+0.5, mu) mu0 = 2.0**(alpha+beta+1)*cephes.beta(alpha+1, beta+1) a = alpha b = beta if a + b == 0.0: an_func = lambda k: np.where(k == 0, (b-a)/(2+a+b), 0.0) else: an_func = lambda k: np.where(k == 0, (b-a)/(2+a+b), (b*b - a*a) / ((2.0*k+a+b)*(2.0*k+a+b+2))) bn_func = lambda k: 2.0 / (2.0*k+a+b)*np.sqrt((k+a)*(k+b) / (2*k+a+b+1)) \ * np.where(k == 1, 1.0, np.sqrt(k*(k+a+b) / (2.0*k+a+b-1))) f = lambda n, x: cephes.eval_jacobi(n, a, b, x) df = lambda n, x: 0.5 * (n + a + b + 1) \ * cephes.eval_jacobi(n-1, a+1, b+1, x) return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, False, mu) def jacobi(n, alpha, beta, monic=False): r"""Jacobi polynomial. Defined to be the solution of .. math:: (1 - x^2)\frac{d^2}{dx^2}P_n^{(\alpha, \beta)} + (\beta - \alpha - (\alpha + \beta + 2)x) \frac{d}{dx}P_n^{(\alpha, \beta)} + n(n + \alpha + \beta + 1)P_n^{(\alpha, \beta)} = 0 for :math:`\alpha, \beta > -1`; :math:`P_n^{(\alpha, \beta)}` is a polynomial of degree :math:`n`. Parameters ---------- n : int Degree of the polynomial. alpha : float Parameter, must be greater than -1. beta : float Parameter, must be greater than -1. 
monic : bool, optional If `True`, scale the leading coefficient to be 1. Default is `False`. Returns ------- P : orthopoly1d Jacobi polynomial. Notes ----- For fixed :math:`\alpha, \beta`, the polynomials :math:`P_n^{(\alpha, \beta)}` are orthogonal over :math:`[-1, 1]` with weight function :math:`(1 - x)^\alpha(1 + x)^\beta`. """ if n < 0: raise ValueError("n must be nonnegative.") wfunc = lambda x: (1 - x)**alpha * (1 + x)**beta if n == 0: return orthopoly1d([], [], 1.0, 1.0, wfunc, (-1, 1), monic, eval_func=np.ones_like) x, w, mu = roots_jacobi(n, alpha, beta, mu=True) ab1 = alpha + beta + 1.0 hn = 2**ab1 / (2 * n + ab1) * _gam(n + alpha + 1) hn *= _gam(n + beta + 1.0) / _gam(n + 1) / _gam(n + ab1) kn = _gam(2 * n + ab1) / 2.0**n / _gam(n + 1) / _gam(n + ab1) # here kn = coefficient on x^n term p = orthopoly1d(x, w, hn, kn, wfunc, (-1, 1), monic, lambda x: eval_jacobi(n, alpha, beta, x)) return p # Jacobi Polynomials shifted G_n(p,q,x) def roots_sh_jacobi(n, p1, q1, mu=False): """Gauss-Jacobi (shifted) quadrature. Computes the sample points and weights for Gauss-Jacobi (shifted) quadrature. The sample points are the roots of the n-th degree shifted Jacobi polynomial, :math:`G^{p,q}_n(x)`. These sample points and weights correctly integrate polynomials of degree :math:`2n - 1` or less over the interval :math:`[0, 1]` with weight function :math:`f(x) = (1 - x)^{p-q} x^{q-1}` Parameters ---------- n : int quadrature order p1 : float (p1 - q1) must be > -1 q1 : float q1 must be > 0 mu : bool, optional If True, return the sum of the weights, optional. 
Returns ------- x : ndarray Sample points w : ndarray Weights mu : float Sum of the weights See Also -------- scipy.integrate.quadrature scipy.integrate.fixed_quad """ if (p1-q1) <= -1 or q1 <= 0: raise ValueError("(p - q) must be greater than -1, and q must be greater than 0.") x, w, m = roots_jacobi(n, p1-q1, q1-1, True) x = (x + 1) / 2 scale = 2.0**p1 w /= scale m /= scale if mu: return x, w, m else: return x, w def sh_jacobi(n, p, q, monic=False): r"""Shifted Jacobi polynomial. Defined by .. math:: G_n^{(p, q)}(x) = \binom{2n + p - 1}{n}^{-1}P_n^{(p - q, q - 1)}(2x - 1), where :math:`P_n^{(\cdot, \cdot)}` is the nth Jacobi polynomial. Parameters ---------- n : int Degree of the polynomial. p : float Parameter, must have :math:`p > q - 1`. q : float Parameter, must be greater than 0. monic : bool, optional If `True`, scale the leading coefficient to be 1. Default is `False`. Returns ------- G : orthopoly1d Shifted Jacobi polynomial. Notes ----- For fixed :math:`p, q`, the polynomials :math:`G_n^{(p, q)}` are orthogonal over :math:`[0, 1]` with weight function :math:`(1 - x)^{p - q}x^{q - 1}`. """ if n < 0: raise ValueError("n must be nonnegative.") wfunc = lambda x: (1.0 - x)**(p - q) * (x)**(q - 1.) if n == 0: return orthopoly1d([], [], 1.0, 1.0, wfunc, (-1, 1), monic, eval_func=np.ones_like) n1 = n x, w, mu0 = roots_sh_jacobi(n1, p, q, mu=True) hn = _gam(n + 1) * _gam(n + q) * _gam(n + p) * _gam(n + p - q + 1) hn /= (2 * n + p) * (_gam(2 * n + p)**2) # kn = 1.0 in standard form so monic is redundant. Kept for compatibility. kn = 1.0 pp = orthopoly1d(x, w, hn, kn, wfunc=wfunc, limits=(0, 1), monic=monic, eval_func=lambda x: eval_sh_jacobi(n, p, q, x)) return pp # Generalized Laguerre L^(alpha)_n(x) def roots_genlaguerre(n, alpha, mu=False): r"""Gauss-generalized Laguerre quadrature. Computes the sample points and weights for Gauss-generalized Laguerre quadrature. 
The sample points are the roots of the n-th degree generalized Laguerre polynomial, :math:`L^{\alpha}_n(x)`. These sample points and weights correctly integrate polynomials of degree :math:`2n - 1` or less over the interval :math:`[0, \infty]` with weight function :math:`f(x) = x^{\alpha} e^{-x}`. Parameters ---------- n : int quadrature order alpha : float alpha must be > -1 mu : bool, optional If True, return the sum of the weights, optional. Returns ------- x : ndarray Sample points w : ndarray Weights mu : float Sum of the weights See Also -------- scipy.integrate.quadrature scipy.integrate.fixed_quad """ m = int(n) if n < 1 or n != m: raise ValueError("n must be a positive integer.") if alpha < -1: raise ValueError("alpha must be greater than -1.") mu0 = cephes.gamma(alpha + 1) if m == 1: x = np.array([alpha+1.0], 'd') w = np.array([mu0], 'd') if mu: return x, w, mu0 else: return x, w an_func = lambda k: 2 * k + alpha + 1 bn_func = lambda k: -np.sqrt(k * (k + alpha)) f = lambda n, x: cephes.eval_genlaguerre(n, alpha, x) df = lambda n, x: (n*cephes.eval_genlaguerre(n, alpha, x) - (n + alpha)*cephes.eval_genlaguerre(n-1, alpha, x))/x return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, False, mu) def genlaguerre(n, alpha, monic=False): r"""Generalized (associated) Laguerre polynomial. Defined to be the solution of .. math:: x\frac{d^2}{dx^2}L_n^{(\alpha)} + (\alpha + 1 - x)\frac{d}{dx}L_n^{(\alpha)} + nL_n^{(\alpha)} = 0, where :math:`\alpha > -1`; :math:`L_n^{(\alpha)}` is a polynomial of degree :math:`n`. Parameters ---------- n : int Degree of the polynomial. alpha : float Parameter, must be greater than -1. monic : bool, optional If `True`, scale the leading coefficient to be 1. Default is `False`. Returns ------- L : orthopoly1d Generalized Laguerre polynomial. Notes ----- For fixed :math:`\alpha`, the polynomials :math:`L_n^{(\alpha)}` are orthogonal over :math:`[0, \infty)` with weight function :math:`e^{-x}x^\alpha`. 
The Laguerre polynomials are the special case where :math:`\alpha = 0`. See Also -------- laguerre : Laguerre polynomial. """ if alpha <= -1: raise ValueError("alpha must be > -1") if n < 0: raise ValueError("n must be nonnegative.") if n == 0: n1 = n + 1 else: n1 = n x, w, mu0 = roots_genlaguerre(n1, alpha, mu=True) wfunc = lambda x: exp(-x) * x**alpha if n == 0: x, w = [], [] hn = _gam(n + alpha + 1) / _gam(n + 1) kn = (-1)**n / _gam(n + 1) p = orthopoly1d(x, w, hn, kn, wfunc, (0, inf), monic, lambda x: eval_genlaguerre(n, alpha, x)) return p # Laguerre L_n(x) def roots_laguerre(n, mu=False): r"""Gauss-Laguerre quadrature. Computes the sample points and weights for Gauss-Laguerre quadrature. The sample points are the roots of the n-th degree Laguerre polynomial, :math:`L_n(x)`. These sample points and weights correctly integrate polynomials of degree :math:`2n - 1` or less over the interval :math:`[0, \infty]` with weight function :math:`f(x) = e^{-x}`. Parameters ---------- n : int quadrature order mu : bool, optional If True, return the sum of the weights, optional. Returns ------- x : ndarray Sample points w : ndarray Weights mu : float Sum of the weights See Also -------- scipy.integrate.quadrature scipy.integrate.fixed_quad numpy.polynomial.laguerre.laggauss """ return roots_genlaguerre(n, 0.0, mu=mu) def laguerre(n, monic=False): r"""Laguerre polynomial. Defined to be the solution of .. math:: x\frac{d^2}{dx^2}L_n + (1 - x)\frac{d}{dx}L_n + nL_n = 0; :math:`L_n` is a polynomial of degree :math:`n`. Parameters ---------- n : int Degree of the polynomial. monic : bool, optional If `True`, scale the leading coefficient to be 1. Default is `False`. Returns ------- L : orthopoly1d Laguerre Polynomial. Notes ----- The polynomials :math:`L_n` are orthogonal over :math:`[0, \infty)` with weight function :math:`e^{-x}`. 
""" if n < 0: raise ValueError("n must be nonnegative.") if n == 0: n1 = n + 1 else: n1 = n x, w, mu0 = roots_laguerre(n1, mu=True) if n == 0: x, w = [], [] hn = 1.0 kn = (-1)**n / _gam(n + 1) p = orthopoly1d(x, w, hn, kn, lambda x: exp(-x), (0, inf), monic, lambda x: eval_laguerre(n, x)) return p # Hermite 1 H_n(x) def roots_hermite(n, mu=False): r"""Gauss-Hermite (physicst's) quadrature. Computes the sample points and weights for Gauss-Hermite quadrature. The sample points are the roots of the n-th degree Hermite polynomial, :math:`H_n(x)`. These sample points and weights correctly integrate polynomials of degree :math:`2n - 1` or less over the interval :math:`[-\infty, \infty]` with weight function :math:`f(x) = e^{-x^2}`. Parameters ---------- n : int quadrature order mu : bool, optional If True, return the sum of the weights, optional. Returns ------- x : ndarray Sample points w : ndarray Weights mu : float Sum of the weights Notes ----- For small n up to 150 a modified version of the Golub-Welsch algorithm is used. Nodes are computed from the eigenvalue problem and improved by one step of a Newton iteration. The weights are computed from the well-known analytical formula. For n larger than 150 an optimal asymptotic algorithm is applied which computes nodes and weights in a numerically stable manner. The algorithm has linear runtime making computation for very large n (several thousand or more) feasible. See Also -------- scipy.integrate.quadrature scipy.integrate.fixed_quad numpy.polynomial.hermite.hermgauss roots_hermitenorm References ---------- .. [townsend.trogdon.olver-2014] Townsend, A. and Trogdon, T. and Olver, S. (2014) *Fast computation of Gauss quadrature nodes and weights on the whole real line*. :arXiv:`1410.5286`. .. [townsend.trogdon.olver-2015] Townsend, A. and Trogdon, T. and Olver, S. (2015) *Fast computation of Gauss quadrature nodes and weights on the whole real line*. IMA Journal of Numerical Analysis :doi:`10.1093/imanum/drv002`. 
""" m = int(n) if n < 1 or n != m: raise ValueError("n must be a positive integer.") mu0 = np.sqrt(np.pi) if n <= 150: an_func = lambda k: 0.0*k bn_func = lambda k: np.sqrt(k/2.0) f = cephes.eval_hermite df = lambda n, x: 2.0 * n * cephes.eval_hermite(n-1, x) return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, True, mu) else: nodes, weights = _roots_hermite_asy(m) if mu: return nodes, weights, mu0 else: return nodes, weights def _compute_tauk(n, k, maxit=5): """Helper function for Tricomi initial guesses For details, see formula 3.1 in lemma 3.1 in the original paper. Parameters ---------- n : int Quadrature order k : ndarray of type int Index of roots :math:`\tau_k` to compute maxit : int Number of Newton maxit performed, the default value of 5 is sufficient. Returns ------- tauk : ndarray Roots of equation 3.1 See Also -------- initial_nodes_a roots_hermite_asy """ a = n % 2 - 0.5 c = (4.0*floor(n/2.0) - 4.0*k + 3.0)*pi / (4.0*floor(n/2.0) + 2.0*a + 2.0) f = lambda x: x - sin(x) - c df = lambda x: 1.0 - cos(x) xi = 0.5*pi for i in range(maxit): xi = xi - f(xi)/df(xi) return xi def _initial_nodes_a(n, k): r"""Tricomi initial guesses Computes an initial approximation to the square of the `k`-th (positive) root :math:`x_k` of the Hermite polynomial :math:`H_n` of order :math:`n`. The formula is the one from lemma 3.1 in the original paper. The guesses are accurate except in the region near :math:`\sqrt{2n + 1}`. 
Parameters ---------- n : int Quadrature order k : ndarray of type int Index of roots to compute Returns ------- xksq : ndarray Square of the approximate roots See Also -------- initial_nodes roots_hermite_asy """ tauk = _compute_tauk(n, k) sigk = cos(0.5*tauk)**2 a = n % 2 - 0.5 nu = 4.0*floor(n/2.0) + 2.0*a + 2.0 # Initial approximation of Hermite roots (square) xksq = nu*sigk - 1.0/(3.0*nu) * (5.0/(4.0*(1.0-sigk)**2) - 1.0/(1.0-sigk) - 0.25) return xksq def _initial_nodes_b(n, k): r"""Gatteschi initial guesses Computes an initial approximation to the square of the `k`-th (positive) root :math:`x_k` of the Hermite polynomial :math:`H_n` of order :math:`n`. The formula is the one from lemma 3.2 in the original paper. The guesses are accurate in the region just below :math:`\sqrt{2n + 1}`. Parameters ---------- n : int Quadrature order k : ndarray of type int Index of roots to compute Returns ------- xksq : ndarray Square of the approximate root See Also -------- initial_nodes roots_hermite_asy """ a = n % 2 - 0.5 nu = 4.0*floor(n/2.0) + 2.0*a + 2.0 # Airy roots by approximation ak = specfun.airyzo(k.max(), 1)[0][::-1] # Initial approximation of Hermite roots (square) xksq = (nu + 2.0**(2.0/3.0) * ak * nu**(1.0/3.0) + 1.0/5.0 * 2.0**(4.0/3.0) * ak**2 * nu**(-1.0/3.0) + (9.0/140.0 - 12.0/175.0 * ak**3) * nu**(-1.0) + (16.0/1575.0 * ak + 92.0/7875.0 * ak**4) * 2.0**(2.0/3.0) * nu**(-5.0/3.0) - (15152.0/3031875.0 * ak**5 + 1088.0/121275.0 * ak**2) * 2.0**(1.0/3.0) * nu**(-7.0/3.0)) return xksq def _initial_nodes(n): """Initial guesses for the Hermite roots Computes an initial approximation to the non-negative roots :math:`x_k` of the Hermite polynomial :math:`H_n` of order :math:`n`. The Tricomi and Gatteschi initial guesses are used in the region where they are accurate. 
Parameters ---------- n : int Quadrature order Returns ------- xk : ndarray Approximate roots See Also -------- roots_hermite_asy """ # Turnover point # linear polynomial fit to error of 10, 25, 40, ..., 1000 point rules fit = 0.49082003*n - 4.37859653 turnover = around(fit).astype(int) # Compute all approximations ia = arange(1, int(floor(n*0.5)+1)) ib = ia[::-1] xasq = _initial_nodes_a(n, ia[:turnover+1]) xbsq = _initial_nodes_b(n, ib[turnover+1:]) # Combine iv = sqrt(hstack([xasq, xbsq])) # Central node is always zero if n % 2 == 1: iv = hstack([0.0, iv]) return iv def _pbcf(n, theta): r"""Asymptotic series expansion of parabolic cylinder function The implementation is based on sections 3.2 and 3.3 from the original paper. Compared to the published version this code adds one more term to the asymptotic series. The detailed formulas can be found at [parabolic-asymptotics]_. The evaluation is done in a transformed variable :math:`\theta := \arccos(t)` where :math:`t := x / \mu` and :math:`\mu := \sqrt{2n + 1}`. Parameters ---------- n : int Quadrature order theta : ndarray Transformed position variable Returns ------- U : ndarray Value of the parabolic cylinder function :math:`U(a, \theta)`. Ud : ndarray Value of the derivative :math:`U^{\prime}(a, \theta)` of the parabolic cylinder function. See Also -------- roots_hermite_asy References ---------- .. 
[parabolic-asymptotics] http://dlmf.nist.gov/12.10#vii """ st = sin(theta) ct = cos(theta) # http://dlmf.nist.gov/12.10#vii mu = 2.0*n + 1.0 # http://dlmf.nist.gov/12.10#E23 eta = 0.5*theta - 0.5*st*ct # http://dlmf.nist.gov/12.10#E39 zeta = -(3.0*eta/2.0) ** (2.0/3.0) # http://dlmf.nist.gov/12.10#E40 phi = (-zeta / st**2) ** (0.25) # Coefficients # http://dlmf.nist.gov/12.10#E43 a0 = 1.0 a1 = 0.10416666666666666667 a2 = 0.08355034722222222222 a3 = 0.12822657455632716049 a4 = 0.29184902646414046425 a5 = 0.88162726744375765242 b0 = 1.0 b1 = -0.14583333333333333333 b2 = -0.09874131944444444444 b3 = -0.14331205391589506173 b4 = -0.31722720267841354810 b5 = -0.94242914795712024914 # Polynomials # http://dlmf.nist.gov/12.10#E9 # http://dlmf.nist.gov/12.10#E10 ctp = ct ** arange(16).reshape((-1,1)) u0 = 1.0 u1 = (1.0*ctp[3,:] - 6.0*ct) / 24.0 u2 = (-9.0*ctp[4,:] + 249.0*ctp[2,:] + 145.0) / 1152.0 u3 = (-4042.0*ctp[9,:] + 18189.0*ctp[7,:] - 28287.0*ctp[5,:] - 151995.0*ctp[3,:] - 259290.0*ct) / 414720.0 u4 = (72756.0*ctp[10,:] - 321339.0*ctp[8,:] - 154982.0*ctp[6,:] + 50938215.0*ctp[4,:] + 122602962.0*ctp[2,:] + 12773113.0) / 39813120.0 u5 = (82393456.0*ctp[15,:] - 617950920.0*ctp[13,:] + 1994971575.0*ctp[11,:] - 3630137104.0*ctp[9,:] + 4433574213.0*ctp[7,:] - 37370295816.0*ctp[5,:] - 119582875013.0*ctp[3,:] - 34009066266.0*ct) / 6688604160.0 v0 = 1.0 v1 = (1.0*ctp[3,:] + 6.0*ct) / 24.0 v2 = (15.0*ctp[4,:] - 327.0*ctp[2,:] - 143.0) / 1152.0 v3 = (-4042.0*ctp[9,:] + 18189.0*ctp[7,:] - 36387.0*ctp[5,:] + 238425.0*ctp[3,:] + 259290.0*ct) / 414720.0 v4 = (-121260.0*ctp[10,:] + 551733.0*ctp[8,:] - 151958.0*ctp[6,:] - 57484425.0*ctp[4,:] - 132752238.0*ctp[2,:] - 12118727) / 39813120.0 v5 = (82393456.0*ctp[15,:] - 617950920.0*ctp[13,:] + 2025529095.0*ctp[11,:] - 3750839308.0*ctp[9,:] + 3832454253.0*ctp[7,:] + 35213253348.0*ctp[5,:] + 130919230435.0*ctp[3,:] + 34009066266*ct) / 6688604160.0 # Airy Evaluation (Bi and Bip unused) Ai, Aip, Bi, Bip = airy(mu**(4.0/6.0) * zeta) # 
Prefactor for U P = 2.0*sqrt(pi) * mu**(1.0/6.0) * phi # Terms for U # http://dlmf.nist.gov/12.10#E42 phip = phi ** arange(6, 31, 6).reshape((-1,1)) A0 = b0*u0 A1 = (b2*u0 + phip[0,:]*b1*u1 + phip[1,:]*b0*u2) / zeta**3 A2 = (b4*u0 + phip[0,:]*b3*u1 + phip[1,:]*b2*u2 + phip[2,:]*b1*u3 + phip[3,:]*b0*u4) / zeta**6 B0 = -(a1*u0 + phip[0,:]*a0*u1) / zeta**2 B1 = -(a3*u0 + phip[0,:]*a2*u1 + phip[1,:]*a1*u2 + phip[2,:]*a0*u3) / zeta**5 B2 = -(a5*u0 + phip[0,:]*a4*u1 + phip[1,:]*a3*u2 + phip[2,:]*a2*u3 + phip[3,:]*a1*u4 + phip[4,:]*a0*u5) / zeta**8 # U # http://dlmf.nist.gov/12.10#E35 U = P * (Ai * (A0 + A1/mu**2.0 + A2/mu**4.0) + Aip * (B0 + B1/mu**2.0 + B2/mu**4.0) / mu**(8.0/6.0)) # Prefactor for derivative of U Pd = sqrt(2.0*pi) * mu**(2.0/6.0) / phi # Terms for derivative of U # http://dlmf.nist.gov/12.10#E46 C0 = -(b1*v0 + phip[0,:]*b0*v1) / zeta C1 = -(b3*v0 + phip[0,:]*b2*v1 + phip[1,:]*b1*v2 + phip[2,:]*b0*v3) / zeta**4 C2 = -(b5*v0 + phip[0,:]*b4*v1 + phip[1,:]*b3*v2 + phip[2,:]*b2*v3 + phip[3,:]*b1*v4 + phip[4,:]*b0*v5) / zeta**7 D0 = a0*v0 D1 = (a2*v0 + phip[0,:]*a1*v1 + phip[1,:]*a0*v2) / zeta**3 D2 = (a4*v0 + phip[0,:]*a3*v1 + phip[1,:]*a2*v2 + phip[2,:]*a1*v3 + phip[3,:]*a0*v4) / zeta**6 # Derivative of U # http://dlmf.nist.gov/12.10#E36 Ud = Pd * (Ai * (C0 + C1/mu**2.0 + C2/mu**4.0) / mu**(4.0/6.0) + Aip * (D0 + D1/mu**2.0 + D2/mu**4.0)) return U, Ud def _newton(n, x_initial, maxit=5): """Newton iteration for polishing the asymptotic approximation to the zeros of the Hermite polynomials. Parameters ---------- n : int Quadrature order x_initial : ndarray Initial guesses for the roots maxit : int Maximal number of Newton iterations. The default 5 is sufficient, usually only one or two steps are needed. 
Returns ------- nodes : ndarray Quadrature nodes weights : ndarray Quadrature weights See Also -------- roots_hermite_asy """ # Variable transformation mu = sqrt(2.0*n + 1.0) t = x_initial / mu theta = arccos(t) # Newton iteration for i in range(maxit): u, ud = _pbcf(n, theta) dtheta = u / (sqrt(2.0) * mu * sin(theta) * ud) theta = theta + dtheta if max(abs(dtheta)) < 1e-14: break # Undo variable transformation x = mu * cos(theta) # Central node is always zero if n % 2 == 1: x[0] = 0.0 # Compute weights w = exp(-x**2) / (2.0*ud**2) return x, w def _roots_hermite_asy(n): r"""Gauss-Hermite (physicst's) quadrature for large n. Computes the sample points and weights for Gauss-Hermite quadrature. The sample points are the roots of the n-th degree Hermite polynomial, :math:`H_n(x)`. These sample points and weights correctly integrate polynomials of degree :math:`2n - 1` or less over the interval :math:`[-\infty, \infty]` with weight function :math:`f(x) = e^{-x^2}`. This method relies on asymptotic expansions which work best for n > 150. The algorithm has linear runtime making computation for very large n feasible. Parameters ---------- n : int quadrature order Returns ------- nodes : ndarray Quadrature nodes weights : ndarray Quadrature weights See Also -------- roots_hermite References ---------- .. [townsend.trogdon.olver-2014] Townsend, A. and Trogdon, T. and Olver, S. (2014) *Fast computation of Gauss quadrature nodes and weights on the whole real line*. :arXiv:`1410.5286`. .. [townsend.trogdon.olver-2015] Townsend, A. and Trogdon, T. and Olver, S. (2015) *Fast computation of Gauss quadrature nodes and weights on the whole real line*. IMA Journal of Numerical Analysis :doi:`10.1093/imanum/drv002`. 
""" iv = _initial_nodes(n) nodes, weights = _newton(n, iv) # Combine with negative parts if n % 2 == 0: nodes = hstack([-nodes[::-1], nodes]) weights = hstack([weights[::-1], weights]) else: nodes = hstack([-nodes[-1:0:-1], nodes]) weights = hstack([weights[-1:0:-1], weights]) # Scale weights weights *= sqrt(pi) / sum(weights) return nodes, weights def hermite(n, monic=False): r"""Physicist's Hermite polynomial. Defined by .. math:: H_n(x) = (-1)^ne^{x^2}\frac{d^n}{dx^n}e^{-x^2}; :math:`H_n` is a polynomial of degree :math:`n`. Parameters ---------- n : int Degree of the polynomial. monic : bool, optional If `True`, scale the leading coefficient to be 1. Default is `False`. Returns ------- H : orthopoly1d Hermite polynomial. Notes ----- The polynomials :math:`H_n` are orthogonal over :math:`(-\infty, \infty)` with weight function :math:`e^{-x^2}`. """ if n < 0: raise ValueError("n must be nonnegative.") if n == 0: n1 = n + 1 else: n1 = n x, w, mu0 = roots_hermite(n1, mu=True) wfunc = lambda x: exp(-x * x) if n == 0: x, w = [], [] hn = 2**n * _gam(n + 1) * sqrt(pi) kn = 2**n p = orthopoly1d(x, w, hn, kn, wfunc, (-inf, inf), monic, lambda x: eval_hermite(n, x)) return p # Hermite 2 He_n(x) def roots_hermitenorm(n, mu=False): r"""Gauss-Hermite (statistician's) quadrature. Computes the sample points and weights for Gauss-Hermite quadrature. The sample points are the roots of the n-th degree Hermite polynomial, :math:`He_n(x)`. These sample points and weights correctly integrate polynomials of degree :math:`2n - 1` or less over the interval :math:`[-\infty, \infty]` with weight function :math:`f(x) = e^{-x^2/2}`. Parameters ---------- n : int quadrature order mu : bool, optional If True, return the sum of the weights, optional. Returns ------- x : ndarray Sample points w : ndarray Weights mu : float Sum of the weights Notes ----- For small n up to 150 a modified version of the Golub-Welsch algorithm is used. 
Nodes are computed from the eigenvalue problem and improved by one step of a Newton iteration. The weights are computed from the well-known analytical formula. For n larger than 150 an optimal asymptotic algorithm is used which computes nodes and weights in a numerical stable manner. The algorithm has linear runtime making computation for very large n (several thousand or more) feasible. See Also -------- scipy.integrate.quadrature scipy.integrate.fixed_quad numpy.polynomial.hermite_e.hermegauss """ m = int(n) if n < 1 or n != m: raise ValueError("n must be a positive integer.") mu0 = np.sqrt(2.0*np.pi) if n <= 150: an_func = lambda k: 0.0*k bn_func = lambda k: np.sqrt(k) f = cephes.eval_hermitenorm df = lambda n, x: n * cephes.eval_hermitenorm(n-1, x) return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, True, mu) else: nodes, weights = _roots_hermite_asy(m) # Transform nodes *= sqrt(2) weights *= sqrt(2) if mu: return nodes, weights, mu0 else: return nodes, weights def hermitenorm(n, monic=False): r"""Normalized (probabilist's) Hermite polynomial. Defined by .. math:: He_n(x) = (-1)^ne^{x^2/2}\frac{d^n}{dx^n}e^{-x^2/2}; :math:`He_n` is a polynomial of degree :math:`n`. Parameters ---------- n : int Degree of the polynomial. monic : bool, optional If `True`, scale the leading coefficient to be 1. Default is `False`. Returns ------- He : orthopoly1d Hermite polynomial. Notes ----- The polynomials :math:`He_n` are orthogonal over :math:`(-\infty, \infty)` with weight function :math:`e^{-x^2/2}`. """ if n < 0: raise ValueError("n must be nonnegative.") if n == 0: n1 = n + 1 else: n1 = n x, w, mu0 = roots_hermitenorm(n1, mu=True) wfunc = lambda x: exp(-x * x / 2.0) if n == 0: x, w = [], [] hn = sqrt(2 * pi) * _gam(n + 1) kn = 1.0 p = orthopoly1d(x, w, hn, kn, wfunc=wfunc, limits=(-inf, inf), monic=monic, eval_func=lambda x: eval_hermitenorm(n, x)) return p # The remainder of the polynomials can be derived from the ones above. 
# Ultraspherical (Gegenbauer) C^(alpha)_n(x) def roots_gegenbauer(n, alpha, mu=False): r"""Gauss-Gegenbauer quadrature. Computes the sample points and weights for Gauss-Gegenbauer quadrature. The sample points are the roots of the n-th degree Gegenbauer polynomial, :math:`C^{\alpha}_n(x)`. These sample points and weights correctly integrate polynomials of degree :math:`2n - 1` or less over the interval :math:`[-1, 1]` with weight function :math:`f(x) = (1 - x^2)^{\alpha - 1/2}`. Parameters ---------- n : int quadrature order alpha : float alpha must be > -0.5 mu : bool, optional If True, return the sum of the weights, optional. Returns ------- x : ndarray Sample points w : ndarray Weights mu : float Sum of the weights See Also -------- scipy.integrate.quadrature scipy.integrate.fixed_quad """ m = int(n) if n < 1 or n != m: raise ValueError("n must be a positive integer.") if alpha < -0.5: raise ValueError("alpha must be greater than -0.5.") elif alpha == 0.0: # C(n,0,x) == 0 uniformly, however, as alpha->0, C(n,alpha,x)->T(n,x) # strictly, we should just error out here, since the roots are not # really defined, but we used to return something useful, so let's # keep doing so. return roots_chebyt(n, mu) mu0 = np.sqrt(np.pi) * cephes.gamma(alpha + 0.5) / cephes.gamma(alpha + 1) an_func = lambda k: 0.0 * k bn_func = lambda k: np.sqrt(k * (k + 2 * alpha - 1) / (4 * (k + alpha) * (k + alpha - 1))) f = lambda n, x: cephes.eval_gegenbauer(n, alpha, x) df = lambda n, x: (-n*x*cephes.eval_gegenbauer(n, alpha, x) + (n + 2*alpha - 1)*cephes.eval_gegenbauer(n-1, alpha, x))/(1-x**2) return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, True, mu) def gegenbauer(n, alpha, monic=False): r"""Gegenbauer (ultraspherical) polynomial. Defined to be the solution of .. math:: (1 - x^2)\frac{d^2}{dx^2}C_n^{(\alpha)} - (2\alpha + 1)x\frac{d}{dx}C_n^{(\alpha)} + n(n + 2\alpha)C_n^{(\alpha)} = 0 for :math:`\alpha > -1/2`; :math:`C_n^{(\alpha)}` is a polynomial of degree :math:`n`. 
Parameters ---------- n : int Degree of the polynomial. monic : bool, optional If `True`, scale the leading coefficient to be 1. Default is `False`. Returns ------- C : orthopoly1d Gegenbauer polynomial. Notes ----- The polynomials :math:`C_n^{(\alpha)}` are orthogonal over :math:`[-1,1]` with weight function :math:`(1 - x^2)^{(\alpha - 1/2)}`. """ base = jacobi(n, alpha - 0.5, alpha - 0.5, monic=monic) if monic: return base # Abrahmowitz and Stegan 22.5.20 factor = (_gam(2*alpha + n) * _gam(alpha + 0.5) / _gam(2*alpha) / _gam(alpha + 0.5 + n)) base._scale(factor) base.__dict__['_eval_func'] = lambda x: eval_gegenbauer(float(n), alpha, x) return base # Chebyshev of the first kind: T_n(x) = # n! sqrt(pi) / _gam(n+1./2)* P^(-1/2,-1/2)_n(x) # Computed anew. def roots_chebyt(n, mu=False): r"""Gauss-Chebyshev (first kind) quadrature. Computes the sample points and weights for Gauss-Chebyshev quadrature. The sample points are the roots of the n-th degree Chebyshev polynomial of the first kind, :math:`T_n(x)`. These sample points and weights correctly integrate polynomials of degree :math:`2n - 1` or less over the interval :math:`[-1, 1]` with weight function :math:`f(x) = 1/\sqrt{1 - x^2}`. Parameters ---------- n : int quadrature order mu : bool, optional If True, return the sum of the weights, optional. Returns ------- x : ndarray Sample points w : ndarray Weights mu : float Sum of the weights See Also -------- scipy.integrate.quadrature scipy.integrate.fixed_quad numpy.polynomial.chebyshev.chebgauss """ m = int(n) if n < 1 or n != m: raise ValueError('n must be a positive integer.') x = _ufuncs._sinpi(np.arange(-m + 1, m, 2) / (2*m)) w = np.full_like(x, pi/m) if mu: return x, w, pi else: return x, w def chebyt(n, monic=False): r"""Chebyshev polynomial of the first kind. Defined to be the solution of .. math:: (1 - x^2)\frac{d^2}{dx^2}T_n - x\frac{d}{dx}T_n + n^2T_n = 0; :math:`T_n` is a polynomial of degree :math:`n`. 
Parameters ---------- n : int Degree of the polynomial. monic : bool, optional If `True`, scale the leading coefficient to be 1. Default is `False`. Returns ------- T : orthopoly1d Chebyshev polynomial of the first kind. Notes ----- The polynomials :math:`T_n` are orthogonal over :math:`[-1, 1]` with weight function :math:`(1 - x^2)^{-1/2}`. See Also -------- chebyu : Chebyshev polynomial of the second kind. """ if n < 0: raise ValueError("n must be nonnegative.") wfunc = lambda x: 1.0 / sqrt(1 - x * x) if n == 0: return orthopoly1d([], [], pi, 1.0, wfunc, (-1, 1), monic, lambda x: eval_chebyt(n, x)) n1 = n x, w, mu = roots_chebyt(n1, mu=True) hn = pi / 2 kn = 2**(n - 1) p = orthopoly1d(x, w, hn, kn, wfunc, (-1, 1), monic, lambda x: eval_chebyt(n, x)) return p # Chebyshev of the second kind # U_n(x) = (n+1)! sqrt(pi) / (2*_gam(n+3./2)) * P^(1/2,1/2)_n(x) def roots_chebyu(n, mu=False): r"""Gauss-Chebyshev (second kind) quadrature. Computes the sample points and weights for Gauss-Chebyshev quadrature. The sample points are the roots of the n-th degree Chebyshev polynomial of the second kind, :math:`U_n(x)`. These sample points and weights correctly integrate polynomials of degree :math:`2n - 1` or less over the interval :math:`[-1, 1]` with weight function :math:`f(x) = \sqrt{1 - x^2}`. Parameters ---------- n : int quadrature order mu : bool, optional If True, return the sum of the weights, optional. Returns ------- x : ndarray Sample points w : ndarray Weights mu : float Sum of the weights See Also -------- scipy.integrate.quadrature scipy.integrate.fixed_quad """ m = int(n) if n < 1 or n != m: raise ValueError('n must be a positive integer.') t = np.arange(m, 0, -1) * pi / (m + 1) x = np.cos(t) w = pi * np.sin(t)**2 / (m + 1) if mu: return x, w, pi / 2 else: return x, w def chebyu(n, monic=False): r"""Chebyshev polynomial of the second kind. Defined to be the solution of .. 
math:: (1 - x^2)\frac{d^2}{dx^2}U_n - 3x\frac{d}{dx}U_n + n(n + 2)U_n = 0; :math:`U_n` is a polynomial of degree :math:`n`. Parameters ---------- n : int Degree of the polynomial. monic : bool, optional If `True`, scale the leading coefficient to be 1. Default is `False`. Returns ------- U : orthopoly1d Chebyshev polynomial of the second kind. Notes ----- The polynomials :math:`U_n` are orthogonal over :math:`[-1, 1]` with weight function :math:`(1 - x^2)^{1/2}`. See Also -------- chebyt : Chebyshev polynomial of the first kind. """ base = jacobi(n, 0.5, 0.5, monic=monic) if monic: return base factor = sqrt(pi) / 2.0 * _gam(n + 2) / _gam(n + 1.5) base._scale(factor) return base # Chebyshev of the first kind C_n(x) def roots_chebyc(n, mu=False): r"""Gauss-Chebyshev (first kind) quadrature. Computes the sample points and weights for Gauss-Chebyshev quadrature. The sample points are the roots of the n-th degree Chebyshev polynomial of the first kind, :math:`C_n(x)`. These sample points and weights correctly integrate polynomials of degree :math:`2n - 1` or less over the interval :math:`[-2, 2]` with weight function :math:`f(x) = 1/\sqrt{1 - (x/2)^2}`. Parameters ---------- n : int quadrature order mu : bool, optional If True, return the sum of the weights, optional. Returns ------- x : ndarray Sample points w : ndarray Weights mu : float Sum of the weights See Also -------- scipy.integrate.quadrature scipy.integrate.fixed_quad """ x, w, m = roots_chebyt(n, True) x *= 2 w *= 2 m *= 2 if mu: return x, w, m else: return x, w def chebyc(n, monic=False): r"""Chebyshev polynomial of the first kind on :math:`[-2, 2]`. Defined as :math:`C_n(x) = 2T_n(x/2)`, where :math:`T_n` is the nth Chebychev polynomial of the first kind. Parameters ---------- n : int Degree of the polynomial. monic : bool, optional If `True`, scale the leading coefficient to be 1. Default is `False`. Returns ------- C : orthopoly1d Chebyshev polynomial of the first kind on :math:`[-2, 2]`. 
Notes ----- The polynomials :math:`C_n(x)` are orthogonal over :math:`[-2, 2]` with weight function :math:`1/\sqrt{1 - (x/2)^2}`. See Also -------- chebyt : Chebyshev polynomial of the first kind. References ---------- .. [1] Abramowitz and Stegun, "Handbook of Mathematical Functions" Section 22. National Bureau of Standards, 1972. """ if n < 0: raise ValueError("n must be nonnegative.") if n == 0: n1 = n + 1 else: n1 = n x, w, mu0 = roots_chebyc(n1, mu=True) if n == 0: x, w = [], [] hn = 4 * pi * ((n == 0) + 1) kn = 1.0 p = orthopoly1d(x, w, hn, kn, wfunc=lambda x: 1.0 / sqrt(1 - x * x / 4.0), limits=(-2, 2), monic=monic) if not monic: p._scale(2.0 / p(2)) p.__dict__['_eval_func'] = lambda x: eval_chebyc(n, x) return p # Chebyshev of the second kind S_n(x) def roots_chebys(n, mu=False): r"""Gauss-Chebyshev (second kind) quadrature. Computes the sample points and weights for Gauss-Chebyshev quadrature. The sample points are the roots of the n-th degree Chebyshev polynomial of the second kind, :math:`S_n(x)`. These sample points and weights correctly integrate polynomials of degree :math:`2n - 1` or less over the interval :math:`[-2, 2]` with weight function :math:`f(x) = \sqrt{1 - (x/2)^2}`. Parameters ---------- n : int quadrature order mu : bool, optional If True, return the sum of the weights, optional. Returns ------- x : ndarray Sample points w : ndarray Weights mu : float Sum of the weights See Also -------- scipy.integrate.quadrature scipy.integrate.fixed_quad """ x, w, m = roots_chebyu(n, True) x *= 2 w *= 2 m *= 2 if mu: return x, w, m else: return x, w def chebys(n, monic=False): r"""Chebyshev polynomial of the second kind on :math:`[-2, 2]`. Defined as :math:`S_n(x) = U_n(x/2)` where :math:`U_n` is the nth Chebychev polynomial of the second kind. Parameters ---------- n : int Degree of the polynomial. monic : bool, optional If `True`, scale the leading coefficient to be 1. Default is `False`. 
Returns ------- S : orthopoly1d Chebyshev polynomial of the second kind on :math:`[-2, 2]`. Notes ----- The polynomials :math:`S_n(x)` are orthogonal over :math:`[-2, 2]` with weight function :math:`\sqrt{1 - (x/2)}^2`. See Also -------- chebyu : Chebyshev polynomial of the second kind References ---------- .. [1] Abramowitz and Stegun, "Handbook of Mathematical Functions" Section 22. National Bureau of Standards, 1972. """ if n < 0: raise ValueError("n must be nonnegative.") if n == 0: n1 = n + 1 else: n1 = n x, w, mu0 = roots_chebys(n1, mu=True) if n == 0: x, w = [], [] hn = pi kn = 1.0 p = orthopoly1d(x, w, hn, kn, wfunc=lambda x: sqrt(1 - x * x / 4.0), limits=(-2, 2), monic=monic) if not monic: factor = (n + 1.0) / p(2) p._scale(factor) p.__dict__['_eval_func'] = lambda x: eval_chebys(n, x) return p # Shifted Chebyshev of the first kind T^*_n(x) def roots_sh_chebyt(n, mu=False): r"""Gauss-Chebyshev (first kind, shifted) quadrature. Computes the sample points and weights for Gauss-Chebyshev quadrature. The sample points are the roots of the n-th degree shifted Chebyshev polynomial of the first kind, :math:`T_n(x)`. These sample points and weights correctly integrate polynomials of degree :math:`2n - 1` or less over the interval :math:`[0, 1]` with weight function :math:`f(x) = 1/\sqrt{x - x^2}`. Parameters ---------- n : int quadrature order mu : bool, optional If True, return the sum of the weights, optional. Returns ------- x : ndarray Sample points w : ndarray Weights mu : float Sum of the weights See Also -------- scipy.integrate.quadrature scipy.integrate.fixed_quad """ xw = roots_chebyt(n, mu) return ((xw[0] + 1) / 2,) + xw[1:] def sh_chebyt(n, monic=False): r"""Shifted Chebyshev polynomial of the first kind. Defined as :math:`T^*_n(x) = T_n(2x - 1)` for :math:`T_n` the nth Chebyshev polynomial of the first kind. Parameters ---------- n : int Degree of the polynomial. monic : bool, optional If `True`, scale the leading coefficient to be 1. 
Default is `False`. Returns ------- T : orthopoly1d Shifted Chebyshev polynomial of the first kind. Notes ----- The polynomials :math:`T^*_n` are orthogonal over :math:`[0, 1]` with weight function :math:`(x - x^2)^{-1/2}`. """ base = sh_jacobi(n, 0.0, 0.5, monic=monic) if monic: return base if n > 0: factor = 4**n / 2.0 else: factor = 1.0 base._scale(factor) return base # Shifted Chebyshev of the second kind U^*_n(x) def roots_sh_chebyu(n, mu=False): r"""Gauss-Chebyshev (second kind, shifted) quadrature. Computes the sample points and weights for Gauss-Chebyshev quadrature. The sample points are the roots of the n-th degree shifted Chebyshev polynomial of the second kind, :math:`U_n(x)`. These sample points and weights correctly integrate polynomials of degree :math:`2n - 1` or less over the interval :math:`[0, 1]` with weight function :math:`f(x) = \sqrt{x - x^2}`. Parameters ---------- n : int quadrature order mu : bool, optional If True, return the sum of the weights, optional. Returns ------- x : ndarray Sample points w : ndarray Weights mu : float Sum of the weights See Also -------- scipy.integrate.quadrature scipy.integrate.fixed_quad """ x, w, m = roots_chebyu(n, True) x = (x + 1) / 2 m_us = cephes.beta(1.5, 1.5) w *= m_us / m if mu: return x, w, m_us else: return x, w def sh_chebyu(n, monic=False): r"""Shifted Chebyshev polynomial of the second kind. Defined as :math:`U^*_n(x) = U_n(2x - 1)` for :math:`U_n` the nth Chebyshev polynomial of the second kind. Parameters ---------- n : int Degree of the polynomial. monic : bool, optional If `True`, scale the leading coefficient to be 1. Default is `False`. Returns ------- U : orthopoly1d Shifted Chebyshev polynomial of the second kind. Notes ----- The polynomials :math:`U^*_n` are orthogonal over :math:`[0, 1]` with weight function :math:`(x - x^2)^{1/2}`. 
""" base = sh_jacobi(n, 2.0, 1.5, monic=monic) if monic: return base factor = 4**n base._scale(factor) return base # Legendre def roots_legendre(n, mu=False): r"""Gauss-Legendre quadrature. Computes the sample points and weights for Gauss-Legendre quadrature. The sample points are the roots of the n-th degree Legendre polynomial :math:`P_n(x)`. These sample points and weights correctly integrate polynomials of degree :math:`2n - 1` or less over the interval :math:`[-1, 1]` with weight function :math:`f(x) = 1.0`. Parameters ---------- n : int quadrature order mu : bool, optional If True, return the sum of the weights, optional. Returns ------- x : ndarray Sample points w : ndarray Weights mu : float Sum of the weights See Also -------- scipy.integrate.quadrature scipy.integrate.fixed_quad numpy.polynomial.legendre.leggauss """ m = int(n) if n < 1 or n != m: raise ValueError("n must be a positive integer.") mu0 = 2.0 an_func = lambda k: 0.0 * k bn_func = lambda k: k * np.sqrt(1.0 / (4 * k * k - 1)) f = cephes.eval_legendre df = lambda n, x: (-n*x*cephes.eval_legendre(n, x) + n*cephes.eval_legendre(n-1, x))/(1-x**2) return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, True, mu) def legendre(n, monic=False): r"""Legendre polynomial. Defined to be the solution of .. math:: \frac{d}{dx}\left[(1 - x^2)\frac{d}{dx}P_n(x)\right] + n(n + 1)P_n(x) = 0; :math:`P_n(x)` is a polynomial of degree :math:`n`. Parameters ---------- n : int Degree of the polynomial. monic : bool, optional If `True`, scale the leading coefficient to be 1. Default is `False`. Returns ------- P : orthopoly1d Legendre polynomial. Notes ----- The polynomials :math:`P_n` are orthogonal over :math:`[-1, 1]` with weight function 1. Examples -------- Generate the 3rd-order Legendre polynomial 1/2*(5x^3 + 0x^2 - 3x + 0): >>> from scipy.special import legendre >>> legendre(3) poly1d([ 2.5, 0. , -1.5, 0. 
]) """ if n < 0: raise ValueError("n must be nonnegative.") if n == 0: n1 = n + 1 else: n1 = n x, w, mu0 = roots_legendre(n1, mu=True) if n == 0: x, w = [], [] hn = 2.0 / (2 * n + 1) kn = _gam(2 * n + 1) / _gam(n + 1)**2 / 2.0**n p = orthopoly1d(x, w, hn, kn, wfunc=lambda x: 1.0, limits=(-1, 1), monic=monic, eval_func=lambda x: eval_legendre(n, x)) return p # Shifted Legendre P^*_n(x) def roots_sh_legendre(n, mu=False): r"""Gauss-Legendre (shifted) quadrature. Computes the sample points and weights for Gauss-Legendre quadrature. The sample points are the roots of the n-th degree shifted Legendre polynomial :math:`P^*_n(x)`. These sample points and weights correctly integrate polynomials of degree :math:`2n - 1` or less over the interval :math:`[0, 1]` with weight function :math:`f(x) = 1.0`. Parameters ---------- n : int quadrature order mu : bool, optional If True, return the sum of the weights, optional. Returns ------- x : ndarray Sample points w : ndarray Weights mu : float Sum of the weights See Also -------- scipy.integrate.quadrature scipy.integrate.fixed_quad """ x, w = roots_legendre(n) x = (x + 1) / 2 w /= 2 if mu: return x, w, 1.0 else: return x, w def sh_legendre(n, monic=False): r"""Shifted Legendre polynomial. Defined as :math:`P^*_n(x) = P_n(2x - 1)` for :math:`P_n` the nth Legendre polynomial. Parameters ---------- n : int Degree of the polynomial. monic : bool, optional If `True`, scale the leading coefficient to be 1. Default is `False`. Returns ------- P : orthopoly1d Shifted Legendre polynomial. Notes ----- The polynomials :math:`P^*_n` are orthogonal over :math:`[0, 1]` with weight function 1. 
""" if n < 0: raise ValueError("n must be nonnegative.") wfunc = lambda x: 0.0 * x + 1.0 if n == 0: return orthopoly1d([], [], 1.0, 1.0, wfunc, (0, 1), monic, lambda x: eval_sh_legendre(n, x)) x, w, mu0 = roots_sh_legendre(n, mu=True) hn = 1.0 / (2 * n + 1.0) kn = _gam(2 * n + 1) / _gam(n + 1)**2 p = orthopoly1d(x, w, hn, kn, wfunc, limits=(0, 1), monic=monic, eval_func=lambda x: eval_sh_legendre(n, x)) return p # ----------------------------------------------------------------------------- # Code for backwards compatibility # ----------------------------------------------------------------------------- # Import functions in case someone is still calling the orthogonal # module directly. (They shouldn't be; it's not in the public API). poch = cephes.poch from ._ufuncs import (binom, eval_jacobi, eval_sh_jacobi, eval_gegenbauer, eval_chebyt, eval_chebyu, eval_chebys, eval_chebyc, eval_sh_chebyt, eval_sh_chebyu, eval_legendre, eval_sh_legendre, eval_genlaguerre, eval_laguerre, eval_hermite, eval_hermitenorm) # Make the old root function names an alias for the new ones _modattrs = globals() for newfun, oldfun in _rootfuns_map.items(): _modattrs[oldfun] = _modattrs[newfun] __all__.append(oldfun)
58,868
27.275216
139
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/sf_error.py
"""Warnings and Exceptions that can be raised by special functions.""" import warnings class SpecialFunctionWarning(Warning): """Warning that can be emitted by special functions.""" pass warnings.simplefilter("always", category=SpecialFunctionWarning) class SpecialFunctionError(Exception): """Exception that can be raised by special functions.""" pass
375
22.5
70
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/_spherical_bessel.py
from __future__ import division, print_function, absolute_import from ._ufuncs import (_spherical_jn, _spherical_yn, _spherical_in, _spherical_kn, _spherical_jn_d, _spherical_yn_d, _spherical_in_d, _spherical_kn_d) def spherical_jn(n, z, derivative=False): r"""Spherical Bessel function of the first kind or its derivative. Defined as [1]_, .. math:: j_n(z) = \sqrt{\frac{\pi}{2z}} J_{n + 1/2}(z), where :math:`J_n` is the Bessel function of the first kind. Parameters ---------- n : int, array_like Order of the Bessel function (n >= 0). z : complex or float, array_like Argument of the Bessel function. derivative : bool, optional If True, the value of the derivative (rather than the function itself) is returned. Returns ------- jn : ndarray Notes ----- For real arguments greater than the order, the function is computed using the ascending recurrence [2]_. For small real or complex arguments, the definitional relation to the cylindrical Bessel function of the first kind is used. The derivative is computed using the relations [3]_, .. math:: j_n'(z) = j_{n-1}(z) - \frac{n + 1}{z} j_n(z). j_0'(z) = -j_1(z) .. versionadded:: 0.18.0 References ---------- .. [1] http://dlmf.nist.gov/10.47.E3 .. [2] http://dlmf.nist.gov/10.51.E1 .. [3] http://dlmf.nist.gov/10.51.E2 """ if derivative: return _spherical_jn_d(n, z) else: return _spherical_jn(n, z) def spherical_yn(n, z, derivative=False): r"""Spherical Bessel function of the second kind or its derivative. Defined as [1]_, .. math:: y_n(z) = \sqrt{\frac{\pi}{2z}} Y_{n + 1/2}(z), where :math:`Y_n` is the Bessel function of the second kind. Parameters ---------- n : int, array_like Order of the Bessel function (n >= 0). z : complex or float, array_like Argument of the Bessel function. derivative : bool, optional If True, the value of the derivative (rather than the function itself) is returned. Returns ------- yn : ndarray Notes ----- For real arguments, the function is computed using the ascending recurrence [2]_. 
For complex arguments, the definitional relation to the cylindrical Bessel function of the second kind is used. The derivative is computed using the relations [3]_, .. math:: y_n' = y_{n-1} - \frac{n + 1}{z} y_n. y_0' = -y_1 .. versionadded:: 0.18.0 References ---------- .. [1] http://dlmf.nist.gov/10.47.E4 .. [2] http://dlmf.nist.gov/10.51.E1 .. [3] http://dlmf.nist.gov/10.51.E2 """ if derivative: return _spherical_yn_d(n, z) else: return _spherical_yn(n, z) def spherical_in(n, z, derivative=False): r"""Modified spherical Bessel function of the first kind or its derivative. Defined as [1]_, .. math:: i_n(z) = \sqrt{\frac{\pi}{2z}} I_{n + 1/2}(z), where :math:`I_n` is the modified Bessel function of the first kind. Parameters ---------- n : int, array_like Order of the Bessel function (n >= 0). z : complex or float, array_like Argument of the Bessel function. derivative : bool, optional If True, the value of the derivative (rather than the function itself) is returned. Returns ------- in : ndarray Notes ----- The function is computed using its definitional relation to the modified cylindrical Bessel function of the first kind. The derivative is computed using the relations [2]_, .. math:: i_n' = i_{n-1} - \frac{n + 1}{z} i_n. i_1' = i_0 .. versionadded:: 0.18.0 References ---------- .. [1] http://dlmf.nist.gov/10.47.E7 .. [2] http://dlmf.nist.gov/10.51.E5 """ if derivative: return _spherical_in_d(n, z) else: return _spherical_in(n, z) def spherical_kn(n, z, derivative=False): r"""Modified spherical Bessel function of the second kind or its derivative. Defined as [1]_, .. math:: k_n(z) = \sqrt{\frac{\pi}{2z}} K_{n + 1/2}(z), where :math:`K_n` is the modified Bessel function of the second kind. Parameters ---------- n : int, array_like Order of the Bessel function (n >= 0). z : complex or float, array_like Argument of the Bessel function. derivative : bool, optional If True, the value of the derivative (rather than the function itself) is returned. 
Returns ------- kn : ndarray Notes ----- The function is computed using its definitional relation to the modified cylindrical Bessel function of the second kind. The derivative is computed using the relations [2]_, .. math:: k_n' = -k_{n-1} - \frac{n + 1}{z} k_n. k_0' = -k_1 .. versionadded:: 0.18.0 References ---------- .. [1] http://dlmf.nist.gov/10.47.E9 .. [2] http://dlmf.nist.gov/10.51.E5 """ if derivative: return _spherical_kn_d(n, z) else: return _spherical_kn(n, z)
5,235
24.417476
80
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/__init__.py
""" ======================================== Special functions (:mod:`scipy.special`) ======================================== .. module:: scipy.special Nearly all of the functions below are universal functions and follow broadcasting and automatic array-looping rules. Exceptions are noted. .. seealso:: `scipy.special.cython_special` -- Typed Cython versions of special functions Error handling ============== Errors are handled by returning NaNs or other appropriate values. Some of the special function routines can emit warnings or raise exceptions when an error occurs. By default this is disabled; to query and control the current error handling state the following functions are provided. .. autosummary:: :toctree: generated/ geterr -- Get the current way of handling special-function errors. seterr -- Set how special-function errors are handled. errstate -- Context manager for special-function error handling. SpecialFunctionWarning -- Warning that can be emitted by special functions. SpecialFunctionError -- Exception that can be raised by special functions. Available functions =================== Airy functions -------------- .. autosummary:: :toctree: generated/ airy -- Airy functions and their derivatives. airye -- Exponentially scaled Airy functions and their derivatives. ai_zeros -- [+]Compute `nt` zeros and values of the Airy function Ai and its derivative. bi_zeros -- [+]Compute `nt` zeros and values of the Airy function Bi and its derivative. itairy -- Integrals of Airy functions Elliptic Functions and Integrals -------------------------------- .. autosummary:: :toctree: generated/ ellipj -- Jacobian elliptic functions ellipk -- Complete elliptic integral of the first kind. ellipkm1 -- Complete elliptic integral of the first kind around `m` = 1 ellipkinc -- Incomplete elliptic integral of the first kind ellipe -- Complete elliptic integral of the second kind ellipeinc -- Incomplete elliptic integral of the second kind Bessel Functions ---------------- .. 
autosummary:: :toctree: generated/ jv -- Bessel function of the first kind of real order and complex argument. jve -- Exponentially scaled Bessel function of order `v`. yn -- Bessel function of the second kind of integer order and real argument. yv -- Bessel function of the second kind of real order and complex argument. yve -- Exponentially scaled Bessel function of the second kind of real order. kn -- Modified Bessel function of the second kind of integer order `n` kv -- Modified Bessel function of the second kind of real order `v` kve -- Exponentially scaled modified Bessel function of the second kind. iv -- Modified Bessel function of the first kind of real order. ive -- Exponentially scaled modified Bessel function of the first kind hankel1 -- Hankel function of the first kind hankel1e -- Exponentially scaled Hankel function of the first kind hankel2 -- Hankel function of the second kind hankel2e -- Exponentially scaled Hankel function of the second kind The following is not an universal function: .. autosummary:: :toctree: generated/ lmbda -- [+]Jahnke-Emden Lambda function, Lambdav(x). Zeros of Bessel Functions ^^^^^^^^^^^^^^^^^^^^^^^^^ These are not universal functions: .. autosummary:: :toctree: generated/ jnjnp_zeros -- [+]Compute zeros of integer-order Bessel functions Jn and Jn'. jnyn_zeros -- [+]Compute nt zeros of Bessel functions Jn(x), Jn'(x), Yn(x), and Yn'(x). jn_zeros -- [+]Compute zeros of integer-order Bessel function Jn(x). jnp_zeros -- [+]Compute zeros of integer-order Bessel function derivative Jn'(x). yn_zeros -- [+]Compute zeros of integer-order Bessel function Yn(x). ynp_zeros -- [+]Compute zeros of integer-order Bessel function derivative Yn'(x). y0_zeros -- [+]Compute nt zeros of Bessel function Y0(z), and derivative at each zero. y1_zeros -- [+]Compute nt zeros of Bessel function Y1(z), and derivative at each zero. y1p_zeros -- [+]Compute nt zeros of Bessel derivative Y1'(z), and value at each zero. 
Faster versions of common Bessel Functions ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. autosummary:: :toctree: generated/ j0 -- Bessel function of the first kind of order 0. j1 -- Bessel function of the first kind of order 1. y0 -- Bessel function of the second kind of order 0. y1 -- Bessel function of the second kind of order 1. i0 -- Modified Bessel function of order 0. i0e -- Exponentially scaled modified Bessel function of order 0. i1 -- Modified Bessel function of order 1. i1e -- Exponentially scaled modified Bessel function of order 1. k0 -- Modified Bessel function of the second kind of order 0, :math:`K_0`. k0e -- Exponentially scaled modified Bessel function K of order 0 k1 -- Modified Bessel function of the second kind of order 1, :math:`K_1(x)`. k1e -- Exponentially scaled modified Bessel function K of order 1 Integrals of Bessel Functions ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. autosummary:: :toctree: generated/ itj0y0 -- Integrals of Bessel functions of order 0 it2j0y0 -- Integrals related to Bessel functions of order 0 iti0k0 -- Integrals of modified Bessel functions of order 0 it2i0k0 -- Integrals related to modified Bessel functions of order 0 besselpoly -- [+]Weighted integral of a Bessel function. Derivatives of Bessel Functions ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. autosummary:: :toctree: generated/ jvp -- Compute nth derivative of Bessel function Jv(z) with respect to `z`. yvp -- Compute nth derivative of Bessel function Yv(z) with respect to `z`. kvp -- Compute nth derivative of real-order modified Bessel function Kv(z) ivp -- Compute nth derivative of modified Bessel function Iv(z) with respect to `z`. h1vp -- Compute nth derivative of Hankel function H1v(z) with respect to `z`. h2vp -- Compute nth derivative of Hankel function H2v(z) with respect to `z`. Spherical Bessel Functions ^^^^^^^^^^^^^^^^^^^^^^^^^^ .. autosummary:: :toctree: generated/ spherical_jn -- Spherical Bessel function of the first kind or its derivative. 
spherical_yn -- Spherical Bessel function of the second kind or its derivative. spherical_in -- Modified spherical Bessel function of the first kind or its derivative. spherical_kn -- Modified spherical Bessel function of the second kind or its derivative. Riccati-Bessel Functions ^^^^^^^^^^^^^^^^^^^^^^^^ These are not universal functions: .. autosummary:: :toctree: generated/ riccati_jn -- [+]Compute Ricatti-Bessel function of the first kind and its derivative. riccati_yn -- [+]Compute Ricatti-Bessel function of the second kind and its derivative. Struve Functions ---------------- .. autosummary:: :toctree: generated/ struve -- Struve function. modstruve -- Modified Struve function. itstruve0 -- Integral of the Struve function of order 0. it2struve0 -- Integral related to the Struve function of order 0. itmodstruve0 -- Integral of the modified Struve function of order 0. Raw Statistical Functions ------------------------- .. seealso:: :mod:`scipy.stats`: Friendly versions of these functions. .. autosummary:: :toctree: generated/ bdtr -- Binomial distribution cumulative distribution function. bdtrc -- Binomial distribution survival function. bdtri -- Inverse function to `bdtr` with respect to `p`. bdtrik -- Inverse function to `bdtr` with respect to `k`. bdtrin -- Inverse function to `bdtr` with respect to `n`. btdtr -- Cumulative density function of the beta distribution. btdtri -- The `p`-th quantile of the beta distribution. btdtria -- Inverse of `btdtr` with respect to `a`. btdtrib -- btdtria(a, p, x) fdtr -- F cumulative distribution function. fdtrc -- F survival function. fdtri -- The `p`-th quantile of the F-distribution. fdtridfd -- Inverse to `fdtr` vs dfd gdtr -- Gamma distribution cumulative density function. gdtrc -- Gamma distribution survival function. gdtria -- Inverse of `gdtr` vs a. gdtrib -- Inverse of `gdtr` vs b. gdtrix -- Inverse of `gdtr` vs x. nbdtr -- Negative binomial cumulative distribution function. 
nbdtrc -- Negative binomial survival function. nbdtri -- Inverse of `nbdtr` vs `p`. nbdtrik -- Inverse of `nbdtr` vs `k`. nbdtrin -- Inverse of `nbdtr` vs `n`. ncfdtr -- Cumulative distribution function of the non-central F distribution. ncfdtridfd -- Calculate degrees of freedom (denominator) for the noncentral F-distribution. ncfdtridfn -- Calculate degrees of freedom (numerator) for the noncentral F-distribution. ncfdtri -- Inverse cumulative distribution function of the non-central F distribution. ncfdtrinc -- Calculate non-centrality parameter for non-central F distribution. nctdtr -- Cumulative distribution function of the non-central `t` distribution. nctdtridf -- Calculate degrees of freedom for non-central t distribution. nctdtrit -- Inverse cumulative distribution function of the non-central t distribution. nctdtrinc -- Calculate non-centrality parameter for non-central t distribution. nrdtrimn -- Calculate mean of normal distribution given other params. nrdtrisd -- Calculate standard deviation of normal distribution given other params. pdtr -- Poisson cumulative distribution function pdtrc -- Poisson survival function pdtri -- Inverse to `pdtr` vs m pdtrik -- Inverse to `pdtr` vs k stdtr -- Student t distribution cumulative density function stdtridf -- Inverse of `stdtr` vs df stdtrit -- Inverse of `stdtr` vs `t` chdtr -- Chi square cumulative distribution function chdtrc -- Chi square survival function chdtri -- Inverse to `chdtrc` chdtriv -- Inverse to `chdtr` vs `v` ndtr -- Gaussian cumulative distribution function. log_ndtr -- Logarithm of Gaussian cumulative distribution function. 
ndtri -- Inverse of `ndtr` vs x chndtr -- Non-central chi square cumulative distribution function chndtridf -- Inverse to `chndtr` vs `df` chndtrinc -- Inverse to `chndtr` vs `nc` chndtrix -- Inverse to `chndtr` vs `x` smirnov -- Kolmogorov-Smirnov complementary cumulative distribution function smirnovi -- Inverse to `smirnov` kolmogorov -- Complementary cumulative distribution function of Kolmogorov distribution kolmogi -- Inverse function to kolmogorov tklmbda -- Tukey-Lambda cumulative distribution function logit -- Logit ufunc for ndarrays. expit -- Expit ufunc for ndarrays. boxcox -- Compute the Box-Cox transformation. boxcox1p -- Compute the Box-Cox transformation of 1 + `x`. inv_boxcox -- Compute the inverse of the Box-Cox transformation. inv_boxcox1p -- Compute the inverse of the Box-Cox transformation. owens_t -- Owen's T Function. Information Theory Functions ---------------------------- .. autosummary:: :toctree: generated/ entr -- Elementwise function for computing entropy. rel_entr -- Elementwise function for computing relative entropy. kl_div -- Elementwise function for computing Kullback-Leibler divergence. huber -- Huber loss function. pseudo_huber -- Pseudo-Huber loss function. Gamma and Related Functions --------------------------- .. autosummary:: :toctree: generated/ gamma -- Gamma function. gammaln -- Logarithm of the absolute value of the Gamma function for real inputs. loggamma -- Principal branch of the logarithm of the Gamma function. gammasgn -- Sign of the gamma function. gammainc -- Regularized lower incomplete gamma function. gammaincinv -- Inverse to `gammainc` gammaincc -- Regularized upper incomplete gamma function. gammainccinv -- Inverse to `gammaincc` beta -- Beta function. betaln -- Natural logarithm of absolute value of beta function. betainc -- Incomplete beta integral. betaincinv -- Inverse function to beta integral. psi -- The digamma function. rgamma -- Gamma function inverted polygamma -- Polygamma function n. 
multigammaln -- Returns the log of multivariate gamma, also sometimes called the generalized gamma. digamma -- psi(x[, out]) poch -- Rising factorial (z)_m Error Function and Fresnel Integrals ------------------------------------ .. autosummary:: :toctree: generated/ erf -- Returns the error function of complex argument. erfc -- Complementary error function, ``1 - erf(x)``. erfcx -- Scaled complementary error function, ``exp(x**2) * erfc(x)``. erfi -- Imaginary error function, ``-i erf(i z)``. erfinv -- Inverse function for erf. erfcinv -- Inverse function for erfc. wofz -- Faddeeva function dawsn -- Dawson's integral. fresnel -- Fresnel sin and cos integrals fresnel_zeros -- Compute nt complex zeros of sine and cosine Fresnel integrals S(z) and C(z). modfresnelp -- Modified Fresnel positive integrals modfresnelm -- Modified Fresnel negative integrals These are not universal functions: .. autosummary:: :toctree: generated/ erf_zeros -- [+]Compute nt complex zeros of error function erf(z). fresnelc_zeros -- [+]Compute nt complex zeros of cosine Fresnel integral C(z). fresnels_zeros -- [+]Compute nt complex zeros of sine Fresnel integral S(z). Legendre Functions ------------------ .. autosummary:: :toctree: generated/ lpmv -- Associated Legendre function of integer order and real degree. sph_harm -- Compute spherical harmonics. These are not universal functions: .. autosummary:: :toctree: generated/ clpmn -- [+]Associated Legendre function of the first kind for complex arguments. lpn -- [+]Legendre function of the first kind. lqn -- [+]Legendre function of the second kind. lpmn -- [+]Sequence of associated Legendre functions of the first kind. lqmn -- [+]Sequence of associated Legendre functions of the second kind. Ellipsoidal Harmonics --------------------- .. 
autosummary:: :toctree: generated/ ellip_harm -- Ellipsoidal harmonic functions E^p_n(l) ellip_harm_2 -- Ellipsoidal harmonic functions F^p_n(l) ellip_normal -- Ellipsoidal harmonic normalization constants gamma^p_n Orthogonal polynomials ---------------------- The following functions evaluate values of orthogonal polynomials: .. autosummary:: :toctree: generated/ assoc_laguerre -- Compute the generalized (associated) Laguerre polynomial of degree n and order k. eval_legendre -- Evaluate Legendre polynomial at a point. eval_chebyt -- Evaluate Chebyshev polynomial of the first kind at a point. eval_chebyu -- Evaluate Chebyshev polynomial of the second kind at a point. eval_chebyc -- Evaluate Chebyshev polynomial of the first kind on [-2, 2] at a point. eval_chebys -- Evaluate Chebyshev polynomial of the second kind on [-2, 2] at a point. eval_jacobi -- Evaluate Jacobi polynomial at a point. eval_laguerre -- Evaluate Laguerre polynomial at a point. eval_genlaguerre -- Evaluate generalized Laguerre polynomial at a point. eval_hermite -- Evaluate physicist's Hermite polynomial at a point. eval_hermitenorm -- Evaluate probabilist's (normalized) Hermite polynomial at a point. eval_gegenbauer -- Evaluate Gegenbauer polynomial at a point. eval_sh_legendre -- Evaluate shifted Legendre polynomial at a point. eval_sh_chebyt -- Evaluate shifted Chebyshev polynomial of the first kind at a point. eval_sh_chebyu -- Evaluate shifted Chebyshev polynomial of the second kind at a point. eval_sh_jacobi -- Evaluate shifted Jacobi polynomial at a point. The following functions compute roots and quadrature weights for orthogonal polynomials: .. autosummary:: :toctree: generated/ roots_legendre -- Gauss-Legendre quadrature. roots_chebyt -- Gauss-Chebyshev (first kind) quadrature. roots_chebyu -- Gauss-Chebyshev (second kind) quadrature. roots_chebyc -- Gauss-Chebyshev (first kind) quadrature. roots_chebys -- Gauss-Chebyshev (second kind) quadrature. 
roots_jacobi -- Gauss-Jacobi quadrature. roots_laguerre -- Gauss-Laguerre quadrature. roots_genlaguerre -- Gauss-generalized Laguerre quadrature. roots_hermite -- Gauss-Hermite (physicst's) quadrature. roots_hermitenorm -- Gauss-Hermite (statistician's) quadrature. roots_gegenbauer -- Gauss-Gegenbauer quadrature. roots_sh_legendre -- Gauss-Legendre (shifted) quadrature. roots_sh_chebyt -- Gauss-Chebyshev (first kind, shifted) quadrature. roots_sh_chebyu -- Gauss-Chebyshev (second kind, shifted) quadrature. roots_sh_jacobi -- Gauss-Jacobi (shifted) quadrature. The functions below, in turn, return the polynomial coefficients in :class:`~.orthopoly1d` objects, which function similarly as :ref:`numpy.poly1d`. The :class:`~.orthopoly1d` class also has an attribute ``weights`` which returns the roots, weights, and total weights for the appropriate form of Gaussian quadrature. These are returned in an ``n x 3`` array with roots in the first column, weights in the second column, and total weights in the final column. Note that :class:`~.orthopoly1d` objects are converted to ``poly1d`` when doing arithmetic, and lose information of the original orthogonal polynomial. .. autosummary:: :toctree: generated/ legendre -- [+]Legendre polynomial. chebyt -- [+]Chebyshev polynomial of the first kind. chebyu -- [+]Chebyshev polynomial of the second kind. chebyc -- [+]Chebyshev polynomial of the first kind on :math:`[-2, 2]`. chebys -- [+]Chebyshev polynomial of the second kind on :math:`[-2, 2]`. jacobi -- [+]Jacobi polynomial. laguerre -- [+]Laguerre polynomial. genlaguerre -- [+]Generalized (associated) Laguerre polynomial. hermite -- [+]Physicist's Hermite polynomial. hermitenorm -- [+]Normalized (probabilist's) Hermite polynomial. gegenbauer -- [+]Gegenbauer (ultraspherical) polynomial. sh_legendre -- [+]Shifted Legendre polynomial. sh_chebyt -- [+]Shifted Chebyshev polynomial of the first kind. sh_chebyu -- [+]Shifted Chebyshev polynomial of the second kind. 
sh_jacobi -- [+]Shifted Jacobi polynomial. .. warning:: Computing values of high-order polynomials (around ``order > 20``) using polynomial coefficients is numerically unstable. To evaluate polynomial values, the ``eval_*`` functions should be used instead. Hypergeometric Functions ------------------------ .. autosummary:: :toctree: generated/ hyp2f1 -- Gauss hypergeometric function 2F1(a, b; c; z). hyp1f1 -- Confluent hypergeometric function 1F1(a, b; x) hyperu -- Confluent hypergeometric function U(a, b, x) of the second kind hyp0f1 -- Confluent hypergeometric limit function 0F1. hyp2f0 -- Hypergeometric function 2F0 in y and an error estimate hyp1f2 -- Hypergeometric function 1F2 and error estimate hyp3f0 -- Hypergeometric function 3F0 in y and an error estimate Parabolic Cylinder Functions ---------------------------- .. autosummary:: :toctree: generated/ pbdv -- Parabolic cylinder function D pbvv -- Parabolic cylinder function V pbwa -- Parabolic cylinder function W These are not universal functions: .. autosummary:: :toctree: generated/ pbdv_seq -- [+]Parabolic cylinder functions Dv(x) and derivatives. pbvv_seq -- [+]Parabolic cylinder functions Vv(x) and derivatives. pbdn_seq -- [+]Parabolic cylinder functions Dn(z) and derivatives. Mathieu and Related Functions ----------------------------- .. autosummary:: :toctree: generated/ mathieu_a -- Characteristic value of even Mathieu functions mathieu_b -- Characteristic value of odd Mathieu functions These are not universal functions: .. autosummary:: :toctree: generated/ mathieu_even_coef -- [+]Fourier coefficients for even Mathieu and modified Mathieu functions. mathieu_odd_coef -- [+]Fourier coefficients for even Mathieu and modified Mathieu functions. The following return both function and first derivative: .. 
autosummary::
   :toctree: generated/

   mathieu_cem -- Even Mathieu function and its derivative
   mathieu_sem -- Odd Mathieu function and its derivative
   mathieu_modcem1 -- Even modified Mathieu function of the first kind and its derivative
   mathieu_modcem2 -- Even modified Mathieu function of the second kind and its derivative
   mathieu_modsem1 -- Odd modified Mathieu function of the first kind and its derivative
   mathieu_modsem2 -- Odd modified Mathieu function of the second kind and its derivative

Spheroidal Wave Functions
-------------------------

.. autosummary::
   :toctree: generated/

   pro_ang1 -- Prolate spheroidal angular function of the first kind and its derivative
   pro_rad1 -- Prolate spheroidal radial function of the first kind and its derivative
   pro_rad2 -- Prolate spheroidal radial function of the second kind and its derivative
   obl_ang1 -- Oblate spheroidal angular function of the first kind and its derivative
   obl_rad1 -- Oblate spheroidal radial function of the first kind and its derivative
   obl_rad2 -- Oblate spheroidal radial function of the second kind and its derivative.
   pro_cv -- Characteristic value of prolate spheroidal function
   obl_cv -- Characteristic value of oblate spheroidal function
   pro_cv_seq -- Characteristic values for prolate spheroidal wave functions.
   obl_cv_seq -- Characteristic values for oblate spheroidal wave functions.

The following functions require pre-computed characteristic value:

..
autosummary::
   :toctree: generated/

   pro_ang1_cv -- Prolate spheroidal angular function pro_ang1 for precomputed characteristic value
   pro_rad1_cv -- Prolate spheroidal radial function pro_rad1 for precomputed characteristic value
   pro_rad2_cv -- Prolate spheroidal radial function pro_rad2 for precomputed characteristic value
   obl_ang1_cv -- Oblate spheroidal angular function obl_ang1 for precomputed characteristic value
   obl_rad1_cv -- Oblate spheroidal radial function obl_rad1 for precomputed characteristic value
   obl_rad2_cv -- Oblate spheroidal radial function obl_rad2 for precomputed characteristic value

Kelvin Functions
----------------

.. autosummary::
   :toctree: generated/

   kelvin -- Kelvin functions as complex numbers
   kelvin_zeros -- [+]Compute nt zeros of all Kelvin functions.
   ber -- Kelvin function ber.
   bei -- Kelvin function bei
   berp -- Derivative of the Kelvin function `ber`
   beip -- Derivative of the Kelvin function `bei`
   ker -- Kelvin function ker
   kei -- Kelvin function kei
   kerp -- Derivative of the Kelvin function ker
   keip -- Derivative of the Kelvin function kei

These are not universal functions:

.. autosummary::
   :toctree: generated/

   ber_zeros -- [+]Compute nt zeros of the Kelvin function ber(x).
   bei_zeros -- [+]Compute nt zeros of the Kelvin function bei(x).
   berp_zeros -- [+]Compute nt zeros of the Kelvin function ber'(x).
   beip_zeros -- [+]Compute nt zeros of the Kelvin function bei'(x).
   ker_zeros -- [+]Compute nt zeros of the Kelvin function ker(x).
   kei_zeros -- [+]Compute nt zeros of the Kelvin function kei(x).
   kerp_zeros -- [+]Compute nt zeros of the Kelvin function ker'(x).
   keip_zeros -- [+]Compute nt zeros of the Kelvin function kei'(x).

Combinatorics
-------------

.. autosummary::
   :toctree: generated/

   comb -- [+]The number of combinations of N things taken k at a time.
   perm -- [+]Permutations of N things taken k at a time, i.e., k-permutations of N.

Lambert W and Related Functions
-------------------------------

..
autosummary:: :toctree: generated/ lambertw -- Lambert W function. wrightomega -- Wright Omega function. Other Special Functions ----------------------- .. autosummary:: :toctree: generated/ agm -- Arithmetic, Geometric Mean. bernoulli -- Bernoulli numbers B0..Bn (inclusive). binom -- Binomial coefficient diric -- Periodic sinc function, also called the Dirichlet function. euler -- Euler numbers E0..En (inclusive). expn -- Exponential integral E_n exp1 -- Exponential integral E_1 of complex argument z expi -- Exponential integral Ei factorial -- The factorial of a number or array of numbers. factorial2 -- Double factorial. factorialk -- [+]Multifactorial of n of order k, n(!!...!). shichi -- Hyperbolic sine and cosine integrals. sici -- Sine and cosine integrals. spence -- Spence's function, also known as the dilogarithm. zeta -- Riemann zeta function. zetac -- Riemann zeta function minus 1. Convenience Functions --------------------- .. autosummary:: :toctree: generated/ cbrt -- Cube root of `x` exp10 -- 10**x exp2 -- 2**x radian -- Convert from degrees to radians cosdg -- Cosine of the angle `x` given in degrees. sindg -- Sine of angle given in degrees tandg -- Tangent of angle x given in degrees. cotdg -- Cotangent of the angle `x` given in degrees. log1p -- Calculates log(1+x) for use when `x` is near zero expm1 -- exp(x) - 1 for use when `x` is near zero. cosm1 -- cos(x) - 1 for use when `x` is near zero. round -- Round to nearest integer xlogy -- Compute ``x*log(y)`` so that the result is 0 if ``x = 0``. xlog1py -- Compute ``x*log1p(y)`` so that the result is 0 if ``x = 0``. logsumexp -- Compute the log of the sum of exponentials of input elements. exprel -- Relative error exponential, (exp(x)-1)/x, for use when `x` is near zero. sinc -- Return the sinc function. .. [+] in the description indicates a function which is not a universal .. function and does not follow broadcasting and automatic .. array-looping rules. 
""" from __future__ import division, print_function, absolute_import from .sf_error import SpecialFunctionWarning, SpecialFunctionError from ._ufuncs import * from .basic import * from ._logsumexp import logsumexp from . import specfun from . import orthogonal from .orthogonal import * from .spfun_stats import multigammaln from ._ellip_harm import ellip_harm, ellip_harm_2, ellip_normal from .lambertw import lambertw from ._spherical_bessel import (spherical_jn, spherical_yn, spherical_in, spherical_kn) __all__ = [s for s in dir() if not s.startswith('_')] from numpy.dual import register_func register_func('i0',i0) del register_func from scipy._lib._testutils import PytestTester test = PytestTester(__name__) del PytestTester
27,475
40.504532
104
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/_logsumexp.py
from __future__ import division, print_function, absolute_import import numpy as np from scipy._lib._util import _asarray_validated __all__ = ["logsumexp"] def logsumexp(a, axis=None, b=None, keepdims=False, return_sign=False): """Compute the log of the sum of exponentials of input elements. Parameters ---------- a : array_like Input array. axis : None or int or tuple of ints, optional Axis or axes over which the sum is taken. By default `axis` is None, and all elements are summed. .. versionadded:: 0.11.0 keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original array. .. versionadded:: 0.15.0 b : array-like, optional Scaling factor for exp(`a`) must be of the same shape as `a` or broadcastable to `a`. These values may be negative in order to implement subtraction. .. versionadded:: 0.12.0 return_sign : bool, optional If this is set to True, the result will be a pair containing sign information; if False, results that are negative will be returned as NaN. Default is False (no sign information). .. versionadded:: 0.16.0 Returns ------- res : ndarray The result, ``np.log(np.sum(np.exp(a)))`` calculated in a numerically more stable way. If `b` is given then ``np.log(np.sum(b*np.exp(a)))`` is returned. sgn : ndarray If return_sign is True, this will be an array of floating-point numbers matching res and +1, 0, or -1 depending on the sign of the result. If False, only one result is returned. See Also -------- numpy.logaddexp, numpy.logaddexp2 Notes ----- Numpy has a logaddexp function which is very similar to `logsumexp`, but only handles two arguments. `logaddexp.reduce` is similar to this function, but may be less stable. 
Examples -------- >>> from scipy.special import logsumexp >>> a = np.arange(10) >>> np.log(np.sum(np.exp(a))) 9.4586297444267107 >>> logsumexp(a) 9.4586297444267107 With weights >>> a = np.arange(10) >>> b = np.arange(10, 0, -1) >>> logsumexp(a, b=b) 9.9170178533034665 >>> np.log(np.sum(b*np.exp(a))) 9.9170178533034647 Returning a sign flag >>> logsumexp([1,2],b=[1,-1],return_sign=True) (1.5413248546129181, -1.0) Notice that `logsumexp` does not directly support masked arrays. To use it on a masked array, convert the mask into zero weights: >>> a = np.ma.array([np.log(2), 2, np.log(3)], ... mask=[False, True, False]) >>> b = (~a.mask).astype(int) >>> logsumexp(a.data, b=b), np.log(5) 1.6094379124341005, 1.6094379124341005 """ a = _asarray_validated(a, check_finite=False) if b is not None: a, b = np.broadcast_arrays(a, b) if np.any(b == 0): a = a + 0. # promote to at least float a[b == 0] = -np.inf a_max = np.amax(a, axis=axis, keepdims=True) if a_max.ndim > 0: a_max[~np.isfinite(a_max)] = 0 elif not np.isfinite(a_max): a_max = 0 if b is not None: b = np.asarray(b) tmp = b * np.exp(a - a_max) else: tmp = np.exp(a - a_max) # suppress warnings about log of zero with np.errstate(divide='ignore'): s = np.sum(tmp, axis=axis, keepdims=keepdims) if return_sign: sgn = np.sign(s) s *= sgn # /= makes more sense but we need zero -> zero out = np.log(s) if not keepdims: a_max = np.squeeze(a_max, axis=axis) out += a_max if return_sign: return out, sgn else: return out
3,882
28.869231
78
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/basic.py
# # Author: Travis Oliphant, 2002 # from __future__ import division, print_function, absolute_import import operator import numpy as np import math from scipy._lib.six import xrange from numpy import (pi, asarray, floor, isscalar, iscomplex, real, imag, sqrt, where, mgrid, sin, place, issubdtype, extract, less, inexact, nan, zeros, sinc) from . import _ufuncs as ufuncs from ._ufuncs import (ellipkm1, mathieu_a, mathieu_b, iv, jv, gamma, psi, _zeta, hankel1, hankel2, yv, kv, ndtri, poch, binom, hyp0f1) from . import specfun from . import orthogonal from ._comb import _comb_int __all__ = ['ai_zeros', 'assoc_laguerre', 'bei_zeros', 'beip_zeros', 'ber_zeros', 'bernoulli', 'berp_zeros', 'bessel_diff_formula', 'bi_zeros', 'clpmn', 'comb', 'digamma', 'diric', 'ellipk', 'erf_zeros', 'erfcinv', 'erfinv', 'euler', 'factorial', 'factorialk', 'factorial2', 'fresnel_zeros', 'fresnelc_zeros', 'fresnels_zeros', 'gamma', 'h1vp', 'h2vp', 'hankel1', 'hankel2', 'hyp0f1', 'iv', 'ivp', 'jn_zeros', 'jnjnp_zeros', 'jnp_zeros', 'jnyn_zeros', 'jv', 'jvp', 'kei_zeros', 'keip_zeros', 'kelvin_zeros', 'ker_zeros', 'kerp_zeros', 'kv', 'kvp', 'lmbda', 'lpmn', 'lpn', 'lqmn', 'lqn', 'mathieu_a', 'mathieu_b', 'mathieu_even_coef', 'mathieu_odd_coef', 'ndtri', 'obl_cv_seq', 'pbdn_seq', 'pbdv_seq', 'pbvv_seq', 'perm', 'polygamma', 'pro_cv_seq', 'psi', 'riccati_jn', 'riccati_yn', 'sinc', 'y0_zeros', 'y1_zeros', 'y1p_zeros', 'yn_zeros', 'ynp_zeros', 'yv', 'yvp', 'zeta'] def _nonneg_int_or_fail(n, var_name, strict=True): try: if strict: # Raises an exception if float n = operator.index(n) elif n == floor(n): n = int(n) else: raise ValueError() if n < 0: raise ValueError() except (ValueError, TypeError) as err: raise err.__class__("{} must be a non-negative integer".format(var_name)) return n def diric(x, n): """Periodic sinc function, also called the Dirichlet function. The Dirichlet function is defined as:: diric(x, n) = sin(x * n/2) / (n * sin(x / 2)), where `n` is a positive integer. 
Parameters ---------- x : array_like Input data n : int Integer defining the periodicity. Returns ------- diric : ndarray Examples -------- >>> from scipy import special >>> import matplotlib.pyplot as plt >>> x = np.linspace(-8*np.pi, 8*np.pi, num=201) >>> plt.figure(figsize=(8, 8)); >>> for idx, n in enumerate([2, 3, 4, 9]): ... plt.subplot(2, 2, idx+1) ... plt.plot(x, special.diric(x, n)) ... plt.title('diric, n={}'.format(n)) >>> plt.show() The following example demonstrates that `diric` gives the magnitudes (modulo the sign and scaling) of the Fourier coefficients of a rectangular pulse. Suppress output of values that are effectively 0: >>> np.set_printoptions(suppress=True) Create a signal `x` of length `m` with `k` ones: >>> m = 8 >>> k = 3 >>> x = np.zeros(m) >>> x[:k] = 1 Use the FFT to compute the Fourier transform of `x`, and inspect the magnitudes of the coefficients: >>> np.abs(np.fft.fft(x)) array([ 3. , 2.41421356, 1. , 0.41421356, 1. , 0.41421356, 1. , 2.41421356]) Now find the same values (up to sign) using `diric`. We multiply by `k` to account for the different scaling conventions of `numpy.fft.fft` and `diric`: >>> theta = np.linspace(0, 2*np.pi, m, endpoint=False) >>> k * special.diric(theta, k) array([ 3. , 2.41421356, 1. , -0.41421356, -1. , -0.41421356, 1. 
, 2.41421356]) """ x, n = asarray(x), asarray(n) n = asarray(n + (x-x)) x = asarray(x + (n-n)) if issubdtype(x.dtype, inexact): ytype = x.dtype else: ytype = float y = zeros(x.shape, ytype) # empirical minval for 32, 64 or 128 bit float computations # where sin(x/2) < minval, result is fixed at +1 or -1 if np.finfo(ytype).eps < 1e-18: minval = 1e-11 elif np.finfo(ytype).eps < 1e-15: minval = 1e-7 else: minval = 1e-3 mask1 = (n <= 0) | (n != floor(n)) place(y, mask1, nan) x = x / 2 denom = sin(x) mask2 = (1-mask1) & (abs(denom) < minval) xsub = extract(mask2, x) nsub = extract(mask2, n) zsub = xsub / pi place(y, mask2, pow(-1, np.round(zsub)*(nsub-1))) mask = (1-mask1) & (1-mask2) xsub = extract(mask, x) nsub = extract(mask, n) dsub = extract(mask, denom) place(y, mask, sin(nsub*xsub)/(nsub*dsub)) return y def jnjnp_zeros(nt): """Compute zeros of integer-order Bessel functions Jn and Jn'. Results are arranged in order of the magnitudes of the zeros. Parameters ---------- nt : int Number (<=1200) of zeros to compute Returns ------- zo[l-1] : ndarray Value of the lth zero of Jn(x) and Jn'(x). Of length `nt`. n[l-1] : ndarray Order of the Jn(x) or Jn'(x) associated with lth zero. Of length `nt`. m[l-1] : ndarray Serial number of the zeros of Jn(x) or Jn'(x) associated with lth zero. Of length `nt`. t[l-1] : ndarray 0 if lth zero in zo is zero of Jn(x), 1 if it is a zero of Jn'(x). Of length `nt`. See Also -------- jn_zeros, jnp_zeros : to get separated arrays of zeros. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996, chapter 5. 
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html """ if not isscalar(nt) or (floor(nt) != nt) or (nt > 1200): raise ValueError("Number must be integer <= 1200.") nt = int(nt) n, m, t, zo = specfun.jdzo(nt) return zo[1:nt+1], n[:nt], m[:nt], t[:nt] def jnyn_zeros(n, nt): """Compute nt zeros of Bessel functions Jn(x), Jn'(x), Yn(x), and Yn'(x). Returns 4 arrays of length `nt`, corresponding to the first `nt` zeros of Jn(x), Jn'(x), Yn(x), and Yn'(x), respectively. Parameters ---------- n : int Order of the Bessel functions nt : int Number (<=1200) of zeros to compute See jn_zeros, jnp_zeros, yn_zeros, ynp_zeros to get separate arrays. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996, chapter 5. https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html """ if not (isscalar(nt) and isscalar(n)): raise ValueError("Arguments must be scalars.") if (floor(n) != n) or (floor(nt) != nt): raise ValueError("Arguments must be integers.") if (nt <= 0): raise ValueError("nt > 0") return specfun.jyzo(abs(n), nt) def jn_zeros(n, nt): """Compute zeros of integer-order Bessel function Jn(x). Parameters ---------- n : int Order of Bessel function nt : int Number of zeros to return References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996, chapter 5. https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html """ return jnyn_zeros(n, nt)[0] def jnp_zeros(n, nt): """Compute zeros of integer-order Bessel function derivative Jn'(x). Parameters ---------- n : int Order of Bessel function nt : int Number of zeros to return References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996, chapter 5. 
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html """ return jnyn_zeros(n, nt)[1] def yn_zeros(n, nt): """Compute zeros of integer-order Bessel function Yn(x). Parameters ---------- n : int Order of Bessel function nt : int Number of zeros to return References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996, chapter 5. https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html """ return jnyn_zeros(n, nt)[2] def ynp_zeros(n, nt): """Compute zeros of integer-order Bessel function derivative Yn'(x). Parameters ---------- n : int Order of Bessel function nt : int Number of zeros to return References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996, chapter 5. https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html """ return jnyn_zeros(n, nt)[3] def y0_zeros(nt, complex=False): """Compute nt zeros of Bessel function Y0(z), and derivative at each zero. The derivatives are given by Y0'(z0) = -Y1(z0) at each zero z0. Parameters ---------- nt : int Number of zeros to return complex : bool, default False Set to False to return only the real zeros; set to True to return only the complex zeros with negative real part and positive imaginary part. Note that the complex conjugates of the latter are also zeros of the function, but are not returned by this routine. Returns ------- z0n : ndarray Location of nth zero of Y0(z) y0pz0n : ndarray Value of derivative Y0'(z0) for nth zero References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996, chapter 5. 
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html """ if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): raise ValueError("Arguments must be scalar positive integer.") kf = 0 kc = not complex return specfun.cyzo(nt, kf, kc) def y1_zeros(nt, complex=False): """Compute nt zeros of Bessel function Y1(z), and derivative at each zero. The derivatives are given by Y1'(z1) = Y0(z1) at each zero z1. Parameters ---------- nt : int Number of zeros to return complex : bool, default False Set to False to return only the real zeros; set to True to return only the complex zeros with negative real part and positive imaginary part. Note that the complex conjugates of the latter are also zeros of the function, but are not returned by this routine. Returns ------- z1n : ndarray Location of nth zero of Y1(z) y1pz1n : ndarray Value of derivative Y1'(z1) for nth zero References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996, chapter 5. https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html """ if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): raise ValueError("Arguments must be scalar positive integer.") kf = 1 kc = not complex return specfun.cyzo(nt, kf, kc) def y1p_zeros(nt, complex=False): """Compute nt zeros of Bessel derivative Y1'(z), and value at each zero. The values are given by Y1(z1) at each z1 where Y1'(z1)=0. Parameters ---------- nt : int Number of zeros to return complex : bool, default False Set to False to return only the real zeros; set to True to return only the complex zeros with negative real part and positive imaginary part. Note that the complex conjugates of the latter are also zeros of the function, but are not returned by this routine. Returns ------- z1pn : ndarray Location of nth zero of Y1'(z) y1z1pn : ndarray Value of derivative Y1(z1) for nth zero References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. 
"Computation of Special Functions", John Wiley and Sons, 1996, chapter 5. https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html """ if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): raise ValueError("Arguments must be scalar positive integer.") kf = 2 kc = not complex return specfun.cyzo(nt, kf, kc) def _bessel_diff_formula(v, z, n, L, phase): # from AMS55. # L(v, z) = J(v, z), Y(v, z), H1(v, z), H2(v, z), phase = -1 # L(v, z) = I(v, z) or exp(v*pi*i)K(v, z), phase = 1 # For K, you can pull out the exp((v-k)*pi*i) into the caller v = asarray(v) p = 1.0 s = L(v-n, z) for i in xrange(1, n+1): p = phase * (p * (n-i+1)) / i # = choose(k, i) s += p*L(v-n + i*2, z) return s / (2.**n) bessel_diff_formula = np.deprecate(_bessel_diff_formula, message="bessel_diff_formula is a private function, do not use it!") def jvp(v, z, n=1): """Compute nth derivative of Bessel function Jv(z) with respect to `z`. Parameters ---------- v : float Order of Bessel function z : complex Argument at which to evaluate the derivative n : int, default 1 Order of derivative Notes ----- The derivative is computed using the relation DLFM 10.6.7 [2]_. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996, chapter 5. https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html .. [2] NIST Digital Library of Mathematical Functions. http://dlmf.nist.gov/10.6.E7 """ n = _nonneg_int_or_fail(n, 'n') if n == 0: return jv(v, z) else: return _bessel_diff_formula(v, z, n, jv, -1) def yvp(v, z, n=1): """Compute nth derivative of Bessel function Yv(z) with respect to `z`. Parameters ---------- v : float Order of Bessel function z : complex Argument at which to evaluate the derivative n : int, default 1 Order of derivative Notes ----- The derivative is computed using the relation DLFM 10.6.7 [2]_. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. 
"Computation of Special Functions", John Wiley and Sons, 1996, chapter 5. https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html .. [2] NIST Digital Library of Mathematical Functions. http://dlmf.nist.gov/10.6.E7 """ n = _nonneg_int_or_fail(n, 'n') if n == 0: return yv(v, z) else: return _bessel_diff_formula(v, z, n, yv, -1) def kvp(v, z, n=1): """Compute nth derivative of real-order modified Bessel function Kv(z) Kv(z) is the modified Bessel function of the second kind. Derivative is calculated with respect to `z`. Parameters ---------- v : array_like of float Order of Bessel function z : array_like of complex Argument at which to evaluate the derivative n : int Order of derivative. Default is first derivative. Returns ------- out : ndarray The results Examples -------- Calculate multiple values at order 5: >>> from scipy.special import kvp >>> kvp(5, (1, 2, 3+5j)) array([-1.84903536e+03+0.j , -2.57735387e+01+0.j , -3.06627741e-02+0.08750845j]) Calculate for a single value at multiple orders: >>> kvp((4, 4.5, 5), 1) array([ -184.0309, -568.9585, -1849.0354]) Notes ----- The derivative is computed using the relation DLFM 10.29.5 [2]_. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996, chapter 6. https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html .. [2] NIST Digital Library of Mathematical Functions. http://dlmf.nist.gov/10.29.E5 """ n = _nonneg_int_or_fail(n, 'n') if n == 0: return kv(v, z) else: return (-1)**n * _bessel_diff_formula(v, z, n, kv, 1) def ivp(v, z, n=1): """Compute nth derivative of modified Bessel function Iv(z) with respect to `z`. Parameters ---------- v : array_like of float Order of Bessel function z : array_like of complex Argument at which to evaluate the derivative n : int, default 1 Order of derivative Notes ----- The derivative is computed using the relation DLFM 10.29.5 [2]_. References ---------- .. 
[1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996, chapter 6. https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html .. [2] NIST Digital Library of Mathematical Functions. http://dlmf.nist.gov/10.29.E5 """ n = _nonneg_int_or_fail(n, 'n') if n == 0: return iv(v, z) else: return _bessel_diff_formula(v, z, n, iv, 1) def h1vp(v, z, n=1): """Compute nth derivative of Hankel function H1v(z) with respect to `z`. Parameters ---------- v : float Order of Hankel function z : complex Argument at which to evaluate the derivative n : int, default 1 Order of derivative Notes ----- The derivative is computed using the relation DLFM 10.6.7 [2]_. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996, chapter 5. https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html .. [2] NIST Digital Library of Mathematical Functions. http://dlmf.nist.gov/10.6.E7 """ n = _nonneg_int_or_fail(n, 'n') if n == 0: return hankel1(v, z) else: return _bessel_diff_formula(v, z, n, hankel1, -1) def h2vp(v, z, n=1): """Compute nth derivative of Hankel function H2v(z) with respect to `z`. Parameters ---------- v : float Order of Hankel function z : complex Argument at which to evaluate the derivative n : int, default 1 Order of derivative Notes ----- The derivative is computed using the relation DLFM 10.6.7 [2]_. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996, chapter 5. https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html .. [2] NIST Digital Library of Mathematical Functions. http://dlmf.nist.gov/10.6.E7 """ n = _nonneg_int_or_fail(n, 'n') if n == 0: return hankel2(v, z) else: return _bessel_diff_formula(v, z, n, hankel2, -1) def riccati_jn(n, x): r"""Compute Ricatti-Bessel function of the first kind and its derivative. 
The Ricatti-Bessel function of the first kind is defined as :math:`x j_n(x)`, where :math:`j_n` is the spherical Bessel function of the first kind of order :math:`n`. This function computes the value and first derivative of the Ricatti-Bessel function for all orders up to and including `n`. Parameters ---------- n : int Maximum order of function to compute x : float Argument at which to evaluate Returns ------- jn : ndarray Value of j0(x), ..., jn(x) jnp : ndarray First derivative j0'(x), ..., jn'(x) Notes ----- The computation is carried out via backward recurrence, using the relation DLMF 10.51.1 [2]_. Wrapper for a Fortran routine created by Shanjie Zhang and Jianming Jin [1]_. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html .. [2] NIST Digital Library of Mathematical Functions. http://dlmf.nist.gov/10.51.E1 """ if not (isscalar(n) and isscalar(x)): raise ValueError("arguments must be scalars.") n = _nonneg_int_or_fail(n, 'n', strict=False) if (n == 0): n1 = 1 else: n1 = n nm, jn, jnp = specfun.rctj(n1, x) return jn[:(n+1)], jnp[:(n+1)] def riccati_yn(n, x): """Compute Ricatti-Bessel function of the second kind and its derivative. The Ricatti-Bessel function of the second kind is defined as :math:`x y_n(x)`, where :math:`y_n` is the spherical Bessel function of the second kind of order :math:`n`. This function computes the value and first derivative of the function for all orders up to and including `n`. Parameters ---------- n : int Maximum order of function to compute x : float Argument at which to evaluate Returns ------- yn : ndarray Value of y0(x), ..., yn(x) ynp : ndarray First derivative y0'(x), ..., yn'(x) Notes ----- The computation is carried out via ascending recurrence, using the relation DLMF 10.51.1 [2]_. Wrapper for a Fortran routine created by Shanjie Zhang and Jianming Jin [1]_. 
References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html .. [2] NIST Digital Library of Mathematical Functions. http://dlmf.nist.gov/10.51.E1 """ if not (isscalar(n) and isscalar(x)): raise ValueError("arguments must be scalars.") n = _nonneg_int_or_fail(n, 'n', strict=False) if (n == 0): n1 = 1 else: n1 = n nm, jn, jnp = specfun.rcty(n1, x) return jn[:(n+1)], jnp[:(n+1)] def erfinv(y): """Inverse function for erf. """ return ndtri((y+1)/2.0)/sqrt(2) def erfcinv(y): """Inverse function for erfc. """ return -ndtri(0.5*y)/sqrt(2) def erf_zeros(nt): """Compute nt complex zeros of error function erf(z). References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html """ if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt): raise ValueError("Argument must be positive scalar integer.") return specfun.cerzo(nt) def fresnelc_zeros(nt): """Compute nt complex zeros of cosine Fresnel integral C(z). References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html """ if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt): raise ValueError("Argument must be positive scalar integer.") return specfun.fcszo(1, nt) def fresnels_zeros(nt): """Compute nt complex zeros of sine Fresnel integral S(z). References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. 
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html """ if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt): raise ValueError("Argument must be positive scalar integer.") return specfun.fcszo(2, nt) def fresnel_zeros(nt): """Compute nt complex zeros of sine and cosine Fresnel integrals S(z) and C(z). References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html """ if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt): raise ValueError("Argument must be positive scalar integer.") return specfun.fcszo(2, nt), specfun.fcszo(1, nt) def assoc_laguerre(x, n, k=0.0): """Compute the generalized (associated) Laguerre polynomial of degree n and order k. The polynomial :math:`L^{(k)}_n(x)` is orthogonal over ``[0, inf)``, with weighting function ``exp(-x) * x**k`` with ``k > -1``. Notes ----- `assoc_laguerre` is a simple wrapper around `eval_genlaguerre`, with reversed argument order ``(x, n, k=0.0) --> (n, k, x)``. """ return orthogonal.eval_genlaguerre(n, k, x) digamma = psi def polygamma(n, x): """Polygamma function n. This is the nth derivative of the digamma (psi) function. Parameters ---------- n : array_like of int The order of the derivative of `psi`. x : array_like Where to evaluate the polygamma function. Returns ------- polygamma : ndarray The result. Examples -------- >>> from scipy import special >>> x = [2, 3, 25.5] >>> special.polygamma(1, x) array([ 0.64493407, 0.39493407, 0.03999467]) >>> special.polygamma(0, x) == special.psi(x) array([ True, True, True], dtype=bool) """ n, x = asarray(n), asarray(x) fac2 = (-1.0)**(n+1) * gamma(n+1.0) * zeta(n+1, x) return where(n == 0, psi(x), fac2) def mathieu_even_coef(m, q): r"""Fourier coefficients for even Mathieu and modified Mathieu functions. 
The Fourier series of the even solutions of the Mathieu differential equation are of the form .. math:: \mathrm{ce}_{2n}(z, q) = \sum_{k=0}^{\infty} A_{(2n)}^{(2k)} \cos 2kz .. math:: \mathrm{ce}_{2n+1}(z, q) = \sum_{k=0}^{\infty} A_{(2n+1)}^{(2k+1)} \cos (2k+1)z This function returns the coefficients :math:`A_{(2n)}^{(2k)}` for even input m=2n, and the coefficients :math:`A_{(2n+1)}^{(2k+1)}` for odd input m=2n+1. Parameters ---------- m : int Order of Mathieu functions. Must be non-negative. q : float (>=0) Parameter of Mathieu functions. Must be non-negative. Returns ------- Ak : ndarray Even or odd Fourier coefficients, corresponding to even or odd m. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html .. [2] NIST Digital Library of Mathematical Functions http://dlmf.nist.gov/28.4#i """ if not (isscalar(m) and isscalar(q)): raise ValueError("m and q must be scalars.") if (q < 0): raise ValueError("q >=0") if (m != floor(m)) or (m < 0): raise ValueError("m must be an integer >=0.") if (q <= 1): qm = 7.5 + 56.1*sqrt(q) - 134.7*q + 90.7*sqrt(q)*q else: qm = 17.0 + 3.1*sqrt(q) - .126*q + .0037*sqrt(q)*q km = int(qm + 0.5*m) if km > 251: print("Warning, too many predicted coefficients.") kd = 1 m = int(floor(m)) if m % 2: kd = 2 a = mathieu_a(m, q) fc = specfun.fcoef(kd, m, q, a) return fc[:km] def mathieu_odd_coef(m, q): r"""Fourier coefficients for even Mathieu and modified Mathieu functions. The Fourier series of the odd solutions of the Mathieu differential equation are of the form .. math:: \mathrm{se}_{2n+1}(z, q) = \sum_{k=0}^{\infty} B_{(2n+1)}^{(2k+1)} \sin (2k+1)z .. 
math:: \mathrm{se}_{2n+2}(z, q) = \sum_{k=0}^{\infty} B_{(2n+2)}^{(2k+2)} \sin (2k+2)z This function returns the coefficients :math:`B_{(2n+2)}^{(2k+2)}` for even input m=2n+2, and the coefficients :math:`B_{(2n+1)}^{(2k+1)}` for odd input m=2n+1. Parameters ---------- m : int Order of Mathieu functions. Must be non-negative. q : float (>=0) Parameter of Mathieu functions. Must be non-negative. Returns ------- Bk : ndarray Even or odd Fourier coefficients, corresponding to even or odd m. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html """ if not (isscalar(m) and isscalar(q)): raise ValueError("m and q must be scalars.") if (q < 0): raise ValueError("q >=0") if (m != floor(m)) or (m <= 0): raise ValueError("m must be an integer > 0") if (q <= 1): qm = 7.5 + 56.1*sqrt(q) - 134.7*q + 90.7*sqrt(q)*q else: qm = 17.0 + 3.1*sqrt(q) - .126*q + .0037*sqrt(q)*q km = int(qm + 0.5*m) if km > 251: print("Warning, too many predicted coefficients.") kd = 4 m = int(floor(m)) if m % 2: kd = 3 b = mathieu_b(m, q) fc = specfun.fcoef(kd, m, q, b) return fc[:km] def lpmn(m, n, z): """Sequence of associated Legendre functions of the first kind. Computes the associated Legendre function of the first kind of order m and degree n, ``Pmn(z)`` = :math:`P_n^m(z)`, and its derivative, ``Pmn'(z)``. Returns two arrays of size ``(m+1, n+1)`` containing ``Pmn(z)`` and ``Pmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``. This function takes a real argument ``z``. For complex arguments ``z`` use clpmn instead. Parameters ---------- m : int ``|m| <= n``; the order of the Legendre function. n : int where ``n >= 0``; the degree of the Legendre function. Often called ``l`` (lower case L) in descriptions of the associated Legendre function z : float Input value. 
Returns ------- Pmn_z : (m+1, n+1) array Values for all orders 0..m and degrees 0..n Pmn_d_z : (m+1, n+1) array Derivatives for all orders 0..m and degrees 0..n See Also -------- clpmn: associated Legendre functions of the first kind for complex z Notes ----- In the interval (-1, 1), Ferrer's function of the first kind is returned. The phase convention used for the intervals (1, inf) and (-inf, -1) is such that the result is always real. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html .. [2] NIST Digital Library of Mathematical Functions http://dlmf.nist.gov/14.3 """ if not isscalar(m) or (abs(m) > n): raise ValueError("m must be <= n.") if not isscalar(n) or (n < 0): raise ValueError("n must be a non-negative integer.") if not isscalar(z): raise ValueError("z must be scalar.") if iscomplex(z): raise ValueError("Argument must be real. Use clpmn instead.") if (m < 0): mp = -m mf, nf = mgrid[0:mp+1, 0:n+1] with ufuncs.errstate(all='ignore'): if abs(z) < 1: # Ferrer function; DLMF 14.9.3 fixarr = where(mf > nf, 0.0, (-1)**mf * gamma(nf-mf+1) / gamma(nf+mf+1)) else: # Match to clpmn; DLMF 14.9.13 fixarr = where(mf > nf, 0.0, gamma(nf-mf+1) / gamma(nf+mf+1)) else: mp = m p, pd = specfun.lpmn(mp, n, z) if (m < 0): p = p * fixarr pd = pd * fixarr return p, pd def clpmn(m, n, z, type=3): """Associated Legendre function of the first kind for complex arguments. Computes the associated Legendre function of the first kind of order m and degree n, ``Pmn(z)`` = :math:`P_n^m(z)`, and its derivative, ``Pmn'(z)``. Returns two arrays of size ``(m+1, n+1)`` containing ``Pmn(z)`` and ``Pmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``. Parameters ---------- m : int ``|m| <= n``; the order of the Legendre function. n : int where ``n >= 0``; the degree of the Legendre function. 
Often called ``l`` (lower case L) in descriptions of the associated Legendre function z : float or complex Input value. type : int, optional takes values 2 or 3 2: cut on the real axis ``|x| > 1`` 3: cut on the real axis ``-1 < x < 1`` (default) Returns ------- Pmn_z : (m+1, n+1) array Values for all orders ``0..m`` and degrees ``0..n`` Pmn_d_z : (m+1, n+1) array Derivatives for all orders ``0..m`` and degrees ``0..n`` See Also -------- lpmn: associated Legendre functions of the first kind for real z Notes ----- By default, i.e. for ``type=3``, phase conventions are chosen according to [1]_ such that the function is analytic. The cut lies on the interval (-1, 1). Approaching the cut from above or below in general yields a phase factor with respect to Ferrer's function of the first kind (cf. `lpmn`). For ``type=2`` a cut at ``|x| > 1`` is chosen. Approaching the real values on the interval (-1, 1) in the complex plane yields Ferrer's function of the first kind. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html .. 
[2] NIST Digital Library of Mathematical Functions http://dlmf.nist.gov/14.21 """ if not isscalar(m) or (abs(m) > n): raise ValueError("m must be <= n.") if not isscalar(n) or (n < 0): raise ValueError("n must be a non-negative integer.") if not isscalar(z): raise ValueError("z must be scalar.") if not(type == 2 or type == 3): raise ValueError("type must be either 2 or 3.") if (m < 0): mp = -m mf, nf = mgrid[0:mp+1, 0:n+1] with ufuncs.errstate(all='ignore'): if type == 2: fixarr = where(mf > nf, 0.0, (-1)**mf * gamma(nf-mf+1) / gamma(nf+mf+1)) else: fixarr = where(mf > nf, 0.0, gamma(nf-mf+1) / gamma(nf+mf+1)) else: mp = m p, pd = specfun.clpmn(mp, n, real(z), imag(z), type) if (m < 0): p = p * fixarr pd = pd * fixarr return p, pd def lqmn(m, n, z): """Sequence of associated Legendre functions of the second kind. Computes the associated Legendre function of the second kind of order m and degree n, ``Qmn(z)`` = :math:`Q_n^m(z)`, and its derivative, ``Qmn'(z)``. Returns two arrays of size ``(m+1, n+1)`` containing ``Qmn(z)`` and ``Qmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``. Parameters ---------- m : int ``|m| <= n``; the order of the Legendre function. n : int where ``n >= 0``; the degree of the Legendre function. Often called ``l`` (lower case L) in descriptions of the associated Legendre function z : complex Input value. Returns ------- Qmn_z : (m+1, n+1) array Values for all orders 0..m and degrees 0..n Qmn_d_z : (m+1, n+1) array Derivatives for all orders 0..m and degrees 0..n References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. 
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html """ if not isscalar(m) or (m < 0): raise ValueError("m must be a non-negative integer.") if not isscalar(n) or (n < 0): raise ValueError("n must be a non-negative integer.") if not isscalar(z): raise ValueError("z must be scalar.") m = int(m) n = int(n) # Ensure neither m nor n == 0 mm = max(1, m) nn = max(1, n) if iscomplex(z): q, qd = specfun.clqmn(mm, nn, z) else: q, qd = specfun.lqmn(mm, nn, z) return q[:(m+1), :(n+1)], qd[:(m+1), :(n+1)] def bernoulli(n): """Bernoulli numbers B0..Bn (inclusive). References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html """ if not isscalar(n) or (n < 0): raise ValueError("n must be a non-negative integer.") n = int(n) if (n < 2): n1 = 2 else: n1 = n return specfun.bernob(int(n1))[:(n+1)] def euler(n): """Euler numbers E(0), E(1), ..., E(n). The Euler numbers [1]_ are also known as the secant numbers. Because ``euler(n)`` returns floating point values, it does not give exact values for large `n`. The first inexact value is E(22). Parameters ---------- n : int The highest index of the Euler number to be returned. Returns ------- ndarray The Euler numbers [E(0), E(1), ..., E(n)]. The odd Euler numbers, which are all zero, are included. References ---------- .. [1] Sequence A122045, The On-Line Encyclopedia of Integer Sequences, https://oeis.org/A122045 .. [2] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. 
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html Examples -------- >>> from scipy.special import euler >>> euler(6) array([ 1., 0., -1., 0., 5., 0., -61.]) >>> euler(13).astype(np.int64) array([ 1, 0, -1, 0, 5, 0, -61, 0, 1385, 0, -50521, 0, 2702765, 0]) >>> euler(22)[-1] # Exact value of E(22) is -69348874393137901. -69348874393137976.0 """ if not isscalar(n) or (n < 0): raise ValueError("n must be a non-negative integer.") n = int(n) if (n < 2): n1 = 2 else: n1 = n return specfun.eulerb(n1)[:(n+1)] def lpn(n, z): """Legendre function of the first kind. Compute sequence of Legendre functions of the first kind (polynomials), Pn(z) and derivatives for all degrees from 0 to n (inclusive). See also special.legendre for polynomial class. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html """ if not (isscalar(n) and isscalar(z)): raise ValueError("arguments must be scalars.") n = _nonneg_int_or_fail(n, 'n', strict=False) if (n < 1): n1 = 1 else: n1 = n if iscomplex(z): pn, pd = specfun.clpn(n1, z) else: pn, pd = specfun.lpn(n1, z) return pn[:(n+1)], pd[:(n+1)] def lqn(n, z): """Legendre function of the second kind. Compute sequence of Legendre functions of the second kind, Qn(z) and derivatives for all degrees from 0 to n (inclusive). References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. 
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html """ if not (isscalar(n) and isscalar(z)): raise ValueError("arguments must be scalars.") n = _nonneg_int_or_fail(n, 'n', strict=False) if (n < 1): n1 = 1 else: n1 = n if iscomplex(z): qn, qd = specfun.clqn(n1, z) else: qn, qd = specfun.lqnb(n1, z) return qn[:(n+1)], qd[:(n+1)] def ai_zeros(nt): """ Compute `nt` zeros and values of the Airy function Ai and its derivative. Computes the first `nt` zeros, `a`, of the Airy function Ai(x); first `nt` zeros, `ap`, of the derivative of the Airy function Ai'(x); the corresponding values Ai(a'); and the corresponding values Ai'(a). Parameters ---------- nt : int Number of zeros to compute Returns ------- a : ndarray First `nt` zeros of Ai(x) ap : ndarray First `nt` zeros of Ai'(x) ai : ndarray Values of Ai(x) evaluated at first `nt` zeros of Ai'(x) aip : ndarray Values of Ai'(x) evaluated at first `nt` zeros of Ai(x) References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html """ kf = 1 if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): raise ValueError("nt must be a positive integer scalar.") return specfun.airyzo(nt, kf) def bi_zeros(nt): """ Compute `nt` zeros and values of the Airy function Bi and its derivative. Computes the first `nt` zeros, b, of the Airy function Bi(x); first `nt` zeros, b', of the derivative of the Airy function Bi'(x); the corresponding values Bi(b'); and the corresponding values Bi'(b). Parameters ---------- nt : int Number of zeros to compute Returns ------- b : ndarray First `nt` zeros of Bi(x) bp : ndarray First `nt` zeros of Bi'(x) bi : ndarray Values of Bi(x) evaluated at first `nt` zeros of Bi'(x) bip : ndarray Values of Bi'(x) evaluated at first `nt` zeros of Bi(x) References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. 
"Computation of Special Functions", John Wiley and Sons, 1996. https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html """ kf = 2 if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): raise ValueError("nt must be a positive integer scalar.") return specfun.airyzo(nt, kf) def lmbda(v, x): r"""Jahnke-Emden Lambda function, Lambdav(x). This function is defined as [2]_, .. math:: \Lambda_v(x) = \Gamma(v+1) \frac{J_v(x)}{(x/2)^v}, where :math:`\Gamma` is the gamma function and :math:`J_v` is the Bessel function of the first kind. Parameters ---------- v : float Order of the Lambda function x : float Value at which to evaluate the function and derivatives Returns ------- vl : ndarray Values of Lambda_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v. dl : ndarray Derivatives Lambda_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html .. [2] Jahnke, E. and Emde, F. "Tables of Functions with Formulae and Curves" (4th ed.), Dover, 1945 """ if not (isscalar(v) and isscalar(x)): raise ValueError("arguments must be scalars.") if (v < 0): raise ValueError("argument must be > 0.") n = int(v) v0 = v - n if (n < 1): n1 = 1 else: n1 = n v1 = n1 + v0 if (v != floor(v)): vm, vl, dl = specfun.lamv(v1, x) else: vm, vl, dl = specfun.lamn(v1, x) return vl[:(n+1)], dl[:(n+1)] def pbdv_seq(v, x): """Parabolic cylinder functions Dv(x) and derivatives. Parameters ---------- v : float Order of the parabolic cylinder function x : float Value at which to evaluate the function and derivatives Returns ------- dv : ndarray Values of D_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v. dp : ndarray Derivatives D_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. 
"Computation of Special Functions", John Wiley and Sons, 1996, chapter 13. https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html """ if not (isscalar(v) and isscalar(x)): raise ValueError("arguments must be scalars.") n = int(v) v0 = v-n if (n < 1): n1 = 1 else: n1 = n v1 = n1 + v0 dv, dp, pdf, pdd = specfun.pbdv(v1, x) return dv[:n1+1], dp[:n1+1] def pbvv_seq(v, x): """Parabolic cylinder functions Vv(x) and derivatives. Parameters ---------- v : float Order of the parabolic cylinder function x : float Value at which to evaluate the function and derivatives Returns ------- dv : ndarray Values of V_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v. dp : ndarray Derivatives V_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996, chapter 13. https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html """ if not (isscalar(v) and isscalar(x)): raise ValueError("arguments must be scalars.") n = int(v) v0 = v-n if (n <= 1): n1 = 1 else: n1 = n v1 = n1 + v0 dv, dp, pdf, pdd = specfun.pbvv(v1, x) return dv[:n1+1], dp[:n1+1] def pbdn_seq(n, z): """Parabolic cylinder functions Dn(z) and derivatives. Parameters ---------- n : int Order of the parabolic cylinder function z : complex Value at which to evaluate the function and derivatives Returns ------- dv : ndarray Values of D_i(z), for i=0, ..., i=n. dp : ndarray Derivatives D_i'(z), for i=0, ..., i=n. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996, chapter 13. 
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html """ if not (isscalar(n) and isscalar(z)): raise ValueError("arguments must be scalars.") if (floor(n) != n): raise ValueError("n must be an integer.") if (abs(n) <= 1): n1 = 1 else: n1 = n cpb, cpd = specfun.cpbdn(n1, z) return cpb[:n1+1], cpd[:n1+1] def ber_zeros(nt): """Compute nt zeros of the Kelvin function ber(x). References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html """ if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): raise ValueError("nt must be positive integer scalar.") return specfun.klvnzo(nt, 1) def bei_zeros(nt): """Compute nt zeros of the Kelvin function bei(x). References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html """ if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): raise ValueError("nt must be positive integer scalar.") return specfun.klvnzo(nt, 2) def ker_zeros(nt): """Compute nt zeros of the Kelvin function ker(x). References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html """ if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): raise ValueError("nt must be positive integer scalar.") return specfun.klvnzo(nt, 3) def kei_zeros(nt): """Compute nt zeros of the Kelvin function kei(x). """ if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): raise ValueError("nt must be positive integer scalar.") return specfun.klvnzo(nt, 4) def berp_zeros(nt): """Compute nt zeros of the Kelvin function ber'(x). References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. 
"Computation of Special Functions", John Wiley and Sons, 1996. https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html """ if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): raise ValueError("nt must be positive integer scalar.") return specfun.klvnzo(nt, 5) def beip_zeros(nt): """Compute nt zeros of the Kelvin function bei'(x). References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html """ if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): raise ValueError("nt must be positive integer scalar.") return specfun.klvnzo(nt, 6) def kerp_zeros(nt): """Compute nt zeros of the Kelvin function ker'(x). References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html """ if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): raise ValueError("nt must be positive integer scalar.") return specfun.klvnzo(nt, 7) def keip_zeros(nt): """Compute nt zeros of the Kelvin function kei'(x). References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html """ if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): raise ValueError("nt must be positive integer scalar.") return specfun.klvnzo(nt, 8) def kelvin_zeros(nt): """Compute nt zeros of all Kelvin functions. Returned in a length-8 tuple of arrays of length nt. The tuple contains the arrays of zeros of (ber, bei, ker, kei, ber', bei', ker', kei'). References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. 
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html """ if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): raise ValueError("nt must be positive integer scalar.") return (specfun.klvnzo(nt, 1), specfun.klvnzo(nt, 2), specfun.klvnzo(nt, 3), specfun.klvnzo(nt, 4), specfun.klvnzo(nt, 5), specfun.klvnzo(nt, 6), specfun.klvnzo(nt, 7), specfun.klvnzo(nt, 8)) def pro_cv_seq(m, n, c): """Characteristic values for prolate spheroidal wave functions. Compute a sequence of characteristic values for the prolate spheroidal wave functions for mode m and n'=m..n and spheroidal parameter c. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html """ if not (isscalar(m) and isscalar(n) and isscalar(c)): raise ValueError("Arguments must be scalars.") if (n != floor(n)) or (m != floor(m)): raise ValueError("Modes must be integers.") if (n-m > 199): raise ValueError("Difference between n and m is too large.") maxL = n-m+1 return specfun.segv(m, n, c, 1)[1][:maxL] def obl_cv_seq(m, n, c): """Characteristic values for oblate spheroidal wave functions. Compute a sequence of characteristic values for the oblate spheroidal wave functions for mode m and n'=m..n and spheroidal parameter c. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html """ if not (isscalar(m) and isscalar(n) and isscalar(c)): raise ValueError("Arguments must be scalars.") if (n != floor(n)) or (m != floor(m)): raise ValueError("Modes must be integers.") if (n-m > 199): raise ValueError("Difference between n and m is too large.") maxL = n-m+1 return specfun.segv(m, n, c, -1)[1][:maxL] def ellipk(m): r"""Complete elliptic integral of the first kind. This function is defined as .. 
math:: K(m) = \int_0^{\pi/2} [1 - m \sin(t)^2]^{-1/2} dt Parameters ---------- m : array_like The parameter of the elliptic integral. Returns ------- K : array_like Value of the elliptic integral. Notes ----- For more precision around point m = 1, use `ellipkm1`, which this function calls. The parameterization in terms of :math:`m` follows that of section 17.2 in [1]_. Other parameterizations in terms of the complementary parameter :math:`1 - m`, modular angle :math:`\sin^2(\alpha) = m`, or modulus :math:`k^2 = m` are also used, so be careful that you choose the correct parameter. See Also -------- ellipkm1 : Complete elliptic integral of the first kind around m = 1 ellipkinc : Incomplete elliptic integral of the first kind ellipe : Complete elliptic integral of the second kind ellipeinc : Incomplete elliptic integral of the second kind References ---------- .. [1] Milton Abramowitz and Irene A. Stegun, eds. Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables. New York: Dover, 1972. """ return ellipkm1(1 - asarray(m)) def comb(N, k, exact=False, repetition=False): """The number of combinations of N things taken k at a time. This is often expressed as "N choose k". Parameters ---------- N : int, ndarray Number of things. k : int, ndarray Number of elements taken. exact : bool, optional If `exact` is False, then floating point precision is used, otherwise exact long integer is computed. repetition : bool, optional If `repetition` is True, then the number of combinations with repetition is computed. Returns ------- val : int, float, ndarray The total number of combinations. See Also -------- binom : Binomial coefficient ufunc Notes ----- - Array arguments accepted only for exact=False case. - If k > N, N < 0, or k < 0, then a 0 is returned. 
Examples -------- >>> from scipy.special import comb >>> k = np.array([3, 4]) >>> n = np.array([10, 10]) >>> comb(n, k, exact=False) array([ 120., 210.]) >>> comb(10, 3, exact=True) 120L >>> comb(10, 3, exact=True, repetition=True) 220L """ if repetition: return comb(N + k - 1, k, exact) if exact: return _comb_int(N, k) else: k, N = asarray(k), asarray(N) cond = (k <= N) & (N >= 0) & (k >= 0) vals = binom(N, k) if isinstance(vals, np.ndarray): vals[~cond] = 0 elif not cond: vals = np.float64(0) return vals def perm(N, k, exact=False): """Permutations of N things taken k at a time, i.e., k-permutations of N. It's also known as "partial permutations". Parameters ---------- N : int, ndarray Number of things. k : int, ndarray Number of elements taken. exact : bool, optional If `exact` is False, then floating point precision is used, otherwise exact long integer is computed. Returns ------- val : int, ndarray The number of k-permutations of N. Notes ----- - Array arguments accepted only for exact=False case. - If k > N, N < 0, or k < 0, then a 0 is returned. Examples -------- >>> from scipy.special import perm >>> k = np.array([3, 4]) >>> n = np.array([10, 10]) >>> perm(n, k) array([ 720., 5040.]) >>> perm(10, 3, exact=True) 720 """ if exact: if (k > N) or (N < 0) or (k < 0): return 0 val = 1 for i in xrange(N - k + 1, N + 1): val *= i return val else: k, N = asarray(k), asarray(N) cond = (k <= N) & (N >= 0) & (k >= 0) vals = poch(N - k + 1, k) if isinstance(vals, np.ndarray): vals[~cond] = 0 elif not cond: vals = np.float64(0) return vals # http://stackoverflow.com/a/16327037/125507 def _range_prod(lo, hi): """ Product of a range of numbers. Returns the product of lo * (lo+1) * (lo+2) * ... * (hi-2) * (hi-1) * hi = hi! / (lo-1)! 
Breaks into smaller products first for speed: _range_prod(2, 9) = ((2*3)*(4*5))*((6*7)*(8*9)) """ if lo + 1 < hi: mid = (hi + lo) // 2 return _range_prod(lo, mid) * _range_prod(mid + 1, hi) if lo == hi: return lo return lo * hi def factorial(n, exact=False): """ The factorial of a number or array of numbers. The factorial of non-negative integer `n` is the product of all positive integers less than or equal to `n`:: n! = n * (n - 1) * (n - 2) * ... * 1 Parameters ---------- n : int or array_like of ints Input values. If ``n < 0``, the return value is 0. exact : bool, optional If True, calculate the answer exactly using long integer arithmetic. If False, result is approximated in floating point rapidly using the `gamma` function. Default is False. Returns ------- nf : float or int or ndarray Factorial of `n`, as integer or float depending on `exact`. Notes ----- For arrays with ``exact=True``, the factorial is computed only once, for the largest input, with each other result computed in the process. The output dtype is increased to ``int64`` or ``object`` if necessary. With ``exact=False`` the factorial is approximated using the gamma function: .. math:: n! 
= \\Gamma(n+1) Examples -------- >>> from scipy.special import factorial >>> arr = np.array([3, 4, 5]) >>> factorial(arr, exact=False) array([ 6., 24., 120.]) >>> factorial(arr, exact=True) array([ 6, 24, 120]) >>> factorial(5, exact=True) 120L """ if exact: if np.ndim(n) == 0: return 0 if n < 0 else math.factorial(n) else: n = asarray(n) un = np.unique(n).astype(object) # Convert to object array of long ints if np.int can't handle size if un[-1] > 20: dt = object elif un[-1] > 12: dt = np.int64 else: dt = np.int out = np.empty_like(n, dtype=dt) # Handle invalid/trivial values un = un[un > 1] out[n < 2] = 1 out[n < 0] = 0 # Calculate products of each range of numbers if un.size: val = math.factorial(un[0]) out[n == un[0]] = val for i in xrange(len(un) - 1): prev = un[i] + 1 current = un[i + 1] val *= _range_prod(prev, current) out[n == current] = val return out else: n = asarray(n) vals = gamma(n + 1) return where(n >= 0, vals, 0) def factorial2(n, exact=False): """Double factorial. This is the factorial with every second value skipped. E.g., ``7!! = 7 * 5 * 3 * 1``. It can be approximated numerically as:: n!! = special.gamma(n/2+1)*2**((m+1)/2)/sqrt(pi) n odd = 2**(n/2) * (n/2)! n even Parameters ---------- n : int or array_like Calculate ``n!!``. Arrays are only supported with `exact` set to False. If ``n < 0``, the return value is 0. exact : bool, optional The result can be approximated rapidly using the gamma-formula above (default). If `exact` is set to True, calculate the answer exactly using integer arithmetic. Returns ------- nff : float or int Double factorial of `n`, as an int or a float depending on `exact`. 
Examples -------- >>> from scipy.special import factorial2 >>> factorial2(7, exact=False) array(105.00000000000001) >>> factorial2(7, exact=True) 105L """ if exact: if n < -1: return 0 if n <= 0: return 1 val = 1 for k in xrange(n, 0, -2): val *= k return val else: n = asarray(n) vals = zeros(n.shape, 'd') cond1 = (n % 2) & (n >= -1) cond2 = (1-(n % 2)) & (n >= -1) oddn = extract(cond1, n) evenn = extract(cond2, n) nd2o = oddn / 2.0 nd2e = evenn / 2.0 place(vals, cond1, gamma(nd2o + 1) / sqrt(pi) * pow(2.0, nd2o + 0.5)) place(vals, cond2, gamma(nd2e + 1) * pow(2.0, nd2e)) return vals def factorialk(n, k, exact=True): """Multifactorial of n of order k, n(!!...!). This is the multifactorial of n skipping k values. For example, factorialk(17, 4) = 17!!!! = 17 * 13 * 9 * 5 * 1 In particular, for any integer ``n``, we have factorialk(n, 1) = factorial(n) factorialk(n, 2) = factorial2(n) Parameters ---------- n : int Calculate multifactorial. If `n` < 0, the return value is 0. k : int Order of multifactorial. exact : bool, optional If exact is set to True, calculate the answer exactly using integer arithmetic. Returns ------- val : int Multifactorial of `n`. Raises ------ NotImplementedError Raises when exact is False Examples -------- >>> from scipy.special import factorialk >>> factorialk(5, 1, exact=True) 120L >>> factorialk(5, 3, exact=True) 10L """ if exact: if n < 1-k: return 0 if n <= 0: return 1 val = 1 for j in xrange(n, 0, -k): val = val*j return val else: raise NotImplementedError def zeta(x, q=None, out=None): r""" Riemann or Hurwitz zeta function. Parameters ---------- x : array_like of float Input data, must be real q : array_like of float, optional Input data, must be real. Defaults to Riemann zeta. out : ndarray, optional Output array for the computed values. Returns ------- out : array_like Values of zeta(x). Notes ----- The two-argument version is the Hurwitz zeta function: .. 
math:: \zeta(x, q) = \sum_{k=0}^{\infty} \frac{1}{(k + q)^x}, Riemann zeta function corresponds to ``q = 1``. See Also -------- zetac Examples -------- >>> from scipy.special import zeta, polygamma, factorial Some specific values: >>> zeta(2), np.pi**2/6 (1.6449340668482266, 1.6449340668482264) >>> zeta(4), np.pi**4/90 (1.0823232337111381, 1.082323233711138) Relation to the `polygamma` function: >>> m = 3 >>> x = 1.25 >>> polygamma(m, x) array(2.782144009188397) >>> (-1)**(m+1) * factorial(m) * zeta(m+1, x) 2.7821440091883969 """ if q is None: q = 1 return _zeta(x, q, out)
64,492
28.529762
94
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/lambertw.py
from __future__ import division, print_function, absolute_import from ._ufuncs import _lambertw def lambertw(z, k=0, tol=1e-8): r""" lambertw(z, k=0, tol=1e-8) Lambert W function. The Lambert W function `W(z)` is defined as the inverse function of ``w * exp(w)``. In other words, the value of ``W(z)`` is such that ``z = W(z) * exp(W(z))`` for any complex number ``z``. The Lambert W function is a multivalued function with infinitely many branches. Each branch gives a separate solution of the equation ``z = w exp(w)``. Here, the branches are indexed by the integer `k`. Parameters ---------- z : array_like Input argument. k : int, optional Branch index. tol : float, optional Evaluation tolerance. Returns ------- w : array `w` will have the same shape as `z`. Notes ----- All branches are supported by `lambertw`: * ``lambertw(z)`` gives the principal solution (branch 0) * ``lambertw(z, k)`` gives the solution on branch `k` The Lambert W function has two partially real branches: the principal branch (`k = 0`) is real for real ``z > -1/e``, and the ``k = -1`` branch is real for ``-1/e < z < 0``. All branches except ``k = 0`` have a logarithmic singularity at ``z = 0``. **Possible issues** The evaluation can become inaccurate very close to the branch point at ``-1/e``. In some corner cases, `lambertw` might currently fail to converge, or can end up on the wrong branch. **Algorithm** Halley's iteration is used to invert ``w * exp(w)``, using a first-order asymptotic approximation (O(log(w)) or `O(w)`) as the initial estimate. The definition, implementation and choice of branches is based on [2]_. See Also -------- wrightomega : the Wright Omega function References ---------- .. [1] http://en.wikipedia.org/wiki/Lambert_W_function .. [2] Corless et al, "On the Lambert W function", Adv. Comp. Math. 5 (1996) 329-359. 
http://www.apmaths.uwo.ca/~djeffrey/Offprints/W-adv-cm.pdf Examples -------- The Lambert W function is the inverse of ``w exp(w)``: >>> from scipy.special import lambertw >>> w = lambertw(1) >>> w (0.56714329040978384+0j) >>> w * np.exp(w) (1.0+0j) Any branch gives a valid inverse: >>> w = lambertw(1, k=3) >>> w (-2.8535817554090377+17.113535539412148j) >>> w*np.exp(w) (1.0000000000000002+1.609823385706477e-15j) **Applications to equation-solving** The Lambert W function may be used to solve various kinds of equations, such as finding the value of the infinite power tower :math:`z^{z^{z^{\ldots}}}`: >>> def tower(z, n): ... if n == 0: ... return z ... return z ** tower(z, n-1) ... >>> tower(0.5, 100) 0.641185744504986 >>> -lambertw(-np.log(0.5)) / np.log(0.5) (0.64118574450498589+0j) """ return _lambertw(z, k, tol)
3,041
27.166667
76
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/tests/test_logit.py
from __future__ import division, print_function, absolute_import import numpy as np from numpy.testing import (assert_equal, assert_almost_equal, assert_allclose) from scipy.special import logit, expit class TestLogit(object): def check_logit_out(self, dtype, expected): a = np.linspace(0,1,10) a = np.array(a, dtype=dtype) olderr = np.seterr(divide='ignore') try: actual = logit(a) finally: np.seterr(**olderr) assert_almost_equal(actual, expected) assert_equal(actual.dtype, np.dtype(dtype)) def test_float32(self): expected = np.array([-np.inf, -2.07944155, -1.25276291, -0.69314718, -0.22314353, 0.22314365, 0.6931473, 1.25276303, 2.07944155, np.inf], dtype=np.float32) self.check_logit_out('f4', expected) def test_float64(self): expected = np.array([-np.inf, -2.07944154, -1.25276297, -0.69314718, -0.22314355, 0.22314355, 0.69314718, 1.25276297, 2.07944154, np.inf]) self.check_logit_out('f8', expected) def test_nan(self): expected = np.array([np.nan]*4) olderr = np.seterr(invalid='ignore') try: actual = logit(np.array([-3., -2., 2., 3.])) finally: np.seterr(**olderr) assert_equal(expected, actual) class TestExpit(object): def check_expit_out(self, dtype, expected): a = np.linspace(-4,4,10) a = np.array(a, dtype=dtype) actual = expit(a) assert_almost_equal(actual, expected) assert_equal(actual.dtype, np.dtype(dtype)) def test_float32(self): expected = np.array([0.01798621, 0.04265125, 0.09777259, 0.20860852, 0.39068246, 0.60931754, 0.79139149, 0.9022274, 0.95734876, 0.98201376], dtype=np.float32) self.check_expit_out('f4',expected) def test_float64(self): expected = np.array([0.01798621, 0.04265125, 0.0977726, 0.20860853, 0.39068246, 0.60931754, 0.79139147, 0.9022274, 0.95734875, 0.98201379]) self.check_expit_out('f8', expected) def test_large(self): for dtype in (np.float32, np.float64, np.longdouble): for n in (88, 89, 709, 710, 11356, 11357): n = np.array(n, dtype=dtype) assert_allclose(expit(n), 1.0, atol=1e-20) assert_allclose(expit(-n), 0.0, atol=1e-20) 
assert_equal(expit(n).dtype, dtype) assert_equal(expit(-n).dtype, dtype)
2,911
34.512195
70
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/tests/test_precompute_gammainc.py
from __future__ import division, print_function, absolute_import import numpy as np import pytest from scipy.special._testutils import MissingModule, check_version from scipy.special._mptestutils import ( Arg, IntArg, mp_assert_allclose, assert_mpmath_equal) from scipy.special._precompute.gammainc_asy import ( compute_g, compute_alpha, compute_d) from scipy.special._precompute.gammainc_data import gammainc, gammaincc try: import sympy except ImportError: sympy = MissingModule('sympy') try: import mpmath as mp except ImportError: mp = MissingModule('mpmath') _is_32bit_platform = np.intp(0).itemsize < 8 @check_version(mp, '0.19') def test_g(): # Test data for the g_k. See DLMF 5.11.4. with mp.workdps(30): g = [mp.mpf(1), mp.mpf(1)/12, mp.mpf(1)/288, -mp.mpf(139)/51840, -mp.mpf(571)/2488320, mp.mpf(163879)/209018880, mp.mpf(5246819)/75246796800] mp_assert_allclose(compute_g(7), g) @pytest.mark.slow @check_version(mp, '0.19') @check_version(sympy, '0.7') @pytest.mark.xfail(condition=_is_32bit_platform, reason="rtol only 2e-11, see gh-6938") def test_alpha(): # Test data for the alpha_k. See DLMF 8.12.14. with mp.workdps(30): alpha = [mp.mpf(0), mp.mpf(1), mp.mpf(1)/3, mp.mpf(1)/36, -mp.mpf(1)/270, mp.mpf(1)/4320, mp.mpf(1)/17010, -mp.mpf(139)/5443200, mp.mpf(1)/204120] mp_assert_allclose(compute_alpha(9), alpha) @pytest.mark.xslow @check_version(mp, '0.19') @check_version(sympy, '0.7') def test_d(): # Compare the d_{k, n} to the results in appendix F of [1]. # # Sources # ------- # [1] DiDonato and Morris, Computation of the Incomplete Gamma # Function Ratios and their Inverse, ACM Transactions on # Mathematical Software, 1986. 
with mp.workdps(50): dataset = [(0, 0, -mp.mpf('0.333333333333333333333333333333')), (0, 12, mp.mpf('0.102618097842403080425739573227e-7')), (1, 0, -mp.mpf('0.185185185185185185185185185185e-2')), (1, 12, mp.mpf('0.119516285997781473243076536700e-7')), (2, 0, mp.mpf('0.413359788359788359788359788360e-2')), (2, 12, -mp.mpf('0.140925299108675210532930244154e-7')), (3, 0, mp.mpf('0.649434156378600823045267489712e-3')), (3, 12, -mp.mpf('0.191111684859736540606728140873e-7')), (4, 0, -mp.mpf('0.861888290916711698604702719929e-3')), (4, 12, mp.mpf('0.288658297427087836297341274604e-7')), (5, 0, -mp.mpf('0.336798553366358150308767592718e-3')), (5, 12, mp.mpf('0.482409670378941807563762631739e-7')), (6, 0, mp.mpf('0.531307936463992223165748542978e-3')), (6, 12, -mp.mpf('0.882860074633048352505085243179e-7')), (7, 0, mp.mpf('0.344367606892377671254279625109e-3')), (7, 12, -mp.mpf('0.175629733590604619378669693914e-6')), (8, 0, -mp.mpf('0.652623918595309418922034919727e-3')), (8, 12, mp.mpf('0.377358774161109793380344937299e-6')), (9, 0, -mp.mpf('0.596761290192746250124390067179e-3')), (9, 12, mp.mpf('0.870823417786464116761231237189e-6'))] d = compute_d(10, 13) res = [] for k, n, std in dataset: res.append(d[k][n]) std = map(lambda x: x[2], dataset) mp_assert_allclose(res, std) @check_version(mp, '0.19') def test_gammainc(): # Quick check that the gammainc in # special._precompute.gammainc_data agrees with mpmath's # gammainc. assert_mpmath_equal(gammainc, lambda a, x: mp.gammainc(a, b=x, regularized=True), [Arg(0, 100, inclusive_a=False), Arg(0, 100)], nan_ok=False, rtol=1e-17, n=50, dps=50) @pytest.mark.xslow @check_version(mp, '0.19') def test_gammaincc(): # Check that the gammaincc in special._precompute.gammainc_data # agrees with mpmath's gammainc. 
assert_mpmath_equal(lambda a, x: gammaincc(a, x, dps=1000), lambda a, x: mp.gammainc(a, a=x, regularized=True), [Arg(20, 100), Arg(20, 100)], nan_ok=False, rtol=1e-17, n=50, dps=1000) # Test the fast integer path assert_mpmath_equal(gammaincc, lambda a, x: mp.gammainc(a, a=x, regularized=True), [IntArg(1, 100), Arg(0, 100)], nan_ok=False, rtol=1e-17, n=50, dps=50)
4,658
38.820513
87
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/tests/test_data.py
from __future__ import division, print_function, absolute_import import os import numpy as np from numpy import arccosh, arcsinh, arctanh from scipy._lib._numpy_compat import suppress_warnings import pytest from scipy.special import ( lpn, lpmn, lpmv, lqn, lqmn, sph_harm, eval_legendre, eval_hermite, eval_laguerre, eval_genlaguerre, binom, cbrt, expm1, log1p, zeta, jn, jv, yn, yv, iv, kv, kn, gamma, gammaln, gammainc, gammaincc, gammaincinv, gammainccinv, digamma, beta, betainc, betaincinv, poch, ellipe, ellipeinc, ellipk, ellipkm1, ellipkinc, ellipj, erf, erfc, erfinv, erfcinv, exp1, expi, expn, bdtrik, btdtr, btdtri, btdtria, btdtrib, chndtr, gdtr, gdtrc, gdtrix, gdtrib, nbdtrik, pdtrik, owens_t, mathieu_a, mathieu_b, mathieu_cem, mathieu_sem, mathieu_modcem1, mathieu_modsem1, mathieu_modcem2, mathieu_modsem2, ellip_harm, ellip_harm_2, spherical_jn, spherical_yn, ) from scipy.integrate import IntegrationWarning from scipy.special._testutils import FuncData DATASETS_BOOST = np.load(os.path.join(os.path.dirname(__file__), "data", "boost.npz")) DATASETS_GSL = np.load(os.path.join(os.path.dirname(__file__), "data", "gsl.npz")) DATASETS_LOCAL = np.load(os.path.join(os.path.dirname(__file__), "data", "local.npz")) def data(func, dataname, *a, **kw): kw.setdefault('dataname', dataname) return FuncData(func, DATASETS_BOOST[dataname], *a, **kw) def data_gsl(func, dataname, *a, **kw): kw.setdefault('dataname', dataname) return FuncData(func, DATASETS_GSL[dataname], *a, **kw) def data_local(func, dataname, *a, **kw): kw.setdefault('dataname', dataname) return FuncData(func, DATASETS_LOCAL[dataname], *a, **kw) def ellipk_(k): return ellipk(k*k) def ellipkinc_(f, k): return ellipkinc(f, k*k) def ellipe_(k): return ellipe(k*k) def ellipeinc_(f, k): return ellipeinc(f, k*k) def ellipj_(k): return ellipj(k*k) def zeta_(x): return zeta(x, 1.) 
# ---------------------------------------------------------------------
# Thin wrappers adapting scipy call conventions (argument order, dtype,
# normalization) to the layout of the reference tables.
# ---------------------------------------------------------------------

def assoc_legendre_p_boost_(nu, mu, x):
    # the boost test data is for integer orders only
    return lpmv(mu, nu.astype(int), x)

def legendre_p_via_assoc_(nu, x):
    # Legendre P_nu(x) as the order-0 associated Legendre function.
    return lpmv(0, nu, x)

def lpn_(n, x):
    # lpn returns all degrees 0..n; keep only the highest one.
    return lpn(n.astype('l'), x)[0][-1]

def lqn_(n, x):
    return lqn(n.astype('l'), x)[0][-1]

def legendre_p_via_lpmn(n, x):
    return lpmn(0, n, x)[0][0,-1]

def legendre_q_via_lqmn(n, x):
    return lqmn(0, n, x)[0][0,-1]

def mathieu_ce_rad(m, q, x):
    # scipy's Mathieu functions take the angle in degrees; the GSL
    # tables are in radians.
    return mathieu_cem(m, q, x*180/np.pi)[0]

def mathieu_se_rad(m, q, x):
    return mathieu_sem(m, q, x*180/np.pi)[0]

def mathieu_mc1_scaled(m, q, x):
    # GSL follows a different normalization:
    # we follow Abramowitz & Stegun, GSL apparently uses something else,
    # hence the sqrt(pi/2) rescaling.
    return mathieu_modcem1(m, q, x)[0] * np.sqrt(np.pi/2)

def mathieu_ms1_scaled(m, q, x):
    return mathieu_modsem1(m, q, x)[0] * np.sqrt(np.pi/2)

def mathieu_mc2_scaled(m, q, x):
    return mathieu_modcem2(m, q, x)[0] * np.sqrt(np.pi/2)

def mathieu_ms2_scaled(m, q, x):
    return mathieu_modsem2(m, q, x)[0] * np.sqrt(np.pi/2)

# eval_* wrappers: the '_ld'/'_dd' suffix encodes the dtype the degree is
# cast to ('l' = long -> recurrence code path, 'd' = double -> the
# hypergeometric code path), so both implementations get exercised.

def eval_legendre_ld(n, x):
    return eval_legendre(n.astype('l'), x)

def eval_legendre_dd(n, x):
    return eval_legendre(n.astype('d'), x)

def eval_hermite_ld(n, x):
    return eval_hermite(n.astype('l'), x)

def eval_laguerre_ld(n, x):
    return eval_laguerre(n.astype('l'), x)

def eval_laguerre_dd(n, x):
    return eval_laguerre(n.astype('d'), x)

def eval_genlaguerre_ldd(n, a, x):
    return eval_genlaguerre(n.astype('l'), a, x)

def eval_genlaguerre_ddd(n, a, x):
    return eval_genlaguerre(n.astype('d'), a, x)

# *_comp wrappers evaluate a function at the complemented probability
# (1-p), matching the "complement" columns of the Boost tables.

def bdtrik_comp(y, n, p):
    return bdtrik(1-y, n, p)

def btdtri_comp(a, b, p):
    return btdtri(a, b, 1-p)

def btdtria_comp(p, b, x):
    return btdtria(1-p, b, x)

def btdtrib_comp(a, p, x):
    return btdtrib(a, 1-p, x)

# gdtr* wrappers fix the gamma scale parameter at 1.0, the value the
# reference tables were generated with.

def gdtr_(p, x):
    return gdtr(1.0, p, x)

def gdtrc_(p, x):
    return gdtrc(1.0, p, x)

def gdtrix_(b, p):
    return gdtrix(1.0, b, p)

def gdtrix_comp(b, p):
    return gdtrix(1.0, b, 1-p)

def gdtrib_(p, x):
    return gdtrib(1.0, p, x)

def gdtrib_comp(p, x):
    return gdtrib(1.0, 1-p, x)

def nbdtrik_comp(y, n, p):
    return nbdtrik(1-y, n, p)

def pdtrik_comp(p, m):
    return pdtrik(1-p, m)

def poch_(z, m):
    # Boost tabulates the gamma-delta *ratio*, i.e. the reciprocal of
    # the Pochhammer symbol.
    return 1.0 / poch(z, m)

def poch_minus(z, m):
    return 1.0 / poch(z, -m)

def spherical_jn_(n, x):
    return spherical_jn(n.astype('l'), x)

def spherical_yn_(n, x):
    return spherical_yn(n.astype('l'), x)

def sph_harm_(m, n, theta, phi):
    # Split the complex result into (real, imag) so it can be compared
    # against the two real columns in the table.
    y = sph_harm(m, n, theta, phi)
    return (y.real, y.imag)

def cexpm1(x, y):
    # Complex expm1 evaluated from separate real/imag argument columns.
    z = expm1(x + 1j*y)
    return z.real, z.imag

def clog1p(x, y):
    z = log1p(x + 1j*y)
    return z.real, z.imag


# ---------------------------------------------------------------------
# Test cases against the Boost tables.  Positional args after the table
# name are the argument column indices, then the result column index;
# tolerances are per-dataset.
# ---------------------------------------------------------------------
BOOST_TESTS = [
        data(arccosh, 'acosh_data_ipp-acosh_data', 0, 1, rtol=5e-13),
        data(arccosh, 'acosh_data_ipp-acosh_data', 0j, 1, rtol=5e-13),
        data(arcsinh, 'asinh_data_ipp-asinh_data', 0, 1, rtol=1e-11),
        data(arcsinh, 'asinh_data_ipp-asinh_data', 0j, 1, rtol=1e-11),

        data(arctanh, 'atanh_data_ipp-atanh_data', 0, 1, rtol=1e-11),
        data(arctanh, 'atanh_data_ipp-atanh_data', 0j, 1, rtol=1e-11),

        data(assoc_legendre_p_boost_, 'assoc_legendre_p_ipp-assoc_legendre_p', (0,1,2), 3, rtol=1e-11),

        data(legendre_p_via_assoc_, 'legendre_p_ipp-legendre_p', (0,1), 2, rtol=1e-11),
        data(legendre_p_via_assoc_, 'legendre_p_large_ipp-legendre_p_large', (0,1), 2, rtol=7e-14),
        data(legendre_p_via_lpmn, 'legendre_p_ipp-legendre_p', (0,1), 2, rtol=5e-14, vectorized=False),
        data(legendre_p_via_lpmn, 'legendre_p_large_ipp-legendre_p_large', (0,1), 2, rtol=7e-14, vectorized=False),
        data(lpn_, 'legendre_p_ipp-legendre_p', (0,1), 2, rtol=5e-14, vectorized=False),
        data(lpn_, 'legendre_p_large_ipp-legendre_p_large', (0,1), 2, rtol=3e-13, vectorized=False),
        data(eval_legendre_ld, 'legendre_p_ipp-legendre_p', (0,1), 2, rtol=6e-14),
        data(eval_legendre_ld, 'legendre_p_large_ipp-legendre_p_large', (0,1), 2, rtol=2e-13),
        data(eval_legendre_dd, 'legendre_p_ipp-legendre_p', (0,1), 2, rtol=2e-14),
        data(eval_legendre_dd, 'legendre_p_large_ipp-legendre_p_large', (0,1), 2, rtol=2e-13),

        data(lqn_, 'legendre_p_ipp-legendre_p', (0,1), 3, rtol=2e-14, vectorized=False),
        data(lqn_, 'legendre_p_large_ipp-legendre_p_large', (0,1), 3, rtol=2e-12, vectorized=False),
        data(legendre_q_via_lqmn, 'legendre_p_ipp-legendre_p', (0,1), 3, rtol=2e-14, vectorized=False),
        data(legendre_q_via_lqmn, 'legendre_p_large_ipp-legendre_p_large', (0,1), 3, rtol=2e-12, vectorized=False),

        data(beta, 'beta_exp_data_ipp-beta_exp_data', (0,1), 2, rtol=1e-13),
        data(beta, 'beta_exp_data_ipp-beta_exp_data', (0,1), 2, rtol=1e-13),
        data(beta, 'beta_small_data_ipp-beta_small_data', (0,1), 2),
        data(beta, 'beta_med_data_ipp-beta_med_data', (0,1), 2, rtol=5e-13),

        data(betainc, 'ibeta_small_data_ipp-ibeta_small_data', (0,1,2), 5, rtol=6e-15),
        data(betainc, 'ibeta_data_ipp-ibeta_data', (0,1,2), 5, rtol=5e-13),
        data(betainc, 'ibeta_int_data_ipp-ibeta_int_data', (0,1,2), 5, rtol=2e-14),
        data(betainc, 'ibeta_large_data_ipp-ibeta_large_data', (0,1,2), 5, rtol=4e-10),

        data(betaincinv, 'ibeta_inv_data_ipp-ibeta_inv_data', (0,1,2), 3, rtol=1e-5),

        data(btdtr, 'ibeta_small_data_ipp-ibeta_small_data', (0,1,2), 5, rtol=6e-15),
        data(btdtr, 'ibeta_data_ipp-ibeta_data', (0,1,2), 5, rtol=4e-13),
        data(btdtr, 'ibeta_int_data_ipp-ibeta_int_data', (0,1,2), 5, rtol=2e-14),
        data(btdtr, 'ibeta_large_data_ipp-ibeta_large_data', (0,1,2), 5, rtol=4e-10),

        data(btdtri, 'ibeta_inv_data_ipp-ibeta_inv_data', (0,1,2), 3, rtol=1e-5),
        data(btdtri_comp, 'ibeta_inv_data_ipp-ibeta_inv_data', (0,1,2), 4, rtol=8e-7),

        data(btdtria, 'ibeta_inva_data_ipp-ibeta_inva_data', (2,0,1), 3, rtol=5e-9),
        data(btdtria_comp, 'ibeta_inva_data_ipp-ibeta_inva_data', (2,0,1), 4, rtol=5e-9),

        data(btdtrib, 'ibeta_inva_data_ipp-ibeta_inva_data', (0,2,1), 5, rtol=5e-9),
        data(btdtrib_comp, 'ibeta_inva_data_ipp-ibeta_inva_data', (0,2,1), 6, rtol=5e-9),

        data(binom, 'binomial_data_ipp-binomial_data', (0,1), 2, rtol=1e-13),
        data(binom, 'binomial_large_data_ipp-binomial_large_data', (0,1), 2, rtol=5e-13),

        data(bdtrik, 'binomial_quantile_ipp-binomial_quantile_data', (2,0,1), 3, rtol=5e-9),
        data(bdtrik_comp, 'binomial_quantile_ipp-binomial_quantile_data', (2,0,1), 4, rtol=5e-9),

        data(nbdtrik, 'negative_binomial_quantile_ipp-negative_binomial_quantile_data', (2,0,1), 3, rtol=4e-9),
        data(nbdtrik_comp, 'negative_binomial_quantile_ipp-negative_binomial_quantile_data', (2,0,1), 4, rtol=4e-9),

        data(pdtrik, 'poisson_quantile_ipp-poisson_quantile_data', (1,0), 2, rtol=3e-9),
        data(pdtrik_comp, 'poisson_quantile_ipp-poisson_quantile_data', (1,0), 3, rtol=4e-9),

        data(cbrt, 'cbrt_data_ipp-cbrt_data', 1, 0),

        data(digamma, 'digamma_data_ipp-digamma_data', 0, 1),
        data(digamma, 'digamma_data_ipp-digamma_data', 0j, 1),
        data(digamma, 'digamma_neg_data_ipp-digamma_neg_data', 0, 1, rtol=2e-13),
        data(digamma, 'digamma_neg_data_ipp-digamma_neg_data', 0j, 1, rtol=1e-13),
        data(digamma, 'digamma_root_data_ipp-digamma_root_data', 0, 1, rtol=1e-15),
        data(digamma, 'digamma_root_data_ipp-digamma_root_data', 0j, 1, rtol=1e-15),
        data(digamma, 'digamma_small_data_ipp-digamma_small_data', 0, 1, rtol=1e-15),
        data(digamma, 'digamma_small_data_ipp-digamma_small_data', 0j, 1, rtol=1e-14),

        data(ellipk_, 'ellint_k_data_ipp-ellint_k_data', 0, 1),
        data(ellipkinc_, 'ellint_f_data_ipp-ellint_f_data', (0,1), 2, rtol=1e-14),
        data(ellipe_, 'ellint_e_data_ipp-ellint_e_data', 0, 1),
        data(ellipeinc_, 'ellint_e2_data_ipp-ellint_e2_data', (0,1), 2, rtol=1e-14),

        data(erf, 'erf_data_ipp-erf_data', 0, 1),
        data(erf, 'erf_data_ipp-erf_data', 0j, 1, rtol=1e-13),
        data(erfc, 'erf_data_ipp-erf_data', 0, 2, rtol=6e-15),
        data(erf, 'erf_large_data_ipp-erf_large_data', 0, 1),
        data(erf, 'erf_large_data_ipp-erf_large_data', 0j, 1),
        data(erfc, 'erf_large_data_ipp-erf_large_data', 0, 2, rtol=4e-14),
        data(erf, 'erf_small_data_ipp-erf_small_data', 0, 1),
        data(erf, 'erf_small_data_ipp-erf_small_data', 0j, 1, rtol=1e-13),
        data(erfc, 'erf_small_data_ipp-erf_small_data', 0, 2),

        data(erfinv, 'erf_inv_data_ipp-erf_inv_data', 0, 1),
        data(erfcinv, 'erfc_inv_data_ipp-erfc_inv_data', 0, 1),
        data(erfcinv, 'erfc_inv_big_data_ipp-erfc_inv_big_data2', 0, 1),

        data(exp1, 'expint_1_data_ipp-expint_1_data', 1, 2, rtol=1e-13),
        data(exp1, 'expint_1_data_ipp-expint_1_data', 1j, 2, rtol=5e-9),
        data(expi, 'expinti_data_ipp-expinti_data', 0, 1, rtol=1e-13),
        data(expi, 'expinti_data_double_ipp-expinti_data_double', 0, 1, rtol=1e-13),

        data(expn, 'expint_small_data_ipp-expint_small_data', (0,1), 2),
        data(expn, 'expint_data_ipp-expint_data', (0,1), 2, rtol=1e-14),

        data(gamma, 'test_gamma_data_ipp-near_0', 0, 1),
        data(gamma, 'test_gamma_data_ipp-near_1', 0, 1),
        data(gamma, 'test_gamma_data_ipp-near_2', 0, 1),
        data(gamma, 'test_gamma_data_ipp-near_m10', 0, 1),
        data(gamma, 'test_gamma_data_ipp-near_m55', 0, 1, rtol=7e-12),
        data(gamma, 'test_gamma_data_ipp-factorials', 0, 1, rtol=4e-14),
        data(gamma, 'test_gamma_data_ipp-near_0', 0j, 1, rtol=2e-9),
        data(gamma, 'test_gamma_data_ipp-near_1', 0j, 1, rtol=2e-9),
        data(gamma, 'test_gamma_data_ipp-near_2', 0j, 1, rtol=2e-9),
        data(gamma, 'test_gamma_data_ipp-near_m10', 0j, 1, rtol=2e-9),
        data(gamma, 'test_gamma_data_ipp-near_m55', 0j, 1, rtol=2e-9),
        data(gamma, 'test_gamma_data_ipp-factorials', 0j, 1, rtol=2e-13),

        data(gammaln, 'test_gamma_data_ipp-near_0', 0, 2, rtol=5e-11),
        data(gammaln, 'test_gamma_data_ipp-near_1', 0, 2, rtol=5e-11),
        data(gammaln, 'test_gamma_data_ipp-near_2', 0, 2, rtol=2e-10),
        data(gammaln, 'test_gamma_data_ipp-near_m10', 0, 2, rtol=5e-11),
        data(gammaln, 'test_gamma_data_ipp-near_m55', 0, 2, rtol=5e-11),
        data(gammaln, 'test_gamma_data_ipp-factorials', 0, 2),

        data(gammainc, 'igamma_small_data_ipp-igamma_small_data', (0,1), 5, rtol=5e-15),
        data(gammainc, 'igamma_med_data_ipp-igamma_med_data', (0,1), 5, rtol=2e-13),
        data(gammainc, 'igamma_int_data_ipp-igamma_int_data', (0,1), 5, rtol=2e-13),
        data(gammainc, 'igamma_big_data_ipp-igamma_big_data', (0,1), 5, rtol=1e-12),

        data(gdtr_, 'igamma_small_data_ipp-igamma_small_data', (0,1), 5, rtol=1e-13),
        data(gdtr_, 'igamma_med_data_ipp-igamma_med_data', (0,1), 5, rtol=2e-13),
        data(gdtr_, 'igamma_int_data_ipp-igamma_int_data', (0,1), 5, rtol=2e-13),
        data(gdtr_, 'igamma_big_data_ipp-igamma_big_data', (0,1), 5, rtol=2e-9),

        data(gammaincc, 'igamma_small_data_ipp-igamma_small_data', (0,1), 3, rtol=1e-13),
        data(gammaincc, 'igamma_med_data_ipp-igamma_med_data', (0,1), 3, rtol=2e-13),
        data(gammaincc, 'igamma_int_data_ipp-igamma_int_data', (0,1), 3, rtol=4e-14),
        data(gammaincc, 'igamma_big_data_ipp-igamma_big_data', (0,1), 3, rtol=1e-11),

        data(gdtrc_, 'igamma_small_data_ipp-igamma_small_data', (0,1), 3, rtol=1e-13),
        data(gdtrc_, 'igamma_med_data_ipp-igamma_med_data', (0,1), 3, rtol=2e-13),
        data(gdtrc_, 'igamma_int_data_ipp-igamma_int_data', (0,1), 3, rtol=4e-14),
        data(gdtrc_, 'igamma_big_data_ipp-igamma_big_data', (0,1), 3, rtol=1e-11),

        data(gdtrib_, 'igamma_inva_data_ipp-igamma_inva_data', (1,0), 2, rtol=5e-9),
        data(gdtrib_comp, 'igamma_inva_data_ipp-igamma_inva_data', (1,0), 3, rtol=5e-9),

        data(poch_, 'tgamma_delta_ratio_data_ipp-tgamma_delta_ratio_data', (0,1), 2, rtol=2e-13),
        data(poch_, 'tgamma_delta_ratio_int_ipp-tgamma_delta_ratio_int', (0,1), 2,),
        data(poch_, 'tgamma_delta_ratio_int2_ipp-tgamma_delta_ratio_int2', (0,1), 2,),
        data(poch_minus, 'tgamma_delta_ratio_data_ipp-tgamma_delta_ratio_data', (0,1), 3, rtol=2e-13),
        data(poch_minus, 'tgamma_delta_ratio_int_ipp-tgamma_delta_ratio_int', (0,1), 3),
        data(poch_minus, 'tgamma_delta_ratio_int2_ipp-tgamma_delta_ratio_int2', (0,1), 3),

        data(eval_hermite_ld, 'hermite_ipp-hermite', (0,1), 2, rtol=2e-14),
        data(eval_laguerre_ld, 'laguerre2_ipp-laguerre2', (0,1), 2, rtol=7e-12),
        data(eval_laguerre_dd, 'laguerre2_ipp-laguerre2', (0,1), 2, knownfailure='hyp2f1 insufficiently accurate.'),
        data(eval_genlaguerre_ldd, 'laguerre3_ipp-laguerre3', (0,1,2), 3, rtol=2e-13),
        data(eval_genlaguerre_ddd, 'laguerre3_ipp-laguerre3', (0,1,2), 3, knownfailure='hyp2f1 insufficiently accurate.'),

        data(log1p, 'log1p_expm1_data_ipp-log1p_expm1_data', 0, 1),
        data(expm1, 'log1p_expm1_data_ipp-log1p_expm1_data', 0, 2),

        data(iv, 'bessel_i_data_ipp-bessel_i_data', (0,1), 2, rtol=1e-12),
        data(iv, 'bessel_i_data_ipp-bessel_i_data', (0,1j), 2, rtol=2e-10, atol=1e-306),
        data(iv, 'bessel_i_int_data_ipp-bessel_i_int_data', (0,1), 2, rtol=1e-9),
        data(iv, 'bessel_i_int_data_ipp-bessel_i_int_data', (0,1j), 2, rtol=2e-10),

        data(jn, 'bessel_j_int_data_ipp-bessel_j_int_data', (0,1), 2, rtol=1e-12),
        data(jn, 'bessel_j_int_data_ipp-bessel_j_int_data', (0,1j), 2, rtol=1e-12),
        data(jn, 'bessel_j_large_data_ipp-bessel_j_large_data', (0,1), 2, rtol=6e-11),
        data(jn, 'bessel_j_large_data_ipp-bessel_j_large_data', (0,1j), 2, rtol=6e-11),

        data(jv, 'bessel_j_int_data_ipp-bessel_j_int_data', (0,1), 2, rtol=1e-12),
        data(jv, 'bessel_j_int_data_ipp-bessel_j_int_data', (0,1j), 2, rtol=1e-12),
        data(jv, 'bessel_j_data_ipp-bessel_j_data', (0,1), 2, rtol=1e-12),
        data(jv, 'bessel_j_data_ipp-bessel_j_data', (0,1j), 2, rtol=1e-12),

        data(kn, 'bessel_k_int_data_ipp-bessel_k_int_data', (0,1), 2, rtol=1e-12),

        data(kv, 'bessel_k_int_data_ipp-bessel_k_int_data', (0,1), 2, rtol=1e-12),
        data(kv, 'bessel_k_int_data_ipp-bessel_k_int_data', (0,1j), 2, rtol=1e-12),
        data(kv, 'bessel_k_data_ipp-bessel_k_data', (0,1), 2, rtol=1e-12),
        data(kv, 'bessel_k_data_ipp-bessel_k_data', (0,1j), 2, rtol=1e-12),

        data(yn, 'bessel_y01_data_ipp-bessel_y01_data', (0,1), 2, rtol=1e-12),
        data(yn, 'bessel_yn_data_ipp-bessel_yn_data', (0,1), 2, rtol=1e-12),

        data(yv, 'bessel_yn_data_ipp-bessel_yn_data', (0,1), 2, rtol=1e-12),
        data(yv, 'bessel_yn_data_ipp-bessel_yn_data', (0,1j), 2, rtol=1e-12),
        data(yv, 'bessel_yv_data_ipp-bessel_yv_data', (0,1), 2, rtol=1e-10),
        data(yv, 'bessel_yv_data_ipp-bessel_yv_data', (0,1j), 2, rtol=1e-10),

        # zeta tables are filtered to s > 1, where the series converges.
        data(zeta_, 'zeta_data_ipp-zeta_data', 0, 1, param_filter=(lambda s: s > 1)),
        data(zeta_, 'zeta_neg_data_ipp-zeta_neg_data', 0, 1, param_filter=(lambda s: s > 1)),
        data(zeta_, 'zeta_1_up_data_ipp-zeta_1_up_data', 0, 1, param_filter=(lambda s: s > 1)),
        data(zeta_, 'zeta_1_below_data_ipp-zeta_1_below_data', 0, 1, param_filter=(lambda s: s > 1)),

        data(gammaincinv, 'gamma_inv_small_data_ipp-gamma_inv_small_data', (0,1), 2, rtol=1e-11),
        data(gammaincinv, 'gamma_inv_data_ipp-gamma_inv_data', (0,1), 2, rtol=1e-14),
        data(gammaincinv, 'gamma_inv_big_data_ipp-gamma_inv_big_data', (0,1), 2, rtol=1e-11),

        data(gammainccinv, 'gamma_inv_small_data_ipp-gamma_inv_small_data', (0,1), 3, rtol=1e-12),
        data(gammainccinv, 'gamma_inv_data_ipp-gamma_inv_data', (0,1), 3, rtol=1e-14),
        data(gammainccinv, 'gamma_inv_big_data_ipp-gamma_inv_big_data', (0,1), 3, rtol=1e-14),

        data(gdtrix_, 'gamma_inv_small_data_ipp-gamma_inv_small_data', (0,1), 2, rtol=3e-13, knownfailure='gdtrix unflow some points'),
        data(gdtrix_, 'gamma_inv_data_ipp-gamma_inv_data', (0,1), 2, rtol=3e-15),
        data(gdtrix_, 'gamma_inv_big_data_ipp-gamma_inv_big_data', (0,1), 2),
        data(gdtrix_comp, 'gamma_inv_small_data_ipp-gamma_inv_small_data', (0,1), 2, knownfailure='gdtrix bad some points'),
        data(gdtrix_comp, 'gamma_inv_data_ipp-gamma_inv_data', (0,1), 3, rtol=6e-15),
        data(gdtrix_comp, 'gamma_inv_big_data_ipp-gamma_inv_big_data', (0,1), 3),

        data(chndtr, 'nccs_ipp-nccs', (2,0,1), 3, rtol=3e-5),
        data(chndtr, 'nccs_big_ipp-nccs_big', (2,0,1), 3, rtol=5e-4, knownfailure='chndtr inaccurate some points'),

        # Restrict the angles to theta in [0, 2*pi), phi in [0, pi) where
        # sph_harm is well defined; the first two filters accept all rows.
        data(sph_harm_, 'spherical_harmonic_ipp-spherical_harmonic', (1,0,3,2), (4,5), rtol=5e-11,
             param_filter=(lambda p: np.ones(p.shape, '?'),
                           lambda p: np.ones(p.shape, '?'),
                           lambda p: np.logical_and(p < 2*np.pi, p >= 0),
                           lambda p: np.logical_and(p < np.pi, p >= 0))),

        data(spherical_jn_, 'sph_bessel_data_ipp-sph_bessel_data', (0,1), 2, rtol=1e-13),
        data(spherical_yn_, 'sph_neumann_data_ipp-sph_neumann_data', (0,1), 2, rtol=8e-15),

        data(owens_t, 'owenst_data_ipp-owens_t', (0, 1), 2, rtol=5e-14),
        data(owens_t, 'owenst_data_ipp-owens_t_alarge', (0, 1), 2, rtol=5e-15),

        # -- not used yet (function does not exist in scipy):
        # 'ellint_pi2_data_ipp-ellint_pi2_data',
        # 'ellint_pi3_data_ipp-ellint_pi3_data',
        # 'ellint_pi3_large_data_ipp-ellint_pi3_large_data',
        # 'ellint_rc_data_ipp-ellint_rc_data',
        # 'ellint_rd_data_ipp-ellint_rd_data',
        # 'ellint_rf_data_ipp-ellint_rf_data',
        # 'ellint_rj_data_ipp-ellint_rj_data',
        # 'ncbeta_big_ipp-ncbeta_big',
        # 'ncbeta_ipp-ncbeta',
        # 'powm1_sqrtp1m1_test_cpp-powm1_data',
        # 'powm1_sqrtp1m1_test_cpp-sqrtp1m1_data',
        # 'test_gamma_data_ipp-gammap1m1_data',
        # 'tgamma_ratio_data_ipp-tgamma_ratio_data',
]


@pytest.mark.parametrize('test', BOOST_TESTS, ids=repr)
def test_boost(test):
    # One pytest case per Boost table entry.
    _test_factory(test)


# Test cases against values computed with GSL.
GSL_TESTS = [
        data_gsl(mathieu_a, 'mathieu_ab', (0, 1), 2, rtol=1e-13, atol=1e-13),
        data_gsl(mathieu_b, 'mathieu_ab', (0, 1), 3, rtol=1e-13, atol=1e-13),

        # Also the GSL output has limited accuracy...
        data_gsl(mathieu_ce_rad, 'mathieu_ce_se', (0, 1, 2), 3, rtol=1e-7, atol=1e-13),
        data_gsl(mathieu_se_rad, 'mathieu_ce_se', (0, 1, 2), 4, rtol=1e-7, atol=1e-13),

        data_gsl(mathieu_mc1_scaled, 'mathieu_mc_ms', (0, 1, 2), 3, rtol=1e-7, atol=1e-13),
        data_gsl(mathieu_ms1_scaled, 'mathieu_mc_ms', (0, 1, 2), 4, rtol=1e-7, atol=1e-13),

        data_gsl(mathieu_mc2_scaled, 'mathieu_mc_ms', (0, 1, 2), 5, rtol=1e-7, atol=1e-13),
        data_gsl(mathieu_ms2_scaled, 'mathieu_mc_ms', (0, 1, 2), 6, rtol=1e-7, atol=1e-13),
]


@pytest.mark.parametrize('test', GSL_TESTS, ids=repr)
def test_gsl(test):
    _test_factory(test)


# Test cases against locally computed reference values.
LOCAL_TESTS = [
    data_local(ellipkinc, 'ellipkinc_neg_m', (0, 1), 2),
    data_local(ellipkm1, 'ellipkm1', 0, 1),
    data_local(ellipeinc, 'ellipeinc_neg_m', (0, 1), 2),
    data_local(clog1p, 'log1p_expm1_complex', (0,1), (2,3), rtol=1e-14),
    data_local(cexpm1, 'log1p_expm1_complex', (0,1), (4,5), rtol=1e-14),
    data_local(gammainc, 'gammainc', (0, 1), 2, rtol=1e-12),
    data_local(gammaincc, 'gammaincc', (0, 1), 2, rtol=1e-11),
    data_local(ellip_harm_2, 'ellip',(0, 1, 2, 3, 4), 6, rtol=1e-10, atol=1e-13),
    data_local(ellip_harm, 'ellip',(0, 1, 2, 3, 4), 5, rtol=1e-10, atol=1e-13),
]


@pytest.mark.parametrize('test', LOCAL_TESTS, ids=repr)
def test_local(test):
    _test_factory(test)


def _test_factory(test, dtype=np.double):
    """Run one FuncData case with warnings and FP errors silenced.

    Roundoff warnings from quadrature and floating-point error flags are
    suppressed so only genuine value mismatches fail the test; the old
    error state is restored afterwards.
    """
    with suppress_warnings() as sup:
        sup.filter(IntegrationWarning, "The occurrence of roundoff error is detected")
        olderr = np.seterr(all='ignore')
        try:
            test.check(dtype=dtype)
        finally:
            np.seterr(**olderr)
22,193
43.299401
135
py